diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 59a3ca6c4a3f..a36bf1e6ba92 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -330,8 +330,8 @@ }, { "ImportPath": "github.com/docker/distribution", - "Comment": "v2.0.1", - "Rev": "1341222284b3a6b4e77fb64571ad423ed58b0d34" + "Comment": "v2.1.1", + "Rev": "44c7fb9e6e7e58e88d38d93b5f1f6da6a54d28ff" }, { "ImportPath": "github.com/docker/docker/builder/command", @@ -811,6 +811,10 @@ "ImportPath": "github.com/spf13/pflag", "Rev": "b084184666e02084b8ccb9b704bf0d79c466eb1d" }, + { + "ImportPath": "github.com/stevvooe/resumable", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, { "ImportPath": "github.com/stretchr/objx", "Rev": "d40df0cc104c06eae2dfe03d7dddb83802d52f9a" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap index bcfe66352d5e..2c0af0605044 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap +++ b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap @@ -3,4 +3,5 @@ Stephen J Day Stephen Day Olivier Gambier Olivier Gambier Brian Bland Brian Bland Josh Hawn Josh Hawn -Richard Scothern Richard \ No newline at end of file +Richard Scothern Richard +Richard Scothern Richard Scothern diff --git a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS index 3e3dfa019c66..5da5d1c628a9 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS +++ b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS @@ -1,27 +1,79 @@ +Aaron Lehmann +Adam Enger +Adrian Mouat Ahmet Alp Balkan +Alex Chan +Alex Elman Amy Lindburg Andrey Kostov Andy Goldstein Anton Tiurin +Antonio Mercado Arnaud Porterie +Ayose Cazorla +BadZen Ben Firshman +bin liu Brian Bland +burnettk +Chris Dillon Daisuke Fujita +Darren Shepherd +Dave Trombley +Dave Tucker David Lawrence +David Verhasselt +David Xia +davidli Derek McGowan Diogo Mónica Donald Huang +Doug Davis +Florentin Raud Frederick F. Kautz IV +Henri Gomez +Hu Keping +Ian Babrou +Jeff Nickoloff Jessie Frazelle +Jianqing Wang +Jon Poler +Jordan Liggitt Josh Hawn +Julien Fernandez +Kelsey Hightower Kenneth Lim +Li Yi +Luke Carpenter Mary Anthony +Matt Bentley +Matt Robenolt +Michael Prokop +moxiegirl Nathan Sullivan +nevermosby Nghia Tran +Oilbeater Olivier Gambier +Olivier Jacques +Patrick Devine +Philip Misiowiec Richard Scothern +Sebastiaan van Stijn +Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn +Spencer Rinehart Stephen J Day +Sylvain Baubeau +tgic +Thomas Sjögren Tianon Gravi +Tibor Vass +Vincent Batts +Vincent Demeester +Vincent Giersch +W. Trevor King xiekeyang +Yann ROBERT +yuzou diff --git a/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md index 7f36c94b3db9..1a9ecb744278 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md @@ -1,100 +1,140 @@ # Contributing to the registry -## Are you having issues? +## Before reporting an issue... -Please first try any of these support forums before opening an issue: +### If your problem is with... 
- * irc #docker on freenode (archives: [https://botbot.me/freenode/docker/]) - * https://forums.docker.com/ - * if your problem is with the "hub" (the website and other user-facing components), or about automated builds, then please direct your issues to https://support.docker.com + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue -## So, you found a bug? +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) -First check if your problem was already reported in the issue tracker. +### If you... -If it's already there, please refrain from adding "same here" comments - these don't add any value and are only adding useless noise. **Said comments will quite often be deleted at sight**. On the other hand, if you have any technical, relevant information to add, by all means do! + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is -Your issue is not there? Then please, create a ticket. +Then please do not open an issue here yet - you should first try one of the following support forums: -If possible the following guidelines should be followed: + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - * try to come up with a minimal, simple to reproduce test-case - * try to add a title that describe succinctly the issue - * if you are running your own registry, please provide: - * registry version - * registry launch command used - * registry configuration - * registry logs - * in all cases: - * `docker version` and `docker info` - * run your docker daemon in debug mode (-D), and provide docker daemon logs +## Reporting an issue properly -## You have a patch for a known bug, or a small correction? +By following these simple rules you will get better and faster feedback on your issue. -Basic github workflow (fork, patch, make sure the tests pass, PR). + - search the bugtracker for an already reported issue -... and some simple rules to ensure quick merge: +### If you found an issue that describes your problem: - * clearly point to the issue(s) you want to fix - * when possible, prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - * if you need to amend your PR following comments, squash instead of adding more commits + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. -## You want some shiny new feature to be added? +### If you have not found an existing issue that describes your problem: -Fork the project. + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. 
copy the output of: + - `docker version` + - `docker info` + - `docker exec registry -version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry -Create a new proposal in the folder `open-design/specs`, named `DEP_MY_AWESOME_PROPOSAL.md`, using `open-design/specs/TEMPLATE.md` as a starting point. +## Contributing a patch for a known bug, or a small correction -Then immediately submit this new file as a pull-request, in order to get early feedback. +You should follow the basic GitHub workflow: -Eventually, you will have to update your proposal to accommodate the feedback you received. + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR -Usually, it's not advisable to start working too much on the implementation itself before the proposal receives sufficient feedback, since it can be significantly altered (or rejected). +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: -Your implementation should then be submitted as a separate PR, that will be reviewed as well. + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` -## Issue and PR labels +Some simple rules to ensure quick merge: -To keep track of the state of issues and PRs, we've adopted a set of simple labels. The following are currently in use: + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits -
-<dl>
-	<dt>Backlog</dt>
-	<dd>Issues marked with this label are considered not yet ready for implementation. Either they are untriaged or require further detail to proceed.</dd>
+## Contributing new features

-	<dt>Blocked</dt>
-	<dd>If an issue requires further clarification or is blocked on an unresolved dependency, this label should be used.</dd>
+You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.

-	<dt>Sprint</dt>
-	<dd>Issues marked with this label are being worked in the current sprint. All required information should be available and design details have been worked out.</dd>
+If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
+If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.

-	<dt>In Progress</dt>
-	<dd>The issue or PR is being actively worked on by the assignee.</dd>
+Then you should submit your implementation, clearly linking to the issue (and possible proposal).

-	<dt>Done</dt>
-	<dd>Issues marked with this label are complete. This can be considered a pseudo-label, in that if it is closed, it is considered "Done".</dd>
-</dl>
+Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.

-These integrate with waffle.io to show the current status of the project. The project board is available at the following url:
+It's mandatory to:

-https://waffle.io/docker/distribution
+ - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
+ - address maintainers' comments and modify your submission accordingly
+ - write tests for any new code

-If an issue or PR is not labeled correctly or you believe it is not in the right state, please contact a maintainer to fix the problem.
+Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.

-## Milestones
+Have a look at a great, successful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443)

-Issues and PRs should be assigned to relevant milestones. If an issue or PR is assigned a milestone, it should be available by that date. Depending on level of effort, items may be shuffled in or out of milestones. Issues or PRs that don't have a milestone are considered unscheduled. Typically, "In Progress" issues should have a milestone.
+## Coding Style

-## PR Titles
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.

-PR titles should be lowercased, except for proper noun references (such as a
-method name or type).
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and make the code base better than they left it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.

-PR titles should be prefixed with affected directories, comma separated. For
-example, if a specification is modified, the prefix would be "doc/spec". If
-the modifications are only in the root, do not include it. If multiple
-directories are modified, include each, separated by a comma and space.
+The rules:

-Here are some examples:
-
-- doc/spec: move API specification into correct position
-- context, registry, auth, auth/token, cmd/registry: context aware logging
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](http://golang.org/doc/effective_go.html). The
+[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile
index 24d18722356a..041016f0924f 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile
+++ b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile
@@ -1,12 +1,19 @@
 FROM golang:1.4
 
+RUN apt-get update && \
+    apt-get install -y librados-dev apache2-utils && \
+    rm -rf /var/lib/apt/lists/*
+
 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
 ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH
+ENV DOCKER_BUILDTAGS include_rados
 
 WORKDIR $DISTRIBUTION_DIR
 COPY . $DISTRIBUTION_DIR
+COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
 RUN make PREFIX=/go clean binaries
 
+VOLUME ["/var/lib/registry"]
 EXPOSE 5000
 ENTRYPOINT ["registry"]
-CMD ["cmd/registry/config.yml"]
+CMD ["/etc/docker/registry/config.yml"]
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json
index ab255849d6f6..355596dffe8d 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json
+++ b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json
@@ -5,27 +5,21 @@
 		"./..."
], "Deps": [ - { - "ImportPath": "code.google.com/p/go-uuid/uuid", - "Comment": "null-15", - "Rev": "35bc42037350f0078e3c974c6ea690f1926603ab" - }, { "ImportPath": "github.com/AdRoll/goamz/aws", - "Rev": "cc210f45dcb9889c2769a274522be2bf70edfb99" + "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" }, { "ImportPath": "github.com/AdRoll/goamz/cloudfront", - "Rev": "cc210f45dcb9889c2769a274522be2bf70edfb99" + "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" }, { "ImportPath": "github.com/AdRoll/goamz/s3", - "Rev": "cc210f45dcb9889c2769a274522be2bf70edfb99" + "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" }, { - "ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/storage", - "Comment": "v1.2-43-gd90753b", - "Rev": "d90753bcad2ed782fcead7392d1e831df29aa2bb" + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc" }, { "ImportPath": "github.com/Sirupsen/logrus", @@ -51,14 +45,17 @@ "Rev": "6086d7927ec35315964d9fea46df6c04e6d697c1" }, { - "ImportPath": "github.com/docker/docker/pkg/tarsum", - "Comment": "v1.4.1-863-g165ea5c", - "Rev": "165ea5c158cff3fc40d476ffe233a5ccc03e7d61" + "ImportPath": "github.com/denverdino/aliyungo/oss", + "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/util", + "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" }, { - "ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar", - "Comment": "v1.4.1-863-g165ea5c", - "Rev": "165ea5c158cff3fc40d476ffe233a5ccc03e7d61" + "ImportPath": "github.com/docker/docker/pkg/tarsum", + "Comment": "v1.4.1-3932-gb63ec6e", + "Rev": "b63ec6e4b1f6f5c77a6a74a52fcea9564538c575" }, { "ImportPath": "github.com/docker/libtrust", @@ -78,15 +75,28 @@ }, { "ImportPath": "github.com/gorilla/handlers", - "Rev": "0e84b7d810c16aed432217e330206be156bafae0" + "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" }, { "ImportPath": "github.com/gorilla/mux", "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" }, { - "ImportPath": "github.com/jlhawn/go-crypto", - "Rev": "cd738dde20f0b3782516181b0866c9bb9db47401" + "ImportPath": "github.com/noahdesu/go-ceph/rados", + "Comment": "v.0.3.0-29-gb15639c", + "Rev": "b15639c44c05368348355229070361395d9152ee" + }, + { + "ImportPath": "github.com/stevvooe/resumable", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" + }, + { + "ImportPath": "github.com/ncw/swift", + "Rev": "22c8fa9fb5ba145b4d4e2cebb027e84b1a7b1296" }, { "ImportPath": "github.com/yvasiyarov/go-metrics", @@ -101,6 +111,14 @@ "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" }, + { + "ImportPath": "golang.org/x/crypto/bcrypt", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/blowfish", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, { "ImportPath": "golang.org/x/net/context", "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/Makefile index 974d0191d96d..b9dbf66acdf0 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/Makefile +++ b/Godeps/_workspace/src/github.com/docker/distribution/Makefile @@ -1,8 +1,16 @@ # Set an output prefix, which is the local directory if 
not specified
PREFIX?=$(shell pwd)
 
 # Used to populate version variable in main package.
 VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
+
+# Allow turning off function inlining and variable registerization
+ifeq (${DISABLE_OPTIMIZATION},true)
+	GO_GCFLAGS=-gcflags "-N -l"
+	VERSION:="$(VERSION)-noopt"
+endif
+
 GO_LDFLAGS=-ldflags "-X `go list ./version`.Version $(VERSION)"
 
 .PHONY: clean all fmt vet lint build test binaries
@@ -18,20 +26,18 @@ version/version.go:
 
 ${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go')
 	@echo "+ $@"
-	@go build -o $@ ${GO_LDFLAGS} ./cmd/registry
+	@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
 
 ${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . -type f -name '*.go')
 	@echo "+ $@"
-	@go build -o $@ ${GO_LDFLAGS} ./cmd/registry-api-descriptor-template
-
-${PREFIX}/bin/dist: version/version.go $(shell find . -type f -name '*.go')
-	@echo "+ $@"
-	@go build -o $@ ${GO_LDFLAGS} ./cmd/dist
+	@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
 
 docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
 	./bin/registry-api-descriptor-template $< > $@
 
-vet:
+# Depends on binaries because vet will silently fail if it can't load compiled
+# imports
+vet: binaries
 	@echo "+ $@"
 	@go vet ./...
 
@@ -46,36 +52,19 @@ lint:
 
 build:
 	@echo "+ $@"
-	@go build -v ${GO_LDFLAGS} ./...
+	@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ./...
 
 test:
 	@echo "+ $@"
-	@go test -test.short ./...
+	@go test -test.short -tags "${DOCKER_BUILDTAGS}" ./...
 
 test-full:
 	@echo "+ $@"
 	@go test ./...
 
-binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/registry-api-descriptor-template ${PREFIX}/bin/dist
+binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/registry-api-descriptor-template
 	@echo "+ $@"
 
 clean:
 	@echo "+ $@"
 	@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template"
-
-
-# Use the existing docs build cmds from docker/docker
-# Later, we will move this into an import
-DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
-DOCSPORT := 8000
-DOCKER_DOCS_IMAGE := docker-docs-$(VERSION)
-DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
-
-docs: docs-build
-	$(DOCKER_RUN_DOCS) -p $(DOCSPORT):8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve
-
-docs-shell: docs-build
-	$(DOCKER_RUN_DOCS) -p $(DOCSPORT):8000 "$(DOCKER_DOCS_IMAGE)" bash
-
-docs-build:
-	docker build -t "$(DOCKER_DOCS_IMAGE)" -f docs/Dockerfile .
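The `GO_LDFLAGS` above stamp the binary with the git-derived version at link time. A sketch of the consuming side, a package exposing a variable for the linker to overwrite (the default value here is illustrative, not the project's actual one):

```go
// Package version exposes the build-time version. A var (not a const) is
// required: -ldflags "-X <import/path>.Version <value>" can only overwrite
// variables at link time.
package version

// Version is stamped by the linker; the fallback marks unstamped builds.
var Version = "v0.0.0+unknown"
```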
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/README.md b/Godeps/_workspace/src/github.com/docker/distribution/README.md
index ac7672580ffb..f75216694a3e 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/README.md
+++ b/Godeps/_workspace/src/github.com/docker/distribution/README.md
@@ -7,13 +7,14 @@ for storing and distributing Docker images. It supersedes the [docker/docker-
 registry](https://github.com/docker/docker-registry) project with a new API
 design, focused around security and performance.
 
+
+
 This repository contains the following components:
 
 |**Component**       |Description |
 |--------------------|------------|
 | **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
 | **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
-| **dist**           | An _experimental_ tool to provide distribution oriented functionality without the `docker` daemon. |
 | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
 | **documentation**  | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |
@@ -61,12 +62,20 @@ if they have a [Docker Hub](https://hub.docker.com/) account.
 
 For some users and even companies, this default behavior is sufficient. For
 others, it is not.
 
-For example, users with their own software products and may want to maintain an
+For example, users with their own software products may want to maintain a
 registry for private, company images. Also, you may wish to deploy your own
 image repository for images used to test or in continuous integration. For
 these use cases and others, [deploying your own registry instance](docs/deploying.md)
 may be the better choice.
 
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see [docker/migrator]
+(https://github.com/docker/migrator).
+
 ## Contribute
 
 Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md b/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md
index fddc71b354d0..cbf53881eed1 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md
+++ b/Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md
@@ -1,11 +1,17 @@
 # Roadmap
 
-The Distribution Project consists of several components, some of which are still being defined. This document defines the high-level goals of the project, identifies the current components, and defines the release-relationship to the Docker Platform.
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the release-
+relationship to the Docker Platform.
 
 * [Distribution Goals](#distribution-goals)
 * [Distribution Components](#distribution-components)
 * [Project Planning](#project-planning): release-relationship to the Docker Platform.
 
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
 ## Distribution Goals
 
 - Replace the existing [docker registry](github.com/docker/docker-registry)
@@ -14,6 +20,7 @@ The Distribution Project consists of several components, some of which are still
   distribution package.
 - Define a strong data model for distributing docker images
 - Provide a flexible distribution tool kit for use in the docker platform
+- Unlock new distribution models
 
 ## Distribution Components
 
@@ -29,41 +36,216 @@ implementation.
 
 ### Registry
 
-Registry 2.0 is the first release of the next-generation registry.
This is primarily -focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/doc/spec/api.md), with -a focus on security and performance. +The new Docker registry is the main portion of the distribution repository. +Registry 2.0 is the first release of the next-generation registry. This was +primarily focused on implementing the [new registry +API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), +with a focus on security and performance. -#### Registry 2.0 +Following from the Distribution project goals above, we have a set of goals +for registry v2 that we would like to follow in the design. New features +should be compared against these goals. -Features: +#### Data Storage and Distribution First -- Faster push and pull -- New, more efficient implementation -- Simplified deployment -- Full API specification for V2 protocol -- Pluggable storage system (s3, azure, filesystem and inmemory supported) -- Immutable manifest references ([#46](https://github.com/docker/distribution/issues/46)) -- Webhook notification system ([#42](https://github.com/docker/distribution/issues/42)) -- Native TLS Support ([#132](https://github.com/docker/distribution/pull/132)) -- Pluggable authentication system -- Health Checks ([#230](https://github.com/docker/distribution/pull/230)) +The registry's first goal is to provide a reliable, consistent storage +location for Docker images. The registry should only provide the minimal +amount of indexing required to fetch image data and no more. -#### Registry 2.1 +This means we should be selective in new features and API additions, including +those that may require expensive, ever growing indexes. Requests should be +servable in "constant time". -Planned Features: +#### Content Addressability -> **NOTE:** This feature list is incomplete at this time. +All data objects used in the registry API should be content addressable. +Content identifiers should be secure and verifiable. This provides a secure, +reliable base from which to build more advanced content distribution systems. -- Support for Manifest V2, Schema 2 and explicit tagging objects ([#62](https://github.com/docker/distribution/issues/62), [#173](https://github.com/docker/distribution/issues/173)) -- Mirroring ([#19](https://github.com/docker/distribution/issues/19)) -- Flexible client package based on distribution interfaces ([#193](https://github.com/docker/distribution/issues/193) +#### Content Agnostic -#### Registry 2.2 +In the past, changes to the image format would require large changes in Docker +and the Registry. By decoupling the distribution and image format, we can +allow the formats to progress without having to coordinate between the two. +This means that we should be focused on decoupling Docker from the registry +just as much as decoupling the registry from Docker. Such an approach will +allow us to unlock new distribution models that haven't been possible before. -TBD +We can take this further by saying that the new registry should be content +agnostic. The registry provides a model of names, tags, manifests and content +addresses and that model can be used to work with content. -*** +#### Simplicity + +The new registry should be closer to a microservice component than its +predecessor. This means it should have a narrower API and a low number of +service dependencies. It should be easy to deploy. + +This means that other solutions should be explored before changing the API or +adding extra dependencies. 
If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points to add functionality, keeping
+the core scope narrow while still allowing new behavior to be layered on.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigid design process before
+landing in the registry.
+
+##### Mirroring and Pull-through Caching
+
+Mirroring and pull-through caching are related but slightly different. We've
+adopted the term _mirroring_ to mean a proper mirror of a registry, meaning it
+has all the content the upstream would have. Providing such mirrors in the
+Docker ecosystem is dependent on a solid trust system, which is still in the
+works.
+
+The more commonly helpful feature is _pull-through caching_, where data is
+fetched from an upstream when not available in a local registry instance.
+
+Please see the following issues:
+
+- https://github.com/docker/distribution/issues/459
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 because we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone attempts to address the problem with the existing tools
+and proposes it as a standard search API, or uses it to inform a
+standardization process. Once this has been explored, we integrate with the
+docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much asked for feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal but ideally, both are supported. Generally, according to this rule, we
+err on holding data longer than needed, ensuring that it is only removed when
+we can be certain that it can be removed. With the current behavior, we opt to
+hold onto the data forever, ensuring that data cannot be incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in
+reverse-dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we also can try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
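To make the reference structure concrete, here is a minimal sketch of that DAG in Go. The types are illustrative only (the registry's real data model lives behind the storage drivers), but they show why deciding whether a blob is referenced is a reachability question over tags and manifests:

```go
package main

import "fmt"

// Digest stands in for a content address (e.g. a sha256 string). It is a
// hypothetical type for illustration, not the registry's digest package.
type Digest string

// Manifest references its layer blobs by digest; tags reference manifests.
// Together they form the directed acyclic graph described above.
type Manifest struct {
	Digest Digest
	Layers []Digest
}

// Repository holds tag and manifest indexes over a shared blob set.
type Repository struct {
	Tags      map[string]Digest
	Manifests map[Digest]Manifest
}

// live returns every digest reachable from some tag: the set that a delete
// operation must treat as referenced.
func (r Repository) live() map[Digest]bool {
	seen := make(map[Digest]bool)
	for _, md := range r.Tags {
		m, ok := r.Manifests[md]
		if !ok {
			continue
		}
		seen[m.Digest] = true
		for _, layer := range m.Layers {
			seen[layer] = true
		}
	}
	return seen
}

func main() {
	r := Repository{
		Tags: map[string]Digest{"latest": "sha256:aa"},
		Manifests: map[Digest]Manifest{
			"sha256:aa": {Digest: "sha256:aa", Layers: []Digest{"sha256:01", "sha256:02"}},
		},
	}
	fmt.Println(len(r.live()), "live blobs") // 3: the manifest and its two layers
}
```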
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: 1. maintaining a consistent consensus
+  of reference counts across a set of Registries and 2. building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to above but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments, but it would create a bottleneck for
+  registry metadata. However, metadata is generally not the main bottleneck
+  when serving images.
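As a sketch of the simplest of these options, once writes are halted the "Lock the World" approach reduces to a mark-and-sweep pass. The in-memory maps below stand in for the real storage backend; this is an illustration of the idea, not registry code:

```go
package main

import "fmt"

// markAndSweep deletes every blob no manifest references. It is only safe
// while writes are halted: a manifest accepted mid-sweep could reference a
// blob this pass is about to delete, which is exactly the race described
// above.
func markAndSweep(blobs map[string][]byte, manifests map[string][]string) {
	// Mark: record every digest referenced by any manifest.
	marked := make(map[string]bool)
	for _, refs := range manifests {
		for _, dgst := range refs {
			marked[dgst] = true
		}
	}

	// Sweep: drop blobs that were never marked.
	for dgst := range blobs {
		if !marked[dgst] {
			delete(blobs, dgst)
			fmt.Println("deleted unreferenced blob", dgst)
		}
	}
}

func main() {
	blobs := map[string][]byte{
		"sha256:01": nil, // referenced
		"sha256:02": nil, // referenced
		"sha256:zz": nil, // orphaned
	}
	manifests := map[string][]string{
		"sha256:aa": {"sha256:01", "sha256:02"},
	}
	markAndSweep(blobs, manifests) // removes only sha256:zz
}
```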
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple, but the amount of work
+in coordination may offset the extra work it might take to build a
+_Centralized Oracle_. We'll accept proposals for any solution but please
+coordinate with us before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
 
 ### Distribution Package
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/blobs.go b/Godeps/_workspace/src/github.com/docker/distribution/blobs.go
new file mode 100644
index 000000000000..36a01e768930
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/blobs.go
@@ -0,0 +1,214 @@
+package distribution
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+var (
+	// ErrBlobExists returned when blob already exists
+	ErrBlobExists = errors.New("blob exists")
+
+	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
+	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+	// ErrBlobUnknown when blob is not found.
+	ErrBlobUnknown = errors.New("unknown blob")
+
+	// ErrBlobUploadUnknown returned when upload is not found.
+	ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+	// ErrBlobInvalidLength returned when the blob has an expected length on
+	// commit, meaning mismatched with the descriptor or an invalid value.
+	ErrBlobInvalidLength = errors.New("blob invalid length")
+)
+
+// ErrBlobInvalidDigest returned when digest check fails.
+type ErrBlobInvalidDigest struct {
+	Digest digest.Digest
+	Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+		err.Digest, err.Reason)
+}
+
+// Descriptor describes targeted content.
+// Used in conjunction with a blob store, a descriptor can be used to fetch,
+// store and target any kind of blob. The struct also describes the wire
+// protocol format. Fields should only be added but never changed.
+type Descriptor struct {
+	// MediaType describes the type of the content. All text based formats are
+	// encoded as utf-8.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
+
+	// Digest uniquely identifies the content. A byte stream can be verified
+	// against this digest.
+	Digest digest.Digest `json:"digest,omitempty"`
+
+	// NOTE: Before adding a field here, please ensure that all
+	// other options have been exhausted. Many of the type relationships
+	// depend on the simplicity of this type.
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+	// Stat provides metadata about a blob identified by the digest. If the
+	// blob is unknown to the describer, ErrBlobUnknown will be returned.
+	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobEnumerator allows listing blobs in storage.
+type BlobEnumerator interface {
+	// Enumerate fills 'digests' with a lexicographically sorted list of
+	// digests up to the size of 'digests' and returns the value 'n' for the
+	// number of entries which were filled. 'last' contains an offset in the
+	// list. It's a string representation of a digest. An empty string means 0
+	// offset. 'err' will be set to io.EOF if there are no more entries to
+	// obtain.
+	Enumerate(ctx context.Context, digests []digest.Digest, last string) (n int, err error)
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+	BlobStatter
+
+	// SetDescriptor assigns the descriptor to the digest. The provided digest and
+	// the digest in the descriptor must map to identical content but they may
+	// differ on their algorithm. The descriptor must have the canonical
+	// digest of the content and the digest algorithm must match the
+	// annotator's canonical algorithm.
+	//
+	// Such a facility can be used to map blobs between digest domains, with
+	// the restriction that the algorithm of the descriptor must match the
+	// canonical algorithm (ie sha256) of the annotator.
+	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
+
+	// Clear enables descriptors to be unlinked
+	Clear(ctx context.Context, dgst digest.Digest) error
+}
+
+// ReadSeekCloser is the primary reader type for blob data, combining
+// io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// BlobProvider describes operations for getting blob data.
+type BlobProvider interface {
+	// Get returns the entire blob identified by digest along with the descriptor.
+	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
+
+	// Open provides a ReadSeekCloser to the blob identified by the provided
+	// descriptor. If the blob is not known to the service, an error will be
+	// returned.
+	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
+}
+
+// BlobServer can serve blobs via http.
+type BlobServer interface {
+	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
+	// service may decide to redirect the client elsewhere or serve the data
+	// directly.
+	//
+	// This handler only issues successful responses, such as 2xx or 3xx,
+	// meaning it serves data or issues a redirect. If the blob is not
+	// available, an error will be returned and the caller may still issue a
+	// response.
+	//
+	// The implementation may serve the same blob from a different digest
+	// domain. The appropriate headers will be set for the blob, unless they
+	// have already been set by the caller.
+	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+	// Put inserts the content p into the blob service, returning a descriptor
+	// or an error.
+	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+	// Create allocates a new blob writer to add a blob to this service. The
+	// returned handle can be written to and later resumed using an opaque
+	// identifier. With this approach, one can Close and Resume a BlobWriter
+	// multiple times until the BlobWriter is committed or cancelled.
+	Create(ctx context.Context) (BlobWriter, error)
+
+	// Resume attempts to resume a write to a blob, identified by an id.
+	Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobWriteService.Writer and
+// BlobWriteService.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+	io.WriteSeeker
+	io.ReaderFrom
+	io.Closer
+
+	// ID returns the identifier for this writer. The ID can be used with the
+	// Blob service to later resume the write.
+	ID() string
+
+	// StartedAt returns the time this blob write was started.
+	StartedAt() time.Time
+
+	// Commit completes the blob writer process. The content is verified
+	// against the provided provisional descriptor, which may result in an
+	// error. Depending on the implementation, written data may be validated
+	// against the provisional descriptor fields. If MediaType is not present,
+	// the implementation may reject the commit or assign "application/octet-
+	// stream" to the blob. The returned descriptor may have a different
+	// digest depending on the blob store, referred to as the canonical
+	// descriptor.
+	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+	// Cancel ends the blob write without storing any data and frees any
+	// associated resources. Any data written thus far will be lost. Cancel
+	// implementations should allow multiple calls even after a commit that
+	// result in a no-op. This allows use of Cancel in a defer statement,
+	// increasing the assurance that it is correctly called.
+	Cancel(ctx context.Context) error
+
+	// Reader returns a reader to the blob being written by this BlobWriter.
+	Reader() (io.ReadCloser, error)
+}
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
+type BlobService interface {
+	BlobStatter
+	BlobProvider
+	BlobIngester
+}
+
+// BlobStore represents the entire suite of blob related operations. Such an
+// implementation can access, enumerate, read, write, delete and serve blobs.
+type BlobStore interface {
+	BlobService
+	BlobServer
+	BlobEnumerator
+	BlobDeleter
+}
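Taken together, these interfaces support a write-then-read round trip. The sketch below is illustrative: it assumes a concrete BlobStore and context are supplied elsewhere (this file defines no constructor), and it derives the provisional digest by hand rather than relying on any particular helper:

```go
package blobexample

import (
	"crypto/sha256"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// roundTrip pushes p into the blob store and reads it back. How the
// BlobStore itself is obtained is implementation-specific.
func roundTrip(ctx context.Context, blobs distribution.BlobStore, p []byte) error {
	// Ingest via BlobIngester: open a writer and stream the content.
	bw, err := blobs.Create(ctx)
	if err != nil {
		return err
	}
	if _, err := bw.Write(p); err != nil {
		return err
	}

	// Commit against a provisional descriptor; the store returns the
	// canonical descriptor, possibly from a different digest domain.
	provisional := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(p)),
		Digest:    digest.Digest(fmt.Sprintf("sha256:%x", sha256.Sum256(p))),
	}
	canonical, err := bw.Commit(ctx, provisional)
	if err != nil {
		return err
	}

	// Read back through BlobStatter and BlobProvider.
	if _, err := blobs.Stat(ctx, canonical.Digest); err != nil {
		return err
	}
	_, err = blobs.Get(ctx, canonical.Digest)
	return err
}
```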
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/circle.yml b/Godeps/_workspace/src/github.com/docker/distribution/circle.yml
index b841cdeccb4b..7bd483736787 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/circle.yml
+++ b/Godeps/_workspace/src/github.com/docker/distribution/circle.yml
@@ -3,6 +3,9 @@ machine:
   pre:
   # Install gvm
     - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
+  # Install ceph to test rados driver & create pool
+    - sudo -i ~/distribution/contrib/ceph/ci-setup.sh
+    - ceph osd pool create docker-distribution 1
 
   post:
   # Install many go versions
@@ -18,8 +21,11 @@ machine:
     BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR
     BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
 #   BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR
+    DOCKER_BUILDTAGS: "include_rados include_oss"
 # Workaround Circle parsing dumb bugs and/or YAML wonkyness
     CIRCLE_PAIN: "mode: set"
+  # Ceph config
+    RADOS_POOL: "docker-distribution"
 
   hosts:
   # Not used yet
@@ -58,7 +64,9 @@ dependencies:
   # For the stable go version, additionally install linting tools
     - >
       gvm use stable &&
-      go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint
+      go get github.com/axw/gocov/gocov github.com/golang/lint/golint
+      # Disabling goveralls for now
+      # go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint
 
 test:
   pre:
     - gvm use old && go version
     - gvm use stable && go version
 #   - gvm use bleed && go version
 
+    # First thing: build everything. This will catch compile errors, and it's
+    # also necessary for go vet to work properly (see #807).
+    - gvm use stable && godep go install ./...:
+        pwd: $BASE_STABLE
+
   # FMT
     - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)":
         pwd: $BASE_STABLE
@@ -94,8 +107,7 @@ test:
     - echo "$CIRCLE_PAIN" > ~/goverage.report
     - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out:
         pwd: $BASE_STABLE
-
-    - gvm use stable; go list ./... | xargs -L 1 -I{} godep go test -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}:
+    - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./...
| xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}: timeout: 600 pwd: $BASE_STABLE diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/list.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/list.go deleted file mode 100644 index e540d4d854ab..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/list.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import "github.com/codegangsta/cli" - -var ( - commandList = cli.Command{ - Name: "images", - Usage: "List available images", - Action: imageList, - } -) - -func imageList(c *cli.Context) { -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/main.go deleted file mode 100644 index 34a2b514e776..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/main.go +++ /dev/null @@ -1,21 +0,0 @@ -package main - -import ( - "os" - - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "dist" - app.Usage = "Package and ship Docker content" - - app.Action = commandList.Action - app.Commands = []cli.Command{ - commandList, - commandPull, - commandPush, - } - app.Run(os.Args) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/pull.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/pull.go deleted file mode 100644 index 8f96129cde91..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/pull.go +++ /dev/null @@ -1,21 +0,0 @@ -package main - -import "github.com/codegangsta/cli" - -var ( - commandPull = cli.Command{ - Name: "pull", - Usage: "Pull and verify an image from a registry", - Action: imagePull, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "r,registry", - Value: "hub.docker.io", - Usage: "Registry to use (e.g.: localhost:5000)", - }, - }, - } -) - -func imagePull(c *cli.Context) { -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/push.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/push.go deleted file mode 100644 index c39922aa4095..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/push.go +++ /dev/null @@ -1,21 +0,0 @@ -package main - -import "github.com/codegangsta/cli" - -var ( - commandPush = cli.Command{ - Name: "push", - Usage: "Push an image to a registry", - Action: imagePush, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "r,registry", - Value: "hub.docker.io", - Usage: "Registry to use (e.g.: localhost:5000)", - }, - }, - } -) - -func imagePush(*cli.Context) { -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go index 5118d330c1a3..61eab8ecd41b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go @@ -4,7 +4,7 @@ // For example, to generate a new API specification, one would execute the // following command from the repo root: // -// $ registry-api-descriptor-template doc/spec/api.md.tmpl > doc/spec/api.md +// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md // // The templates are passed in the 
api/v2.APIDescriptor object. Please see the // package documentation for fields available on that object. The template @@ -20,6 +20,7 @@ import ( "regexp" "text/template" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" ) @@ -44,7 +45,18 @@ func main() { tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) - if err := tmpl.Execute(os.Stdout, v2.APIDescriptor); err != nil { + data := struct { + RouteDescriptors []v2.RouteDescriptor + ErrorDescriptors []errcode.ErrorDescriptor + }{ + RouteDescriptors: v2.APIDescriptor.RouteDescriptors, + ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"), + // The following are part of the specification but provided by errcode default. + errcode.ErrorCodeUnauthorized.Descriptor(), + errcode.ErrorCodeUnsupported.Descriptor()), + } + + if err := tmpl.Execute(os.Stdout, data); err != nil { log.Fatalln(err) } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-azure/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-azure/main.go deleted file mode 100644 index 8083d23b44c5..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-azure/main.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build ignore - -package main - -import ( - "encoding/json" - "os" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/storage/driver/azure" - "github.com/docker/distribution/registry/storage/driver/ipc" -) - -// An out-of-process Azure Storage driver, intended to be run by ipc.NewDriverClient -func main() { - parametersBytes := []byte(os.Args[1]) - var parameters map[string]interface{} - err := json.Unmarshal(parametersBytes, ¶meters) - if err != nil { - panic(err) - } - - driver, err := azure.FromParameters(parameters) - if err != nil { - panic(err) - } - - if err := ipc.StorageDriverServer(driver); err != nil { - log.Fatalln("driver error:", err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-filesystem/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-filesystem/main.go deleted file mode 100644 index 93756d2c010f..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-filesystem/main.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build ignore - -package main - -import ( - "encoding/json" - "os" - - "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/registry/storage/driver/filesystem" - "github.com/docker/distribution/registry/storage/driver/ipc" -) - -// An out-of-process filesystem driver, intended to be run by ipc.NewDriverClient -func main() { - parametersBytes := []byte(os.Args[1]) - var parameters map[string]string - err := json.Unmarshal(parametersBytes, ¶meters) - if err != nil { - panic(err) - } - - if err := ipc.StorageDriverServer(filesystem.FromParameters(parameters)); err != nil { - logrus.Fatalln(err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-inmemory/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-inmemory/main.go deleted file mode 100644 index d957cc25405c..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-inmemory/main.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build ignore - -package main 
- -import ( - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/registry/storage/driver/ipc" -) - -// An out-of-process inmemory driver, intended to be run by ipc.NewDriverClient -// This exists primarily for example and testing purposes -func main() { - if err := ipc.StorageDriverServer(inmemory.New()); err != nil { - logrus.Fatalln(err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-s3/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-s3/main.go deleted file mode 100644 index 8604fef076be..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-s3/main.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build ignore - -package main - -import ( - "encoding/json" - "os" - - "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/registry/storage/driver/ipc" - "github.com/docker/distribution/registry/storage/driver/s3" -) - -// An out-of-process S3 driver, intended to be run by ipc.NewDriverClient -func main() { - parametersBytes := []byte(os.Args[1]) - var parameters map[string]string - err := json.Unmarshal(parametersBytes, ¶meters) - if err != nil { - panic(err) - } - - driver, err := s3.FromParameters(parameters) - if err != nil { - panic(err) - } - - if err := ipc.StorageDriverServer(driver); err != nil { - logrus.Fatalln(err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml similarity index 84% rename from Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config.yml rename to Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml index b1a8f48dceeb..0b524043d841 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config.yml +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml @@ -6,9 +6,9 @@ log: environment: development storage: cache: - layerinfo: inmemory + blobdescriptor: redis filesystem: - rootdirectory: /tmp/registry-dev + rootdirectory: /var/lib/registry-cache maintenance: uploadpurging: enabled: false @@ -42,4 +42,7 @@ notifications: threshold: 10 backoff: 1s disabled: true - \ No newline at end of file +proxy: + remoteurl: https://registry-1.docker.io + username: username + password: password diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml new file mode 100644 index 000000000000..3f4616d893f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml @@ -0,0 +1,60 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com +storage: + delete: + enabled: true + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + debug: + addr: localhost:5001 +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + 
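# the dial/read/write timeouts below bound each redis operation; idletimeout recycles stale pooled connections +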
readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-5003 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml new file mode 100644 index 000000000000..cb91e63d6c22 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml @@ -0,0 +1,11 @@ +version: 0.1 +log: + fields: + service: registry +storage: + cache: + layerinfo: inmemory + filesystem: + rootdirectory: /var/lib/registry +http: + addr: :5000 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go index 52eecf8f217a..2832c0d76db2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go @@ -17,15 +17,21 @@ import ( "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" - _ "github.com/docker/distribution/health" + "github.com/docker/distribution/health" + _ "github.com/docker/distribution/registry/auth/htpasswd" _ "github.com/docker/distribution/registry/auth/silly" _ "github.com/docker/distribution/registry/auth/token" "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/listener" + _ "github.com/docker/distribution/registry/proxy" _ "github.com/docker/distribution/registry/storage/driver/azure" _ "github.com/docker/distribution/registry/storage/driver/filesystem" _ "github.com/docker/distribution/registry/storage/driver/inmemory" _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" + _ "github.com/docker/distribution/registry/storage/driver/oss" _ "github.com/docker/distribution/registry/storage/driver/s3" + _ "github.com/docker/distribution/registry/storage/driver/swift" + "github.com/docker/distribution/uuid" "github.com/docker/distribution/version" gorhandlers "github.com/gorilla/handlers" "github.com/yvasiyarov/gorelic" @@ -59,22 +65,53 @@ func main() { fatalf("error configuring logger: %v", err) } + // inject a logger into the uuid library. warns us if there is a problem + // with uuid generation under low entropy. 
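+ // (uuid.Loggerf is the vendored uuid package's pluggable logging function; + // pointing it at the app logger keeps these warnings in the registry's logs.)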
+ uuid.Loggerf = context.GetLogger(ctx).Warnf + app := handlers.NewApp(ctx, *config) + app.RegisterHealthChecks() handler := configureReporting(app) + handler = panicHandler(handler) + handler = health.Handler(handler) handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) if config.HTTP.Debug.Addr != "" { go debugServer(config.HTTP.Debug.Addr) } - if config.HTTP.TLS.Certificate == "" { - context.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) - if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil { - context.GetLogger(app).Fatalln(err) - } - } else { + server := &http.Server{ + Handler: handler, + } + + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) + if err != nil { + context.GetLogger(app).Fatalln(err) + } + defer ln.Close() + + if config.HTTP.TLS.Certificate != "" { tlsConf := &tls.Config{ - ClientAuth: tls.NoClientCert, + ClientAuth: tls.NoClientCert, + NextProtos: []string{"http/1.1"}, + Certificates: make([]tls.Certificate, 1), + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + context.GetLogger(app).Fatalln(err) } if len(config.HTTP.TLS.ClientCAs) != 0 { @@ -99,16 +136,14 @@ func main() { tlsConf.ClientCAs = pool } - context.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) - server := &http.Server{ - Addr: config.HTTP.Addr, - Handler: handler, - TLSConfig: tlsConf, - } + ln = tls.NewListener(ln, tlsConf) + context.GetLogger(app).Infof("listening on %v, tls", ln.Addr()) + } else { + context.GetLogger(app).Infof("listening on %v", ln.Addr()) + } - if err := server.ListenAndServeTLS(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key); err != nil { - context.GetLogger(app).Fatalln(err) - } + if err := server.Serve(ln); err != nil { + context.GetLogger(app).Fatalln(err) } } @@ -141,6 +176,8 @@ func resolveConfiguration() (*configuration.Configuration, error) { return nil, err } + defer fp.Close() + config, err := configuration.Parse(fp) if err != nil { return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) @@ -262,3 +299,17 @@ func debugServer(addr string) { log.Fatalf("error listening on debug interface: %v", err) } } + +// panicHandler wraps an HTTP handler so that a panic raised while serving a +// request is recovered and logged. logrus.Panic transmits the panic message to +// the pre-configured log hooks defined in config.yml.
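+// Note that logrus.Panic logs the message at panic level, firing any configured +// hooks (such as the mail hook), before panicking again.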
+func panicHandler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + log.Panic(fmt.Sprintf("%v", err)) + } + }() + handler.ServeHTTP(w, r) + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go new file mode 100644 index 000000000000..e7ea770aebb2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go @@ -0,0 +1,5 @@ +// +build include_rados + +package main + +import _ "github.com/docker/distribution/registry/storage/driver/rados" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go index 074471b4fd08..502dab3ec9ec 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go @@ -29,6 +29,10 @@ type Configuration struct { // Fields allows users to specify static string fields to include in // the logger context. Fields map[string]interface{} `yaml:"fields,omitempty"` + + // Hooks allows users to configure log hooks, enabling follow-up handling + // whenever log messages at the defined levels are emitted. + Hooks []LogHook `yaml:"hooks,omitempty"` } // Loglevel is the level at which registry operations are logged. This is @@ -54,6 +58,9 @@ type Configuration struct { // Addr specifies the bind address for the registry instance. Addr string `yaml:"addr,omitempty"` + // Net specifies the net portion of the bind address. A default empty value means tcp. + Net string `yaml:"net,omitempty"` + Prefix string `yaml:"prefix,omitempty"` // Secret specifies the secret key which HMAC tokens are created with. @@ -121,6 +128,49 @@ type Configuration struct { IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` } `yaml:"pool,omitempty"` } `yaml:"redis,omitempty"` + + Proxy Proxy `yaml:"proxy,omitempty"` +} + +// LogHook is composed of a hook Type and the log Levels it fires on. +// Once configured, a hook executes its handler automatically whenever a log +// message at one of the defined levels is emitted. +// Example: a hook can send an email notification when an error is logged in the app. +type LogHook struct { + // Disabled lets the user enable or disable the hook. + Disabled bool `yaml:"disabled,omitempty"` + + // Type allows the user to select which type of hook handler to use. + Type string `yaml:"type,omitempty"` + + // Levels sets the log levels that trigger the hook. + Levels []string `yaml:"levels,omitempty"` + + // MailOptions allows the user to configure email parameters. + MailOptions MailOptions `yaml:"options,omitempty"` +} + +// MailOptions provides the configuration section for the mail hook handler. +type MailOptions struct { + SMTP struct { + // Addr defines the smtp host address + Addr string `yaml:"addr,omitempty"` + + // Username defines the user name for the smtp host + Username string `yaml:"username,omitempty"` + + // Password defines the password of the login user + Password string `yaml:"password,omitempty"` + + // Insecure defines whether the smtp login skips certificate verification.
+ Insecure bool `yaml:"insecure,omitempty"` + } `yaml:"smtp,omitempty"` + + // From defines mail sending address + From string `yaml:"from,omitempty"` + + // To defines mail receiving address + To []string `yaml:"to,omitempty"` } // v0_1Configuration is a Version 0.1 Configuration struct @@ -192,6 +242,10 @@ func (storage Storage) Type() string { // allow configuration of maintenance case "cache": // allow configuration of caching + case "delete": + // allow configuration of delete + case "redirect": + // allow configuration of redirect default: return k } @@ -223,6 +277,10 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { // allow for configuration of maintenance case "cache": // allow configuration of caching + case "delete": + // allow configuration of delete + case "redirect": + // allow configuration of redirect default: types = append(types, k) } @@ -374,6 +432,18 @@ type Middleware struct { Options Parameters `yaml:"options"` } +// Proxy configures the registry as a pull through cache +type Proxy struct { + // RemoteURL is the URL of the remote registry + RemoteURL string `yaml:"remoteurl"` + + // Username of the hub user + Username string `yaml:"username"` + + // Password of the hub user + Password string `yaml:"password"` +} + // Parse parses an input configuration yaml document into a Configuration struct // This should generally be capable of handling old configuration format versions // diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go index 5c5d68b3e834..24076e2cf503 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go @@ -20,6 +20,7 @@ var configStruct = Configuration{ Level Loglevel `yaml:"level"` Formatter string `yaml:"formatter,omitempty"` Fields map[string]interface{} `yaml:"fields,omitempty"` + Hooks []LogHook `yaml:"hooks,omitempty"` }{ Fields: map[string]interface{}{"environment": "test"}, }, @@ -61,6 +62,7 @@ var configStruct = Configuration{ }, HTTP: struct { Addr string `yaml:"addr,omitempty"` + Net string `yaml:"net,omitempty"` Prefix string `yaml:"prefix,omitempty"` Secret string `yaml:"secret,omitempty"` TLS struct { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/context.go b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go index 45a35ad176e9..23cbf5b5450e 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/context.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go @@ -1,7 +1,9 @@ package context import ( - "code.google.com/p/go-uuid/uuid" + "sync" + + "github.com/docker/distribution/uuid" "golang.org/x/net/context" ) @@ -14,11 +16,19 @@ type Context interface { // provided as the main background context. type instanceContext struct { Context - id string // id of context, logged as "instance.id" + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id } func (ic *instanceContext) Value(key interface{}) interface{} { if key == "instance.id" { + ic.once.Do(func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. 
For various reasons, randomness might not be available + // https://github.com/docker/distribution/issues/782 + ic.id = uuid.Generate().String() + }) return ic.id } @@ -27,7 +37,6 @@ func (ic *instanceContext) Value(key interface{}) interface{} { var background = &instanceContext{ Context: context.Background(), - id: uuid.New(), } // Background returns a non-nil, empty Context. The background context diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go index 91bcda95a9d0..97b1067fd270 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go @@ -8,8 +8,8 @@ import ( "sync" "time" - "code.google.com/p/go-uuid/uuid" log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" "github.com/gorilla/mux" ) @@ -79,7 +79,7 @@ func WithRequest(ctx Context, r *http.Request) Context { return &httpRequestContext{ Context: ctx, startedAt: time.Now(), - id: uuid.New(), // assign the request a unique. + id: uuid.Generate().String(), r: r, } } @@ -103,12 +103,21 @@ func GetRequestID(ctx Context) string { // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { - irw := &instrumentedResponseWriter{ + irw := instrumentedResponseWriter{ ResponseWriter: w, Context: ctx, } - return irw, irw + if closeNotifier, ok := w.(http.CloseNotifier); ok { + irwCN := &instrumentedResponseWriterCN{ + instrumentedResponseWriter: irw, + CloseNotifier: closeNotifier, + } + + return irwCN, irwCN + } + + return &irw, &irw } // GetResponseWriter returns the http.ResponseWriter from the provided @@ -258,8 +267,17 @@ func (ctx *muxVarsContext) Value(key interface{}) interface{} { return ctx.Context.Value(key) } +// instrumentedResponseWriterCN provides response writer information in a +// context. It implements http.CloseNotifier so that users can detect +// early disconnects. +type instrumentedResponseWriterCN struct { + instrumentedResponseWriter + http.CloseNotifier +} + // instrumentedResponseWriter provides response writer information in a -// context. +// context. This variant is only used in the case where CloseNotifier is not +// implemented by the parent ResponseWriter. type instrumentedResponseWriter struct { http.ResponseWriter Context diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go index 1115fc1f65c8..af4f1351e91e 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go @@ -4,7 +4,7 @@ import ( "runtime" "time" - "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution/uuid" ) // WithTrace allocates a traced timing span in a new context.
This allows a @@ -45,7 +45,7 @@ func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { f := runtime.FuncForPC(pc) ctx = &traced{ Context: ctx, - id: uuid.New(), + id: uuid.Generate().String(), start: time.Now(), parent: GetStringValue(ctx, "trace.id"), fnname: f.Name(), @@ -54,9 +54,14 @@ func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { } return ctx, func(format string, a ...interface{}) { - GetLogger(ctx, "trace.duration", "trace.id", "trace.parent.id", - "trace.func", "trace.file", "trace.line"). - Infof(format, a...) // info may be too chatty. + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh new file mode 100755 index 000000000000..d907cf5c30b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh @@ -0,0 +1,119 @@ +#! /bin/bash +# +# Ceph cluster setup in Circle CI +# + +set -x +set -e +set -u + +NODE=$(hostname) +CEPHDIR=/tmp/ceph + +mkdir cluster +pushd cluster + +# Install +retries=0 +until [ $retries -ge 5 ]; do + pip install ceph-deploy && break + retries=$[$retries+1] + sleep 30 +done + +retries=0 +until [ $retries -ge 5 ]; do + ceph-deploy install --release hammer $NODE && break + retries=$[$retries+1] + sleep 30 +done + +retries=0 +until [ $retries -ge 5 ]; do + ceph-deploy pkg --install librados-dev $NODE && break + retries=$[$retries+1] + sleep 30 +done + +echo $(ip route get 1 | awk '{print $NF;exit}') $(hostname) >> /etc/hosts +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys +ssh-keyscan $NODE >> ~/.ssh/known_hosts +ceph-deploy new $NODE + +cat >> ceph.conf < + +# compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + # For DIND + ca-certificates \ + curl \ + iptables \ + procps \ + e2fsprogs \ + xz-utils \ + # For build + build-essential \ + file \ + git \ + net-tools \ + && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Install Docker +ENV VERSION 1.7.1 +RUN curl -L -o /usr/local/bin/docker https://test.docker.com/builds/Linux/x86_64/docker-${VERSION} \ + && chmod +x /usr/local/bin/docker + +# Install DIND +RUN curl -L -o /dind https://raw.githubusercontent.com/docker/docker/master/hack/dind \ + && chmod +x /dind + +# Install bats +RUN cd /usr/local/src/ \ + && git clone https://github.com/sstephenson/bats.git \ + && cd bats \ + && ./install.sh /usr/local + +# Install docker-compose +RUN curl -L https://github.com/docker/compose/releases/download/1.3.3/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose \ + && chmod +x /usr/local/bin/docker-compose + +RUN mkdir -p /go/src/github.com/docker/distribution +WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration + +VOLUME /var/lib/docker + +ENTRYPOINT ["/dind"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md new file mode 100644 index 000000000000..e12bec1ad51c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md @@ -0,0 
+1,138 @@ +# Docker Registry Integration Testing + +These integration tests cover interactions between the Docker daemon and the +registry server. All tests are run using the docker cli. + +The compose configuration is intended to set up a testing environment for Docker +using multiple registry configurations. These configurations include different +combinations of a v1 and v2 registry as well as TLS configurations. + +## Running inside of Docker +### Get integration container +The container image to run the integration tests will need to be pulled or built +locally. + +*Building locally* +``` +$ docker build -t distribution/docker-integration . +``` + +### Run script + +Invoke the tests within Docker through the `run.sh` script. + +``` +$ ./run.sh +``` + +Run with the aufs driver and a tmp volume +**NOTE: Using a volume will prevent multiple runs from needing to +re-pull images** +``` +$ DOCKER_GRAPHDRIVER=aufs DOCKER_VOLUME=/tmp/volume ./run.sh +``` + +### Example developer flow + +These tests are useful both for registry developers and for Docker +core developers. The following setup may be used to do integration +testing between development versions. + +Insert into your `.zshrc` or `.bashrc` + +``` +# /usr/lib/docker for Docker-in-Docker +# Set this directory to make each invocation run much faster, without +# the need to repull images. +export DOCKER_VOLUME=$HOME/.docker-test-volume + +# Use overlay for all Docker testing, try aufs if overlay not supported +export DOCKER_GRAPHDRIVER=overlay + +# Name this according to personal preference +function rdtest() { + if [ "$1" != "" ]; then + DOCKER_BINARY=$GOPATH/src/github.com/docker/docker/bundles/$1/binary/docker + if [ ! -f $DOCKER_BINARY ]; then + current_version=`cat $GOPATH/src/github.com/docker/docker/VERSION` + echo "$DOCKER_BINARY does not exist" + echo "Current checked out docker version: $current_version" + echo "Checkout desired version and run 'make binary' from $GOPATH/src/github.com/docker/docker" + return 1 + fi + fi + + $GOPATH/src/github.com/docker/distribution/contrib/docker-integration/run.sh +} +``` + +Run with the Docker release version +``` +$ rdtest +``` + +Run using a local development version of docker +``` +$ cd $GOPATH/src/github.com/docker/docker +$ make binary +$ rdtest `cat VERSION` +``` + +## Running manually outside of Docker + +### Install Docker Compose + +[Docker Compose Installation Guide](http://docs.docker.com/compose/install/) + +### Start compose setup +``` +docker-compose up +``` + +### Install Certificates +The certificates must be installed in /etc/docker/certs.d in order to use TLS +client auth and use the CA certificate. +``` +sudo sh ./install_certs.sh +``` + +### Test with Docker +Tag an image as with any other private registry. Attempt to push the image. + +``` +docker pull hello-world +docker tag hello-world localhost:5440/hello-world +docker push localhost:5440/hello-world + +docker tag hello-world localhost:5441/hello-world +docker push localhost:5441/hello-world +# Perform login using user `testuser` and password `passpassword` +``` + +### Set /etc/hosts entry +Find the non-localhost IP address of the local machine + +### Run bats +Run the bats tests after updating /etc/hosts, installing the certificates, and +running the `docker-compose` script. +``` +bats -p .
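+# -p selects bats' pretty output format; passing a directory runs every .bats file in it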
+``` + +## Configurations + +Port | V2 | V1 | TLS | Authentication +--- | --- | --- | --- | --- +5000 | yes | yes | no | none +5001 | no | yes | no | none +5002 | yes | no | no | none +5011 | no | yes | yes | none +5440 | yes | yes | yes | none +5441 | yes | yes | yes | basic (testuser/passpassword) +5442 | yes | yes | yes | TLS client +5443 | yes | yes | yes | TLS client (no CA) +5444 | yes | yes | yes | TLS client + basic (testuser/passpassword) +5445 | yes | yes | yes (no CA) | none +5446 | yes | yes | yes (no CA) | basic (testuser/passpassword) +5447 | yes | yes | yes (no CA) | TLS client +5448 | yes | yes | yes (SSLv3) | none diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml new file mode 100644 index 000000000000..d664c7bde778 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml @@ -0,0 +1,27 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + - "5001:5001" + - "5002:5002" + - "5011:5011" + - "5440:5440" + - "5441:5441" + - "5442:5442" + - "5443:5443" + - "5444:5444" + - "5445:5445" + - "5446:5446" + - "5447:5447" + - "5448:5448" + links: + - registryv1:registryv1 + - registryv2:registryv2 +registryv1: + image: registry:0.9.1 + ports: + - "5000" +registryv2: + build: "../../" + ports: + - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash new file mode 100644 index 000000000000..60d96ae095c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash @@ -0,0 +1,21 @@ +# Start docker daemon +function start_daemon() { + # Drivers to use for Docker engines the tests are going to create. + STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} + EXEC_DRIVER=${EXEC_DRIVER:-native} + + docker --daemon --log-level=panic \ + --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & + DOCKER_PID=$! + + # Wait for it to become reachable. 
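+ # Poll with "docker version" once per second, giving up after 10 attempts so a + # daemon that never comes up fails the run instead of hanging it.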
+ tries=10 + until docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 + done +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh new file mode 100644 index 000000000000..a23d8bee5e58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh @@ -0,0 +1,42 @@ +#!/bin/sh +set -e + +# Set IP address in /etc/hosts for localregistry +IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}') +echo "$IP localregistry" >> /etc/hosts + +hostname=$1 +if [ "$hostname" = "" ]; then + hostname="localhost" +fi + +mkdir -p /etc/docker/certs.d/$hostname:5011 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5011/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5440 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5440/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5441 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5441/ca.crt + +mkdir -p /etc/docker/certs.d/$hostname:5442 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5442/ca.crt +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5442/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5442/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5443 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5443/ca.crt +cp ./nginx/ssl/registry-noca+client-cert.pem /etc/docker/certs.d/$hostname:5443/client.cert +cp ./nginx/ssl/registry-noca+client-key.pem /etc/docker/certs.d/$hostname:5443/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5444 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5444/ca.crt +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5444/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5444/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5447 +cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5447/client.cert +cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5447/client.key + +mkdir -p /etc/docker/certs.d/$hostname:5448 +cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5448/ca.crt diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile new file mode 100644 index 000000000000..04515e8c41d6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile @@ -0,0 +1,10 @@ +FROM nginx:1.9 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry.conf /etc/nginx/docker-registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf +COPY registry-noauth.conf /etc/nginx/registry-noauth.conf +COPY registry-basic.conf /etc/nginx/registry-basic.conf +COPY test.passwd /etc/nginx/test.passwd +COPY ssl /etc/nginx/ssl diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf new file mode 
100644 index 000000000000..65c4d7766e45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf new file mode 100644 index 000000000000..5b1a2d580cf3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf @@ -0,0 +1,7 @@ +proxy_pass http://docker-registry; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Authorization ""; # see https://github.com/docker/docker-registry/issues/170 +proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf new file mode 100644 index 000000000000..63cd180d6b13 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf @@ -0,0 +1,27 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf new file mode 100644 index 000000000000..3c629ae8b2c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf @@ -0,0 +1,13 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + include docker-registry-v2.conf; +} +location / { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + include docker-registry.conf; +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf new file mode 100644 index 000000000000..883a2d48bb3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf @@ -0,0 +1,8 @@ +client_max_body_size 0; 
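+# no body-size limit avoids HTTP 413 on large image uploads; chunked transfer +# encoding avoids HTTP 411 from clients that stream without a Content-Length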
+chunked_transfer_encoding on; +location /v2/ { + include docker-registry-v2.conf; +} +location / { + include docker-registry.conf; +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf new file mode 100644 index 000000000000..b402eacbb8e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf @@ -0,0 +1,277 @@ +# Docker registry proxy for api versions 1 and 2 + +upstream docker-registry { + server registryv1:5000; +} + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + include docker-registry-v2.conf; + } + + location / { + include docker-registry.conf; + } +} + +# No client auth or TLS (V1 Only) +server { + listen 5001; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry.conf; + } +} + +# No client auth or TLS (V2 Only) +server { + listen 5002; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry-v2.conf; + } +} + +# TLS localhost (V1 Only) +server { + listen 5011; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + + client_max_body_size 0; + chunked_transfer_encoding on; + location / { + include docker-registry.conf; + } +} + +# TLS localregistry (V1 Only) +server { + listen 5011; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + + client_max_body_size 0; + chunked_transfer_encoding on; + location / { + include docker-registry.conf; + } +} + + + +# TLS Configuration chart +# Username/Password: testuser/passpassword +# | ca | client | basic | notes +# 5440 | yes | no | no | Tests CA certificate +# 5441 | yes | no | yes | Tests basic auth over TLS +# 5442 | yes | yes | no | Tests client auth with client CA +# 5443 | yes | yes | no | Tests client auth without client CA +# 5444 | yes | yes | yes | Tests using basic auth + tls auth +# 5445 | no | no | no | Tests insecure using TLS +# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS +# 5447 | no | yes | no | Tests client auth to insecure +# 5448 | yes | no | no | Bad SSL version + +server { + listen 5440; + server_name localhost; + ssl on; + ssl_certificate 
/etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + +# Add configuration for localregistry server_name +# Requires configuring /etc/hosts to use +# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing +# Docker secure/insecure registry features +server { + listen 5440; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + 
ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem new file mode 100644 index 000000000000..8c9b1bca3273 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV +BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX +GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx +BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT +wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO +cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY +AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR +sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE +CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p +y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk +o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN +NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G +Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/ +BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z +PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU +lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU +GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna +XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi +QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK +UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w +Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH +sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF +AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib +KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA +KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w== +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem new file mode 100644 index 000000000000..a239939d0899 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE9TCCAt+gAwIBAgIRAKbgxG1zgQI81ISaHxqLfpcwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNTA1MjYy +MDU0MjJaFw0xODA1MTAyMDU0MjJaMBMxETAPBgNVBAoTCFF1aWNrVExTMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/ +oARHbx59G+GOeGkrwG6ZWSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8M +WpLxp5U9LyYkv0AiSPfT2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/ +MgJbdTylEq1UcZSLMuky+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7II +hGlhziLVTKV9W1RP8Aop8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4 +nFwmuhOo8gvw/HhzYcxyMHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCai +gwUNzfe4/dHeCk/r3pteWOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru +5QqKMrbSlOcd6yHT6NM1ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/ +Vlp5N+WRjDpsBscR8kt2Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoO +nhRqhl2PSphcWdimk8Bwf5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3j +NLQ8EmHWaZlJSeW4BiDYsXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeB +twZJXIXR6Jc8hgsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoG +CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQCl0cTLbLIn +XFuxreei+y6TlG2Z5XcxJ84mr8VLAaQMlJOLZV0O/suFBu9KqBuvPaHhGRnKE2uw +Vxdj9qaDdvmvuzi4jYyUA/sQuqq1+wHwGTadOi9r0IsL8OxzsG16OlhuXzhoQVdw +C9z1jad4HC7uihQ5yhl2ltAA+h5G0Sr1b9El2mx4p6BV+okmTvrqrmjshQb1GZwx +jG6SJ/uvjGf7rn09ZyYafF9ZDTMNodNXjW8orqGlFdXZLPFJ9agUFfwWfqD2lrtm +Fu+Ei0ZvKOtyzmh06eO2aGAHJCBTfcDM4tBKBKp0MOMoZkcQQDNpSyI12j6s1wtx +/1dC8QDyfFpZFXTbKn3q+6MpR+u5zqVquYjwP5DqGTvX0e1sLSthv7LRiOi0qHv1 +bZ8JoWhRMNumui9mzwar5t20ExcWxGxizZY+t+OIj4kaAeRoKK6r6FrYBnTjM+iR ++xtML5UHPOSmYfNcai0Wn4T7hwpgnCJ+K7qGYjFUCarsINppQEwkxHAvuX+asc38 +nA0wd7ByulkMJph0gP6j6LuJf28JODi6EQ7FcQItMeTuPrc+mpqJ4jP7vTTSJG7Q +wvqXLMgFQFR+2PG0s10hbY/Y/nwZAROfAs7ADED+EcDPTl/+XjVyo/aYIeOb/07W +SpS/cacZYUsSLgB4cWbxElcc/p7CW1PbOA== +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem new file mode 100644 index 000000000000..acfc9a487319 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/oARHbx59G+GOeGkrwG6Z +WSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8MWpLxp5U9LyYkv0AiSPfT +2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/MgJbdTylEq1UcZSLMuky ++RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7IIhGlhziLVTKV9W1RP8Aop +8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4nFwmuhOo8gvw/HhzYcxy +MHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCaigwUNzfe4/dHeCk/r3pte +WOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru5QqKMrbSlOcd6yHT6NM1 +ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/Vlp5N+WRjDpsBscR8kt2 +Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoOnhRqhl2PSphcWdimk8Bw +f5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3jNLQ8EmHWaZlJSeW4BiDY +sXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeBtwZJXIXR6Jc8hgsCAwEA +AQKCAgBJcL1iR5ROMtr0ZNIp4gciALfjQVV3gb48GR/e/9b/LWI0j3i0sOzeLN3h +SLda1fjzOn1Td1ma0dZwmdMUOF+hvhPDYZfzkwWLLkThXgLt/At3rMYstGWa8pN2 +wVUSH7sri7IHmYedP3baQdrHP/9pUsGQc+m8ASTE3i+PFcKbPe5+818HTtRrhVgN +X3oNmPKUNCmSom7ZcKer5P1+Ruum0NuDgomCdkoZgfhjeKeLrVjl/wXDSQL/AhWA +02c4/sML7xx19nl8uf7z+Gj0ir1pvRouhRJTwnRc4KdWu+Yn7WLU8j2ZKf5St/as 
+zjnpYVEdCp0KSHccgXtobUZDEG2NCHmM6gR2j3qgoUAYjHyqPYlph2r5C47q+p4c +dDWkpwZwGiuYq9qpZj24X6BfppxExcX6AwOgFLZLp80IynwrMVxFsDd2J+KpKRQ1 ++ZtYPcULwInF9MNi/dv84pxGOmmOaIUyjN8Sw4eqANU4T5uvTjUj7Ou6KYyfmxgG +y++vjpRN7tN1t1Hwde8SVWobvmhU+5SJVHV8INoJD7uciaevPo9pt833SQTtDXeY +PVBhOKO7thAxdUiqlU/1nGTXnf1VO6wAjaVYoTnP4tJ97WuTptwd2F5znVWHFGVh +lzJAzmFOuyCnRnInsf4n5EmWJnT7XF2CofQqAJ8NIddrU8GnQQKCAQEAyqWAiPMK +I/dMzlS7oJGlhbKZ5R4buc+EoZqtW7/8/S+0L6IaQvpEUilD+aDQyaxXjoKiQQL+ +0UeeSmF/zU5BsOTpB8AuJUfYoUe0N+x7hO5eIcoCB/QWYX+iC3tCN4j1Iwt6VliV +PBYEiLUYPngSIHob/nK8UtgxrWQ3Fik9XJtWhePHrvMvDBalgCKdnyhuucGxKUjc +TtPcyMFdi0z4Kt/FAm+5u/v4ZkO909Ish0FrAqQ9t5ETfvTTTYKBmzny6/LSPTK9 +0XIsHltuC1xG4vGQsES/Ph++Yj3Vn011FqvFZeBUHbfcQuB4h5wcb+90d4GU1kux +eabsHPIZKrlN4QKCAQEA2Fs8NAN5K9i7qbxZCJPi6DJV6XMznk6JVGb+qkkChCyq +IOXb95+c9CIpe6w2d3res3zvML3zbdz2Lyp9G0ve6tSlOaSnHeyIxZ5SRB+yQrcF +GXtsx370bOGjCi1/NH85kwKlMuROFJKleJQv8rKpIEo5aPSPV9Cc/VsUqBpvR+O0 +U1HMv57P4yJA/ddw6imHJBl3jTmWBpK4B+LBsCbdypxdVoO8t32Lb2BqDTaPJfYU +RJUpjn/efLLoP6CWxYtqpUlY5tc7NJGAokl8Fo1mPn02klydvs09uiXE80Li2Hoc +/meMH07Lbt2VTw6iGNRX6VpIHEUZGZeS6rbAvO4ZawKCAQEAjOtGVPXdyWEB0kHu +MBzYY/7tMf0b/rymWNL9Vt5NiauQu8cYSBdNR21WzdLdHkFwqbOCLX9twA7zrnna +q+SNnfuxaShlbptls9HvKyySQMCaSRj3DJzaq3ZcM2vFgmUFQxeKPV1geeY9xOta +LqbExDzmFq2m9F1PPmqAPDL1bt6+7mCVzb1irB9be52WysUNKrPdBP6b5V1DHYAK +EwK1WOs/TxBusqDn/gWBjjmLqYr+ZVndaTfDvPd3sWDdzBoiKZ40QUZ15Z5lu76M +6e2DhfHCUjGcZBEjDaI+WYc9s0REAzJajEf9Lax3ZKZUyCpWbXx5CgSdKCHB8+cP +RTyTQQKCAQEAsxx8r5a8hocLfQ43Kvm7HH0nUHeVoRXlbOFDLNf6ZE/RnCCOxOX3 +esiZTRAZmzo2CaOBJPnr/+SwTgW/woxCBGh8TEc6LnS2GdviwRD4c3CuoRTjzhgU +49q8Ld3SdDRrBoBnIMWOuktY/4S2WRZ9GwU3l+L2lD1Y6gmwBSa1P2+Lxnpupagk +9CVUZpEnokM05LbMmTa2M8Tc43Je5KSYcnaWctvmrIUbnN3VjhC/2y5oQwq1d4n2 +N4eo65vXlbzAUgtxtNEz62YVdsSdHNJ8dXkVZ3+S+/VPh75i2PxjbdFSFW7Futlx +YtvAEs3LdgC8squSDQ1LJTutXfBjiUUX9wKCAQBiCMre86tLyJu6Qb6X1cRAwO7m +4kyGzIUtijXko6mWxb4X/usVvzhSaNVYbHbMZXjX+J5vhBOul+RmQ3EY6nw0H2z8 +9D4z/rnQVqeb0uvIeUhBPni+s4fS4bA92M6Ie5bhiOSF2JjjJr38BFnTZARE7C+7 +ZII7z2c0eQz/wAAt9fWWroAB2mIm6wxq0LNij2NoE0iq6k2xJE1/k8qhXpsN0zAv +bjG72Q7WryBeK/eIDK9e5wGlfLVDOx2Evlcaj70oJxuoRh57e8fCYy8huJQT+Wlx +Qw4zhxiyzAMq8SEqFsm8dVO4Bu2FwzmmehA80ieSb+si7JZU92xGDT394Im2 +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem new file mode 100644 index 000000000000..3ca47f45893c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIFCTCCAvOgAwIBAgIQdcXDOHrLsd2ENSfj5h8ZmjALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQwM1oXDTE4MDUxMDIwNTQwM1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV +BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2K +saEVcHq0eldu5kABbWtZsf9keK7lz8beVIowzOqp5IHpGlggtH7xDVeigA/sLdds +WTgKEOq3zsJzdgfEti5TNAjjmPqjMKkolqv3LXDJG0dZ2GZ8W/eBB6X1wB0LKr3i +ye3/5jb/wCZYVGGMQXj0VQxY8Qq+OHEp0effeheJqA0OYOj+RaZwi20OR/KmJRgY +wXU33bZyapuyT4krhFlFbtzXeKsKQPrT2ePWxPAceqUGUTIqyJySYIw6vb72YxjX +FNRw6Jg7B7RqVJaVCfBrVxtAv+rCLOhUOVYmWhgWEIODPXiqOGwB0VUApAVAYqfi +TYnJIZ7QYLlQx5VPNlzZuSJTUzKmHQLtLcTqdO5HmLxfxc0WuS/ftK916wy/jpSc +m2DiHjIy6aAEaHKGQrNgT+no68kp30xkYAVsIs0BFpl6Q2iNr5e0uKta82A0xU1Q +we7swSHOHCevuDZfFA/CqnBptOjvNUuVytcroCeCrV/ftp75w/Fd9zOcb6LGLxM2 
+2UzhkSXl3II250xj74Q3q8T9TDxCLty7oiawhaYKI+8SDYc510EQ7MH46WMO+3Uq +JkpmmELd9POgnnZ1JrCFmf0flUKTi2CqU3wrBPpPMwFBxoFipp5iL87npACHc3DY +6uaoF4Pf9Et1Fd7HRon8RMsKkrSF92NFiBx5UvhZAgMBAAGjNjA0MA4GA1UdDwEB +/wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq +hkiG9w0BAQsDggIBAC0F4ci1nqZ9KUhEEAmWmy8g89DovNNIGSC51r2WJ/COmYUX +X70TONscsBL/kx5MK4xoAmb+EN6Yy8i+z9NkNJd0B+2MjXPMFBpgGb0UiPv2wEmZ +5PAKyjwTxNIm6L/nFhkmVqfsQHfjHukXES4C0ff6fj6fuDpBfl5nTlVmc9LpP+hT +5RAwW10qumucGxAWGNBWW+K66cf8O7n/0nQykxJxYjBx16ZB80H2uvqFDKDVFqze +co5M4euXQq9KiXPRlcC9rab2a7FGLHd0TyPkq6TvfsqpxcryyKS4rIAz3sQh/tl/ +/qm1tBcZW2bce3UlF2Wb2dW9HqvIu1O84f6ptLqwgKcIdTbwgQZ0kbFoWE2kWJSV +w+eAFb7tz1LDTpF3NRlz+1K27pBQWRQgcqoIRoQXpC0LfQY9Mp70QIfUQdUh6tnO +8hmq5y623tfxiDwCxb/EOpwCmwK1Cp9cloZTDefVE1r6NkEJWeeHG79VljUGF1KT +NKzXWrrsFtge/hU9Pj+frcZO9qExxPCcsrdZcoK7Ll8s+pjulRvbnCnJkNpeOI3P +iz6+sdGmzKSKg2daRM67Zmy5tmlBEX/eV7kFqt+b3HsdUiLo3Ng2lyPLNNDfwUtB +EukgYGjVJoyqLjLXgsCxLJlk7X/ogVwf8SlAnQ7p6KuxGWm02vlUpEmJp+Hq +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem new file mode 100644 index 000000000000..baccc9eb807e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEArYqxoRVwerR6V27mQAFta1mx/2R4ruXPxt5UijDM6qnkgeka +WCC0fvENV6KAD+wt12xZOAoQ6rfOwnN2B8S2LlM0COOY+qMwqSiWq/ctcMkbR1nY +Znxb94EHpfXAHQsqveLJ7f/mNv/AJlhUYYxBePRVDFjxCr44cSnR5996F4moDQ5g +6P5FpnCLbQ5H8qYlGBjBdTfdtnJqm7JPiSuEWUVu3Nd4qwpA+tPZ49bE8Bx6pQZR +MirInJJgjDq9vvZjGNcU1HDomDsHtGpUlpUJ8GtXG0C/6sIs6FQ5ViZaGBYQg4M9 +eKo4bAHRVQCkBUBip+JNickhntBguVDHlU82XNm5IlNTMqYdAu0txOp07keYvF/F +zRa5L9+0r3XrDL+OlJybYOIeMjLpoARocoZCs2BP6ejrySnfTGRgBWwizQEWmXpD +aI2vl7S4q1rzYDTFTVDB7uzBIc4cJ6+4Nl8UD8KqcGm06O81S5XK1yugJ4KtX9+2 +nvnD8V33M5xvosYvEzbZTOGRJeXcgjbnTGPvhDerxP1MPEIu3LuiJrCFpgoj7xIN +hznXQRDswfjpYw77dSomSmaYQt3086CednUmsIWZ/R+VQpOLYKpTfCsE+k8zAUHG +gWKmnmIvzuekAIdzcNjq5qgXg9/0S3UV3sdGifxEywqStIX3Y0WIHHlS+FkCAwEA +AQKCAgAtZw3V8P/+el1PpqoCsNzpqwvQn36bc3CKvPwtM1tJQa2Q92V3DQdr9rDg +7pjGkankpGorKScH4ZLseLy2h5aKRCZm9PS/DhbbCs1wrDhtO5AxeKYPGhYNiOpx +VvwuHQ/Pohfmdn7KgNrKrW1WIBW5CWN+2X4mq2Gk6aYLHgKZSeB3mf1st6mNRACW +RZg5OZKW3VMv0a/l3cVaeqooXwQ/PtUkXhMp3ILnnKly3Gulzi2gIyj3EQ5vODSe +O3gND/UZOJwwgGG6Aief4fnDc7an+c1OSgBr8OVC21Ys3dfQWWV0os9gVFhymX8k +2AgRf6jP93sFw2NSY34KvcGZpKG59oMDxWF1vPo8sOt17Ey0+qp3eUtB3FfE7Wtf +BaLaD/x4U91izIqOEMzQ6QiZAyvmUoBkUSo125CYuIkt8C8Q1lA1KjihETWF37QR +mr8LUk0A0x3SErtm4wVfeDEqVSfI9gKpk6i6rlUzuCjv58Rc0yyqoghXwBWM4CKj +5ZHYpBKAxj4bM6IrKnodAOcsyVk2c2zVTaMxPhoUj0fF7IE5Hy6YAQ/yBheZEM1v +fhsdBFyS6OqSCnN6UinhH268QPam82lfKTFjW5lOgsSDQZ9rhiWoyamhonJTq65I +nb08f4mzT6OGMwV13zq8dXio6WnUIQAhXdEYWrMBmxp5b6CxAQKCAQEA4kmwV3Nb +n3ZIzVAp2l+yGZwdg4YWzN2kcfdNkL8I+Pn8pWrOwv/uGQYmM0786ys9kB5lu4FR +TMcoEo3AaK/z8N49ro2Kl6HcTmxZgTMr+cl6iwetzqYdkRK7klxyCv5uVloDQDtc +AulDH6RkW9BfRERpi6XtlgiFdJj5jMvXMpwGHX69JVsXb83ZSQESjI2JfO9Y8+4M +a7hNKWW/W0ZBrGCcQQPbgpysfJ+PFKUF/yF1h8SSCdetW2Kv2ix16wL5uHKINYmZ +Y/Om+/AFnUOQlANycgThtgBI5mvg9Khq6W2i/RNcIL7bvwAzq1p+o6cGnImXo4bY +hC4fs2/aeX17UQKCAQEAxFQHSLBYDLal5CQYbHbNZ2sLjwRUraEd/+BA8XoERVVQ +JPihgEvTPEaHnWrFTw0qaGKgMZ5SZCZSWUIfXjYvQIUcEMhNUOHweXhJJhifO5sd +sTuvU7bWg76F69bRKfp8KM266m7qMYv+tNlQ6Kbz/1ImsW00xb86vCK2hPfhldtN 
+d/iBb4HVDu1uoATHUNuqsSGj/UvttKudQdg7MapzM4N+D4m6rPZUjQmtoMWOXt7R +LYrqEOHWfkxXKlVHw1cL9uzUpArvnR0VcYvGfXiYJFbXWsEB07VxIoLMPEtPbpH9 +YLY37KugrthEVnsbySmZIWCRDEqQuuAaa5o8S1naiQKCAQAiU/dybMebe0A0FVMk +E5xbEjnP+AmBbqZBu7iCmthrnNDc70UKg/TEyxAEfJkVu+uM72+TcFy6/wNvPR3R +Q9AH3E8TKdm6gw1+wCUb2n1zWUND0Bhn3v9hQKw/2dJbJJnsc59GoTqmHmjWZgPr +gcLSAmbYjoVqW0STmZlR6KJuxQiQdOeQwS7fASVTU9xSgi43S7/80UIFHWJnQ04y +NIhF9CoAGuuz9ryb80CraxVrzNGdlQ5qe9OKp3/x4wjIbB0iBA3xwTwJ066jTZgs +cVF/gr5b2a28BHMKsZbgxqPhYYZ2SfeR6CJB6W/tML9BaFcybBUa85vpAW5BtFg6 +UfThAoIBAAp1/71byBVFVimF0tdUrTUpewAv1uM5hoOvy0YSnk+jcBXIObLAV40K +pQc6PTEtHmlZd/es2+8CK7kd0NYQRQxHC2vJgHUi1NFkG2GwRivC5B4hdAId5+g1 +KqWaWKLH+f2imKcNKeVh9Dxmp+z9mFquYelqTDmNKvADWX5URuzZNpOB5kOuw098 +TzyvhH9GdR3jEP3aIdxSmJp9jwnibyj7hKgHSq8UoQSy01GRtThQ3wxyLm6f2fH4 +11wmFyDNbpHFpL7o5kOU3SOjsvvUhSbKiccIKbTCIjkYhxFfYegeV0Xj767opjMq +ytlgzeY2FTa2EoR5JKUQc9fv6+6H5yECggEAVVfnywPm8QXn+ByFDdUndZg3uEje +DGyvt1M3mIz5geyRZO8ECzgsZVzKgZC8jDB4lPKz3AGgNlUl/vyGHk6CtW6V6ePA +EXcmOkkMKJQMdopY2/cE6YlSpBGMCcnfothgL0HXxYoop4xVjb74k7tFcNrIDoRx +zp9dSalgxx9aMeaURRbMWf8AhWLZUAjJ/359M1SmcNW619SL3p8Q95Nptvdiltww +lWOCkBdgkjW0mel+Mi2+gY8UPmgNBMPrJ1z9b7b7529YCv5Oci8ABn/N202nhjCp +LupADooNknOMLDyqwRorEv4g6wRjuPIYTIhI9fO5ranu089x+mmGU2tCBw== +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem new file mode 100644 index 000000000000..ee72c0c6e279 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFETCCAvugAwIBAgIQJ+iLgsp9gA0DmROqW+tHFzALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQxNloXDTE4MDUxMDIwNTQxNlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDHR/A6uiQ9X/Xh5ivmdjRr5XVr1D7+fU9Qu6ohArqtBuJsLr6t2RBTS9w6PIAf +xjQSMSFlrm/CY+hbfBMSgm9NeH23o3kYCgoEPhP/634A45W5xwUFno388U8/NHK7 +qwzSP1ezKXfXNvzuo1mZhT08aVdGMOrZUcZZZl8R3RPcIRw9XDSfXKVkMluH6egk +8iLdOxdIdRS58DeSI09FskWe3cIZ5kJmMqnKoIbYSJCVVeYPO0RFlIBi+zpdVyI/ +r9LG0r0plRdz/HJevbOitU2y93S1s9NWMNEkOFU1PFJmsF3ZzNqJFCySj00y/Hcs +jPULYwIxYdqcv16cTNmd3P6FegvuzLJLjNuGaLJGc1antv+p62P7ZdE3DyprFuxs +MJgDL9+NjDaIzoamFf0Uv7K3F7hxrrAHfvm1CMUOyQLg9J6Wl4mLsOy2ZhCbdNFs +T6dobAUGvz4Muj9V8V5pR+nFehjmsPENSsTcs5j0e8zTWtvMFISdS+NZAkpiz0s4 +PV8DLgk5Rp1ZG2V5OnRPLMOTgK0nngc5GVaxf7OYCrFHbBJ8tL93MXNQptNFeBpV +FhjUGqVFcz+6nbFX2NsFLZnghQRs9lej4TTG33NSAYusKqhVwpYFf8CsXCcvYuU6 +RlkCYjr3PB+nX1UDa0eUGm0zOabf9O3D1VzHQBpDuzSHQwIDAQABozowODAOBgNV +HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz +dHJ5MAsGCSqGSIb3DQEBCwOCAgEAaPfAs6saij4FZIPbzAb5M6ZVvfXBg+AfH52t +p3tFsnWUJCiOh9ywsc2NcmJdleKDc4/spElFMUarHqcE1ua6EH15O5GEnHWKj8EY +PVQFrPvf30UkRGNPl8eC7afZtCNk9MLllIATAzBr5Z1i+psV7MmgBKpbZ4B0TnhR +GXNT60QaCJ9RfUuc2z7RHJNo9XTn3Q44X7TFj+P3jHOWzTf8y6Mz6saTy2bugIUy +AfRgRgq/bB8hRjrazg55FIlrMv7dr3J0cIuqmaHfsw7Q2ECMCXW8oQXMBzfuIT0n +sG4u0oVxdNx4OdHsAubGjjwNDhxJvN5j8+YFqZMu03i8LbyamTwsrZg2C3QrRUq8 +SujQEEB+AmO0lpuJ24FsOOYVSYCpLy2ugrKOr2NUqbiBKZs8uBh6RGACfunMZlEw +4BntohiO7oZ5gjvhGZNUEqzMChw7knvVjZ+DkhFk9yE4qIL7VsJSUNI2ZJym/Xeq +jr/oT8CpP8/mFZspa6DFciPfhGLQqKcaZZohL7461pOYWY5C2vsJNR2ucBZzTFvD +BiN/rMnIGFrxUscCCje6RLmrsZ3Lb7bfhB3W6kwzLRfr/XEygAzx6S2mlOM34kqF 
+HFpKrg9TtLIpYLAKAIfuNbrLaNP1UKh7iLarhDz/qDcvRka/qJTzLD3eLeGXefAP +KjJ1S7s= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem new file mode 100644 index 000000000000..d20bda0ce715 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAx0fwOrokPV/14eYr5nY0a+V1a9Q+/n1PULuqIQK6rQbibC6+ +rdkQU0vcOjyAH8Y0EjEhZa5vwmPoW3wTEoJvTXh9t6N5GAoKBD4T/+t+AOOVuccF +BZ6N/PFPPzRyu6sM0j9Xsyl31zb87qNZmYU9PGlXRjDq2VHGWWZfEd0T3CEcPVw0 +n1ylZDJbh+noJPIi3TsXSHUUufA3kiNPRbJFnt3CGeZCZjKpyqCG2EiQlVXmDztE +RZSAYvs6XVciP6/SxtK9KZUXc/xyXr2zorVNsvd0tbPTVjDRJDhVNTxSZrBd2cza +iRQsko9NMvx3LIz1C2MCMWHanL9enEzZndz+hXoL7syyS4zbhmiyRnNWp7b/qetj ++2XRNw8qaxbsbDCYAy/fjYw2iM6GphX9FL+ytxe4ca6wB375tQjFDskC4PSelpeJ +i7DstmYQm3TRbE+naGwFBr8+DLo/VfFeaUfpxXoY5rDxDUrE3LOY9HvM01rbzBSE +nUvjWQJKYs9LOD1fAy4JOUadWRtleTp0TyzDk4CtJ54HORlWsX+zmAqxR2wSfLS/ +dzFzUKbTRXgaVRYY1BqlRXM/up2xV9jbBS2Z4IUEbPZXo+E0xt9zUgGLrCqoVcKW +BX/ArFwnL2LlOkZZAmI69zwfp19VA2tHlBptMzmm3/Ttw9Vcx0AaQ7s0h0MCAwEA +AQKCAgBd61qd4vKHdn1kzNztzdHg9BDGFA7oU9iYvQlua2HdgDwgLluxhXa7Oyp8 +y9y6nOgXls4dpPuJCxsMWsqGU7DvOxVNAh9lI/4ah8NXPv5wntIG73Q/dL2Ic5Yc +vLRCHFh7klzb1HRlmsXUFmp4/yGgIil+rDlS2MZ5hdTSj3X3ricoCBfI75oHQfB/ +es7s8q1ZxKqxfHSbOUqHdlq7B0zmla8QE8RBdCkvlT5YGsMBjq1RimYfwOBNRgf4 +y8MZbt0Q1WtPeLPH9zdTzWYnDfmjmhqINEsq+PDoeCA4aciQGxjwOCrapgZnwF/q +4q+r8HbgufXjnjGw5ERLt7BsRSYynoJiTWQ3p/wZ2VLpjFtxYxoJ5/qpQvbZMgGS +Yu3FZNC6cnbOs+JWbdm7Kg93N24cBrGdk/KdEE6lz6uQq07FTSqLtPEQWePzBiuA +1wfP78b2AH6vyJKq36EfMCJK2i7rpwtNz7d9NI5kiLRDB7gesqC94WJ+psEu+ErO +w9DbTV3xdOPs4FGGrR41Hbo8emrk6smhb8+VK2odggi8i2CLAkYupMsuobBlX3CL +hyJPfWDv1aREJ1w7zWVQlJkvp5zR0oXZXpfFxjpj7Ypbp7BKxmh5+WYj8msFDfaD +8VQ+pqgPpdl6zElEq9m5koHjsHH57fMeJQ59HiWpWFur+kQx4QKCAQEA0Jnvbm7R +WypbPDInkIoPDIhyP9Pqv+wMzNfYEnVEG0GhEU/H5aE20a+Dm6u0bsmPm5lCSQsu +EvylTSL3yumQZMincNIUXcPYb2Qye/ZzJnMIibCqwMKQqi4HxCXprWhiEoGPum8A +fN0bTGgMYfM6JZ/Dh1eGsEvemeW+5tn5xZF4Lfp/vkT8v4FuHDydUF/lIx7F5MMi +VteS0hHnR1DuvxHqtysf0wy2l61LFr7mQCMYTNEyFB3ZfXqpxJmFmCqPbr4PQsIm +2rqIDw+13eeoyDpJJkdi+yzHkAYDOdAsur0vOQvK/Zj1QKz9qmC1O6L4BN5yp265 +vjSE4Orvo7btEQKCAQEA9I/afLw6lHUJ4FVL0p7dH15JSFjt7nmGHocE7Wf6Yp3G +vMp+PdGyoJ2KEQB2unnQZK1gZqUuRQLannjNl7fsIiIhHgHxMBCIiylwSUVnP868 +u9/fpJV/cSGze2zF0WAttIgXKNtXG7xMntcY2k+SAe0qjqX494KT0NGnznySt2nU +A1YlkXm6u3KCOJrBKfbtiHXFoH39sA+ihuPiV7xcETS2ZrFdAX9M422p4yDHqe/0 +dTe18wIxJNiEX4xp/HRE//cuQ5dw/Z/QmNrzgWxHbOmXVR5C90vIJRuYY9xz0tDP +LMnifSKfnG16l2gqg7zb8xsxYqSGndXWKPAeiq3/EwKCAQEAhCWQbWgcjmFFzNuE +/ubG48yoe9DW/OAft8Dg68iH7bBkxd/BpbG8VZeXiw16T1i29f5f5IAFnxeX7EbD +rTLLO1113V3ocwH3YZGa/bbBedETzo4xjc1z8asZVmQiJa1ju4+CKrvZFkDH415i +wcZgxqbwKhQDijl1+g52Ii5iMYuXE6GGPVXcu8DVrWOk0N7+/IGpIeOQJG2KYDPh +TOdzZ22FQKY8EeoS3gF0+SLUIDtbUIaR7/Z86iXD2HzdCemkVaZnaoYuMRBL0ybD +sqDn5nguEObWSII0pgN5Fa3QODhS6xOSc5brfx5X0BBVn0L9VbBJ99GIL3t71jRe +vVrL0QKCAQB+jUYZT+ncUqgWruy6g7yW89pmFqagxb/SYjn5g9m8WDq0DPDAmped +p4f/fkbx/gEJZ/I/i3BjA7QPVyHERcdqblDGz2h4X8XYhUv2jnR8P0XIznNTHo1B +BJh04PeIfgWIqveZC8+KqajYdSQGLDC40Ho6MMahha9p2mPEZRAi2x97zoNIQT6Q +qxOZqPMV/RIzkAYBI9E33w9ST/AbSHw35xgQEe23zaEC+wdzYc4QMPxF/9smcdbu +YyA0tVtO6PefoNAO5/nvNFjkEED7kwVu5X2K7Urn3w4lrZ7w5e4FhEoAukN6T4Va +lAhg+uUtIHiM12B50/tZB4N30bFsP9eDAoIBAHc7ppfpo1aDK3bDr6zTSOU4Mn1l 
+XrfhBJHDy2Wt9WkvWtcCtXr3sDpthaChueV+mGoKvfgWyzUoauO6HDDsRYriqaQB +cXclVjyy+3atY32Opz9rnWefQkbgTOQ+oQgOzEFhxNS+11Omc6ZZ9s31N6TZi/Yz +rgXzhGrr73DkV6uwiiwkvP8vJxg8AMWKorDIm1myr9wwlK5ogDKSku1DM/y1gvlt +4EA39fqURyqxN9o5Yq+8K1+a/smjGx95M+P8Nke4bMs1+lb7bBXbMaVpC6DLqj8B +eleOZ7adY2mS0CBuf0PNkJRNDwF1B5VDmGBJLubUtGLuUUoEyUbv66WfnUw= +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem new file mode 100644 index 000000000000..ef030f740567 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE9DCCAt6gAwIBAgIQb58oJ+9SvWUCcYWA+L1oiTALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTUwMFoXDTE4MDUxMDIwNTUwMFowEzERMA8GA1UEChMIUXVpY2tUTFMwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDDmOL3EhBm4So3agPMmF0z1+/nPlrE +xoG7x0HYPk5CP3PF3TNVk3ArBPkMzge0/895a4ZEb9j+LUQEjOZa/ZwuLmSjfJSt +9xTXI1ldp8KasyzQZjC33/bUj7FGxGzgbHyJrGGBoH2W5HdswH4WzhCnGTslyiDo +VN4hklJ7gr+Geq3TPf8Eji+1L71MOrUyoNp7BaQBQT/gKxK0nV+ZuSk6eaiu+om7 +slp3x4bc21o7eIMmNXggJP6p9fMDctnioKhAPcm+5ADiFYSjivLeUQ85VkMTpmdU +yvq6ziK3Ls6erD+S3xLvcHYAaeu84qLd7qdPwkHMTQsDpO4vPMIwL8piMzZV+kwL +Bq+5xk5//FwnQH0pSo2Nr4vRn+DITZc3GKyGUJQoOUgAdfGNskTt8GXa4IsHn5iw +zr12vGaxb//GDm0RLHnh7NVbD8xxDHIJq+fJNFb7MdXa8v31PYebkWuaPhYt6HQC +I/D81zwcJIOGfzNITS2ifM5tvMaUXireo4pLC2v2aSY6RrPq1owlB6jGFwGwZSAF +O6rxSqWO1gLfhJLzqcw/NjWnO7nCZEs/iKgAa22K2CtTt3dDMTvSBYKdkRe/FYQC +MCa7MFJSaH85pYRzoDN4IuVpvROrtuQmlI47oZzb64uCPoA4A8AN+k8iysqITsgK +1m8ePPXhbu4YlwIDAQABozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYI +KwYBBQUHAwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQsDggIBALSgrCdEQd3I +vb/FNkNZkAwdjfBD6j7ZtPBwvjEiiyNTx9hOLBGvbey7kr0HtW0KkLWsdRmCc+3z +ev9I5VjDOtpiqrvuAA1wRBaL3UzGyj/eFjPJpvkfJi8zjkIZ2y18QG3yJ6Eqy6dD +0aIQAHl9hkXMOVrf364gf0p7EoOGtSlfQ56yIGDPTFKKiy+Al0S42p17lhI4coz9 +zGXE1/SiNeZgdsk4zHDqhzzBp8foZuSL1sGcIXHkG8RtqZ1WvCyIPYRyIjIKZcXd +JCEM//EbgDzQ7VE/jm+hIlYfPjM7fmUzsfii+bIrp/0HGEU3HN++LsA6eQOwWPa/ +PrxKPP36EVXb72QK8C3lmz6y+CHhuuAm0C1b1qmYVEs4eRE21S8eB2l0KUlfOecf +xZ1LWp1agKt6fGqRgcsR3/qO27l8W7hlbFNPeOTgr6NQQkEMRW5OxbnZ58ULXqr3 +gWh8Na3D4+3j53035UBBQUMmeeFfWCvtr5n0+6BTAi62Cwwu9QQQBM/2f9/9K+B7 +cW0xPYtczm+VwJL6/rDtNN9xPWitxab1dkZp2XcHG3VWtYvE2R2EtEoKvvCLPggx +zcafsZfcD1wlvtQF7YjykGJnMa0SB0GBl9SQtvGc8PkP39yXHqXZhIoo3fp4qm9v +RfbdpOr8p/Ks34ZqQPukFwpM1s/6aicF +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem new file mode 100644 index 000000000000..5aee3ea5fffa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAw5ji9xIQZuEqN2oDzJhdM9fv5z5axMaBu8dB2D5OQj9zxd0z +VZNwKwT5DM4HtP/PeWuGRG/Y/i1EBIzmWv2cLi5ko3yUrfcU1yNZXafCmrMs0GYw +t9/21I+xRsRs4Gx8iaxhgaB9luR3bMB+Fs4Qpxk7Jcog6FTeIZJSe4K/hnqt0z3/ +BI4vtS+9TDq1MqDaewWkAUE/4CsStJ1fmbkpOnmorvqJu7Jad8eG3NtaO3iDJjV4 +ICT+qfXzA3LZ4qCoQD3JvuQA4hWEo4ry3lEPOVZDE6ZnVMr6us4ity7Onqw/kt8S +73B2AGnrvOKi3e6nT8JBzE0LA6TuLzzCMC/KYjM2VfpMCwavucZOf/xcJ0B9KUqN 
+ja+L0Z/gyE2XNxishlCUKDlIAHXxjbJE7fBl2uCLB5+YsM69drxmsW//xg5tESx5 +4ezVWw/McQxyCavnyTRW+zHV2vL99T2Hm5Frmj4WLeh0AiPw/Nc8HCSDhn8zSE0t +onzObbzGlF4q3qOKSwtr9mkmOkaz6taMJQeoxhcBsGUgBTuq8UqljtYC34SS86nM +PzY1pzu5wmRLP4ioAGttitgrU7d3QzE70gWCnZEXvxWEAjAmuzBSUmh/OaWEc6Az +eCLlab0Tq7bkJpSOO6Gc2+uLgj6AOAPADfpPIsrKiE7ICtZvHjz14W7uGJcCAwEA +AQKCAgBmIvmxpp8l+cH/ub5OIenZXpMJn4fqZPXtxjjd4HshIN0ln0JlF15lOG2M +gDGKFGKUts8gAX/ACocQETtgnDnn65XlwPIqfXFGflD2FNoLyjBGinY6LhtIF9is +aXmpHz1Q7tDjzZiHKLor8cBlzCjp+MToEMpqR5bO1Qd5M2cro/gM7Lyz9kN3S3x/ +x9BCpbgwsVtYxGfEePmFkwAO159tx4WMCYvOlW2kSm5j+a7+iwmA9D7MGkVZHvNN +A7Y/H0F8ekdVBN5pMG9Yrv/vk0ht2lugcS5YGr4eufFq0mhWdv+jhBTxLzqPMMBG +m9oMJcj8XyXYtwpfVsqBpCqK2wnEnv4Kf0rZzBU706nI2mjPXx3dL+5qo8uQJKNp +mxoS7vmHV5RIJgtdvyzGFHjdfu1leowhV+Jy9jWzMw4wlnmlxsfDECf5RoSf2XGt +SMGJb0dbJKae+W4MfNUFsgAWMZk3h3KF8AHHe44OpDbQeoh3JLnkWSG0oS3CR0ch +68TzCy0SZZEZ9IS+I6o5WVpwWfReCQ5NjaKipWcpiJvxg+Dc3GG3QcVXVz2gGrJh +g9v0v6eyeOJ32QGvvP7THFBjpWeeHlXT8Yz6hFcPrvErEZ029TEmhg8aLWBGfsR5 +F1bazdbqvOSEB9vBAAaddNnEDG9Rl8EmC4WdsnVgYUw1J7gfQQKCAQEA9DKjD9eN +CrUl/2YfSm2WaFhYci74XcHDVeAXN2SbOyKbMIqk3aOFQNRAsLRnwPkdiLtuqeDK +BafrfLTCORHfFdYKnUzmuekESNLckN9VyLztgqOqNAv3LD6GmSHBaJEnUyniLxOL +k0wMEBIsEQw7Fb4blM2REYJ3ZzMFmgpRGnIX8KcxhW9XgSrnqMLO0w6mVxjo7xzd +813nCcNrGhySM/EzKYtTNHy2JZmMH5QFHaIj67KklO7VeEZX5U+TKveBEt4rmHqs +Ndqf/djSs8vu1xse82pVRxMXX2mhDLmwjUjPgWYxUL92jTiyJhE7GxpVB/yHgF1J +Ecb47MDahoNKkQKCAQEAzQzvCOA77IQpGO117GcMqcjzwEUhTytojFBT+s5mHfzk +dYr5TyN86LQ7/GktNoJ5oRvD9UGRSul1OGneivqtWj6mv6/Zvfzacx8NXY4MYFs1 +nEr3Gr7orVFIzD2x7nMPG2G6+J6hZ1rhpnZ9Hprf5G41sHIJxHJ9wTYSUAmFh8bv +FiJqF90bSq/E5hgjphtX6wZWeZYspzc/5+IrJ/I0nqoxV3rjUy234zlzKJAV10sV +5oVgxLLQsUujkHp/Da+ij2aTv1Za8y3PTJ7MAHYgdpa5l/4U9MnPUEB2REBCI1NN +TqxnViwD0xgsvxfb79UzruLJIYOCKvfOumlutXM0pwKCAQBUIMXQhWAP2kyW6mXJ +TGvO0vDVlZz3H/Pdt/AHo19fRhLU7E7UFKupo/YNanl8H9au7nO3jrvKqwkT02o+ +IwwKB81sV7v9PGu/cvWN64MwPvZMVXojqCOlWH0icGCjV66Glh1YPpGNU1ushbYs +wVvxp6b04sUhlSLxqMA7S2aZh8j7nX4QDEXHODLLDyIV0Cw6QViuV/GXEDiyQmK5 +gjJUNrp7i4ZExNozpeyCTIpepSde4hKVRJrCbumFFJ8M5GvRRj0asNh3TTRlTbd5 +Pb6w2KUXEwECFW+t7UQQkEBkzDrAx6YhvXRoPqoRN0p3keDNeZBtBrZPq47CccZX +JRAhAoIBAQCJ/DgnGu54XP9i/PksGrSU1Nvi+SJPKoDyW2QIFTj22SXMS7c1oEYA +OrlbRFPeqLK8zfhyZKsnZC8zxVqy37okTqDbwbSfezZt3emamWqOtRJAmNnsr6fY +aii4+JNySQ9Td9LgV69549iRso7EN6iPCfMrR7J29izWBlMQdTfchOyDUqleYbZp +7hpsVLY4o5HoYJ10uLBX3oAsxTARc5YhZ5pIqjOr18o1KIXsN/napXaZaAwUkdiK +VsI9CZHSXezg30Bxs+UEXEFx6DKT5Oo3o3pFZAAqMlxGPvrXNv7K0tXlKXNos7nn +Jg+GkMG6hRiAibCb0umXjKcbHrQXeu1lAoIBAQDcRBsy6cSQXMSu6+PyroH+2DvR +4fuiMfSrUNjv+9K8gtjYLetrZUvRuFT3A/KzDrALKyTFTGJk3YlpTaC5iNKd+QK8 +6RBJRYeYV16fpX/2ak/8MgfB2gdW//pE0eFjw+qakcUXmo957m7dUXbOrw1VNAET +LVBeVnml+2FUj0sTXGwHKcINPR78PWZ8i1ka9DptnKLBNeA+x+OMkCA88RJJegSk +/rgDDV52z4fJHQJh9TZ7zLAXxGgDFYLGPTrdeT+D/owuPXF+SCP4pMtVnwbQgH9G +dfQ9bb7G14vAeu/kEkFdGFEreS09BOTRbTfzFjFdDvSV4JyOXe9i/sUDxf9R +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem new file mode 100644 index 000000000000..53ce07422fb0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIFCTCCAvOgAwIBAgIQPjclBRGzhznCybQzYRQTyjALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw 
+NTQ1NloXDTE4MDUxMDIwNTQ1NlowJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV +BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALBe +C9O6es+mStDowUd1kiM59VkinzzdHgE24LvKmGxQ6fDnnT8S9L7iyzoxcJWlvSHu +pfyZWvij0ZIyRZ288XemTEFYq25RK0IBGGdvYz9OqT2R3lblBQrXDjSi9WG16sGx +60MGhM2egGMqFQ5DBfT16IKw00+RjFgCVzJ8T64Lzw82E0e7d6hl39SPybY+uvrt +SID60hYGmXoOdaiC9qquivks67BZprGNfORrvyJNrCFI6oKUFWHrQ1PpGd2tOwJN +1P3gkkS8pVlAif6ZQkAf+zuKu+l4j5tKxGlJAkJsafVJDLOxBKutUj5msha0g6uJ +gFXUe0+G8hkNcEjd8XqUUCwIOY3pdv4WsydKBk3uH9zMnYolw53k1q0ObvoY1NXf +beMxHQAtDi7nfQGlae9cuuOSymy95WuvzfhZFKdPWUe8lKN9QXFIWVoCFnOm8T3P ++FNCUE+p8DIWkal6Ul9THi/Kz4p7twyrUp1LwT5EtSaJ3iGAmB9I+8/1vmZT3lPi +nX8P+iVGM5yOUnptrsFm0bUcJWRD6iaTK1KxpH+Is4h2kiUiSz1tC/9bKaJYN2o9 +oy7q7+ZVfHSmIxLo8ZFYsaZBcXi96cKuuPMR3X4ISPwKDqP5irxU/QbI+YQBMshg +G4b0BNoMZ50g30r3Hcsifw4pzPQF0RDMOBeCiOi3AgMBAAGjNjA0MA4GA1UdDwEB +/wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq +hkiG9w0BAQsDggIBAFuS/VrMNUwEMyUIktDyna5ExYh/FDOE+YEYf8tsX7dSMhRK +wE560/AcVZcbKKAZOnZ/262a++8tparsQt+bXBJ2so6YUqsFDNdOLCI2aShjWDRe +TNhqmLIO3FNsLRKp96WHVz+jFoiECsoYfKn0jgqTqxx+7nWFqgBaNSlF5cbCgLCH +jQV1uQhzsw/Mh/32hXAidkv/nLeLf7FbKq08hgthtoP+XstlzZ5BxkPodjb8XWXG +DSS49SWX971GHa1apwMKfxVGSppxn18ZwEmW1BUfQBNxtMytqA9DK3+xuoUdXkB0 +iJbm3Jc10JSRju8iyL121Xt6f8O33paVz/ndDJIWztUOjnItc89rxHsINPt5+cUt +jix8ohwmHGDrK7ZooXBvotvmGT/xhPr2eHUAG8JuSJ/Cr09UUOwUEigz4CfgJOHm +XukdzjOkb4r7lhNmVeGqrjRol1W0Wsc1NGH++J6xdkIeQ+i23kHwFHfQWV/J69tm +rOn2N+qijtmbIy9YfVcrFDtUtEAzXylZ2StCVQNofd0M7tXNdrUL8yAFwlrhWGJV +wsSP++1xH2Ie6Diupy8z6rbP383HmnmVPU/UecgLrlX2lEpt/UZkkX1Xm+6PhrrT +HDeeULvqtUP3PD8wS0C873Pl9GXOKISqf0HKEIDUAVZhQOsGFqiZH0388M4L +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem new file mode 100644 index 000000000000..636dc8287f6d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEAsF4L07p6z6ZK0OjBR3WSIzn1WSKfPN0eATbgu8qYbFDp8Oed +PxL0vuLLOjFwlaW9Ie6l/Jla+KPRkjJFnbzxd6ZMQVirblErQgEYZ29jP06pPZHe +VuUFCtcONKL1YbXqwbHrQwaEzZ6AYyoVDkMF9PXogrDTT5GMWAJXMnxPrgvPDzYT +R7t3qGXf1I/Jtj66+u1IgPrSFgaZeg51qIL2qq6K+SzrsFmmsY185Gu/Ik2sIUjq +gpQVYetDU+kZ3a07Ak3U/eCSRLylWUCJ/plCQB/7O4q76XiPm0rEaUkCQmxp9UkM +s7EEq61SPmayFrSDq4mAVdR7T4byGQ1wSN3xepRQLAg5jel2/hazJ0oGTe4f3Myd +iiXDneTWrQ5u+hjU1d9t4zEdAC0OLud9AaVp71y645LKbL3la6/N+FkUp09ZR7yU +o31BcUhZWgIWc6bxPc/4U0JQT6nwMhaRqXpSX1MeL8rPinu3DKtSnUvBPkS1Jone +IYCYH0j7z/W+ZlPeU+Kdfw/6JUYznI5Sem2uwWbRtRwlZEPqJpMrUrGkf4iziHaS +JSJLPW0L/1spolg3aj2jLurv5lV8dKYjEujxkVixpkFxeL3pwq648xHdfghI/AoO +o/mKvFT9Bsj5hAEyyGAbhvQE2gxnnSDfSvcdyyJ/DinM9AXREMw4F4KI6LcCAwEA +AQKCAgEAnrHg/oD7ZMEC7PuifoRCHMRYCf5nPkLQbtNMYG2pvT0JY6VlDo4l/2Te +7NvzrBPYHSI55RKwkq4FMwFdNtP+imTulJYOm1MaE2gc52WI7jv/eNE6OQIWCWz8 +8Uv4dBVWyTcos8S31rTaXWBOVejlAUgMERy+5wfWOpLQlzLYF4m0pMFJk/AReUtB +nmhLXlsPsB22cag/RWZmzzcXk6tT/LzVe+R5ptLkdTsUuAxjjaBKVCDiMuDAZL1m +dah3h8oKIMab8l0SABumxKqYAKkyvbSJQUhSUYAT5+3c0cfJ6q7WoMk8TqvnwfpQ +2Klbcaa4G6+79H8e/a41RWmcMVTTpLKmwzx/iMLPswLnTFbWYCsLSsml3OpmXPhG +CKdbIWMvNMBfahZmnCP2pNcZBVY1/k/lEw25ehtnWqA7HplawT6V3gk/Bzz+3e3R +XEpioZF70ipdW5Pb3OG/tKSNDvRRjqLPk9UWlQzmedzu7XN28V/blw/CBVcMAcc0 +njwAledTuqv/wQ67dtbXdcxSPZbV/Rq7y3OmpgK6RWLIFzzpOPW5gULqUZfrnxtv 
+StxVnlZXhFoymodFobTi7AYibsLaXLkunZWXEwFwdtLfFHznfHq/rHfBmna1lcKW +MgWRqsbaoCsqHC1nc0E4llFkn3zqGYgMQNBeqNfX6cIPI/eQzPECggEBAOk0TP8N +edIFENOrzUtpH1fB3k15heeA84SeBhj8t/xrphR3o+IVO/GtMtq9hVLeYFVPwWCi +Mmy4KhwNUOtFeCSX4MbpiXvoPEjL3QF+Sv95HsEWsT1iBQIN4aoV0ZSv48YsRczs +tLjr96hADLTMfpCwyRq9r8XVF/hnx7vqOoOC/J1kteRhjOWRnutFpdAMfkFgzUa9 +1unmDHsDifcT+vpxief9Q9zK9xMYvYmwFkBUjOlhC7WchZC20nrwvM+A2mMBpeLB +WSRWsYeOqW8zcQNGdWuXXMKxsYHwv9tXbANVWxs1gz4x7BxcFoN5poIFrnT+eImY +EwhGrKR6jZsKF00CggEBAMGbdZU0+yvxL2tAul5RGAqv9xhdUV4eg8warTQ8/RWt +8Vef2wllBYnP48rXNDovb7ZNOjMBdjIWZ2zq2McMtHqpzP+zWQWaNT8/7Zi24JTL +y4G75kZdGgTPG2Y71seZoZGxfOu4gf7cLKOqxiHYrNDHEDl5Pi13tJD/8qf6hYm6 +K3yALSv+QlM3mk+5oueKQ7Lj9rV81YomYSV5+K+WhszhvLmuxv0necOLKapeBWvL +GQ5038yAq3PFdu0HXzyA6L8YdusP1d3sqwQvLbi8KAMXJCeT6WZXGYgX2Rjfbuih +ZHUaE7Ac0EsJfMuOowSkS7oXuT81k64ngCoq5KZC5hMCggEBAKYkt9JiZG8HYuSb +GsjmHQllup5RvN+hVF0gRFHbAq2YeBtO3Xg+DpXxAjErIuhWPCWri6bwB6LDVmTj +68milaTke6TbTzLy0rg+Xbcppf766LlCFIYZ5l1/TE3j+4vGAC347sW/wkWY/7lj +4GmS43zsJmqhx6/XUJuOPJOZnZSCZr0vuhL6mOoZZDJUTXy62dx0PetvZsT/O9cM +P2fDWWTCLTEVlBqik4KMdsS4qjGsyzOeCzyZReNDDRO/nZTsRSqSSwARJhQom5Rr +RDVQXeyqbw93KAQhmshroBSB5Rc+4YiyCE3wPTo7NWL38XPi3lbF0VSd/rk/uNH5 +6hcSCmUCggEAIPHjQFCTrRaNiyKolAQYozjuQyceAXYP11tyvcDjEB1ZRB/flemq +15iYmpukN4J67/qUPLmy8zL8xnvwB28SBw195MUQEPP8u5aVR7dW3/sN1jWzKaYO +F2Nmti7YjX6HD9Oz/iiXdlbhAbi9nmTQg3ZcPGt1OSd1gncLQ6pNrvIPFFB7X1EU +2DRN/eMI5X2Rp49DG/7yF2AQh+AJgVeL+LEw/CfRlKJzBeNYY7U8Fuuoh907eAEt +K7YeVpc6jYEiGeJ/2eAH9IuhTkT48saRyHTXoiR5QwDvR0lHmAPtS4irH4Igd4dv +qlUi90B+XPvYJwKCc08aojf2hzZlUiVwIQKCAQEAraCoWea8hLFchxmAiBt7joIg +nNK7a3LOHYxT1gB9H+PoVqTmzGVTeZpD8Jnis/UHmDhRYuUGqvFIefjAWbz0jJAN +t6RMAozENCG1PoeXHf1gt2wspv14kza+8jSdpzNrzZgPZdb7Wh1UEqUkiRYwn87f +C7DHknqCj9S2qq0DFXYz15JNPVrbvD+ZLBFJhTAjppS9TuYQVLf8JPYHpLRio/9A +dMsyOz1VA2RRYN0u/u4ccxiN45K3PbVMCeDPbWXNm8G75YKQ7LnIuehMB1qkZy6N +MOnNGp3l/ZkFK0JsW/pZqTQ2FqSkb0+ttTFApFI3qB04sc4s0uKPI9fa0OQtUw== +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem new file mode 100644 index 000000000000..08c393d1b009 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFETCCAvugAwIBAgIQCnqSQalw9ytL5bHLgHZe+jALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQ1OFoXDTE4MDUxMDIwNTQ1OFowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQC9gvT3cwz0Ih9+7Ilv5lc15HsEiSmEMh4nOMZrSaamKgf/ydCiGo3DQapr/XDK +FHMLKq68AxwfOlzmEFQ4d9umpPMQ2+4GBr0VG23ppGtQApIPHgD06S0/CeHmDIXN +FXcKybPX/9KbgNkXBWbbJkJy0EcsdP8VJD50Q2WH89nvgEYJNFuKEELD3iGY6bBF +jeDTle5jYA7CgBKvD2avn31g24Qhxn8n8/BdYO/U0kw0qmoy1veLOjCAW0os0jkM +NlKrFpyHEWNj5B3X6UgSn8EGQaVbDq17PrQwlHJYU4nih0TnD1OwvBnFnd27pXjr +68eGA6Zc2BbUnhNGhppWHZ46LpPxpIbafSOH3ES3N/MZAfcUKIUntLlWE2xCQgFV +TW95WeVtP/r1aWgIHu0E2Jb2eHCE+qXYqJxSU7S4DcknmmcTS69hzyHs+92Ec+7Q +m0aQFZ0dyPoYPwXMgZpTAIuXEGg/FKC1fiS/deTW37DyvB2jppehKW3RJY3uso7R +o9vs6DJx1OdU5XEq9R3n7op61N7PK8Wxmn7TVYHEZHkITVvtucZZd1FNTOrOJaNJ +UnE+FuPK1Mrff+jz666Ru4zQL0CondOamX3QR5tuNK6MTqFs87wKY25qsqz7cS27 +kHW+r7UNWbJY3/UQhaPZM78zCZa2IL1nBFUjsFvEA4rtYwIDAQABozowODAOBgNV +HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz 
+dHJ5MAsGCSqGSIb3DQEBCwOCAgEAHVGMyoyX4lRzWCDkUjrXkrDZzuv03M2ojW2Q +UL61ejMkTWQW8R4gKrcPHAOJAPKVfGEVOrQH3ZMyxV2HnWrJ7egrn65zOzmLbWSh +O7gdpL6YYjBr218fqJn/8HadXZa4k70JyympYOLojeWSLy3KP03U+y7AMcdE1uG6 +6HJI54ZjBoW/nEyWmMh/mfMz8EN+Mgek48Z9AVaOswbtHtDIXN7XO0jbB3DbY5Yh +prVqVLYAz4sCchGTadj+aEChF5sJkKREDvAew/njC0WGS2TmMJ+V1uVhXV6354mr +edk79YvdwzwDgeYArkprahMtn9eu1aSTfUXsmM5OP5tR4gyFV1kUmTPY1yUd/yO+ +638wV0mWtGbbf6j8dUKeUBCyt2qGg8J80OUeFdvdHMswtaUq951NApX44BinPkbK +moBVQByZ5OEcmMidFC9SqYSUwTQ7uNyWeguhCXav+l3x900YlKnUQgRUZntPwXjs +yc7MXv0j0E86Gme6G1O02zamwkRgr3qOTHu2oQOow/a24fM4HASayLR0Kegt0sh3 +rzk0HRF1mGonf1Ecyyj/3LpHVsgYSckwtJoZLOqtDMn+CKtOCEByssQfD+E9Qe07 +qMyvcwpXUpfqe3ZERbJ10m98Z88VeK/XGt9ptq7HY47n1KL6lx3oyXwZIw8pq928 +89dcqL0= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem new file mode 100644 index 000000000000..205511afb41a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAvYL093MM9CIffuyJb+ZXNeR7BIkphDIeJzjGa0mmpioH/8nQ +ohqNw0Gqa/1wyhRzCyquvAMcHzpc5hBUOHfbpqTzENvuBga9FRtt6aRrUAKSDx4A +9OktPwnh5gyFzRV3Csmz1//Sm4DZFwVm2yZCctBHLHT/FSQ+dENlh/PZ74BGCTRb +ihBCw94hmOmwRY3g05XuY2AOwoASrw9mr599YNuEIcZ/J/PwXWDv1NJMNKpqMtb3 +izowgFtKLNI5DDZSqxachxFjY+Qd1+lIEp/BBkGlWw6tez60MJRyWFOJ4odE5w9T +sLwZxZ3du6V46+vHhgOmXNgW1J4TRoaaVh2eOi6T8aSG2n0jh9xEtzfzGQH3FCiF +J7S5VhNsQkIBVU1veVnlbT/69WloCB7tBNiW9nhwhPql2KicUlO0uA3JJ5pnE0uv +Yc8h7PvdhHPu0JtGkBWdHcj6GD8FzIGaUwCLlxBoPxSgtX4kv3Xk1t+w8rwdo6aX +oSlt0SWN7rKO0aPb7OgycdTnVOVxKvUd5+6KetTezyvFsZp+01WBxGR5CE1b7bnG +WXdRTUzqziWjSVJxPhbjytTK33/o8+uukbuM0C9AqJ3Tmpl90EebbjSujE6hbPO8 +CmNuarKs+3Etu5B1vq+1DVmyWN/1EIWj2TO/MwmWtiC9ZwRVI7BbxAOK7WMCAwEA +AQKCAgEArwqno2uEGnbuKnjmVRInmWKpcb4TN8Rm74lUVEKaB76o1s0cxK3MJP6h +H8/e/vg2bqkE7indLsbkiaepcuLaYijXTcomJzDQMw+7zOOOLz/Aku/+qDg8D47c +NXV5nLzn0HIPiEIF0JYJbmcR4veKxqu0Ic8K0QdCHHcn75P/x2Tuy4+twW9Vi76/ +v5KRuxzZ/fTtVKKj32kWWNXb3fltgCoh+GR0jH2XlVh1DVkVBEwnfT/rM5ESvWwU +riOah7ohT1+6QlOAPwKzwfr6FCG000eNKPb8q+p12q0ylHzMzgxtSxJwFb0X/Nzc +snaboyWLjDAQ2I7LP6WmXizznvkKbE9PjW6UGYQ+2XApqp+Hn8tSC5I/gIDlBOOa +psJ4fkRjr8n5+CbHbGmQG736hZcZY/z10TtOQbxeeeuri6oDQ62D4Z07GpWCG2EG +sUakaytZnJkIN79PpfthPZwtStlG0KVs0i5wggH/iP2h0yAmvJ64ZRIqdvuE/aBn +sdfRRlYUqmFOJsVQgtUWGKGS4WIxrGaclzT1TNxCKdiAk0glXe3sDtvBni6qDW07 +iJzEXxrsLw6MiCDhHfDeae5JYeJXK0HlCfYHXgRmEnDFTGw8rBzwz3eXvPqZ5YNt +j+31uHSwQjgOgEgSrXeTmRfLZsytKqndhBB/yBFmzZNrswXGackCggEBAMN5RSdW +t+WWl8ghDGz/CN1oRjnk298/6L7ijluKGRgG+igwBEy+5m1EGPJT+Y5LEH4TiQJe +Oc2XjQuM7zABX7JWWk1cL8Zlv3kcmR0lg4BWs7wDkoU1HYRkMP57vubtxFzFOsNa +momivEniZ/eonHm3yv0VHeenH9j3mhJ3mVDIpkH+7uhn3++c0zYh96NkjfQi1/jF +P35eSAt7FgHDOt37fWXwtGeYFRN4P19ZUNiIvZwT6Q1gmegRO8BYoW6cSbLWe5Cp +abaULds46+mjM4zJhCZRFkdWHbzP4bZHocSmwGsqcpABJ6SASTVim02GGhBIt1nj +fkqa10X1c5Sqis0CggEBAPgxFKSHccfIJ6yht2HJjysRLN/IHlO9hDcpCWUrISN/ +hxu1uxfNGmUkd0H8zDO/O+QAJXLE8PPPB77pJniIJ8kK4swwsfufN6bNV9XJldjA +o4vXnYt9Mpuky9cugD8LocUgWQzzKY5Y875TC4s3ldzyKQVm0NO+Wz1U3gfjogEC +d7PhTk7Ba/ZjVGtL7HuZxlL+/TgZklMks2ulSTW2y8aqVJxaZXv0H0NX/+fpDHYw +iljr+iqbiqZvjrzySryb0XWMtzP9oyDEXTXrWnG+kOIZW3BZ9FLxT+Te7zZ2PUbK +vTkObsKxc8WVHIYgkt/OwWSwbYLre5nvFPvgEFbQuO8CggEAeZTlUXmbul63m5AK +xYS/w88G1x2lMK/0mT4bY4562zoDwJlVI1MdydqwVZGryDiiUnjeIC3xcBISdZu8 
+bjR8jFUvp6xuPs2ska0bA0kBCQNkmc3zBY2rBVy4KKFZdRNwrm8yhK3HL1KcIKyF +FEK4yPBrfozy49JMecxP9aqUHu4eky/4828gl04JBUONXwC9VpuRj7dILdaAozt0 +zbXb2JSDQ7O60jCC83A4oprQMU6j+P9dVqe+Mtz9OD8ocb8eC/FiO/FTwm9aMl+u +RMzw1GHHI3oODGLg7j6y2oilcsZxKnblePJu8N+mKWFizY5aicRg3rUkKU00Ftx7 +fn2xBQKCAQB7w7Xgie5SStyF+KrC58kuF8WB3oBJEAOjoiIeQhCnbAvK5KfkqZHV +CAc0b8TAtUc/XldOUSk6222oZQmbJ4J3fac1Xb8TlAUjd9iqMnk3+nBT5vSYP5mC +Bf7kUjr/tWQ5MfVWQNfjNTZvHWhvRwvDfzq3h9rxDEbhYbXKx1fdGwboO51aJpgY +6NWLH/RQepFsh91sIUxXi8CxGF5Wm84oRn4k7esXkdgZNAPX+N4O/guvZhV9M81D +S/QpAsYEIcuky8P7+Cplx6YXokKa4AXNyglQEHuG9PD7V7SAOxw5dhZAIpNXIThz +OfVcaVf0pVzJQjWKCLW9QHz9UXG0aScfAoIBACdr3exVMUaMOtrAnf2NXj3hecgg +WsWRBOOaSW5wXGt1JNlfYS4zwViafIwy31DNuMg22rj5Mq0TYMtuNYto5RoLSXeB +uupUrENEBnt7JFrwI/NyWG0uYMM3G2MtGHGYooaT9+++wT96QxJZr5fwFYF1ddf6 +5tFeKtNt5VM0wWBHO1voUhQ0TCaooatJjMuAB0+WbvwniKxmdbqQDzY+6myBBUVo +gBJ0JxhxakLm1XGFHDtPCsAAHX/uZ4CvH2uyWqAlx6iwGXd0wwEGrbIRB/BundxR +oaJWswU4FIPAgOpy2LEJKnvzhcmVFtZWD5sFXA1/83QvpceLTFTD5uioBPU= +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd new file mode 100644 index 000000000000..4e55de816968 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd @@ -0,0 +1 @@ +testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh new file mode 100755 index 000000000000..707dfa18b87c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +set -e +set -x + +source helpers.bash + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +# Port used by engine under test +ENGINE_PORT=5216 + +# Root directory of Distribution +DISTRIBUTION_ROOT=$(cd ../..; pwd -P) + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-overlay} +EXEC_DRIVER=${EXEC_DRIVER:-native} + +volumeMount="" +if [ "$DOCKER_VOLUME" != "" ]; then + volumeMount="-v ${DOCKER_VOLUME}:/var/lib/docker" +fi + +dockerMount="" +if [ "$DOCKER_BINARY" != "" ]; then + dockerMount="-v ${DOCKER_BINARY}:/usr/local/bin/docker" +else + DOCKER_BINARY=docker +fi + +# Image containing the integration tests environment. +INTEGRATION_IMAGE=${INTEGRATION_IMAGE:-distribution/docker-integration} + +if [ "$1" == "-d" ]; then + start_daemon + shift +fi + +TESTS=${@:-.} + +# Make sure we upgrade the integration environment. +docker pull $INTEGRATION_IMAGE + +# Start a Docker engine inside a docker container +ID=$(docker run -d -it -p $ENGINE_PORT:$ENGINE_PORT --privileged $volumeMount $dockerMount \ + -v ${DISTRIBUTION_ROOT}:/go/src/github.com/docker/distribution \ + -e "ENGINE_PORT=$ENGINE_PORT" \ + -e "DOCKER_GRAPHDRIVER=$DOCKER_GRAPHDRIVER" \ + -e "EXEC_DRIVER=$EXEC_DRIVER" \ + ${INTEGRATION_IMAGE} \ + ./run_engine.sh) + +# Wait for it to become reachable. +tries=10 +until "$DOCKER_BINARY" -H "127.0.0.1:$ENGINE_PORT" version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 +done + +# Make sure we have images outside the container, to transfer to the container. +# Not much will happen here if the images are already present. 
+docker-compose pull +docker-compose build + +# Transfer images to the inner container. +for image in "$INTEGRATION_IMAGE" registry:0.9.1 dockerintegration_nginx dockerintegration_registryv2; do + docker save "$image" | "$DOCKER_BINARY" -H "127.0.0.1:$ENGINE_PORT" load +done + +#DOCKER_HOST="tcp://127.0.0.1:$ENGINE_PORT" docker-compose pull +#DOCKER_HOST="tcp://127.0.0.1:$ENGINE_PORT" docker-compose build + +# Run the tests. +docker exec -it "$ID" sh -c "DOCKER_HOST=tcp://127.0.0.1:$ENGINE_PORT ./test_runner.sh $TESTS" + +# Stop container +docker rm -f -v "$ID" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh new file mode 100755 index 000000000000..9c59aa260138 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh @@ -0,0 +1,12 @@ +#!/bin/sh +set -e +set -x + +# Set IP address in /etc/hosts for localregistry +IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}') +echo "$IP localregistry" >> /etc/hosts + +sh install_certs.sh localregistry + +docker --daemon -H "0.0.0.0:$ENGINE_PORT" --log-level=panic \ + --storage-driver="$DOCKER_GRAPHDRIVER" --exec-driver="$EXEC_DRIVER" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh new file mode 100755 index 000000000000..c6da03140c60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Run the integration tests with multiple versions of the Docker engine + +set -e +set -x + +source helpers.bash + +if [ `uname` = "Linux" ]; then + tmpdir_template="$TMPDIR/docker-versions.XXXXX" +else + # /tmp isn't available for mounting in boot2docker + tmpdir_template="`pwd`/../../../docker-versions.XXXXX" +fi + +tmpdir=`mktemp -d "$tmpdir_template"` +trap "rm -rf $tmpdir" EXIT + +if [ "$1" == "-d" ]; then + start_daemon +fi + +# Released versions + +versions="1.6.0 1.6.1 1.7.0 1.7.1" + +for v in $versions; do + echo "Extracting Docker $v from dind image" + binpath="$tmpdir/docker-$v/docker" + ID=$(docker create dockerswarm/dind:$v) + docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-$v" + + echo "Running tests with Docker $v" + DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" ./run.sh + + # Cleanup. + docker rm -f "$ID" +done + +# Latest experimental version + +echo "Extracting Docker master from dind image" +binpath="$tmpdir/docker-master/docker" +docker pull dockerswarm/dind-master +ID=$(docker create dockerswarm/dind-master) +docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-master" + +echo "Running tests with Docker master" +DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" ./run.sh + +# Cleanup. 
+docker rm -f "$ID" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh new file mode 100755 index 000000000000..0a628238cdfc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +TESTS=${@:-.} + +function execute() { + >&2 echo "++ $@" + eval "$@" +} + +execute time docker-compose build + +execute docker-compose up -d + +# Run the tests. +execute time bats -p $TESTS diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats new file mode 100644 index 000000000000..8b7ae287175d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats @@ -0,0 +1,102 @@ +# Registry host name, should be set to non-localhost address and match +# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d +hostname="localregistry" + +image="hello-world:latest" + +# Login information, should match values in nginx/test.passwd +user="testuser" +password="passpassword" +email="distribution@docker.com" + +function setup() { + docker pull $image +} + +# skip basic auth tests with Docker 1.6, where they don't pass due to +# certificate issues +function basic_auth_version_check() { + run sh -c 'docker version | fgrep -q "Client version: 1.6."' + if [ "$status" -eq 0 ]; then + skip "Basic auth tests don't support 1.6.x" + fi +} + +# has_digest enforces the last output line is "Digest: sha256:..." 
+# the input is the full output of the command being checked +function has_digest() { + filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') + [ "$filtered" != "" ] +} + +function login() { + run docker login -u $user -p $password -e $email $1 + [ "$status" -eq 0 ] + # First line is WARNING about credential save + [ "${lines[1]}" = "Login Succeeded" ] +} + +@test "Test valid certificates" { + docker tag -f $image $hostname:5440/$image + run docker push $hostname:5440/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test basic auth" { + basic_auth_version_check + login $hostname:5441 + docker tag -f $image $hostname:5441/$image + run docker push $hostname:5441/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client auth" { + docker tag -f $image $hostname:5442/$image + run docker push $hostname:5442/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client with invalid certificate authority fails" { + docker tag -f $image $hostname:5443/$image + run docker push $hostname:5443/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with TLS client auth" { + basic_auth_version_check + login $hostname:5444 + docker tag -f $image $hostname:5444/$image + run docker push $hostname:5444/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test unknown certificate authority fails" { + docker tag -f $image $hostname:5445/$image + run docker push $hostname:5445/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with unknown certificate authority fails" { + run login $hostname:5446 + [ "$status" -ne 0 ] + docker tag -f $image $hostname:5446/$image + run docker push $hostname:5446/$image + [ "$status" -ne 0 ] +} + +@test "Test TLS client auth to server with unknown certificate authority fails" { + docker tag -f $image $hostname:5447/$image + run docker push $hostname:5447/$image + [ "$status" -ne 0 ] +} + +@test "Test failure to connect to server fails to fallback to SSLv3" { + docker tag -f $image $hostname:5448/$image + run docker push $hostname:5448/$image + [ "$status" -ne 0 ] +} + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go index ba9731fbbba7..689916859aea 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go @@ -15,6 +15,7 @@ import ( const ( // DigestTarSumV1EmptyTar is the digest for the empty tar file. DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + // DigestSha256EmptyTar is the canonical sha256 digest of empty data DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) @@ -38,7 +39,7 @@ const ( type Digest string // NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg string, h hash.Hash) Digest { +func NewDigest(alg Algorithm, h hash.Hash) Digest { return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) } @@ -71,9 +72,9 @@ func ParseDigest(s string) (Digest, error) { // FromReader returns the most valid digest for the underlying content.
func FromReader(rd io.Reader) (Digest, error) { - digester := NewCanonicalDigester() + digester := Canonical.New() - if _, err := io.Copy(digester, rd); err != nil { + if _, err := io.Copy(digester.Hash(), rd); err != nil { return "", err } @@ -130,8 +131,8 @@ func (d Digest) Validate() error { return ErrDigestInvalidFormat } - switch s[:i] { - case "sha256", "sha384", "sha512": + switch Algorithm(s[:i]) { + case SHA256, SHA384, SHA512: break default: return ErrDigestUnsupported @@ -142,8 +143,8 @@ // Algorithm returns the algorithm portion of the digest. This will panic if // the underlying digest is not in a valid format. -func (d Digest) Algorithm() string { - return string(d[:d.sepIndex()]) +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) } // Hex returns the hex digest portion of the digest. This will panic if the diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go index 9e9ae356693e..41c8bee83c8b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go @@ -10,7 +10,7 @@ func TestParseDigest(t *testing.T) { for _, testcase := range []struct { input string err error - algorithm string + algorithm Algorithm hex string }{ { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go index d5fc5443f492..556dd93aec15 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go @@ -1,54 +1,95 @@ package digest import ( - "crypto/sha256" + "crypto" "hash" ) -// Digester calculates the digest of written data. It is functionally -// equivalent to hash.Hash but provides methods for returning the Digest type -// rather than raw bytes. -type Digester struct { - alg string - hash.Hash +// Algorithm identifies an implementation of a digester by an identifier. +// Note that this defines both the hash algorithm used and the string +// encoding. +type Algorithm string + +// supported digest types +const ( + SHA256 Algorithm = "sha256" // sha256 with hex encoding + SHA384 Algorithm = "sha384" // sha384 with hex encoding + SHA512 Algorithm = "sha512" // sha512 with hex encoding + TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only + + // Canonical is the primary digest algorithm used with the distribution + // project. Other digests may be used but this one is the primary storage + // digest. + Canonical = SHA256 +) + +var ( + // TODO(stevvooe): Follow the pattern of the standard crypto package for + // registration of digests. Effectively, we are a registerable set and + // common symbol access. + + // algorithms maps values to hash.Hash implementations. Other algorithms + // may be available but they cannot be calculated by the digest package. + algorithms = map[Algorithm]crypto.Hash{ + SHA256: crypto.SHA256, + SHA384: crypto.SHA384, + SHA512: crypto.SHA512, + } +) + +// Available returns true if the digest type is available for use. If this +// returns false, New and Hash will return nil.
+func (a Algorithm) Available() bool { + h, ok := algorithms[a] + if !ok { + return false + } + + // check availability of the hash, as well + return h.Available() +} + +// New returns a new digester for the specified algorithm. If the algorithm +// does not have a digester implementation, nil will be returned. This can be +// checked by calling Available before calling New. +func (a Algorithm) New() Digester { + return &digester{ + alg: a, + hash: a.Hash(), + } } -// NewDigester create a new Digester with the given hashing algorithm and instance -// of that algo's hasher. -func NewDigester(alg string, h hash.Hash) Digester { - return Digester{ - alg: alg, - Hash: h, +// Hash returns a new hash as used by the algorithm. If not available, nil is +// returned. Make sure to check Available before calling. +func (a Algorithm) Hash() hash.Hash { + if !a.Available() { + return nil } + + return algorithms[a].New() } -// NewCanonicalDigester is a convenience function to create a new Digester with -// our default settings. -func NewCanonicalDigester() Digester { - return NewDigester("sha256", sha256.New()) +// TODO(stevvooe): Allow resolution of verifiers using the digest type and +// this registration system. + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest } -// Digest returns the current digest for this digester. -func (d *Digester) Digest() Digest { - return NewDigest(d.alg, d.Hash) +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash } -// ResumableHash is the common interface implemented by all resumable hash -// functions. -type ResumableHash interface { - // ResumableHash is a superset of hash.Hash - hash.Hash - // Len returns the number of bytes written to the Hash so far. - Len() uint64 - // State returns a snapshot of the state of the Hash. - State() ([]byte, error) - // Restore resets the Hash to the given state. - Restore(state []byte) error +func (d *digester) Hash() hash.Hash { + return d.hash } -// ResumableDigester is a digester that can export its internal state and be -// restored from saved state. -type ResumableDigester interface { - ResumableHash - Digest() Digest +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable.go deleted file mode 100644 index f2403f61d28a..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build !noresumabledigest - -package digest - -import ( - "fmt" - - "github.com/jlhawn/go-crypto" - // For ResumableHash - _ "github.com/jlhawn/go-crypto/sha256" // For Resumable SHA256 - _ "github.com/jlhawn/go-crypto/sha512" // For Resumable SHA384, SHA512 -) - -// resumableDigester implements ResumableDigester. -type resumableDigester struct { - alg string - crypto.ResumableHash -} - -var resumableHashAlgs = map[string]crypto.Hash{ - "sha256": crypto.SHA256, - "sha384": crypto.SHA384, - "sha512": crypto.SHA512, -} - -// NewResumableDigester creates a new ResumableDigester with the given hashing -// algorithm. 
-func NewResumableDigester(alg string) (ResumableDigester, error) { - hash, supported := resumableHashAlgs[alg] - if !supported { - return resumableDigester{}, fmt.Errorf("unsupported resumable hash algorithm: %s", alg) - } - - return resumableDigester{ - alg: alg, - ResumableHash: hash.New(), - }, nil -} - -// NewCanonicalResumableDigester creates a ResumableDigester using the default -// digest algorithm. -func NewCanonicalResumableDigester() ResumableDigester { - return resumableDigester{ - alg: "sha256", - ResumableHash: crypto.SHA256.New(), - } -} - -// Digest returns the current digest for this resumable digester. -func (d resumableDigester) Digest() Digest { - return NewDigest(d.alg, d.ResumableHash) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable_test.go new file mode 100644 index 000000000000..6ba21c801a8f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable_test.go @@ -0,0 +1,21 @@ +// +build !noresumabledigest + +package digest + +import ( + "testing" + + "github.com/stevvooe/resumable" + _ "github.com/stevvooe/resumable/sha256" +) + +// TestResumableDetection just ensures that the resumable capability of a hash +// is exposed through the digester type, which is just a hash plus a Digest +// method. +func TestResumableDetection(t *testing.T) { + d := Canonical.New() + + if _, ok := d.Hash().(resumable.Hash); !ok { + t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go new file mode 100644 index 000000000000..271d35dbf6de --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go @@ -0,0 +1,195 @@ +package digest + +import ( + "errors" + "sort" + "strings" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by a string +// representation of the digest as well as a short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality; +// rather, it tests whether the second value could match against the first +// value.
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. +func (dst *Set) Lookup(d string) (Digest, error) { + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg Algorithm + hex string + ) + dgst, err := ParseDigest(d) + if err == ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length argument sets the minimum length; the maximum length may be the +// entire digest value if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible while keeping them unique.
+func ShortCodeTable(dst *Set, length int) map[Digest]string { + m := make(map[Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg Algorithm + val string + digest Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/set_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/set_test.go new file mode 100644 index 000000000000..faeba6d32efa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/set_test.go @@ -0,0 +1,272 @@ +package digest + +import ( + "crypto/sha256" + "encoding/binary" + "math/rand" + "testing" +) + +func assertEqualDigests(t *testing.T, d1, d2 Digest) { + if d1 != d2 { + t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) + } +} + +func TestLookup(t *testing.T) { + digests := []Digest{ + "sha256:12345", + "sha256:1234", + "sha256:12346", + "sha256:54321", + "sha256:65431", + "sha256:64321", + "sha256:65421", + "sha256:65321", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup("54") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[3]) + + dgst, err = dset.Lookup("1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("9876") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 9876") + } + if err != ErrDigestNotFound { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:1234") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[1]) + + dgst, err = dset.Lookup("sha256:12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) + + dgst, err = dset.Lookup("sha256:12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) +} + +func TestAddDuplication(t *testing.T) { + digests := []Digest{ + "sha256:1234", + "sha256:12345", + "sha256:12346", + "sha256:54321", + "sha256:65431", + "sha512:65431", + "sha512:65421", + "sha512:65321", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + if len(dset.entries) != 8 { + t.Fatal("Invalid dset size") + } + + if err := dset.Add(Digest("sha256:12345")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 8 { + 
t.Fatal("Duplicate digest insert allowed") + } + + if err := dset.Add(Digest("sha384:12345")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 9 { + t.Fatal("Insert with different algorithm not allowed") + } +} + +func assertEqualShort(t *testing.T, actual, expected string) { + if actual != expected { + t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) + } +} + +func TestShortCodeTable(t *testing.T) { + digests := []Digest{ + "sha256:1234", + "sha256:12345", + "sha256:12346", + "sha256:54321", + "sha256:65431", + "sha256:64321", + "sha256:65421", + "sha256:65321", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dump := ShortCodeTable(dset, 2) + + if len(dump) < len(digests) { + t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) + } + + assertEqualShort(t, dump[digests[0]], "sha256:1234") + assertEqualShort(t, dump[digests[1]], "sha256:12345") + assertEqualShort(t, dump[digests[2]], "sha256:12346") + assertEqualShort(t, dump[digests[3]], "54") + assertEqualShort(t, dump[digests[4]], "6543") + assertEqualShort(t, dump[digests[5]], "64") + assertEqualShort(t, dump[digests[6]], "6542") + assertEqualShort(t, dump[digests[7]], "653") +} + +func createDigests(count int) ([]Digest, error) { + r := rand.New(rand.NewSource(25823)) + digests := make([]Digest, count) + for i := range digests { + h := sha256.New() + if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { + return nil, err + } + digests[i] = NewDigest("sha256", h) + } + return digests, nil +} + +func benchAddNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchLookupNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + shorts := make([]string, 0, n) + for _, short := range ShortCodeTable(dset, shortLen) { + shorts = append(shorts, short) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err = dset.Lookup(shorts[i%n]); err != nil { + b.Fatal(err) + } + } +} + +func benchShortCodeNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ShortCodeTable(dset, shortLen) + } +} + +func BenchmarkAdd10(b *testing.B) { + benchAddNTable(b, 10) +} + +func BenchmarkAdd100(b *testing.B) { + benchAddNTable(b, 100) +} + +func BenchmarkAdd1000(b *testing.B) { + benchAddNTable(b, 1000) +} + +func BenchmarkLookup10(b *testing.B) { + benchLookupNTable(b, 10, 12) +} + +func BenchmarkLookup100(b *testing.B) { + benchLookupNTable(b, 100, 12) +} + +func BenchmarkLookup1000(b *testing.B) { + benchLookupNTable(b, 1000, 12) +} + +func BenchmarkShortCode10(b *testing.B) { + benchShortCodeNTable(b, 10, 12) +} +func BenchmarkShortCode100(b *testing.B) { + benchShortCodeNTable(b, 100, 12) +} +func BenchmarkShortCode1000(b *testing.B) 
{ + benchShortCodeNTable(b, 1000, 12) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go index 11d9d7ae53f8..f8c75b53de4c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go @@ -1,8 +1,6 @@ package digest import ( - "crypto/sha256" - "crypto/sha512" "hash" "io" "io/ioutil" @@ -33,7 +31,7 @@ func NewDigestVerifier(d Digest) (Verifier, error) { switch alg { case "sha256", "sha384", "sha512": return hashVerifier{ - hash: newHash(alg), + hash: alg.Hash(), digest: d, }, nil default: @@ -95,19 +93,6 @@ func (lv *lengthVerifier) Verified() bool { return lv.expected == lv.len } -func newHash(name string) hash.Hash { - switch name { - case "sha256": - return sha256.New() - case "sha384": - return sha512.New384() - case "sha512": - return sha512.New() - default: - panic("unsupport algorithm: " + name) - } -} - type hashVerifier struct { digest Digest hash hash.Hash diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile index d4e60012cf87..a1b9a14af203 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile @@ -1,37 +1,23 @@ FROM docs/base:latest -MAINTAINER Mary (@moxiegirl) +MAINTAINER Mary Anthony (@moxiegirl) -# to get the git info for this repo -COPY . /src - -# Reset the /docs dir so we can replace the theme meta with the new repo's git info -RUN git reset --hard - -RUN grep "VERSION =" /src/version/version.go | sed 's/.*"\(.*\)".*/\1/' > /docs/VERSION +ENV PROJECT=registry +# To get the git info for this repo +COPY . /src -# -# RUN git describe --match 'v[0-9]*' --dirty='.m' --always > /docs/VERSION -# The above line causes a floating point error in our tools -# -COPY docs/* /docs/sources/registry/ -COPY docs/images/* /docs/sources/registry/images/ -COPY docs/spec/* /docs/sources/registry/spec/ -COPY docs/spec/auth/* /docs/sources/registry/spec/auth/ -COPY docs/storage-drivers/* /docs/sources/registry/storage-drivers/ -COPY docs/mkdocs.yml /docs/mkdocs-distribution.yml +COPY . 
/docs/content/$PROJECT/ -RUN sed -i.old '1s;^;no_version_dropdown: true;' \ - /docs/sources/registry/*.md \ - /docs/sources/registry/spec/*.md \ - /docs/sources/registry/spec/auth/*.md \ - /docs/sources/registry/storage-drivers/*.md +# Processing GitHub Markdown +# 1 Remove +# 2 Remove any leading combination of dots and slashes (./, ../, ../../, etc) +# 3 Prepend /$PROJECT to all links, remove any trailing .md, only for non http:// links and non internal anchors -RUN sed -i.old -e '/^/g'\ - /docs/sources/registry/*.md \ - /docs/sources/registry/spec/*.md \ - /docs/sources/registry/spec/auth/*.md \ - /docs/sources/registry/storage-drivers/*.md +RUN find /docs/content/$PROJECT -type f -name "*.md" -not -name "*.compare.md" -exec sed -i.old \ + -e '/^/g' \ + -e 's/\(\][(]\)\(\.*\/\)*/\1/g' \ + -e 's/\(\][(]\)\([A-Za-z0-9_/-]\{1,\}\)\(\.md\)\{0,1\}\(#\{0,1\}\(#[A-Za-z0-9_-]*\)\{0,1\}\)[)]/\1\/'$PROJECT'\/\2\4)/g' \ + {} \; -# Then build everything together, ready for mkdocs -RUN /docs/build.sh +# Prepare the compare file and expect an empty diff against test.md +RUN sed -i.old -e 's/\/placeholder\//\/'$PROJECT'\//g' /docs/content/$PROJECT/test.compare.md && diff -u /docs/content/$PROJECT/test.md /docs/content/$PROJECT/test.compare.md diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile new file mode 100644 index 000000000000..021e8f6e5eaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile @@ -0,0 +1,55 @@ +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate + +# env vars passed through directly to Docker's build scripts +# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily +# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILDFLAGS \ + -e DOCKER_CLIENTONLY \ + -e DOCKER_EXECDRIVER \ + -e DOCKER_GRAPHDRIVER \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) +DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) + +# to allow `make DOCSPORT=9000 docs` +DOCSPORT := 8000 + +# Get the IP ADDRESS +DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") +HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") +HUGO_BIND_IP=0.0.0.0 + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) + + +DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE + +# for some docs workarounds (see below in "docs-build" target) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) + +default: docs + +docs: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + +docs-draft: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server 
--buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) + + +docs-shell: docs-build + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash + + +docs-build: +# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files +# echo "$(GIT_BRANCH)" > GIT_BRANCH +# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET +# echo "$(GITCOMMIT)" > GITCOMMIT + docker build -t "$(DOCKER_DOCS_IMAGE)" . diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md index 09dce42fbc57..558a1199ea93 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md @@ -1,3 +1,9 @@ + + # Architecture ## Design diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md index dfda07e3bd61..18c42b923d4a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md @@ -1,17 +1,34 @@ -page_title: Build the development environment -page_description: Explains how to build the distribution project -page_keywords: registry, service, images, repository + # Build the development environment -If a go development environment is setup, one can use `go get` to install the +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the `registry` command from the current latest: ```sh go get github.com/docker/distribution/cmd/registry ``` -The above will install the source repository into the `GOPATH`. The `registry` +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + +```sh +mkdir -p /var/lib/registry +``` + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` binary can then be run with the following: ``` @@ -19,12 +36,17 @@ $ $GOPATH/bin/registry -version $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown ``` +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. 
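+
+As a sketch, a manual checkout into the conventional location would look like
+the following (the clone URL here simply mirrors the import path used
+throughout this document; substitute whichever remote you actually use):
+
+```sh
+mkdir -p $GOPATH/src/github.com/docker
+git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution
+```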
+ The registry can be run with the default config using the following -incantantation: +incantation: ``` -$ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config.yml -INFO[0000] endpoint local-8082 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown +$ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config-dev.yml +INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown INFO[0000] debug server listening localhost:5001 @@ -77,7 +99,6 @@ ok github.com/docker/distribution/registry/auth/silly 0.011s ... + /Users/sday/go/src/github.com/docker/distribution/bin/registry + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template -+ /Users/sday/go/src/github.com/docker/distribution/bin/dist + binaries ``` @@ -121,3 +142,15 @@ $ make If that is successful, standard `go` commands, such as `go test` should work, per package, without issue. + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. + +To enable the [Ceph RADOS storage driver](storage-drivers/rados.md) +(librados-dev and librbd-dev will be required to build the bindings): + +```sh +export DOCKER_BUILDTAGS='include_rados' +``` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md index e6464bb4f6d0..eb21b06c917c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md @@ -1,166 +1,36 @@ - - + # Registry Configuration Reference -You configure a registry server using a YAML file. This page explains the -configuration options and the values they can take. You'll also find examples of -middleware and development environment configurations. - -## Overriding configuration options -Environment variables may be used to override configuration parameters other than -version. To override a configuration option, create an environment variable named -REGISTRY\_variable_ where *variable* is the name of the configuration option. - -e.g -``` -REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/registry/test -``` - -will set the storage root directory to `/tmp/registry/test` - -## List of configuration options +The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production. -This section lists all the registry configuration options. Some options in -the list are mutually exclusive. So, make sure to read the detailed reference -information about each option that appears later in this page. 
+## Override specific configuration options -```yaml -version: 0.1 -log: - level: debug - formatter: text - fields: - service: registry - environment: staging -loglevel: debug # deprecated: use "log" -storage: - filesystem: - rootdirectory: /tmp/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - bucket: bucketname - encrypt: true - secure: true - v4auth: true - chunksize: 5242880 - rootdirectory: /s3/object/name/prefix - cache: - layerinfo: inmemory - maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false -auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle -middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000 -reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true -http: - addr: localhost:5000 - prefix: /my/nested/registry/ - secret: asecretforlocaldevelopment - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - debug: - addr: localhost:5001 -notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 500 - threshold: 5 - backoff: 1000 -redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s -``` - -In some instances a configuration option is **optional** but it contains child -options marked as **required**. This indicates that you can omit the parent with -all its children. However, if the parent is included, you must also include all -the children marked **required**. - -## Override configuration options - -You can use environment variables to override most configuration parameters. The -exception is the `version` variable which cannot be overridden. You can set -environment variables on the command line using the `-e` flag on `docker run` or -from within a Dockerfile using the `ENV` instruction. +In a typical setup where you run your Registry from the official image, you can specify a configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction. To override a configuration option, create an environment variable named -`REGISTRY\variable_` where *`variable`* is the name of the configuration option +`REGISTRY_variable` where *`variable`* is the name of the configuration option and the `_` (underscore) represents indention levels. 
For example, you can configure the `rootdirectory` of the `filesystem` storage backend: -``` -storage: - filesystem: - rootdirectory: /tmp/registry -``` + storage: + filesystem: + rootdirectory: /var/lib/registry To override this value, set an environment variable like this: -``` -REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/registry/test -``` + REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere -This variable overrides the `/tmp/registry` value to the `/tmp/registry/test` +This variable overrides the `/var/lib/registry` value to the `/somewhere` directory. >**Note**: If an environment variable changes a map value into a string, such @@ -169,16 +39,173 @@ directory. >environment will remove all parameters related to the old storage >configuration. +## Overriding the entire configuration file + +If the default configuration is not a sound basis for your usage, or if you are having issues overriding keys from the environment, you can specify an alternate YAML configuration file by mounting it as a volume in the container. + +Typically, create a new configuration file from scratch, and call it `config.yml`, then: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/config.yml:/etc/docker/registry/config.yml \ + registry:2 + +You can (and probably should) use [this as a starting point](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). + +## List of configuration options + +This section lists all the registry configuration options. Some options in +the list are mutually exclusive. So, make sure to read the detailed reference +information about each option that appears later in this page. + + version: 0.1 + log: + level: debug + formatter: text + fields: + service: registry + environment: staging + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com + loglevel: debug # deprecated: use "log" + storage: + filesystem: + rootdirectory: /var/lib/registry + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + bucket: bucketname + encrypt: true + secure: true + v4auth: true + chunksize: 5242880 + rootdirectory: /s3/object/name/prefix + rados: + poolname: radospool + username: radosuser + chunksize: 4194304 + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + redirect: + disable: false + cache: + blobdescriptor: redis + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd + middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: 
https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000 + reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true + http: + addr: localhost:5000 + prefix: /my/nested/registry/ + secret: asecretforlocaldevelopment + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + debug: + addr: localhost:5001 + notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + +In some instances a configuration option is **optional** but it contains child +options marked as **required**. This indicates that you can omit the parent with +all its children. However, if the parent is included, you must also include all +the children marked **required**. + + -## version +## version -```yaml -version: 0.1 -``` + version: 0.1 The `version` option is **required**. It specifies the configuration's version. It is expected to remain a top-level field, to allow for a consistent version -check before parsing the remainder of the configuration file. +check before parsing the remainder of the configuration file. ## log @@ -186,14 +213,12 @@ The `log` subsection configures the behavior of the logging system. The logging system outputs everything to stdout. You can adjust the granularity and format with this configuration section. -```yaml -log: - level: debug - formatter: text - fields: - service: registry - environment: staging -``` + log: + level: debug + formatter: text + fields: + service: registry + environment: staging @@ -241,61 +266,117 @@ log:
+## hooks
+
+    hooks:
+      - type: mail
+        levels:
+          - panic
+        options:
+          smtp:
+            addr: smtp.sendhost.com:25
+            username: sendername
+            password: password
+            insecure: true
+            from: name@sendhost.com
+            to:
+              - name@receivehost.com
+
+The `hooks` subsection configures the logging hooks' behavior. This subsection
+includes a sequence handler which you can use for sending mail, for example.
+Refer to `loglevel` to configure the level of messages printed.

 ## loglevel

 > **DEPRECATED:** Please use [log](#log) instead.

-```yaml
-loglevel: debug
-```
+    loglevel: debug

 Permitted values are `error`, `warn`, `info` and `debug`. The
 default is `info`.

 ## storage

-```yaml
-storage:
-  filesystem:
-    rootdirectory: /tmp/registry
-  azure:
-    accountname: accountname
-    accountkey: base64encodedaccountkey
-    container: containername
-  s3:
-    accesskey: awsaccesskey
-    secretkey: awssecretkey
-    region: us-west-1
-    bucket: bucketname
-    encrypt: true
-    secure: true
-    v4auth: true
-    chunksize: 5242880
-    rootdirectory: /s3/object/name/prefix
-  cache:
-    layerinfo: inmemory
-  maintenance:
-    uploadpurging:
-      enabled: true
-      age: 168h
-      interval: 24h
-      dryrun: false
-```
+    storage:
+      filesystem:
+        rootdirectory: /var/lib/registry
+      azure:
+        accountname: accountname
+        accountkey: base64encodedaccountkey
+        container: containername
+      s3:
+        accesskey: awsaccesskey
+        secretkey: awssecretkey
+        region: us-west-1
+        bucket: bucketname
+        encrypt: true
+        secure: true
+        v4auth: true
+        chunksize: 5242880
+        rootdirectory: /s3/object/name/prefix
+      rados:
+        poolname: radospool
+        username: radosuser
+        chunksize: 4194304
+      swift:
+        username: username
+        password: password
+        authurl: https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
+        tenant: tenantname
+        tenantid: tenantid
+        domain: domain name for Openstack Identity v3 API
+        domainid: domain id for Openstack Identity v3 API
+        insecureskipverify: true
+        region: fr
+        container: containername
+        rootdirectory: /swift/object/name/prefix
+      cache:
+        blobdescriptor: inmemory
+      maintenance:
+        uploadpurging:
+          enabled: true
+          age: 168h
+          interval: 24h
+          dryrun: false
+      redirect:
+        disable: false

 The storage option is **required** and defines which storage backend is in use.
 You must configure one backend; if you configure more, the registry returns an error.

+If you are deploying a registry on Windows, be aware that a Windows volume mounted from the host is not recommended. Instead, you can use an S3 or Azure backing data-store. If you do use a Windows volume, you must ensure that the `PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255 characters). Failure to do so can result in the following error message:
+
+    mkdir /XXX protocol error and your registry will not function properly.
+
 ### cache

 Use the `cache` subsection to enable caching of data accessed in the storage
 backend. Currently, the only available cache provides fast access to layer
-metadata. This, if configured, uses the `layerinfo` field.
+metadata. This, if configured, uses the `blobdescriptor` field.

-You can set `layerinfo` field to `redis` or `inmemory`. The `redis` value uses
+You can set the `blobdescriptor` field to `redis` or `inmemory`. The `redis` value uses
 a Redis pool to cache layer metadata. The `inmemory` value uses an in memory
 map.

+>**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these
+>are equivalent, `layerinfo` has been deprecated, in favor of
+>`blobdescriptor`.
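+
+As a minimal sketch, enabling the in-memory blob descriptor cache on top of the
+filesystem backend would look like this:
+
+    storage:
+      filesystem:
+        rootdirectory: /var/lib/registry
+      cache:
+        blobdescriptor: inmemory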
+ +### redirect + +The `redirect` subsection provides configuration for managing redirects from +content backends. For backends that support it, redirecting is enabled by +default. Certain deployment scenarios may prefer to route all data through the +Registry, rather than redirecting to the backend. This may be more efficient +when using a backend that is not colocated or when a registry instance is +doing aggressive caching. + +Redirects can be disabled by adding a single flag `disable`, set to `true` +under the `redirect` section: + + redirect: + disable: true + ### filesystem The `filesystem` storage backend uses the local disk to store registry files. It @@ -308,7 +389,7 @@ here so make sure there is adequate space available. ### azure -This storage backend uses Microsoft's Azure Storage platform. +This storage backend uses Microsoft's Azure Blob Storage. @@ -327,7 +408,7 @@ This storage backend uses Microsoft's Azure Storage platform. Azure account name. - + @@ -338,7 +419,7 @@ This storage backend uses Microsoft's Azure Storage platform. Azure account key. - + @@ -348,10 +429,68 @@ This storage backend uses Microsoft's Azure Storage platform. - + + + + + + +
accountkey
container Name of the Azure container into which to store data.
+ realm + + no + + Domain name suffix for the Storage Service API endpoint. By default, this + is core.windows.net. +
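+
+For example, a minimal Azure configuration might look like the following (the
+account name, key and container values are placeholders):
+
+    storage:
+      azure:
+        accountname: accountname
+        accountkey: base64encodedaccountkey
+        container: containername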
+### rados + +This storage backend uses [Ceph Object Storage](http://ceph.com/docs/master/rados/). + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ poolname + + yes + + Ceph pool name. +
+ username + + no + + Ceph cluster user to connect as (i.e. admin, not client.admin). +
+ chunksize + + no + + Size of the written RADOS objects. Default value is 4MB (4194304). +
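+
+Putting these parameters together, a RADOS configuration sketch (the pool and
+user names are placeholders):
+
+    storage:
+      rados:
+        poolname: radospool
+        username: radosuser
+        chunksize: 4194304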
+ ### S3 @@ -467,7 +606,7 @@ This storage backend uses Amazon's Simple Storage Service (S3). This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary. - + ### Maintenance @@ -478,7 +617,7 @@ maintenance functions which are related to storage can be configured under the m ### Upload Purging Upload purging is a background process that periodically removes orphaned files from the upload -directories of the registry. Upload purging is enabled by default. To +directories of the registry. Upload purging is enabled by default. To configure upload directory purging, the following parameters must be set. @@ -487,28 +626,173 @@ must be set. --------- | -------- | ----------- `enabled` | yes | Set to true to enable upload purging. Default=true. | `age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week) -`interval` | yes | The interval between upload directory purging. Default=24h. +`interval` | yes | The interval between upload directory purging. Default=24h. `dryrun` | yes | dryrun can be set to true to obtain a summary of what directories will be deleted. Default=false. -Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week). +Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week). + +### Openstack Swift + +This storage backend uses Openstack Swift object storage. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ authurl + + yes + + URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth +
+ username + + yes + + Your Openstack user name. +
+ password + + yes + + Your Openstack password. +
+ region + + no + + The Openstack region in which your container exists. +
+ container + + yes + + The container name in which you want to store the registry's data. +
+ tenant + + no + + Your Openstack tenant name. +
+ tenantid + + no + + Your Openstack tenant id. +
+ domain + + no + + Your Openstack domain name for Identity v3 API. +
+ domainid + + no + + Your Openstack domain id for Identity v3 API. +
+ insecureskipverify + + no + + true to skip TLS verification, false by default. +
+ chunksize + + no + + Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). +
+ rootdirectory + + no + + This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. +
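+
+For instance, a minimal Swift configuration against an Identity v2 endpoint
+might look like this (all values are placeholders):
+
+    storage:
+      swift:
+        authurl: https://storage.myprovider.com/v2.0
+        username: username
+        password: password
+        tenant: tenantname
+        container: containername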
+ ## auth -```yaml -auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle -``` - -The `auth` option is **optional** as there are use cases (i.e. a mirror that -only permits pulls) for which authentication may not be desired. There are -currently 2 possible auth providers, `silly` and `token`. You can configure only + auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd + +The `auth` option is **optional**. There are +currently 3 possible auth providers, `silly`, `token` and `htpasswd`. You can configure only one `auth` provider. ### silly @@ -517,7 +801,7 @@ The `silly` auth is only for development purposes. It simply checks for the existence of the `Authorization` header in the HTTP request. It has no regard for the header's value. If the header does not exist, the `silly` auth responds with a challenge response, echoing back the realm, service, and scope that access was -denied for. +denied for. The following values are used to configure the response: @@ -557,7 +841,7 @@ The following values are used to configure the response: Token based authentication allows the authentication system to be decoupled from the registry. It is a well established authentication paradigm with a high -degree of security. +degree of security. @@ -604,16 +888,59 @@ the token so it must match the value configured for the issuer. rootcertbundle -
- yes + yes The absolute path to the root certificate bundle. This bundle contains the public part of the certificates that is used to sign authentication tokens.
+
+
+For more information about Token based authentication configuration, see the [specification](spec/auth/token.md).
+
+### htpasswd
+
+The _htpasswd_ authentication backend allows one to configure basic auth using an
+[Apache HTPasswd File](https://httpd.apache.org/docs/2.4/programs/htpasswd.html).
+Only [`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are
+supported. Entries with other hash types will be ignored. The htpasswd file is
+loaded once, at startup. If the file is invalid, the registry will display an
+error and will not start.
+
+> __WARNING:__ This authentication scheme should only be used with TLS
+> configured, since basic authentication sends passwords as part of the http
+> header.

-For more information about Token based authentication configuration, see the [specification.]

+
+
+
+
+
+
+
+
ParameterRequiredDescription
+ realm + + yes + + The realm in which the registry server authenticates. +
+ path + + yes + + Path to htpasswd file to load at startup. +
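+
+For example, the registry could be pointed at a bcrypt htpasswd file like this
+(the path is a placeholder):
+
+    auth:
+      htpasswd:
+        realm: basic-realm
+        path: /path/to/htpasswd
+
+A suitable file can be generated with Apache's `htpasswd` tool in bcrypt mode,
+for example `htpasswd -Bbn testuser testpassword > /path/to/htpasswd`.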
## middleware @@ -621,30 +948,28 @@ The `middleware` option is **optional**. Use this option to inject middleware at named hook points. All middlewares must implement the same interface as the object they're wrapping. This means a registry middleware must implement the `distribution.Namespace` interface, repository middleware must implement -`distribution.Respository`, and storage middleware must implement +`distribution.Repository`, and storage middleware must implement `driver.StorageDriver`. Currently only one middleware, `cloudfront`, a storage middleware, is supported -in the registry implementation. - -```yaml -middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000 -``` +in the registry implementation. + + middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000 Each middleware entry has `name` and `options` entries. The `name` must correspond to the name under which the middleware registers itself. The @@ -711,17 +1036,15 @@ interpretation of the options. ## reporting -```yaml -reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true -``` + reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true The `reporting` option is **optional** and configures error and metrics reporting tools. At the moment only two services are supported, [New @@ -756,10 +1079,10 @@ configuration may contain both. Tracks where the registry is deployed, for example, - production,staging, or - development. + production,staging, or + development. - + endpoint @@ -768,9 +1091,9 @@ configuration may contain both. no - Specify the enterprise Bugsnag endpoint. + Specify the enterprise Bugsnag endpoint. - + @@ -803,7 +1126,7 @@ configuration may contain both. New Relic application name. - + verbose @@ -814,25 +1137,24 @@ configuration may contain both. Enable New Relic debugging output on stdout. - + ## http -```yaml -http: - addr: localhost:5000 - prefix: /my/nested/registry/ - secret: asecretforlocaldevelopment - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - debug: - addr: localhost:5001 -``` + http: + addr: localhost:5000 + net: tcp + prefix: /my/nested/registry/ + secret: asecretforlocaldevelopment + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + debug: + addr: localhost:5001 The `http` option details the configuration for the HTTP server that hosts the registry. @@ -850,7 +1172,20 @@ The `http` option details the configuration for the HTTP server that hosts the r yes - The HOST:PORT for which the server should accept connections. + The address for which the server should accept connections. 
The form depends on the network type (see net option):
+        HOST:PORT for tcp and FILE for a unix socket.
+
+
+
+        net
+
+
+        no
+
+
+        The network which is used to create a listening socket. Known networks are unix and tcp.
+        The default empty value means tcp.
+
@@ -876,7 +1211,12 @@ should have both preceding and trailing slashes, for example /path/

 A random piece of data. This is used to sign state that may be stored with the
 client to protect against tampering. For production environments you should generate a
-random piece of data using a cryptographically secure random generator.
+random piece of data using a cryptographically secure random generator. This
+configuration parameter may be omitted, in which case the registry will automatically
+generate a secret at launch.
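+
+As a sketch, one way to supply a suitably random secret through the environment
+override mechanism described earlier (one option among many):
+
+    export REGISTRY_HTTP_SECRET=$(head -c 32 /dev/urandom | base64)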

+WARNING: If you are building a cluster of registries behind a load balancer, you MUST +ensure the secret is the same for all registries. @@ -927,16 +1267,17 @@ and proxy connections to the registry server. An array of absolute paths to a x509 CA file - + ### debug -The `debug` option is **optional** . Use it to configure a debug server that can -be helpful in diagnosing problems. Contributors to the distribution repository -should find the debug server useful. Docker recommends disabling it in -production environments. +The `debug` option is **optional** . Use it to configure a debug server that +can be helpful in diagnosing problems. The debug endpoint can be used for +monitoring registry metrics and health, as well as profiling. Sensitive +information may be available via the debug endpoint. Please be certain that +access to the debug endpoint is locked down in a production environment. The `debug` section takes a single, required `addr` parameter. This parameter specifies the `HOST:PORT` on which the debug server should accept connections. @@ -944,17 +1285,15 @@ specifies the `HOST:PORT` on which the debug server should accept connections. ## notifications -```yaml -notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 500 - threshold: 5 - backoff: 1000 -``` + notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 The notifications option is **optional** and currently may contain a single option, `endpoints`. @@ -977,7 +1316,7 @@ Endpoints is a list of named services (URLs) that can accept event notifications yes -A human readable name for the service. +A human readable name for the service. @@ -996,12 +1335,12 @@ A boolean to enable/disable notifications for a service. url - yes + yes The URL to which events should be published. - + headers @@ -1012,7 +1351,7 @@ The URL to which events should be published. Static headers to add to each request. - + timeout @@ -1024,16 +1363,16 @@ The URL to which events should be published. An HTTP timeout value. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are:

    -
  • ns (nanoseconds)
  • -
  • us (microseconds)
  • -
  • ms (milliseconds)
  • -
  • s (seconds)
  • -
  • m (minutes)
  • +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • h (hours)
If you omit the suffix, the system interprets the value as nanoseconds. - + threshold @@ -1044,7 +1383,7 @@ The URL to which events should be published. An integer specifying how long to wait before backing off a failure. - + backoff @@ -1057,34 +1396,32 @@ The URL to which events should be published. integer and an optional suffix indicating the unit of time. Possible units are:
    -
  • ns (nanoseconds)
  • -
  • us (microseconds)
  • -
  • ms (milliseconds)
  • -
  • s (seconds)
  • -
  • m (minutes)
  • +
  • ns (nanoseconds)
  • +
  • us (microseconds)
  • +
  • ms (milliseconds)
  • +
  • s (seconds)
  • +
  • m (minutes)
  • h (hours)
If you omit the suffix, the system interprets the value as nanoseconds. - + ## redis -```yaml -redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s -``` + redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s Declare parameters for constructing the redis connections. Registry instances may use the Redis instance for several applications. The current purpose is @@ -1141,7 +1478,7 @@ with the [pool](#pool) subsection. Timeout for connecting to a redis instance. - + readtimeout @@ -1152,7 +1489,7 @@ with the [pool](#pool) subsection. Timeout for reading from redis connections. - + writetimeout @@ -1163,18 +1500,16 @@ with the [pool](#pool) subsection. Timeout for writing to redis connections. - + ### pool -```yaml -pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s -``` + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s Configure the behavior of the Redis connection pool. @@ -1218,7 +1553,7 @@ Configure the behavior of the Redis connection pool. sets the amount time to wait before closing inactive connections. - + @@ -1226,27 +1561,25 @@ Configure the behavior of the Redis connection pool. The following is a simple example you can use for local development: -```yaml -version: 0.1 -log: - level: debug -storage: - filesystem: - rootdirectory: /tmp/registry-dev -http: - addr: localhost:5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 -``` + version: 0.1 + log: + level: debug + storage: + filesystem: + rootdirectory: /var/lib/registry + http: + addr: localhost:5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 The above configures the registry instance to run on port `5000`, binding to -`localhost`, with the `debug` server enabled. Registry data storage is in the -`/tmp/registry-dev` directory. Logging is in `debug` mode, which is the most +`localhost`, with the `debug` server enabled. Registry data storage is in the +`/var/lib/registry` directory. Logging is in `debug` mode, which is the most verbose. A similar simple configuration is available at -[config.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config.yml). +[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). Both are generally useful for local development. @@ -1254,7 +1587,7 @@ Both are generally useful for local development. This example illustrates how to configure storage middleware in a registry. Middleware allows the registry to serve layers via a content delivery network -(CDN). This is useful for reducing requests to the storage layer. +(CDN). This is useful for reducing requests to the storage layer. Currently, the registry supports [Amazon Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in @@ -1275,34 +1608,31 @@ conjunction with the S3 storage driver. options: - + A set of key/value options to configure the middleware.
  • baseurl: The Cloudfront base URL.
  • privatekey: The location of your AWS private key on the filesystem.
  • keypairid: The ID of your Cloudfront keypair.
  • -
  • duration: The duration in minutes for which the URL is valid. Default is 20.
  • -
+
  • duration: The duration in minutes for which the URL is valid. Default is 20.
  • + The following example illustrates these values: -``` -middleware: - storage: - - name: cloudfront - disabled: false - options: - baseurl: http://d111111abcdef8.cloudfront.net - privatekey: /path/to/asecret.pem - keypairid: asecret - duration: 60 -``` + middleware: + storage: + - name: cloudfront + disabled: false + options: + baseurl: http://d111111abcdef8.cloudfront.net + privatekey: /path/to/asecret.pem + keypairid: asecret + duration: 60 >**Note**: Cloudfront keys exist separately to other AWS keys. See ->[the documentation on AWS credentials](http://docs.aws.amazon.com/AWSSecurityCredentials/1.0/AboutAWSCredentials.html#KeyPairs) +>[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) >for more information. - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md index 10bf6b8112f7..a6a8e3869695 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md @@ -1,571 +1,180 @@ - - + # Deploying a registry server -This section explains how to deploy a Docker Registry either privately -for your own company or publicly for other users. For example, your company may -require a private registry to support your continuous integration (CI) system as -it builds new releases or test servers. Alternatively, your company may have a -large number of products or services with images you wish to serve in a branded -manner. - -Docker's public registry maintains a default `registry` image to assist you in the -deployment process. This registry image is sufficient for running local tests -but is insufficient for production. For production you should configure and -build your own custom registry image from the `docker/distribution` code. - ->**Note**: The examples on this page were written and tested using Ubuntu 14.04. ->If you are running Docker in a different OS, you may need to "translate" ->the commands to meet the requirements of your own environment. - - -## Simple example with the official image - -In this section, you create a container running Docker's official registry -image. You push an image to, and then pull the same image from, this registry. -This a good exercise for understanding the basic interactions a client has with -a local registry. - -1. Install Docker. - -2. Run the `hello-world` image from the Docker public registry. - - $ docker run hello-world - - The `run` command automatically pulls a `hello-world` image from Docker's - official images. - -3. Start a registry on your localhost. - - $ docker run -p 5000:5000 registry:2.0 - - This starts a registry on your `DOCKER_HOST` running on port `5000`. - -3. List your images. - - $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - registry 2.0 bbf0b6ffe923 3 days ago 545.1 MB - golang 1.4 121a93c90463 5 days ago 514.9 MB - hello-world latest e45a5af57b00 3 months ago 910 B - - Your list should include a `hello-world` image from the earlier run. - -4. Retag the `hello-world` image for your local repoistory. - - $ docker tag hello-world:latest localhost:5000/hello-mine:latest - - The command labels a `hello-world:latest` using a new tag in the - `[REGISTRYHOST/]NAME[:TAG]` format. The `REGISTRYHOST` is this case is - `localhost`. In a Mac OSX environment, you'd substitute `$(boot2docker - ip):5000` for the `localhost`. - -5. List your new image. 
- - $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - registry 2.0 bbf0b6ffe923 3 days ago 545.1 MB - golang 1.4 121a93c90463 5 days ago 514.9 MB - hello-world latest e45a5af57b00 3 months ago 910 B - localhost:5000/hello-mine latest ef5a5gf57b01 3 months ago 910 B - - You should see your new image in your listing. - -6. Push this new image to your local registry. - - $ docker push localhost:5000/hello-mine:latest - The push refers to a repository [localhost:5000/hello-mine] (len: 1) - e45a5af57b00: Image already exists - 31cbccb51277: Image successfully pushed - 511136ea3c5a: Image already exists - Digest: sha256:a1b13bc01783882434593119198938b9b9ef2bd32a0a246f16ac99b01383ef7a - -7. Use the `curl` command and the Docker Registry API v2 to list your - image in the registry: - - $ curl -v -X GET http://localhost:5000/v2/hello-mine/tags/list - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 5000 (#0) - > GET /v2/hello-mine/tags/list HTTP/1.1 - > User-Agent: curl/7.35.0 - > Host: localhost:5000 - > Accept: */* - > - < HTTP/1.1 200 OK - < Content-Type: application/json; charset=utf-8 - < Docker-Distribution-Api-Version: registry/2.0 - < Date: Sun, 12 Apr 2015 01:29:47 GMT - < Content-Length: 40 - < - {"name":"hello-mine","tags":["latest"]} - * Connection #0 to host localhost left intact - - You can also get this information by entering the - `http://localhost:5000/v2/hello-mine/tags/list` address in your browser. - -8. Remove all the unused images from your local environment: - - $ docker rmi -f $(docker images -q -a ) - - This command is for illustrative purposes; removing the image forces any `run` - to pull from a registry rather than a local cache. If you run `docker images` - after this you should not see any instance of `hello-world` or `hello-mine` in - your images list. - - $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - registry 2.0 bbf0b6ffe923 3 days ago 545.1 MB - golang 1.4 121a93c90463 5 days ago 514.9 MB - -9. Try running `hello-mine`. - - $ docker run hello-mine - Unable to find image 'hello-mine:latest' locally - Pulling repository hello-mine - FATA[0001] Error: image library/hello-mine:latest not found - - The `run` command fails because your new image doesn't exist in the Docker public - registry. - -10. Now, try running the image but specifying the image's registry: - - $ docker run localhost:5000/hello-mine - - If you run `docker images` after this you'll fine a `hello-mine` instance. - -### Making Docker's official registry image production ready - -Docker's official image is for simple tests or debugging. Its configuration is -unsuitable for most production instances. For example, any client with access to -the server's IP can push and pull images to it. See the next section for -information on making this image production ready. - -## Understand production deployment - -When deploying a registry for a production deployment you should consider these -factors: - - - - - - - - - - - - - - - - - - -
    - backend storage - - Where should you store the images? -
    - access and/or authentication - - Should users have full or controlled access? This can depend on whether - you are serving images to the public or internally to your company only. -
    - debugging - - When problems or issues arise, do you have the means of solving them. Logs - are useful as is reporting to see trends. -
    - caching - - Quickly retrieving images can be crucial if you are relying on images for - tests, builds, or other automated systems. -
    - -You can configure your registry features to adjust for these factors. You do -this by specifying options on the command line or, more typically, by writing a -registry configuration file. The configuration file is in YAML format. - -Docker's official repository image is preconfigured using the following -configuration file: - -```yaml -version: 0.1 -log: - level: debug - fields: - service: registry - environment: development -storage: - cache: - layerinfo: inmemory - filesystem: - rootdirectory: /tmp/registry-dev - maintenance: - uploadpurging: - enabled: false -http: - addr: :5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 -redis: - addr: localhost:6379 - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms -notifications: - endpoints: - - name: local-8082 - url: http://localhost:5003/callback - headers: - Authorization: [Bearer ] - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - - name: local-8083 - url: http://localhost:8083/callback - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true +You need to [install Docker version 1.6.0 or newer](https://docs.docker.com/installation/). + +## Running on localhost + +Start your registry: + + docker run -d -p 5000:5000 --restart=always --name registry registry:2 + +You can now use it with docker. + +Get any image from the hub and tag it to point to your registry: + + docker pull ubuntu && docker tag ubuntu localhost:5000/ubuntu + +... then push it to your registry: + + docker push localhost:5000/ubuntu + +... then pull it back from your registry: + + docker pull localhost:5000/ubuntu + +To stop your registry, you would: + + docker stop registry && docker rm -v registry + +## Storage + +By default, your registry data is persisted as a [docker volume](https://docs.docker.com/userguide/dockervolumes/) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage. + +Specifically, you might want to point your volume location to a specific place in order to more easily access your registry data. To do so you can: + + docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/data:/var/lib/registry \ + registry:2 + +### Alternatives + +You should usually consider using [another storage backend](https://github.com/docker/distribution/blob/master/docs/storagedrivers.md) instead of the local filesystem. Use the [storage configuration options](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) to configure an alternate storage backend. + +Using one of these will allow you to more easily scale your registry, and leverage your storage redundancy and availability features. + +## Running a domain registry + +While running on `localhost` has its uses, most people want their registry to be more widely available. To do so, the Docker engine requires you to secure it using TLS, which is conceptually very similar to configuring your web server with SSL. + +### Get a certificate + +Assuming that you own the domain `myregistrydomain.com`, and that its DNS record points to the host where you are running your registry, you first need to get a certificate from a CA. + +Move and/or rename your crt file to: `certs/domain.crt` - and your key file to: `certs/domain.key`. 
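+
+As a sketch, assuming your CA delivered the files `myregistrydomain.com.crt`
+and `myregistrydomain.com.key` (hypothetical names):
+
+    mkdir -p certs
+    cp myregistrydomain.com.crt certs/domain.crt
+    cp myregistrydomain.com.key certs/domain.key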
+
+Make sure you stopped your registry from the previous steps, then start your registry again with TLS enabled:
+
+    docker run -d -p 5000:5000 --restart=always --name registry \
+      -v `pwd`/certs:/certs \
+      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
+      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
+      registry:2
+
+You should now be able to access your registry from another docker host:
+
+    docker pull ubuntu
+    docker tag ubuntu myregistrydomain.com:5000/ubuntu
+    docker push myregistrydomain.com:5000/ubuntu
+    docker pull myregistrydomain.com:5000/ubuntu
+
+#### Gotcha
+
+A certificate issuer may supply you with an *intermediate* certificate. In this case, you must combine your certificate with the intermediate's to form a *certificate bundle*. You can do this using the `cat` command:
+
+    cat server.crt intermediate-certificates.pem > certs/domain.crt
+
+### Alternatives
+
+While rarely advisable, you may want to use self-signed certificates instead, or use your registry in an insecure fashion. You will find instructions [here](insecure.md).
+
+## Restricting access
+
+Except for registries running on secure local networks, registries should always implement access restrictions.
+
+### Native basic auth
+
+The simplest way to achieve access restriction is through basic authentication (this is very similar to other web servers' basic authentication mechanism).
+
+:warning: You **cannot** use authentication with an insecure registry. You have to [configure TLS first](#running-a-domain-registry) for this to work.
+
+First create a password file with one entry for the user "testuser", with password "testpassword":
+
+    mkdir auth
+    docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd
+
+Make sure you stopped your registry from the previous step, then start it again:
+
+    docker run -d -p 5000:5000 --restart=always --name registry \
+      -v `pwd`/auth:/auth \
+      -e "REGISTRY_AUTH=htpasswd" \
+      -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
+      -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
+      -v `pwd`/certs:/certs \
+      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
+      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
+      registry:2
+
+You should now be able to:
+
+    docker login myregistrydomain.com:5000
+
+And then push and pull images as an authenticated user.
+
+### Alternatives
+
+1. You may want to leverage more advanced basic auth implementations through a proxy design in front of the registry. You will find an example of such a design in the [nginx proxy documentation](nginx.md).
+
+2. Alternatively, the Registry also supports delegated authentication, redirecting users to a specific, trusted token server. That approach requires significantly more investment, and only makes sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems.
+
+You will find [background information here](spec/auth/token.md), and [configuration information here](configuration.md#auth).
+
+Beware that you will have to implement your own authentication service for this to work.
+
+## Managing with Compose
+
+As your registry configuration grows more complex, dealing with it can quickly become tedious.
+
+It's highly recommended to use [Docker Compose](https://docs.docker.com/compose/) to facilitate operating your registry.
+ +Here is a simple `docker-compose.yml` example that condenses everything explained so far: + +``` +registry: + restart: always + image: registry:2 + ports: + - 5000:5000 + environment: + REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt + REGISTRY_HTTP_TLS_KEY: /certs/domain.key + REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /var/lib/registry + REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd + REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm + volumes: + - /path/data:/var/lib/registry + - /path/certs:/certs + - /path/auth:/auth ``` -This configuration is very basic and you can see it would present some problems -in a production. For example, the `http` section details the configuration for -the HTTP server that hosts the registry. The server is not using even the most -minimal transport layer security (TLS). Let's configure that in the next section. - -## Configure TLS on a registry server - -In this section, you configure TLS on the server to enable communication through -the `https` protocol. Enabling TLS on the server is the minimum layer of -security recommended for running a registry behind a corporate firewall. One way -to do this is to build your own registry image. - -### Download the source and generate certificates - -1. [Download the registry -source](https://github.com/docker/distribution/releases/tag/v2.0.0). - - Alternatively, use the `git clone` command if you are more comfortable with that. - -2. Unpack the the downloaded package into a local directory. - - The package creates a `distribution` directory. - -3. Change to the root of the new `distribution` directory. - - $ cd distribution - -4. Make a `certs` subdirectory. - - $ mkdir certs - -5. Use SSL to generate some self-signed certificates. - - $ openssl req \ - -newkey rsa:2048 -nodes -keyout certs/domain.key \ - -x509 -days 365 -out certs/domain.crt - - This command prompts you for basic information it needs to create the certificates. - -6. List the contents of the `certs` directory. - - $ ls certs - domain.crt domain.key - - When you build this container, the `certs` directory and its contents - automatically get copied also. - -### Add TLS to the configuration - -The `distribution` repo includes sample registry configurations in the `cmd` -subdirectory. In this section, you edit one of these configurations to add TLS -support. - -1. Edit the `./cmd/registry/config.yml` file. - - $ vi ./cmd/registry/config.yml - -2. Locate the `http` block. - - http: - addr: :5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 - -3. Add a `tls` block for the server's self-signed certificates: - - http: - addr: :5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 - tls: - certificate: /go/src/github.com/docker/distribution/certs/domain.crt - key: /go/src/github.com/docker/distribution/certs/domain.key - - You provide the paths to the certificates in the container. If you want - two-way authentication across the layer, you can add an optional `clientcas` - section. - -4. Save and close the file. - - -### Build and run your registry image - -1. Build your registry image. - - $ docker build -t secure_registry . - -2. Run your new image. 
- - $ docker run -p 5000:5000 secure_registry:latest - time="2015-04-12T03:06:18.616502588Z" level=info msg="endpoint local-8082 disabled, skipping" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry - time="2015-04-12T03:06:18.617012948Z" level=info msg="endpoint local-8083 disabled, skipping" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry - time="2015-04-12T03:06:18.617190113Z" level=info msg="using inmemory layerinfo cache" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry - time="2015-04-12T03:06:18.617349067Z" level=info msg="listening on :5000, tls" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry - time="2015-04-12T03:06:18.628589577Z" level=info msg="debug server listening localhost:5001" - 2015/04/12 03:06:28 http: TLS handshake error from 172.17.42.1:44261: remote error: unknown certificate authority - - Watch the messages at startup. You should see that `tls` is running. - -3. Use `curl` to verify that you can connect over `https`. - - $ curl -v https://localhost:5000 - * Rebuilt URL to: https://localhost:5000/ - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 5000 (#0) - * successfully set certificate verify locations: - * CAfile: none - CApath: /etc/ssl/certs - * SSLv3, TLS handshake, Client hello (1): - * SSLv3, TLS handshake, Server hello (2): - * SSLv3, TLS handshake, CERT (11): - * SSLv3, TLS alert, Server hello (2): - * SSL certificate problem: self signed certificate - * Closing connection 0 - curl: (60) SSL certificate problem: self signed certificate - More details here: http://curl.haxx.se/docs/sslcerts.html - -## Configure Nginx with a v1 and v2 registry - -This sections describes how to user `docker-compose` to run a combined version -1 and version 2.0 registry behind an `nginx` proxy. The combined registry is -accessed at `localhost:5000`. If a `docker` client has a version less than 1.6, -Nginx will route its requests to the 1.0 registry. Requests from newer clients -will route to the 2.0 registry. - -This procedure uses the same `distribution` directory you created in the last -procedure. The directory includes an example `compose` configuration. - -### Install Docker Compose - -1. Open a new terminal on the host with your `distribution` directory. - -2. Get the `docker-compose` binary. - - $ sudo wget https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` -O /usr/local/bin/docker-compose - - This command installs the binary in the `/usr/local/bin` directory. - -3. Add executable permissions to the binary. - - $ sudo chmod +x /usr/local/bin/docker-compose - - -### Do some housekeeping - -1. Remove any previous images. - - $ docker rmi -f $(docker images -q -a ) - - This step is a house keeping step. It prevents you from mistakenly picking up - an old image as you work through this example. - -2. Edit the `distribution/cmd/registry/config.yml` file and remove the `tls` block. - - If you worked through the previous example, you'll have a `tls` block. - -4. Save any changes and close the file. - -### Configure SSL - -1. Change to the `distribution/contrib/compose/nginx` directory. - - This directory contains configuration files for Nginx and both registries. - -2. Use SSL to generate some self-signed certificates. 
- - $ openssl req \ - -newkey rsa:2048 -nodes -keyout domain.key \ - -x509 -days 365 -out domain.crt - - This command prompts you for basic information it needs to create certificates. - -3. Edit the `Dockerfile`and add the following lines. - - COPY domain.crt /etc/nginx/domain.crt - COPY domain.key /etc/nginx/domain.key - - When you are done, the file looks like the following. - - FROM nginx:1.7 - - COPY nginx.conf /etc/nginx/nginx.conf - COPY registry.conf /etc/nginx/conf.d/registry.conf - COPY docker-registry.conf /etc/nginx/docker-registry.conf - COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf - COPY domain.crt /etc/nginx/domain.crt - COPY domain.key /etc/nginx/domain.key - -4. Save and close the `Dockerfile` file. - -5. Edit the `registry.conf` file and add the following configuration. - - ssl on; - ssl_certificate /etc/nginx/domain.crt; - ssl_certificate_key /etc/nginx/domain.key; - - This is an `nginx` configuration file. - -6. Save and close the `registry.conf` file. - -### Build and run - -1. Go up to the `distribution/contrib/compose` directory - - This directory includes a single `docker-compose.yml` configuration. - - nginx: - build: "nginx" - ports: - - "5000:5000" - links: - - registryv1:registryv1 - - registryv2:registryv2 - registryv1: - image: registry - ports: - - "5000" - registryv2: - build: "../../" - ports: - - "5000" - - This configuration builds a new `nginx` image as specified by the - `nginx/Dockerfile` file. The 1.0 registry comes from Docker's official public - image. Finally, the registry 2.0 image is built from the - `distribution/Dockerfile` you've used previously. - -2. Get a registry 1.0 image. - - $ docker pull registry:0.9.1 - - The Compose configuration looks for this image locally. If you don't do this - step, later steps can fail. - -3. Build `nginx`, the registry 2.0 image, and - - $ docker-compose build - registryv1 uses an image, skipping - Building registryv2... - Step 0 : FROM golang:1.4 - - ... - - Removing intermediate container 9f5f5068c3f3 - Step 4 : COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf - ---> 74acc70fa106 - Removing intermediate container edb84c2b40cb - Successfully built 74acc70fa106 - - The commmand outputs its progress until it completes. - -4. Start your configuration with compose. - - $ docker-compose up - Recreating compose_registryv1_1... - Recreating compose_registryv2_1... - Recreating compose_nginx_1... - Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 - ... - - -5. In another terminal, display the running configuration. - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 - 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 - aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 - -### Explore a bit - -1. Check for TLS on your `nginx` server. - - $ curl -v https://localhost:5000 - * Rebuilt URL to: https://localhost:5000/ - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... 
- * Connected to localhost (127.0.0.1) port 5000 (#0) - * successfully set certificate verify locations: - * CAfile: none - CApath: /etc/ssl/certs - * SSLv3, TLS handshake, Client hello (1): - * SSLv3, TLS handshake, Server hello (2): - * SSLv3, TLS handshake, CERT (11): - * SSLv3, TLS alert, Server hello (2): - * SSL certificate problem: self signed certificate - * Closing connection 0 - curl: (60) SSL certificate problem: self signed certificate - More details here: http://curl.haxx.se/docs/sslcerts.html - -2. Tag the `v1` registry image. - - $ docker tag registry:latest localhost:5000/registry_one:latest - -2. Push it to the localhost. - - $ docker push localhost:5000/registry_one:latest - - If you are using the 1.6 Docker client, this pushes the image the `v2 `registry. - -4. Use `curl` to list the image in the registry. - - $ curl -v -X GET http://localhost:32777/v2/registry_one/tags/list - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 32777 (#0) - > GET /v2/registry_one/tags/list HTTP/1.1 - > User-Agent: curl/7.36.0 - > Host: localhost:32777 - > Accept: */* - > - < HTTP/1.1 200 OK - < Content-Type: application/json; charset=utf-8 - < Docker-Distribution-Api-Version: registry/2.0 - < Date: Tue, 14 Apr 2015 22:34:13 GMT - < Content-Length: 39 - < - {"name":"registry1","tags":["latest"]} - * Connection #0 to host localhost left intact - - This example refers to the specific port assigned to the 2.0 registry. You saw - this port earlier, when you used `docker ps` to show your running containers. +:warning: replace `/path` by whatever directory that holds your `certs` and `auth` folder from above. + +You can then start your registry with a simple + + docker-compose up -d + +## Next + +You will find more specific and advanced informations in the following sections: + + - [Configuration reference](configuration.md) + - [Working with notifications](notifications.md) + - [Registry API](spec/api.md) + - [Storage driver model](storagedrivers.md) + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/distribution.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/distribution.md deleted file mode 100644 index bad7362f5253..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/distribution.md +++ /dev/null @@ -1,17 +0,0 @@ -# Project - -## Contents -- [Docker Registry 2.0](index.md) -- [Architecture](architecture.md) -- [Build the development environment](building.md) -- [Configure a registry](configuration.md) -- [Deploying a registry server](deploying.md) -- [Microsoft Azure storage driver](storage-drivers/azure.md) -- [Filesystem storage driver](storage-drivers/filesystem.md) -- [In-memory storage driver](storage-drivers/inmemory.md) -- [S3 storage driver](storage-drivers/s3.md) -- [Notifications](notifications.md) -- [Docker Registry HTTP API V2](spec/api.md) -- [Docker Registry v2 authentication via central service](spec/auth/token.md) -- [Docker Distribution JSON Canonicalization](spec/json.md) -- [Docker-Registry Storage Driver](storagedrivers.md) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md index 95c8ec9e5de2..fbe502cc4853 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md @@ -1,43 +1,70 @@ + + # Glossary -This page contains distribution 
related terms. For a complete Docker glossary, -see the [glossary in the full documentation set](http://docs.docker.com/reference/glossary/). +This page contains definitions for distribution related terms.
 <dl>
-	<dt>Blob</dt>
-	<dd>
-	The primary unit of registry storage. A string of bytes identified by
-	content-address, known as a _digest_.
-	</dd>
+	<dt>Blob</dt>
+	<dd>
+	A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest").
+
+	Layers are a good example of "blobs".
+	</dd>

-	<dt>Image</dt>
-	<dd>An image is a collection of content from which a docker container can be created.</dd>
+	<dt>Image</dt>
+	<dd>
+	An image is a named set of immutable data from which a Docker container can be created.
+
+	An image is represented by a json file called a manifest, and is conceptually a set of layers.
+
+	Image names indicate the location where they can be pulled from and pushed to, as they usually start with a registry domain name and port.
+	</dd>

-	<dt>Layer</dt>
-	<dd>
-	A tar file representing the partial content of a filesystem. Several
-	layers can be "stacked" to make up the root filesystem.
-	</dd>
+	<dt>Layer</dt>
+	<dd>
+	A layer is a tar archive bundling partial content from a filesystem.
+
+	Layers from an image are usually extracted in order on top of each other to make up a root filesystem from which containers run.
+	</dd>

-	<dt>Manifest</dt>
-	<dd>Describes a collection layers that make up an image.</dd>
+	<dt>Manifest</dt>
+	<dd>A manifest is the JSON representation of an image.</dd>

-	<dt>Registry</dt>
-	<dd>A registry is a service which serves repositories.</dd>
+	<dt>Namespace</dt>
+	<dd>
+	A namespace is a collection of repositories with a common name prefix.
+
+	The namespace with an empty prefix is considered the Global Namespace.
+	</dd>
+
+	<dt>Registry</dt>
+	<dd>A registry is a service that lets you store and deliver images.</dd>

-	<dt>Repository</dt>
-	<dd>
-	A repository is a collection of docker images, made up of manifests, tags
-	and layers. The base unit of these components are blobs.
-	</dd>
+	<dt>Repository</dt>
+	<dd>A repository is a set of data containing all versions of a given image.</dd>

-	<dt>Tag</dt>
-	<dd>Tag provides a common name to an image.</dd>
+	<dt>Scope</dt>
+	<dd>A scope is the portion of a namespace onto which a given authorization token is granted.</dd>

-	<dt>Namespace</dt>
-	<dd>A namespace is a collection of repositories with a common name prefix. The
-	namespace with an empty common prefix is considered the Global Namespace.</dd>
+	<dt>Tag</dt>
+	<dd>
+	A tag is conceptually a "version" of a named image.
+
+	Example: `docker pull myimage:latest` instructs docker to pull the image "myimage" in version "latest".
+	</dd>

-	<dt>Scope</dt>
-	<dd>A common repository name prefix.</dd>
 </dl>
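
For readers who want to see what a digest looks like in practice, here is a minimal Go sketch (illustrative only, not part of the registry codebase) producing an identifier of the `algorithm:hex` form described under *Blob*:

```
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// digest returns a content-addressable identifier for a blob:
// a collision-resistant hash of the bytes, prefixed with the
// name of the algorithm that produced it.
func digest(blob []byte) string {
	sum := sha256.Sum256(blob)
	return "sha256:" + hex.EncodeToString(sum[:])
}

func main() {
	// Prints the sha256 digest of the example string.
	fmt.Println(digest([]byte("a small string")))
}
```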
    diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md new file mode 100644 index 000000000000..8deb6a14219a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md @@ -0,0 +1,24 @@ + + +# Getting help + +If you need help, or just want to chat, you can reach us: + +- on irc: `#docker-distribution` on freenode +- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at ) + +If you want to report a bug: + +- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md) +- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues) + +You can also find out more about the Docker's project [Getting Help resources](https://docs.docker.com/project/get-help). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md index f8f9c8b060a8..22ccd68d9007 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md @@ -1,68 +1,66 @@ - + -# Docker Registry 2.0 +# Docker Registry -Docker Registry stores and distributes images centrally. It's where you push images to and pull them from; Docker Registry gives team members the ability to share images and deploy them to testing, staging and production environments. +## What it is -Docker provides a hosted registry as part of [Docker Hub](https://hub.docker.com). Docker Hub is a cloud service that securely manages your images. It features organization accounts, automated builds, and much, much more. +The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. +The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License). -Docker Registry is the core technology behind the Docker Hub. You can run your own registry instance if you want to host your images privately. Docker Registry features: +## Why use it - - **Pluggable storage drivers**: Images can be stored in Amazon S3, Microsoft Azure or the local filesystem. - - **Webhook notifications**: When an image is pushed to your registry, webhooks can fire off to launch CI builds, send notifications to IRC, etc. +You should use the Registry if you want to: -To get started with your own Docker Registry, head over to the instructions on how to [deploy a registry](deploying.md). + * tightly control where your images are being stored + * fully own your images distribution pipeline + * integrate image storage and distribution tightly into your in-house development workflow -## Understanding the registry +## Alternatives -A registry is, at its heart, a collection of repositories. In turn, a repository -is collection of images. Users interact with the registry by pushing images to -or pulling images from the registry. The Docker Registry includes several -optional features that you can configure according to your needs. +Users looking for a zero maintenance, ready-to-go solution are encouraged to head-over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more). 
-![](images/registry.png) +Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/). -The architecture supports a configurable storage backend. You can store images -on a file system or on a service such as Amazon S3 or Microsoft Azure. The -default storage system is the local disk; this is suitable for development or -some small deployments. +## Requirements -Securing access to images is a concern for even the simplest deployment. The -registry service supports transport layer security (TLS) natively. You must -configure it in your instance to make use of it. You can also use a proxy server -such as Nginx and basic authentication to extend the security of a deployment. +The Registry is compatible with Docker engine **version 1.6.0 or higher**. +If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry) -The registry repository includes reference implementations for additional -authentication and authorization support. Only very large or public registry -deployments are expected to extend the registry in this way. +## TL;DR -Docker Registry architecture includes a robust notification system. This system -sends webhook notifications in response to registry activity. The registry also -includes features for both logging and reporting as well. Reporting is useful -for large installations that want to collect metrics. Currently, the feature -supports both New Relic and Bugsnag. +Start your registry -## Getting help + docker run -d -p 5000:5000 --name registry registry:2 -Docker Registry is an open source project and is under active development. If -you need help, would like to contribute, or simply want to talk about the -project with like-minded individuals, we have a number of open channels for -communication. +Pull (or build) some image from the hub -- To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/distribution/issues). -- To talk about the project please post a message to the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) or join the `#docker-distribution` channel on IRC. -- To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/distribution/pulls). + docker pull ubuntu -For more information and resources, please visit the [Getting Help project page](https://docs.docker.com/project/get-help/). +Tag the image so that it points to your registry -## Registry documentation + docker tag ubuntu localhost:5000/myfirstimage - - [Deploying a registry](deploying.md) - - [Configure a registry](configuration.md) - - [Storage driver model](storagedrivers.md) - - [Working with notifications](notifications.md) - - [Registry API v2](spec/api.md) +Push it + + docker push localhost:5000/myfirstimage + +Pull it back + + docker pull localhost:5000/myfirstimage + +Now stop your registry and remove all data + + docker stop registry && docker rm -v registry + +## Next + +You should now read the [detailed introduction about the registry](introduction.md), or jump directly to [deployment instructions](deploying.md). 
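
If you want a quick scripted sanity check of the registry you just started, the V2 base endpoint can be probed in a few lines of Go. This is a sketch that assumes the registry from the TL;DR above is still listening on `localhost:5000`:

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// GET /v2/ is the API version check endpoint of a V2 registry.
	resp, err := http.Get("http://localhost:5000/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A V2 registry answers 200 OK and advertises its API version
	// in the Docker-Distribution-Api-Version header.
	fmt.Println("status: ", resp.Status)
	fmt.Println("version:", resp.Header.Get("Docker-Distribution-Api-Version"))
}
```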
+ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md new file mode 100644 index 000000000000..3ae0c3c149d4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md @@ -0,0 +1,70 @@ + + +# Insecure Registry + +While it's highly recommended to secure your registry using a TLS certificate issued by a known CA, you may alternatively decide to use self-signed certificates, or even use your registry over plain http. + +You have to understand the downsides in doing so, and the extra burden in configuration. + +## Deploying a plain HTTP registry + +> :warning: it's not possible to use an insecure registry with basic authentication + +This basically tells Docker to entirely disregard security for your registry. + +1. edit the file `/etc/default/docker` so that there is a line that reads: `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"` (or add that to existing `DOCKER_OPTS`) +2. restart your Docker daemon: on ubuntu, this is usually `service docker stop && service docker start` + +**Pros:** + + - easy to configure + +**Cons:** + + - very insecure + - you have to configure every docker daemon that wants to access your registry + +## Using self-signed certificates + +> :warning: using this along with basic authentication requires to **also** trust the certificate into the OS cert store for some versions of docker + +Generate your own certificate: + + mkdir -p certs && openssl req \ + -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \ + -x509 -days 365 -out certs/domain.crt + +Be sure to use the name `myregistrydomain.com` as a CN. + +Stop and restart your registry. + +Then you have to instruct every docker daemon to trust that certificate. This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt` (don't forget to restart docker after doing so). + +Stop and restart all your docker daemons. + +**Pros:** + + - more secure than the insecure registry solution + +**Cons:** + + - you have to configure every docker daemon that wants to access your registry + +## Failing... + +Failing to configure docker and trying to pull from a registry that is not using TLS will result in the following message: + +``` +FATA[0000] Error response from daemon: v1 ping attempt failed with error: +Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527. +If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add +`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments. +In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; +simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt +``` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md new file mode 100644 index 000000000000..1ea7115e568f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md @@ -0,0 +1,55 @@ + + +# Understanding the Registry + +A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. + + > Example: the image `distribution/registry`, with tags `2.0` and `2.1`. + +Users interact with a registry by using docker push and pull commands. 
+ + > Example: `docker pull registry-1.docker.io/distribution/registry:2.1`. + +Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, Ceph Rados, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storagedrivers.md). + +Since securing access to your hosted images is paramount, the Registry natively supports TLS and basic authentication. + +The Registry GitHub repository includes additional information about advanced authentication and authorization methods. Only very large or public deployments are expected to extend the Registry in this way. + +Finally, the Registry ships with a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting, mostly useful for large installations that want to collect metrics. + +## Understanding image naming + +Image names as used in typical docker commands reflect their origin: + + * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull registry-1.docker.io/library/ubuntu` command + * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar` + +You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](https://docs.docker.com/reference/commandline/cli/). + +## Use cases + +Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available. + +It's also an essential component if you want to quickly deploy a new image over a large cluster of machines. + +Finally, it's the best way to distribute images inside an isolated network. + +## Requirements + +You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the cli, and at least grasp basic concepts about networking. + +Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. Strong understanding of http and overall network communications, plus familiarity with golang are certainly useful as well for advanced operations or hacking. 
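
To make the naming rules from "Understanding image naming" above concrete, here is a simplified Go sketch of how a name can be split into a registry host and a repository path. It is an illustration of the rule, not the engine's actual implementation, and it assumes the Docker Hub defaults mentioned above (`registry-1.docker.io` and the `library/` prefix for official images):

```
package main

import (
	"fmt"
	"strings"
)

// splitName decides whether the part before the first "/" names a
// registry host: it does when it looks like a host (contains "." or
// ":", or is "localhost"); otherwise the image comes from the Hub.
func splitName(name string) (registry, repository string) {
	i := strings.Index(name, "/")
	if i < 0 {
		return "registry-1.docker.io", "library/" + name
	}
	if host := name[:i]; strings.ContainsAny(host, ".:") || host == "localhost" {
		return host, name[i+1:]
	}
	return "registry-1.docker.io", name
}

func main() {
	fmt.Println(splitName("ubuntu"))                        // registry-1.docker.io library/ubuntu
	fmt.Println(splitName("myregistrydomain:5000/foo/bar")) // myregistrydomain:5000 foo/bar
}
```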
+ +## Next + +Dive into [deploying your registry](deploying.md) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md index be3f02bbed04..5dbd766f13c2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md @@ -1,3 +1,9 @@ + + # Migrating a 1.0 registry to 2.0 TODO: This needs to be revised in light of Olivier's work diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md new file mode 100644 index 000000000000..5a928c140b3f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md @@ -0,0 +1,68 @@ + + +# Registry as a pull through cache + +A v2 Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally. + +## Why? + +If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn’t have it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the image fetch traffic on your local network. + +## How does it work? + +The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage. + +## What if the content changes on the Hub? + +When a pull is attempted with a tag, the Registry will check the remote to ensure if it has the latest version of the requested content. If it doesn't it will fetch the latest content and cache it. + +## What about my disk? + +In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching. + +To ensure best performance and guarantee correctness the Registry cache should be configured to use the `filesystem` driver for storage. + +## Running a Registry as a pull through cache + +The easiest way to run a registry as a pull through cache is to run the official Registry pull through cache official image. + +Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster. + +### Configuring the cache + +To configure a Registry to run as a pull through cache, the addition of a `proxy` section is required to the config file. + +In order to access private images on the Docker Hub the username and password can be supplied. 
+ +``` +proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] +``` + + + +## Configuring the Docker daemon + +You will need to pass the `--registry-mirror` option to your Docker daemon on startup: + +``` +docker --registry-mirror=https:// -d +``` + +For example, if your mirror is serving on http://10.0.0.2:5000, you would run: + +``` +docker --registry-mirror=https://10.0.0.2:5000 -d +``` + +NOTE: Depending on your local host setup, you may be able to add the --registry-mirror options to the `DOCKER_OPTS` variable in `/etc/default/` docker. + + + + diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml b/Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml index 8446582068fa..07bab4ecf4f3 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml @@ -1,13 +1,18 @@ - ['registry/index.md', 'Reference', 'Docker Registry 2.0'] +- ['registry/introduction.md', 'Reference', '    ▪  Introduction' ] - ['registry/deploying.md', 'Reference', '    ▪  Deploy a registry' ] - ['registry/configuration.md', 'Reference', '    ▪  Configure a registry' ] +- ['registry/authentication.md', 'Reference', '    ▪  Authentication' ] +- ['registry/glossary.md', 'Reference', '    ▪  Glossary' ] +- ['registry/help.md', 'Reference', '    ▪  Getting help' ] - ['registry/storagedrivers.md', 'Reference', '    ▪  Storage driver model' ] - ['registry/notifications.md', 'Reference', '    ▪  Work with notifications' ] - ['registry/spec/api.md', 'Reference', '    ▪  Registry Service API v2' ] -- ['registry/spec/json.md', 'Reference', '    ▪  JSON format' ] -- ['registry/spec/auth/token.md', 'Reference', '    ▪  Authenticate via central service' ] +- ['registry/spec/json.md', '**HIDDEN**'] +- ['registry/spec/auth/token.md', '**HIDDEN**'] - ['registry/storage-drivers/azure.md', '**HIDDEN**' ] - ['registry/storage-drivers/filesystem.md', '**HIDDEN**' ] - ['registry/storage-drivers/inmemory.md', '**HIDDEN**' ] +- ['registry/storage-drivers/rados.md', '**HIDDEN**' ] - ['registry/storage-drivers/s3.md','**HIDDEN**' ] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md new file mode 100644 index 000000000000..b512ec7d0f8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md @@ -0,0 +1,143 @@ + + +# Authenticating proxy with nginx + +With this method, you implement basic authentication in a reverse proxy that sits in front of your registry. + +While this model gives you the ability to use whatever authentication backend you want through a secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. + +Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues (typically, nginx does buffer client requests to disk, opening the door to a host of problems if you are dealing with huge images and a lot of traffic). + +### Requirements + +You should have followed entirely the basic [deployment guide](deploying.md). + +If you have not, please take the time to do so. 
+ +At this point, it's assumed that: + + * you understand Docker security requirements, and how to configure your docker engines properly + * you have installed Docker Compose + * it's HIGHLY recommended that you get a certificate from a known CA instead of self-signed certificates + * inside the current directory, you have a X509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com` (or whatever domain name you want to use) + * be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm -v registry`) + +### Setting things up + +Read again the requirements. + +Ready? + +Run the following: + +``` +mkdir -p auth +mkdir -p data + +# This is the main nginx configuration you will use +cat < auth/registry.conf +upstream docker-registry { + server registry:5000; +} + +server { + listen 443 ssl; + server_name myregistrydomain.com; + + # SSL + ssl_certificate /etc/nginx/conf.d/domain.crt; + ssl_certificate_key /etc/nginx/conf.d/domain.key; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting plus add_header + auth_basic "Registry realm"; + auth_basic_user_file /etc/nginx/conf.d/htpasswd; + add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + + proxy_pass http://docker-registry; + proxy_set_header Host \$http_host; # required for docker client's sake + proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto \$scheme; + proxy_read_timeout 900; + } +} +EOF + +# Now, create a password file for "testuser" and "testpassword" +htpasswd -bn testuser testpassword > auth/htpasswd + +# Copy over your certificate files +cp domain.crt auth +cp domain.key auth + +# Now create your compose file + +cat < docker-compose.yml +nginx: + image: "nginx:1.9" + ports: + - 5043:443 + links: + - registry:registry + volumes: + - `pwd`/auth/:/etc/nginx/conf.d + +registry: + image: registry:2 + ports: + - 127.0.0.1:5000:5000 + environment: + REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /data + volumes: + - `pwd`/data:/data +EOF +``` + +### Starting and stopping + +That's it. You can now: + + * `docker-compose up -d` to start your registry + * `docker login myregistrydomain.com:5043` (using `testuser` and `testpassword`) + * `docker tag ubuntu myregistrydomain.com:5043/toto` + * `docker push myregistrydomain.com:5043/toto` + +### Docker still complains about the certificate? + +That's certainly because you are using a self-signed certificate, despite the warnings. + +If you really insist on using these, you have to trust it at the OS level. + +Usually, on Ubuntu this is done with: + + cp auth/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt + update-ca-certificates + +... 
and on RedHat with: + + cp auth/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt + update-ca-trust + +Now: + + * `service docker stop && service docker start` (or any other way you use to restart docker) + * `docker-compose up -d` to bring your registry up diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md index fb5897fc9abe..9167cacb64f3 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md @@ -1,9 +1,13 @@ - - + # Notifications @@ -17,7 +21,7 @@ queues and dispatches events to [_Endpoints_](#endpoints). ## Endpoints -Notifications are sent to _endpoints_ via HTTP requests. Each configurated +Notifications are sent to _endpoints_ via HTTP requests. Each configured endpoint has isolated queues, retry configuration and http targets within each instance of a registry. When an action happens within the registry, it is converted into an event which is dropped into an inmemory queue. When the @@ -30,20 +34,18 @@ order is not guaranteed. To setup a registry instance to send notifications to endpoints, one must add them to the configuration. A simple example follows: -```yaml -notifications: - endpoints: - - name: alistener - url: https://mylistener.example.com/event - headers: - Authorization: [Bearer ] - timeout: 500ms - threshold: 5 - backoff: 1s -``` + notifications: + endpoints: + - name: alistener + url: https://mylistener.example.com/event + headers: + Authorization: [Bearer ] + timeout: 500ms + threshold: 5 + backoff: 1s The above would configure the registry with an endpoint to send events to -"https://mylistener.example.com/event", with the header "Authorization: Bearer +`https://mylistener.example.com/event`, with the header "Authorization: Bearer ". The request would timeout after 500 milliseconds. If 5 failures happen consecutively, the registry will backoff for 1 second before trying again. @@ -80,8 +82,9 @@ manifest: "action": "push", "target": { "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "length": 1, + "size": 1, "digest": "sha256:0123456789abcdef0", + "length": 1, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, @@ -101,6 +104,11 @@ manifest: } ``` +> __NOTE(stevvooe):__ As of version 2.1, the `length` field for event targets +> is being deprecated for the `size` field, bringing the target in line with +> common nomenclature. Both will continue to be set for the foreseeable +> future. Newer code should favor `size` but accept either. 
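
For listener authors, the note above about `size` superseding `length` can be handled along the following lines. This is a hedged sketch: the struct is a hypothetical, pared-down view of the event `target` shown in the example, not the registry's own types:

```
package main

import (
	"encoding/json"
	"fmt"
)

// Target is a pared-down view of the "target" object of a registry
// event (hypothetical illustration, field names follow the JSON above).
type Target struct {
	MediaType string `json:"mediaType"`
	Size      int64  `json:"size"`   // preferred as of registry 2.1
	Length    int64  `json:"length"` // deprecated alias of size
	Digest    string `json:"digest"`
}

// ContentSize favors the newer `size` field but falls back to the
// deprecated `length` when only that one is set.
func (t Target) ContentSize() int64 {
	if t.Size > 0 {
		return t.Size
	}
	return t.Length
}

func main() {
	raw := []byte(`{"mediaType":"application/vnd.docker.distribution.manifest.v1+json","size":1,"length":1,"digest":"sha256:0123456789abcdef0"}`)
	var t Target
	if err := json.Unmarshal(raw, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.ContentSize())
}
```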
+ ## Envelope The envelope contains one or more events, with the following json structure: @@ -233,7 +241,7 @@ several failures and have since recovered: "notifications":{ "endpoints":[ { - "name":"local-8082", + "name":"local-5003", "url":"http://localhost:5003/callback", "Headers":{ "Authorization":[ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md index cd9a24298f3d..c924d4571bbe 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md @@ -1,3 +1,9 @@ + + # OS X Setup Guide This guide will walk you through running the new Go based [Docker registry](https://github.com/docker/distribution) on your local OS X machine. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md index 6cc2345ca146..81450657f05f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md @@ -1,8 +1,12 @@ - + # Docker Registry HTTP API V2 @@ -115,33 +119,62 @@ This section should be updated when changes are made to the specification, indicating what is different. Optionally, we may start marking parts of the specification to correspond with the versions enumerated here. +Each set of changes is given a letter corresponding to a set of modifications +that were applied to the baseline specification. These are merely for +reference and shouldn't be used outside the specification other than to +identify a set of modifications. +
 <dl>
-	<dt>2.0.1</dt>
-	<dd>
-		<ul>
-			<li>Added capability of doing streaming upload to PATCH blob upload.</li>
-			<li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
-			<li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
-		</ul>
-	</dd>
-
-	<dt>2.0.0</dt>
-	<dd>
-		<ul>
-			<li>Added support for immutable manifest references in manifest endpoints.</li>
-			<li>Deleting a manifest by tag has been deprecated.</li>
-			<li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
-			<li>Added error code for unsupported operations.</li>
-			<li>Added capability of doing streaming upload to PATCH blob upload.</li>
-			<li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
-			<li>Removed 416 return code from PUT blob upload.</li>
-		</ul>
-	</dd>
-
-	<dt>2.0</dt>
-	<dd>
-		This is the baseline specification.
-	</dd>
+	<dt>f</dt>
+	<dd>
+		<ul>
+			<li>Specify the delete API for layers and manifests.</li>
+		</ul>
+	</dd>
+
+	<dt>e</dt>
+	<dd>
+		<ul>
+			<li>Added support for listing registry contents.</li>
+			<li>Added pagination to tags API.</li>
+			<li>Added common approach to support pagination.</li>
+		</ul>
+	</dd>
+
+	<dt>d</dt>
+	<dd>
+		<ul>
+			<li>Allow repository name components to be one character.</li>
+			<li>Clarified that single component names are allowed.</li>
+		</ul>
+	</dd>
+
+	<dt>c</dt>
+	<dd>
+		<ul>
+			<li>Added section covering digest format.</li>
+			<li>Added more clarification that manifest cannot be deleted by tag.</li>
+		</ul>
+	</dd>
+
+	<dt>b</dt>
+	<dd>
+		<ul>
+			<li>Added capability of doing streaming upload to PATCH blob upload.</li>
+			<li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
+			<li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
+		</ul>
+	</dd>
+
+	<dt>a</dt>
+	<dd>
+		<ul>
+			<li>Added support for immutable manifest references in manifest endpoints.</li>
+			<li>Deleting a manifest by tag has been deprecated.</li>
+			<li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
+			<li>Added error code for unsupported operations.</li>
+		</ul>
+	</dd>
 </dl>
    ## Overview @@ -167,12 +200,11 @@ path component is less than 30 characters. The V2 registry API does not enforce this. The rules for a repository name are as follows: 1. A repository name is broken up into _path components_. A component of a - repository name must be at least two lowercase, alpha-numeric characters, + repository name must be at least one lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*` and the - matched result must be 2 or more characters in length. -2. The name of a repository must have at least two path components, separated - by a forward slash. + must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. +2. If a repository name has two or more path components, they must be + separated by a forward slash ("/"). 3. The total length of a repository name, including slashes, must be less the 256 characters. @@ -241,6 +273,84 @@ When a `200 OK` or `401 Unauthorized` response is returned, the Clients may require this header value to determine if the endpoint serves this API. When this header is omitted, clients may fallback to an older API version. +### Content Digests + +This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). +The core of this design is the concept of a content addressable identifier. It +uniquely identifies content by taking a collision-resistant hash of the bytes. +Such an identifier can be independently calculated and verified by selection +of a common _algorithm_. If such an identifier can be communicated in a secure +manner, one can retrieve the content from an insecure source, calculate it +independently and be certain that the correct content was obtained. Put simply, +the identifier is a property of the content. + +To disambiguate from other concepts, we call this identifier a _digest_. A +_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ +portion. The _algorithm_ identifies the methodology used to calculate the +digest. The _hex_ portion is the hex-encoded result of the hash. + +We define a _digest_ string to match the following grammar: + + digest := algorithm ":" hex + algorithm := /[A-Fa-f0-9_+.-]+/ + hex := /[A-Fa-f0-9]+/ + +Some examples of _digests_ include the following: + +digest | description | +----------------------------------------------------------------------------------|------------------------------------------------ +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | +tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Tarsum digest, used for legacy layer digests. | + +> __NOTE:__ While we show an example of using a `tarsum` digest, the security +> of tarsum has not been verified. It is recommended that most implementations +> use sha256 for interoperability. + +While the _algorithm_ does allow one to implement a wide variety of +algorithms, compliant implementations should use sha256. Heavy processing of +input before calculating a hash is discouraged to avoid degrading the +uniqueness of the _digest_ but some canonicalization may be performed to +ensure consistent identifiers. 
+ +Let's use a simple example in pseudo-code to demonstrate a digest calculation: + +``` +let C = 'a small string' +let B = sha256(C) +let D = 'sha256:' + EncodeHex(B) +let ID(C) = D +``` + +Above, we have bytestring _C_ passed into a function, _SHA256_, that returns a +bytestring B, which is the hash of _C_. _D_ gets the algorithm concatenated +with the hex encoding of _B_. We then define the identifier of _C_ to _ID(C)_ +as equal to _D_. A digest can be verified by independently calculating _D_ and +comparing it with identifier _ID(C)_ + +#### Digest Header + +To provide verification of http content, any response may include a `Docker- +Content-Digest` header. This will include the digest of the target entity +returned in the response. For blobs, this is the entire blob content. For +manifests, this is the manifest body without the signature content, also known +as the JWS payload. Note that the commonly used canonicalization for digest +calculation may be dependent on the mediatype of the content, such as with +manifests. + +The client may choose to ignore the header or may verify it to ensure content +integrity and transport security. This is most important when fetching by a +digest. To ensure security, the content should be verified against the digest +used to fetch the content. At times, the returned digest may differ from that +used to initiate a request. Such digests are considered to be from different +_domains_, meaning they have different values for _algorithm_. In such a case, +the client may choose to verify the digests in both domains or ignore the +server's digest. To maintain security, the client _must_ always verify the +content against the _digest_ used to fetch the content. + +> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use +> the same digest used to fetch the content to verify it. The header `Docker- +> Content-Digest` should not be trusted over the "local" digest. + ### Pulling An Image An "image" is a combination of a JSON manifest and individual layer files. The @@ -609,6 +719,25 @@ Note that the upload url will not be available forever. If the upload uuid is unknown to the registry, a `404 Not Found` response will be returned and the client must restart the upload process. +### Deleting a Layer + +A layer may be deleted from the registry via its `name` and `digest`. A +delete may be issued with the following request format: + + DELETE /v2//blobs/ + +If the blob exists and has been successfully deleted, the following response +will be issued: + + 202 Accepted + Content-Length: None + +If the blob had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +If a layer is deleted which is referenced by a manifest in the registry, +then the complete images will not be resolvable. + #### Pushing an Image Manifest Once all of the layers for an image are uploaded, the client can upload the @@ -655,7 +784,131 @@ each unknown blob. The response format is as follows: ] } -#### Listing Image Tags +### Listing Repositories + +Images are stored in collections, known as a _repository_, which is keyed by a +`name`, as seen throughout the API specification. A registry instance may +contain several repositories. The list of available repositories is made +available through the _catalog_. 
+ +The catalog for a given registry can be retrieved with the following request: + +``` +GET /v2/_catalog +``` + +The response will be in the following format: + +``` +200 OK +Content-Type: application/json + +{ + "repositories": [ + , + ... + ] +} +``` + +Note that the contents of the response are specific to the registry +implementation. Some registries may opt to provide a full catalog output, +limit it based on the user's access level or omit upstream results, if +providing mirroring functionality. Subsequently, the presence of a repository +in the catalog listing only means that the registry *may* provide access to +the repository at the time of the request. Conversely, a missing entry does +*not* mean that the registry does not have the repository. More succinctly, +the presence of a repository only guarantees that it is there but not that it +is _not_ there. + +For registries with a large number of repositories, this response may be quite +large. If such a response is expected, one should use pagination. + +#### Pagination + +Paginated catalog results can be retrieved by adding an `n` parameter to the +request URL, declaring that the response should be limited to `n` results. +Starting a paginated flow begins as follows: + +``` +GET /v2/_catalog?n= +``` + +The above specifies that a catalog response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "repositories": [ + , + ... + ] +} +``` + +The above includes the _first_ `n` entries from the result set. To get the +_next_ `n` entries, one can create a URL where the argument `last` has the +value from `repositories[len(repositories)-1]`. If there are indeed more +results, the URL for the next block is encoded in an +[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" +relation. The presence of the `Link` header communicates to the client that +the entire result set has not been returned and another request must be +issued. If the header is not present, the client can assume that all results +have been recieved. + +> __NOTE:__ In the request template above, note that the brackets +> are required. For example, if the url is +> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would +> be `; rel="next"`. Please see +> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. + +Compliant client implementations should always use the `Link` header +value when proceeding through results linearly. The client may construct URLs +to skip forward in the catalog. + +To get the next result set, a client would issue the request as follows, using +the URL encoded in the described `Link` header: + +``` +GET /v2/_catalog?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set. + +The catalog result set is represented abstractly as a lexically sorted list, +where the position in that list can be specified by the query term `last`. The +entries in the response start _after_ the term specified by `last`, up to `n` +entries. + +The behavior of `last` is quite simple when demonstrated with an example. Let +us say the registry has the following repositories: + +``` +a +b +c +d +``` + +If the value of `n` is 2, _a_ and _b_ will be returned on the first response. 
+The `Link` header returned on the response will have `n` set to 2 and last set +to _b_: + +``` +Link: <?n=2&last=b>; rel="next" +``` + +The client can then issue the request with above value from the `Link` header, +receiving the values _c_ and _d_. Note that n may change on second to last +response or be omitted fully, if the server may so choose. + +### Listing Image Tags It may be necessary to list all of the tags under a given repository. The tags for an image repository can be retrieved with the following request: @@ -676,8 +929,51 @@ The response will be in the following format: } For repositories with a large number of tags, this response may be quite -large, so care should be taken by the client when parsing the response to -reduce copying. +large. If such a response is expected, one should use the pagination. + +#### Pagination + +Paginated tag results can be retrieved by adding the appropriate parameters to +the request URL described above. The behavior of tag pagination is identical +to that specified for catalog pagination. We cover a simple flow to highlight +any differences. + +Starting a paginated flow may begin as follows: + +``` +GET /v2//tags/list?n= +``` + +The above specifies that a tags response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +To get the next result set, a client would issue the request as follows, using +the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` +header: + +``` +GET /v2//tags/list?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set in the response. The behavior of the `last` parameter, the provided +response result, lexical ordering and encoding of the `Link` header are +identical to that of catalog pagination. ### Deleting an Image @@ -715,18 +1011,20 @@ corresponding responses, with success and failure, are enumerated. A list of methods and URIs are covered in the table below: |Method|Path|Entity|Description| --------|----|------|------------ +|------|----|------|-----------| | GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. | | GET | `/v2//tags/list` | Tags | Fetch the tags under the repository identified by `name`. | | GET | `/v2//manifests/` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | | PUT | `/v2//manifests/` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | -| DELETE | `/v2//manifests/` | Manifest | Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | +| DELETE | `/v2//manifests/` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. | | GET | `/v2//blobs/` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | -| POST | `/v2//blobs/uploads/` | Intiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. 
| +| DELETE | `/v2//blobs/` | Blob | Delete the blob identified by `name` and `digest` | +| POST | `/v2//blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. | | GET | `/v2//blobs/uploads/` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. | | PATCH | `/v2//blobs/uploads/` | Blob Upload | Upload a chunk of data for the specified upload. | | PUT | `/v2//blobs/uploads/` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. | | DELETE | `/v2//blobs/uploads/` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. | +| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, json list of repositories available in the registry. | The detail for each endpoint is covered in the following sections. @@ -736,21 +1034,21 @@ The detail for each endpoint is covered in the following sections. The error codes encountered via the API are enumerated in the following table: |Code|Message|Description| --------|----|------|------------ - `UNKNOWN` | unknown error | Generic error returned when the error does not have an API classification. - `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. - `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. +|----|-------|-----------| + `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. + `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. + `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. - `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. - `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. - `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. - `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. - `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. 
+ `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. + `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. - `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. - `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. - `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. + `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. + `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. + `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. + `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. + `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. + `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. @@ -796,7 +1094,6 @@ The API implements V2 protocol and is accessible. - ###### On Failure: Unauthorized ``` @@ -829,7 +1126,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -857,6 +1154,7 @@ Retrieve information about tags. Fetch the tags under the repository identified by `name`. +##### Tags ``` GET /v2//tags/list @@ -864,7 +1162,7 @@ Host: Authorization: ``` - +Return all tags for the repository The following parameters should be specified on the request: @@ -930,7 +1228,7 @@ The repository is not known to the registry. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | @@ -960,7 +1258,116 @@ The client does not have access to the repository. 
The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| +| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | + + + +##### Tags Paginated + +``` +GET /v2//tags/list?n=&last= +``` + +Return a portion of the tags for the specified repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`name`|path|Name of the target repository.| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ], +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Unauthorized + +``` +401 Unauthorized +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have access to the repository. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -969,7 +1376,7 @@ The error codes that may be included in the response body are enumerated below: ### Manifest -Create, update and retrieve manifests. +Create, update, delete and retrieve manifests. @@ -995,7 +1402,7 @@ The following parameters should be specified on the request: |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| -|`tag`|path|Tag of the target manifiest.| +|`reference`|path|Tag or digest of the target manifest.| @@ -1022,7 +1429,7 @@ Content-Type: application/json; charset=utf-8 } ``` -The manifest idenfied by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. +The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. The following headers will be returned with the response: @@ -1058,7 +1465,7 @@ The name or reference was invalid. 
The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | @@ -1089,7 +1496,7 @@ The client does not have access to the repository. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -1119,7 +1526,7 @@ The named manifest is not known to the registry. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | | `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | @@ -1163,15 +1570,15 @@ The following parameters should be specified on the request: |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| -|`tag`|path|Tag of the target manifiest.| +|`reference`|path|Tag or digest of the target manifest.| -###### On Success: Accepted +###### On Success: Created ``` -202 Accepted +201 Created Location: Content-Length: 0 Docker-Content-Digest: @@ -1215,7 +1622,7 @@ The received manifest was invalid in some way, as described by the error codes. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | | `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | @@ -1249,7 +1656,7 @@ The client does not have permission to push to the repository. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -1280,7 +1687,7 @@ One or more layers may be missing during a manifest upload. 
If so, the missing l The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | @@ -1319,15 +1726,33 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + #### DELETE Manifest -Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest. +Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. @@ -1347,7 +1772,7 @@ The following parameters should be specified on the request: |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| -|`tag`|path|Tag of the target manifiest.| +|`reference`|path|Tag or digest of the target manifest.| @@ -1363,8 +1788,7 @@ The following parameters should be specified on the request: - -###### On Failure: Invalid Name or Tag +###### On Failure: Invalid Name or Reference ``` 400 Bad Request @@ -1382,14 +1806,14 @@ Content-Type: application/json; charset=utf-8 } ``` -The specified `name` or `tag` were invalid and the delete was unable to proceed. +The specified `name` or `reference` were invalid and the delete was unable to proceed. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | @@ -1429,7 +1853,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -1452,24 +1876,42 @@ Content-Type: application/json; charset=utf-8 } ``` -The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. 
Clients can assume the manifest was already deleted if this response is returned. +The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | | `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + ### Blob -Fetch the blob identified by `name` and `digest`. Used to fetch layers by digest. +Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest. @@ -1566,7 +2008,7 @@ There was a problem with the request that needs to be addressed by the client, s The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | @@ -1606,7 +2048,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -1636,7 +2078,7 @@ The blob, identified by `name` and `digest`, is unknown to the registry. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | | `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. 
| @@ -1715,7 +2157,7 @@ There was a problem with the request that needs to be addressed by the client, s The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | @@ -1755,7 +2197,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -1785,7 +2227,7 @@ Content-Type: application/json; charset=utf-8 The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | | `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | @@ -1802,14 +2244,142 @@ The range specification cannot be satisfied for the requested content. This can +#### DELETE Blob + +Delete the blob identified by `name` and `digest` + + + +``` +DELETE /v2//blobs/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: Accepted + +``` +202 Accepted +Content-Length: 0 +Docker-Content-Digest: +``` + + + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|0| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. 
| + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The blob, identified by `name` and `digest`, is unknown to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Method Not Allowed + +``` +405 Method Not Allowed +Content-Type: application/json; charset=utf-8 -### Intiate Blob Upload +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + + + +### Initiate Blob Upload Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads. -#### POST Intiate Blob Upload +#### POST Initiate Blob Upload Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. @@ -1877,7 +2447,7 @@ The following headers will be returned with the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | @@ -1917,11 +2487,29 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. 
| The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + ##### Initiate Resumable Blob Upload ``` @@ -1983,7 +2571,7 @@ The following headers will be returned with the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | @@ -2023,7 +2611,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -2110,7 +2698,7 @@ There was an error processing the upload and it must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | @@ -2151,7 +2739,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -2181,7 +2769,7 @@ The upload is unknown to the registry. The upload must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | @@ -2267,7 +2855,7 @@ There was an error processing the upload and it must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. 
The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | @@ -2308,7 +2896,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -2338,7 +2926,7 @@ The upload is unknown to the registry. The upload must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | @@ -2422,7 +3010,7 @@ There was an error processing the upload and it must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | @@ -2463,7 +3051,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -2493,7 +3081,7 @@ The upload is unknown to the registry. The upload must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | @@ -2591,10 +3179,11 @@ There was an error processing the upload and it must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. 
The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | @@ -2632,7 +3221,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -2662,7 +3251,7 @@ The upload is unknown to the registry. The upload must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | @@ -2740,7 +3329,7 @@ An error was encountered processing the delete. The client may ignore this error The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | @@ -2780,7 +3369,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | @@ -2810,10 +3399,107 @@ The upload is unknown to the registry. The client may ignore this error and assu The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | +### Catalog + +List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available. + + + +#### GET Catalog + +Retrieve a sorted, json list of repositories available in the registry. + + +##### Catalog Fetch Complete + +``` +GET /v2/_catalog +``` + +Request an unabridged list of repositories available. + + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "repositories": [ + , + ... 
+  ]
+}
+```
+
+Returns the unabridged list of repositories as a JSON response.
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+
+
+
+##### Catalog Fetch Paginated
+
+```
+GET /v2/_catalog?n=&last=
+```
+
+Return the specified portion of repositories.
+
+
+The following parameters should be specified on the request:
+
+|Name|Kind|Description|
+|----|----|-----------|
+|`n`|query|Limit the number of entries in each response. If not present, all entries will be returned.|
+|`last`|query|Result set will include values lexically after last.|
+
+
+
+
+###### On Success: OK
+
+```
+200 OK
+Content-Length: <length>
+Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"
+Content-Type: application/json; charset=utf-8
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ],
+  "next": "?last=&n="
+}
+```
+
+
+
+The following headers will be returned with the response:
+
+|Name|Description|
+|----|-----------|
+|`Content-Length`|Length of the JSON response body.|
+|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available|
+
+
+
+
+
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl
index b61cca766bea..cc6bd7c53f10 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl
@@ -1,8 +1,12 @@
-
+
# Docker Registry HTTP API V2
@@ -115,33 +119,62 @@ This section should be updated when changes are made to the specification,
indicating what is different. Optionally, we may start marking parts of the
specification to correspond with the versions enumerated here.

+Each set of changes is given a letter corresponding to a set of modifications
+that were applied to the baseline specification. These are merely for
+reference and shouldn't be used outside the specification other than to
+identify a set of modifications.
+
-<dl>
-  <dt>2.0.1</dt>
-  <dd>
-    <ul>
-      <li>Added capability of doing streaming upload to PATCH blob upload.</li>
-      <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
-      <li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
-    </ul>
-  </dd>
-
-  <dt>2.0.0</dt>
-  <dd>
-    <ul>
-      <li>Added support for immutable manifest references in manifest endpoints.</li>
-      <li>Deleting a manifest by tag has been deprecated.</li>
-      <li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
-      <li>Added error code for unsupported operations.</li>
-      <li>Added capability of doing streaming upload to PATCH blob upload.</li>
-      <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
-      <li>Removed 416 return code from PUT blob upload.</li>
-    </ul>
-  </dd>
-
-  <dt>2.0</dt>
-  <dd>
-    This is the baseline specification.
-  </dd>
-</dl>
+<dl>
+  <dt>f</dt>
+  <dd>
+    <ul>
+      <li>Specify the delete API for layers and manifests.</li>
+    </ul>
+  </dd>
+
+  <dt>e</dt>
+  <dd>
+    <ul>
+      <li>Added support for listing registry contents.</li>
+      <li>Added pagination to tags API.</li>
+      <li>Added common approach to support pagination.</li>
+    </ul>
+  </dd>
+
+  <dt>d</dt>
+  <dd>
+    <ul>
+      <li>Allow repository name components to be one character.</li>
+      <li>Clarified that single component names are allowed.</li>
+    </ul>
+  </dd>
+
+  <dt>c</dt>
+  <dd>
+    <ul>
+      <li>Added section covering digest format.</li>
+      <li>Added more clarification that manifest cannot be deleted by tag.</li>
+    </ul>
+  </dd>
+
+  <dt>b</dt>
+  <dd>
+    <ul>
+      <li>Added capability of doing streaming upload to PATCH blob upload.</li>
+      <li>Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.</li>
+      <li>Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.</li>
+    </ul>
+  </dd>
+
+  <dt>a</dt>
+  <dd>
+    <ul>
+      <li>Added support for immutable manifest references in manifest endpoints.</li>
+      <li>Deleting a manifest by tag has been deprecated.</li>
+      <li>Specified `Docker-Content-Digest` header for appropriate entities.</li>
+      <li>Added error code for unsupported operations.</li>
+    </ul>
+  </dd>
+</dl>
    ## Overview @@ -167,12 +200,11 @@ path component is less than 30 characters. The V2 registry API does not enforce this. The rules for a repository name are as follows: 1. A repository name is broken up into _path components_. A component of a - repository name must be at least two lowercase, alpha-numeric characters, + repository name must be at least one lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*` and the - matched result must be 2 or more characters in length. -2. The name of a repository must have at least two path components, separated - by a forward slash. + must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. +2. If a repository name has two or more path components, they must be + separated by a forward slash ("/"). 3. The total length of a repository name, including slashes, must be less the 256 characters. @@ -241,6 +273,84 @@ When a `200 OK` or `401 Unauthorized` response is returned, the Clients may require this header value to determine if the endpoint serves this API. When this header is omitted, clients may fallback to an older API version. +### Content Digests + +This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). +The core of this design is the concept of a content addressable identifier. It +uniquely identifies content by taking a collision-resistant hash of the bytes. +Such an identifier can be independently calculated and verified by selection +of a common _algorithm_. If such an identifier can be communicated in a secure +manner, one can retrieve the content from an insecure source, calculate it +independently and be certain that the correct content was obtained. Put simply, +the identifier is a property of the content. + +To disambiguate from other concepts, we call this identifier a _digest_. A +_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ +portion. The _algorithm_ identifies the methodology used to calculate the +digest. The _hex_ portion is the hex-encoded result of the hash. + +We define a _digest_ string to match the following grammar: + + digest := algorithm ":" hex + algorithm := /[A-Fa-f0-9_+.-]+/ + hex := /[A-Fa-f0-9]+/ + +Some examples of _digests_ include the following: + +digest | description | +----------------------------------------------------------------------------------|------------------------------------------------ +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | +tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Tarsum digest, used for legacy layer digests. | + +> __NOTE:__ While we show an example of using a `tarsum` digest, the security +> of tarsum has not been verified. It is recommended that most implementations +> use sha256 for interoperability. + +While the _algorithm_ does allow one to implement a wide variety of +algorithms, compliant implementations should use sha256. Heavy processing of +input before calculating a hash is discouraged to avoid degrading the +uniqueness of the _digest_ but some canonicalization may be performed to +ensure consistent identifiers. 
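As a concrete companion to the grammar above, the following is a minimal Go sketch of the same calculation, using only the standard library; the `digest` helper is illustrative and is not the registry's own digest package:

```
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// digest returns a content-addressable identifier for p in the
// "<algorithm>:<hex>" form defined above, fixed here to sha256.
func digest(p []byte) string {
	sum := sha256.Sum256(p)
	return "sha256:" + hex.EncodeToString(sum[:])
}

func main() {
	content := []byte("a small string")
	id := digest(content)
	fmt.Println(id)

	// Verification: recompute the digest over the fetched bytes and
	// compare it to the identifier used to request them.
	fmt.Println("verified:", digest(content) == id)
}
```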
+
+Let's use a simple example in pseudo-code to demonstrate a digest calculation:
+
+```
+let C = 'a small string'
+let B = sha256(C)
+let D = 'sha256:' + EncodeHex(B)
+let ID(C) = D
+```
+
+Above, we have bytestring _C_ passed into a function, _SHA256_, that returns a
+bytestring _B_, which is the hash of _C_. _D_ gets the algorithm concatenated
+with the hex encoding of _B_. We then define the identifier of _C_, _ID(C)_,
+as equal to _D_. A digest can be verified by independently calculating _D_ and
+comparing it with the identifier _ID(C)_.
+
+#### Digest Header
+
+To provide verification of HTTP content, any response may include a
+`Docker-Content-Digest` header. This will include the digest of the target
+entity returned in the response. For blobs, this is the entire blob content. For
+manifests, this is the manifest body without the signature content, also known
+as the JWS payload. Note that the commonly used canonicalization for digest
+calculation may be dependent on the mediatype of the content, such as with
+manifests.
+
+The client may choose to ignore the header or may verify it to ensure content
+integrity and transport security. This is most important when fetching by a
+digest. To ensure security, the content should be verified against the digest
+used to fetch the content. At times, the returned digest may differ from that
+used to initiate a request. Such digests are considered to be from different
+_domains_, meaning they have different values for _algorithm_. In such a case,
+the client may choose to verify the digests in both domains or ignore the
+server's digest. To maintain security, the client _must_ always verify the
+content against the _digest_ used to fetch the content.
+
+> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use
+> the same digest used to fetch the content to verify it. The header
+> `Docker-Content-Digest` should not be trusted over the "local" digest.
+

### Pulling An Image

An "image" is a combination of a JSON manifest and individual layer files. The
@@ -609,6 +719,25 @@ Note that the upload url will not be available forever. If the upload uuid is
unknown to the registry, a `404 Not Found` response will be returned and the
client must restart the upload process.

+### Deleting a Layer
+
+A layer may be deleted from the registry via its `name` and `digest`. A
+delete may be issued with the following request format:
+
+    DELETE /v2/<name>/blobs/<digest>
+
+If the blob exists and has been successfully deleted, the following response
+will be issued:
+
+    202 Accepted
+    Content-Length: None
+
+If the blob had already been deleted or did not exist, a `404 Not Found`
+response will be issued instead.
+
+If a layer is deleted which is referenced by a manifest in the registry,
+then the complete image will not be resolvable.
+
#### Pushing an Image Manifest

Once all of the layers for an image are uploaded, the client can upload the
@@ -655,7 +784,131 @@ each unknown blob. The response format is as follows:

        ]
    }

-#### Listing Image Tags
+### Listing Repositories
+
+Images are stored in collections known as _repositories_, each keyed by a
+`name`, as seen throughout the API specification. A registry instance may
+contain several repositories. The list of available repositories is made
+available through the _catalog_.
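Before the detailed request/response walkthrough below, here is a rough Go sketch of a client that pages through the catalog by following the RFC5988 `Link` headers; it assumes an unauthenticated registry reachable at `base` and a relative next URL, with error handling abbreviated:

```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"regexp"
)

// nextLink extracts the URL from an RFC5988 rel="next" Link header.
var nextLink = regexp.MustCompile(`<([^>]+)>;\s*rel="next"`)

func catalog(base string) ([]string, error) {
	var repos []string
	url := base + "/v2/_catalog?n=100"
	for url != "" {
		resp, err := http.Get(url)
		if err != nil {
			return nil, err
		}
		var page struct {
			Repositories []string `json:"repositories"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
			resp.Body.Close()
			return nil, err
		}
		resp.Body.Close()
		repos = append(repos, page.Repositories...)

		url = "" // stop unless the server advertises another page
		if m := nextLink.FindStringSubmatch(resp.Header.Get("Link")); m != nil {
			// Assumes a relative next URL such as /v2/_catalog?n=100&last=...
			url = base + m[1]
		}
	}
	return repos, nil
}

func main() {
	repos, err := catalog("http://localhost:5000")
	if err != nil {
		panic(err)
	}
	fmt.Println(repos)
}
```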
+
+The catalog for a given registry can be retrieved with the following request:
+
+```
+GET /v2/_catalog
+```
+
+The response will be in the following format:
+
+```
+200 OK
+Content-Type: application/json
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+Note that the contents of the response are specific to the registry
+implementation. Some registries may opt to provide a full catalog output,
+limit it based on the user's access level or omit upstream results, if
+providing mirroring functionality. Subsequently, the presence of a repository
+in the catalog listing only means that the registry *may* provide access to
+the repository at the time of the request. Conversely, a missing entry does
+*not* mean that the registry does not have the repository. More succinctly,
+the catalog can confirm that a repository is present, but it cannot prove that
+a repository is absent.
+
+For registries with a large number of repositories, this response may be quite
+large. If such a response is expected, one should use pagination.
+
+#### Pagination
+
+Paginated catalog results can be retrieved by adding an `n` parameter to the
+request URL, declaring that the response should be limited to `n` results.
+Starting a paginated flow begins as follows:
+
+```
+GET /v2/_catalog?n=<integer>
+```
+
+The above specifies that a catalog response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"
+
+{
+  "repositories": [
+    <name>,
+    ...
+  ]
+}
+```
+
+The above includes the _first_ `n` entries from the result set. To get the
+_next_ `n` entries, one can create a URL where the argument `last` has the
+value from `repositories[len(repositories)-1]`. If there are indeed more
+results, the URL for the next block is encoded in an
+[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next"
+relation. The presence of the `Link` header communicates to the client that
+the entire result set has not been returned and another request must be
+issued. If the header is not present, the client can assume that all results
+have been received.
+
+> __NOTE:__ In the request template above, note that the brackets
+> are required. For example, if the url is
+> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
+> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
+> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.
+
+Compliant client implementations should always use the `Link` header
+value when proceeding through results linearly. The client may construct URLs
+to skip forward in the catalog.
+
+To get the next result set, a client would issue the request as follows, using
+the URL encoded in the described `Link` header:
+
+```
+GET /v2/_catalog?n=<n from the request>&last=<last repository value from previous response>
+```
+
+The above process should then be repeated until the `Link` header is no longer
+set.
+
+The catalog result set is represented abstractly as a lexically sorted list,
+where the position in that list can be specified by the query term `last`. The
+entries in the response start _after_ the term specified by `last`, up to `n`
+entries.
+
+The behavior of `last` is quite simple when demonstrated with an example. Let
+us say the registry has the following repositories:
+
+```
+a
+b
+c
+d
+```
+
+If the value of `n` is 2, _a_ and _b_ will be returned on the first response.
+The `Link` header returned on the response will have `n` set to 2 and last set
+to _b_:
+
+```
+Link: <<url>?n=2&last=b>; rel="next"
+```
+
+The client can then issue the request with the above value from the `Link`
+header, receiving the values _c_ and _d_. Note that `n` may change on the
+second-to-last response, or be omitted entirely, if the server so chooses.
+
+### Listing Image Tags

It may be necessary to list all of the tags under a given repository. The tags
for an image repository can be retrieved with the following request:
@@ -676,8 +929,51 @@ The response will be in the following format:
 }
 
 For repositories with a large number of tags, this response may be quite
-large, so care should be taken by the client when parsing the response to
-reduce copying.
+large. If such a response is expected, one should use pagination.
+
+#### Pagination
+
+Paginated tag results can be retrieved by adding the appropriate parameters to
+the request URL described above. The behavior of tag pagination is identical
+to that specified for catalog pagination. We cover a simple flow to highlight
+any differences.
+
+Starting a paginated flow may begin as follows:
+
+```
+GET /v2/<name>/tags/list?n=<integer>
+```
+
+The above specifies that a tags response should be returned, from the start of
+the result set, ordered lexically, limiting the number of results to `n`. The
+response to such a request would look as follows:
+
+```
+200 OK
+Content-Type: application/json
+Link: <<url>?n=<n from the request>&last=<last tag in response>>; rel="next"
+
+{
+  "name": <name>,
+  "tags": [
+    <tag>,
+    ...
+  ]
+}
+```
+
+To get the next result set, a client would issue the request as follows, using
+the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link`
+header:
+
+```
+GET /v2/<name>/tags/list?n=<n from the request>&last=<last tag value from previous response>
+```
+
+The above process should then be repeated until the `Link` header is no longer
+set in the response. The behavior of the `last` parameter, the provided
+response result, lexical ordering and encoding of the `Link` header are
+identical to that of catalog pagination.

### Deleting an Image
@@ -715,7 +1011,7 @@ corresponding responses, with success and failure, are enumerated. A list of
methods and URIs are covered in the table below:

|Method|Path|Entity|Description|
--------|----|------|------------
+|------|----|------|-----------|
{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} |
{{end}}{{end}}

The detail for each endpoint is covered in the following sections.
The error codes encountered via the API are enumerated in the following table: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| {{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} {{end}} @@ -745,7 +1041,7 @@ The error codes encountered via the API are enumerated in the following table: ##### {{.Name}}{{end}} ``` -{{$method.Method}} {{$route.Path|prettygorilla}}{{if .QueryParameters}}?{{range .QueryParameters}}{{.Name}}={{.Format}}{{end}}{{end}}{{range .Headers}} +{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} {{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} @@ -777,8 +1073,13 @@ Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} ``` {{.Description}} +{{if .Fields}}The following fields may be returned in the response body: -{{if .Headers}}The following headers will be returned with the response: +|Name|Description| +|----|-----------| +{{range .Fields}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}}{{if .Headers}} +The following headers will be returned with the response: |Name|Description| |----|-----------| @@ -810,7 +1111,7 @@ The following headers will be returned on the response: The error codes that may be included in the response body are enumerated below: |Code|Message|Description| --------|----|------|------------ +|----|-------|-----------| {{range $err := .ErrorCodes}}| `{{$err}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | {{end}} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md index 5a394cbe43c1..a2da9483c2f6 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md @@ -1,8 +1,13 @@ - + + # Docker Registry v2 authentication via central service @@ -271,7 +276,7 @@ Token has 3 main parts: name
    - The name of the recource of the given type hosted by the + The name of the resource of the given type hosted by the service.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md
index 34440c717a17..a7b1807f38be 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md
@@ -1,8 +1,14 @@
-
+
+
# Docker Distribution JSON Canonicalization
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
index 9cda0547a8b1..259e3cf65861 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md
@@ -1,8 +1,16 @@
 # Image Manifest Version 2, Schema 1
 
-This document outlines the format of the V2 image manifest. Image manifests
-describe the various constituents of a docker image. Image manifests can be
- serialized to JSON format with the following media types:
+This document outlines the format of the V2 image manifest. The image
+manifest described herein was introduced in the Docker daemon in the [v1.3.0
+release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453).
+It is a provisional manifest to provide compatibility with the [V1 Image
+format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the
+requirements are defined for the [V2 Schema 2
+image](https://github.com/docker/distribution/pull/62).
+
+
+Image manifests describe the various constituents of a docker image. Image
+manifests can be serialized to JSON format with the following media types:
 
 Manifest Type | Media Type
 ------------- | -------------
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md
index fd46ece390c4..f994f38af365 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md
@@ -1,8 +1,11 @@
-
+
+
# Microsoft Azure storage driver
@@ -17,6 +20,5 @@ The following parameters must be used to authenticate and configure the storage
 * `container`: Name of the root storage container in which all registry data will be stored. Must comply with the storage container name [requirements][create-container-api].
 * `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example, the realm for "Azure in China" would be `core.chinacloudapi.cn` and the realm for "Azure Government" would be `core.usgovcloudapi.net`.
-
[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/
-[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
\ No newline at end of file
+[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md
index fa9f8259e19e..2dbad8cdcd16 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md
@@ -1,8 +1,11 @@
-
+
+
# Filesystem storage driver
@@ -10,4 +13,4 @@ An implementation of the `storagedriver.StorageDriver` interface which uses the
 ## Parameters
 
-`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/tmp/registry/storage`.
+`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/var/lib/registry`.
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md
index 948cd5bcbd64..f43e1510fd69 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md
@@ -1,8 +1,11 @@
-
+
+
# In-memory storage driver
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md
new file mode 100755
index 000000000000..174a5e562978
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md
@@ -0,0 +1,33 @@
+
+# Aliyun OSS storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage.
+
+## Parameters
+
+* `accesskeyid`: Your access key ID.
+
+* `accesskeysecret`: Your access key secret.
+
+* `region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at
+
+* `endpoint`: (optional) By default, the endpoint should be `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when internal=true). You can change the default endpoint by changing this value.
+
+* `internal`: (optional) Whether to use the internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at
+
+* `bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization).
+
+* `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
+
+* `secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to false if not specified.
+
+* `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS.
+
+* `rootdirectory`: (optional) The root directory tree in which all registry files will be stored.
Defaults to the empty string (bucket root).
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md
new file mode 100644
index 000000000000..4b630e19ae51
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md
@@ -0,0 +1,40 @@
+
+# Ceph RADOS storage driver
+
+An implementation of the `storagedriver.StorageDriver` interface which uses
+[Ceph RADOS Object Storage][rados] as the storage backend.
+
+## Parameters
+
+The following parameters must be used to configure the storage driver
+(case-sensitive):
+
+* `poolname`: Name of the Ceph pool
+* `username` *optional*: The user to connect as (i.e. admin, not client.admin)
+* `chunksize` *optional*: Size of the written RADOS objects. Default value is
+4MB (4194304).
+
+This driver loads the [Ceph client configuration][rados-config] from the
+following regular paths (the first one found is used):
+
+* `$CEPH_CONF` (environment variable)
+* `/etc/ceph/ceph.conf`
+* `~/.ceph/config`
+* `ceph.conf` (in the current working directory)
+
+## Developing
+
+To include this driver when building Docker Distribution, use the build tag
+`include_rados`. Please see the [building documentation][building] for details.
+
+[rados]: http://ceph.com/docs/master/rados/
+[rados-config]: http://ceph.com/docs/master/rados/configuration/ceph-conf/
+[building]: https://github.com/docker/distribution/blob/master/docs/building.md#optional-build-tags
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md
index e6c11348553e..8dc3b234024c 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md
+++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md
@@ -1,8 +1,11 @@
-
+
+
# S3 storage driver
@@ -22,7 +25,7 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Amaz
 `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
 
-`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transfering over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
+`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
 
 `v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests.
This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md new file mode 100644 index 000000000000..372cb6abc07d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md @@ -0,0 +1,139 @@ + + + +# OpenStack Swift storage driver + +An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage. + +## Parameters +
+* `authurl`: URL for obtaining an auth token.
+
+* `username`: Your OpenStack user name.
+
+* `password`: Your OpenStack password.
+
+* `container`: The name of your Swift container where you wish to store objects. An additional container whose name carries the `_segments` suffix stores the segment data. The driver creates both the named container and the segments container during its initialization.
+
+* `tenant`: Optionally, your OpenStack tenant name. You can either use `tenant` or `tenantid`.
+
+* `tenantid`: Optionally, your OpenStack tenant id. You can either use `tenant` or `tenantid`.
+
+* `domain`: Optionally, your OpenStack domain name for Identity v3 API. You can either use `domain` or `domainid`.
+
+* `domainid`: Optionally, your OpenStack domain id for Identity v3 API. You can either use `domain` or `domainid`.
+
+* `insecureskipverify`: Optionally, set `insecureskipverify` to true to skip TLS verification for your OpenStack provider. The driver uses false by default.
+
+* `region`: Optionally, specify the OpenStack region name in which you would like to store objects (for example `fr`).
+
+* `chunksize`: Optionally, specify the segment size for Dynamic Large Objects uploads (performed by WriteStream) to Swift. The default is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to Swift.
+
+* `prefix`: Optionally, supply the root directory tree in which to store all registry files. Defaults to the empty string which is the container's root.
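To show how these parameters are consumed, below is a minimal sketch of constructing the Swift driver through the factory system described in this tree's `docs/storagedrivers.md`. All credential and endpoint values are placeholders rather than working settings, and the import paths assume this vendored layout.

```go
package main

import (
	"log"

	"github.com/docker/distribution/registry/storage/driver/factory"
	// Blank import for its side effect: the swift package's init
	// function registers the "swift" driver with the factory.
	_ "github.com/docker/distribution/registry/storage/driver/swift"
)

func main() {
	// Keys match the (case-sensitive) parameters documented above;
	// every value here is a placeholder.
	params := map[string]interface{}{
		"authurl":   "https://keystone.example.com/v2.0",
		"username":  "registry",
		"password":  "secret",
		"container": "registry-storage",
		"region":    "fr",
	}

	driver, err := factory.Create("swift", params)
	if err != nil {
		log.Fatalf("constructing swift driver: %v", err)
	}
	_ = driver // ready to back a registry storage instance
}
```

Supplying the same keys under `storage: swift:` in the registry's YAML configuration should have the equivalent effect, since the configuration layer hands the parameter map to this same factory.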
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md index e476457d36be..b014049c4094 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md @@ -1,8 +1,14 @@ - + + # Docker Registry Storage Driver @@ -16,6 +22,9 @@ This storage driver package comes bundled with several drivers: - [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem. - [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket. - [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). +- [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool. +- [swift](storage-drivers/swift.md): A driver storing objects in [OpenStack Swift](http://docs.openstack.org/developer/swift/). +- [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss). ## Storage Driver API @@ -23,29 +32,30 @@ The storage driver API is designed to model a filesystem-like key/value storage Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key. -Storage drivers are intended (but not required) to be written in go, providing compile-time validation of the `storagedriver.StorageDriver` interface, although an IPC driver wrapper means that it is not required for drivers to be included in the compiled registry. The `storagedriver/ipc` package provides a client/server protocol for running storage drivers provided in external executables as a managed child server process. +Storage drivers are intended to be written in Go, providing compile-time +validation of the `storagedriver.StorageDriver` interface. ## Driver Selection and Configuration The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the built-in [database/sql](http://golang.org/pkg/database/sql) package. -Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the executable name "registry-storage-\" and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`. +Storage driver factories may be registered by name using the +`factory.Register` method, and then later invoked by calling `factory.Create` +with a driver name and parameters map. If no such storage driver can be found, +`factory.Create` will return an `InvalidStorageDriverError`.
## Driver Contribution ### Writing new storage drivers -To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system and as a distributable IPC server executable. +To create a valid storage driver, one must implement the +`storagedriver.StorageDriver` interface and make sure to expose this driver +via the factory system. -#### In-process drivers +#### Registering Storage drivers should call `factory.Register` with their driver name in an `init` function, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase, as shown in the sketch after this section. -#### Out-of-process drivers -As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `storagedriver/filesystem/registry-storage-filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver. - -Out-of-process drivers must also implement the `ipc.IPCStorageDriver` interface, which exposes a `Version` check for the storage driver. This is used to validate storage driver api compatibility at driver load-time. - ## Testing -Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively. - -## Drivers written in other languages -Although storage drivers are strongly recommended to be written in go for consistency, compile-time validation, and support, the IPC framework allows for a level of language-agnosticism. Non-go drivers must implement the storage driver protocol by mimicing StorageDriverServer in `storagedriver/ipc/server.go`. As the IPC framework is a layer on top of [docker/libchan](https://github.com/docker/libchan), this currently limits language support to Java via [ndeloof/chan](https://github.com/ndeloof/jchan) and Javascript via [GraftJS/jschan](https://github.com/GraftJS/jschan), although contributions to the libchan project are welcome. +Storage driver test suites are provided in +`storagedriver/testsuites/testsuites.go` and may be used for any storage +driver written in Go. Tests can be registered using the `RegisterSuite` +function, which runs the same set of tests for any registered drivers.
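To make the registration pattern above concrete, here is a minimal sketch of a driver package wired into the factory. The `noop` name, the package itself, and the elided constructor are hypothetical; only the `factory.Register` call and the `factory.StorageDriverFactory` shape reflect the interfaces this document describes.

```go
package noop

import (
	"fmt"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/factory"
)

// driverName is a hypothetical name used only for this sketch.
const driverName = "noop"

// noopDriverFactory implements factory.StorageDriverFactory.
type noopDriverFactory struct{}

// Create would normally validate the parameters map and return a value
// implementing the full storagedriver.StorageDriver interface; the
// actual construction is elided in this sketch.
func (noopDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	return nil, fmt.Errorf("%s: driver construction elided in this sketch", driverName)
}

// init registers the factory, so that any program importing this
// package (even with a blank import) can resolve the driver by name
// via factory.Create(driverName, params).
func init() {
	factory.Register(driverName, noopDriverFactory{})
}
```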
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/test.compare.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/test.compare.md new file mode 100644 index 000000000000..86087a15c2c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/test.compare.md @@ -0,0 +1,16 @@ + +WHATEVER + + +WHATEVER AGAIN + + +[display11](/placeholder/link-1) [display12](/placeholder/link_2) [display13](/placeholder/link/3) +[display21](/placeholder/link-1) [display22](/placeholder/link_2) [display23](/placeholder/link/3) +[display31.md](/placeholder/link-1) [display32.md](/placeholder/link_2) [display33.md](/placeholder/link/3) +[display41](/placeholder/link-1) [display42](/placeholder/link_2) [display43](/placeholder/link/3) +[display51](/placeholder/link-1) [display52](/placeholder/link_2) [display53](/placeholder/link/3) +[display61](/placeholder/link-1) [display62](/placeholder/link_2) [display63](/placeholder/link/3) +[display71](/placeholder/link-1#something-else) [display62](/placeholder/link_2#else) [display63](#else) + +[dont-touch](https://somewhere/foo.md#dont) [dont-touch](https://somewhere/foo.md#dont) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/test.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/test.md new file mode 100644 index 000000000000..3c139cb30a08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/docs/test.md @@ -0,0 +1,16 @@ + + + +[display11](/link-1) [display12](/link_2) [display13](/link/3) +[display21](link-1.md) [display22](link_2.md) [display23](link/3.md) +[display31.md](link-1.md) [display32.md](link_2.md) [display33.md](link/3.md) +[display41](./link-1.md) [display42](./link_2.md) [display43](./link/3.md) +[display51](../link-1.md) [display52](../link_2.md) [display53](../link/3.md) +[display61](../../link-1.md) [display62](../../link_2.md) [display63](../../link/3.md) +[display71](link-1.md#something-else) [display62](link_2#else) [display63](#else) + +[dont-touch](https://somewhere/foo.md#dont) [dont-touch](https://somewhere/foo.md#dont) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/errors.go index 7883b9f736fc..4511fde67335 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/errors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/errors.go @@ -5,23 +5,11 @@ import ( "strings" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" ) -var ( - // ErrLayerExists returned when layer already exists - ErrLayerExists = fmt.Errorf("layer exists") - - // ErrLayerTarSumVersionUnsupported when tarsum is unsupported version. - ErrLayerTarSumVersionUnsupported = fmt.Errorf("unsupported tarsum version") - - // ErrLayerUploadUnknown returned when upload is not found. - ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown") - - // ErrLayerClosed returned when an operation is attempted on a closed - // Layer or LayerUpload. - ErrLayerClosed = fmt.Errorf("layer closed") -) +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed +var ErrUnsupported = fmt.Errorf("operation unsupported") // ErrRepositoryUnknown is returned if the named repository is not known by // the registry. 
@@ -30,7 +18,7 @@ type ErrRepositoryUnknown struct { } func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown respository name=%s", err.Name) + return fmt.Sprintf("unknown repository name=%s", err.Name) } // ErrRepositoryNameInvalid should be used to denote an invalid repository @@ -55,14 +43,14 @@ func (err ErrManifestUnknown) Error() string { return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) } -// ErrUnknownManifestRevision is returned when a manifest cannot be found by +// ErrManifestUnknownRevision is returned when a manifest cannot be found by // revision within a repository. -type ErrUnknownManifestRevision struct { +type ErrManifestUnknownRevision struct { Name string Revision digest.Digest } -func (err ErrUnknownManifestRevision) Error() string { +func (err ErrManifestUnknownRevision) Error() string { return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) } @@ -88,22 +76,11 @@ func (errs ErrManifestVerification) Error() string { return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) } -// ErrUnknownLayer returned when layer cannot be found. -type ErrUnknownLayer struct { - FSLayer manifest.FSLayer -} - -func (err ErrUnknownLayer) Error() string { - return fmt.Sprintf("unknown layer %v", err.FSLayer.BlobSum) -} - -// ErrLayerInvalidDigest returned when tarsum check fails. -type ErrLayerInvalidDigest struct { +// ErrManifestBlobUnknown returned when a referenced blob cannot be found. +type ErrManifestBlobUnknown struct { Digest digest.Digest - Reason error } -func (err ErrLayerInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) +func (err ErrManifestBlobUnknown) Error() string { + return fmt.Sprintf("unknown blob %v on manifest", err.Digest) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/export.patch b/Godeps/_workspace/src/github.com/docker/distribution/export.patch deleted file mode 100644 index 7d1b39b0d485..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/export.patch +++ /dev/null @@ -1,38272 +0,0 @@ -blob -mark :1 -data 918 -image: dmp42/go:stable - -script: - # To be spoofed back into the test image - - go get github.com/modocache/gover - - - go get -t ./... - - # Go fmt - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - # Go lint - - test -z "$(golint ./... | tee /dev/stderr)" - # Go vet - - go vet ./... - # Go test - - go test -v -race -cover ./... - # Helper to concatenate reports - - gover - # Send to coverall - - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} - - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck - -notify: - email: - recipients: - - distribution@docker.com - - slack: - team: docker - channel: "#dt" - username: mom - token: {{SLACK_TOKEN}} - on_success: true - on_failure: true - -blob -mark :2 -data 433 -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files. 
-*.sublime-project -*.sublime-workspace - -blob -mark :3 -data 482 -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -blob -mark :4 -data 1043 -Ahmet Alp Balkan -Amy Lindburg -Andrey Kostov -Andy Goldstein -Anton Tiurin -Arnaud Porterie -Ben Firshman -Brian Bland -Daisuke Fujita -David Lawrence -Derek McGowan -Diogo Mónica -Donald Huang -Frederick F. Kautz IV -Jessie Frazelle -Josh Hawn -Kenneth Lim -Mary Anthony -Nathan Sullivan -Nghia Tran -Olivier Gambier -Richard Scothern -Shreyas Karnik -Simon Thulbourn -Stephen J Day -Tianon Gravi -xiekeyang - -blob -mark :5 -data 5006 -# Contributing to the registry - -## Are you having issues? - -Please first try any of these support forums before opening an issue: - - * irc #docker on freenode (archives: [https://botbot.me/freenode/docker/]) - * https://forums.docker.com/ - * if your problem is with the "hub" (the website and other user-facing components), or about automated builds, then please direct your issues to https://support.docker.com - -## So, you found a bug? - -First check if your problem was already reported in the issue tracker. - -If it's already there, please refrain from adding "same here" comments - these don't add any value and are only adding useless noise. **Said comments will quite often be deleted at sight**. On the other hand, if you have any technical, relevant information to add, by all means do! - -Your issue is not there? Then please, create a ticket. - -If possible the following guidelines should be followed: - - * try to come up with a minimal, simple to reproduce test-case - * try to add a title that describe succinctly the issue - * if you are running your own registry, please provide: - * registry version - * registry launch command used - * registry configuration - * registry logs - * in all cases: - * `docker version` and `docker info` - * run your docker daemon in debug mode (-D), and provide docker daemon logs - -## You have a patch for a known bug, or a small correction? - -Basic github workflow (fork, patch, make sure the tests pass, PR). - -... and some simple rules to ensure quick merge: - - * clearly point to the issue(s) you want to fix - * when possible, prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - * if you need to amend your PR following comments, squash instead of adding more commits - -## You want some shiny new feature to be added? - -Fork the project. - -Create a new proposal in the folder `open-design/specs`, named `DEP_MY_AWESOME_PROPOSAL.md`, using `open-design/specs/TEMPLATE.md` as a starting point. - -Then immediately submit this new file as a pull-request, in order to get early feedback. - -Eventually, you will have to update your proposal to accommodate the feedback you received. - -Usually, it's not advisable to start working too much on the implementation itself before the proposal receives sufficient feedback, since it can significantly altered (or rejected). - -Your implementation should then be submitted as a separate PR, that will be reviewed as well. - -## Issue and PR labels - -To keep track of the state of issues and PRs, we've adopted a set of simple labels. The following are currently in use: - -
* **Backlog**: Issues marked with this label are considered not yet ready for implementation. Either they are untriaged or require further detail to proceed.
* **Blocked**: If an issue requires further clarification or is blocked on an unresolved dependency, this label should be used.
* **Sprint**: Issues marked with this label are being worked in the current sprint. All required information should be available and design details have been worked out.
* **In Progress**: The issue or PR is being actively worked on by the assignee.
* **Done**: Issues marked with this label are complete. This can be considered a pseudo-label, in that if it is closed, it is considered "Done".
    - -These integrate with waffle.io to show the current status of the project. The project board is available at the following url: - -https://waffle.io/docker/distribution - -If an issue or PR is not labeled correctly or you believe it is not in the right state, please contact a maintainer to fix the problem. - -## Milestones - -Issues and PRs should be assigned to relevant milestones. If an issue or PR is assigned a milestone, it should be available by that date. Depending on level of effort, items may be shuffled in or out of milestones. Issues or PRs that don't have a milestone are considered unscheduled. Typically, "In Progress" issues should have a milestone. - -## PR Titles - -PR titles should be lowercased, except for proper noun references (such a -method name or type). - -PR titles should be prefixed with affected directories, comma separated. For -example, if a specification is modified, the prefix would be "doc/spec". If -the modifications are only in the root, do not include it. If multiple -directories are modified, include each, separated by a comma and space. - -Here are some examples: - -- doc/spec: move API specification into correct position -- context, registry, auth, auth/token, cmd/registry: context aware logging - -blob -mark :6 -data 288 -FROM golang:1.4 - -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH - -WORKDIR $DISTRIBUTION_DIR -COPY . $DISTRIBUTION_DIR -RUN make PREFIX=/go clean binaries - -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["cmd/registry/config.yml"] - -blob -mark :7 -data 3219 -{ - "ImportPath": "github.com/docker/distribution", - "GoVersion": "go1.4.2", - "Packages": [ - "./..." - ], - "Deps": [ - { - "ImportPath": "code.google.com/p/go-uuid/uuid", - "Comment": "null-15", - "Rev": "35bc42037350f0078e3c974c6ea690f1926603ab" - }, - { - "ImportPath": "github.com/AdRoll/goamz/aws", - "Rev": "d3664b76d90508cdda5a6c92042f26eab5db3103" - }, - { - "ImportPath": "github.com/AdRoll/goamz/cloudfront", - "Rev": "d3664b76d90508cdda5a6c92042f26eab5db3103" - }, - { - "ImportPath": "github.com/AdRoll/goamz/s3", - "Rev": "d3664b76d90508cdda5a6c92042f26eab5db3103" - }, - { - "ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/storage", - "Comment": "v1.2-43-gd90753b", - "Rev": "d90753bcad2ed782fcead7392d1e831df29aa2bb" - }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.7.3", - "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" - }, - { - "ImportPath": "github.com/bugsnag/bugsnag-go", - "Comment": "v1.0.2-5-gb1d1530", - "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" - }, - { - "ImportPath": "github.com/bugsnag/osext", - "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" - }, - { - "ImportPath": "github.com/bugsnag/panicwrap", - "Rev": "e5f9854865b9778a45169fc249e99e338d4d6f27" - }, - { - "ImportPath": "github.com/codegangsta/cli", - "Comment": "1.2.0-66-g6086d79", - "Rev": "6086d7927ec35315964d9fea46df6c04e6d697c1" - }, - { - "ImportPath": "github.com/docker/docker/pkg/tarsum", - "Comment": "v1.4.1-863-g165ea5c", - "Rev": "165ea5c158cff3fc40d476ffe233a5ccc03e7d61" - }, - { - "ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar", - "Comment": "v1.4.1-863-g165ea5c", - "Rev": "165ea5c158cff3fc40d476ffe233a5ccc03e7d61" - }, - { - "ImportPath": "github.com/docker/libtrust", - "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" - }, - { - "ImportPath": "github.com/garyburd/redigo/internal", - "Rev": 
"535138d7bcd717d6531c701ef5933d98b1866257" - }, - { - "ImportPath": "github.com/garyburd/redigo/redis", - "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" - }, - { - "ImportPath": "github.com/gorilla/context", - "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" - }, - { - "ImportPath": "github.com/gorilla/handlers", - "Rev": "0e84b7d810c16aed432217e330206be156bafae0" - }, - { - "ImportPath": "github.com/gorilla/mux", - "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" - }, - { - "ImportPath": "github.com/jlhawn/go-crypto", - "Rev": "cd738dde20f0b3782516181b0866c9bb9db47401" - }, - { - "ImportPath": "github.com/yvasiyarov/go-metrics", - "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" - }, - { - "ImportPath": "github.com/yvasiyarov/gorelic", - "Comment": "v0.0.6-8-ga9bba5b", - "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" - }, - { - "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", - "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974" - }, - { - "ImportPath": "gopkg.in/check.v1", - "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" - }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" - } - ] -} - -blob -mark :8 -data 136 -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. - -blob -mark :9 -data 11325 -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -blob -mark :10 -data 177 -Solomon Hykes (@shykes) -Olivier Gambier (@dmp42) -Sam Alba (@samalba) -Stephen Day (@stevvooe) - -blob -mark :11 -data 2423 -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version $(VERSION)" - -.PHONY: clean all fmt vet lint build test binaries -.DEFAULT: default -all: AUTHORS clean fmt vet fmt lint build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go') - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ./cmd/registry - -${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . 
-type f -name '*.go') - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ./cmd/registry-api-descriptor-template - -${PREFIX}/bin/dist: version/version.go $(shell find . -type f -name '*.go') - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ./cmd/dist - -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ - -vet: - @echo "+ $@" - @go vet ./... - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \ - echo "+ please format Go code with 'gofmt -s'" - -lint: - @echo "+ $@" - @test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" - -build: - @echo "+ $@" - @go build -v ${GO_LDFLAGS} ./... - -test: - @echo "+ $@" - @go test -test.short ./... - -test-full: - @echo "+ $@" - @go test ./... - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/registry-api-descriptor-template ${PREFIX}/bin/dist - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template" - - -# Use the existing docs build cmds from docker/docker -# Later, we will move this into an import -DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -DOCSPORT := 8000 -DOCKER_DOCS_IMAGE := docker-docs-$(VERSION) -DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE - -docs: docs-build - $(DOCKER_RUN_DOCS) -p $(DOCSPORT):8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve - -docs-shell: docs-build - $(DOCKER_RUN_DOCS) -p $(DOCSPORT):8000 "$(DOCKER_DOCS_IMAGE)" bash - -docs-build: - docker build -t "$(DOCKER_DOCS_IMAGE)" -f docs/Dockerfile . - -blob -mark :12 -data 4813 -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository's main product is the Docker Registry Service 2.0 implementation -for storing and distributing Docker images. It supersedes the [docker/docker- -registry](https://github.com/docker/docker-registry) project with a new API -design, focused around security and performance. - -This repository contains the following components: - -|**Component** |Description | -|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | -| **dist** | An _experimental_ tool to provide distribution, oriented functionality without the `docker` daemon. | -| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/overview.md) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation to a V2 API for use in the [Docker -core project](https://github.com/docker/docker). The API should be embeddable -and simplify the process of securely pulling and pushing content from `docker` -daemons. - -### What are the long term goals of the Distribution project? 
- -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with Docker as they are without. - -Our goal is to design a professional grade and extensible content distribution -system that allow users to: - -* Enjoy an efficient, secured and reliable way to store, manage, package and - exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own home made solution through good specs, and solid - extensions mechanism. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](http://docs.docker.com/installation) gives users this -ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For -others, it is not. - -For example, users with their own software products and may want to maintain an -registry for private, company images. Also, you may wish to deploy your own -image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](docs/deploying.md) -may be the better choice. - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - - - - - - - - - - - - - - - - - - -
* IRC: #docker-distribution on FreeNode
* Issue Tracker: github.com/docker/distribution/issues
* Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
* Mailing List: docker@dockerproject.org
    - - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE.md). - -blob -mark :13 -data 3910 -# Roadmap - -The Distribution Project consists of several components, some of which are still being defined. This document defines the high-level goals of the project, identifies the current components, and defines the release-relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. -- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -Registry 2.0 is the first release of the next-generation registry. This is primarily -focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/doc/spec/api.md), with -a focus on security and performance. - -#### Registry 2.0 - -Features: - -- Faster push and pull -- New, more efficient implementation -- Simplified deployment -- Full API specification for V2 protocol -- Pluggable storage system (s3, azure, filesystem and inmemory supported) -- Immutable manifest references ([#46](https://github.com/docker/distribution/issues/46)) -- Webhook notification system ([#42](https://github.com/docker/distribution/issues/42)) -- Native TLS Support ([#132](https://github.com/docker/distribution/pull/132)) -- Pluggable authentication system -- Health Checks ([#230](https://github.com/docker/distribution/pull/230)) - -#### Registry 2.1 - -Planned Features: - -> **NOTE:** This feature list is incomplete at this time. - -- Support for Manifest V2, Schema 2 and explicit tagging objects ([#62](https://github.com/docker/distribution/issues/62), [#173](https://github.com/docker/distribution/issues/173)) -- Mirroring ([#19](https://github.com/docker/distribution/issues/19)) -- Flexible client package based on distribution interfaces ([#193](https://github.com/docker/distribution/issues/193) - -#### Registry 2.2 - -TBD - -*** - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. - -For feature additions, please see the Registry section. In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -Distribution Components map to Docker Platform Releases via the use of labels. Project Pages are used to define the set of features that are included in each Docker Platform Release. 
- -| Platform Version | Label | Planning | -|-----------|------|-----| -| Docker 1.6 | [Docker/1.6](https://github.com/docker/distribution/labels/docker%2F1.6) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.6-Project-Page) | -| Docker 1.7| [Docker/1.7](https://github.com/docker/distribution/labels/docker%2F1.7) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.7-Project-Page) | -| Docker 1.8| [Docker/1.8](https://github.com/docker/distribution/labels/docker%2F1.8) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.8-Project-Page) | - - -blob -mark :14 -data 3610 -# Pony-up! -machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - - post: - # Install many go versions - - gvm install go1.3.3 -B --name=old - - gvm install go1.4 -B --name=stable - # - gvm install tip --name=bleed - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - # BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use old && - mkdir -p "$(dirname $BASE_OLD)" && - cp -R "$CHECKOUT" "$BASE_OLD" - - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - # - > - # gvm use bleed && - # mkdir -p "$(dirname $BASE_BLEED)" && - # cp -R "$CHECKOUT" "$BASE_BLEED" - - override: - # Install dependencies for every copied clone/go version - - gvm use old && go get github.com/tools/godep: - pwd: $BASE_OLD - - - gvm use stable && go get github.com/tools/godep: - pwd: $BASE_STABLE - - # - gvm use bleed && go get github.com/tools/godep: - # pwd: $BASE_BLEED - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - - gvm use old && go version - - gvm use stable && go version - # - gvm use bleed && go version - - # FMT - - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": - pwd: $BASE_STABLE - - # VET - - gvm use stable && go vet ./...: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && test -z "$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": - pwd: $BASE_STABLE - - override: - # Test every version we have (but stable) - - gvm use old; godep go test -test.v -test.short ./...: - timeout: 600 - pwd: $BASE_OLD - - # - gvm use bleed; go test -test.v -test.short ./...: - # timeout: 600 - # pwd: $BASE_BLEED - - # Test stable, and report - # Preset the goverall report file - - echo "$CIRCLE_PAIN" > ~/goverage.report - - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out: - pwd: $BASE_STABLE - - - gvm use stable; go list ./... 
| xargs -L 1 -I{} godep go test -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}: - timeout: 600 - pwd: $BASE_STABLE - - post: - # Aggregate and report to coveralls - - gvm use stable; go list ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report: - pwd: $BASE_STABLE - - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN: - pwd: $BASE_STABLE - - ## Notes - # Disabled the -race detector due to massive memory usage. - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck - -blob -mark :15 -data 202 -package main - -import "github.com/codegangsta/cli" - -var ( - commandList = cli.Command{ - Name: "images", - Usage: "List available images", - Action: imageList, - } -) - -func imageList(c *cli.Context) { -} - -blob -mark :16 -data 297 -package main - -import ( - "os" - - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "dist" - app.Usage = "Package and ship Docker content" - - app.Action = commandList.Action - app.Commands = []cli.Command{ - commandList, - commandPull, - commandPush, - } - app.Run(os.Args) -} - -blob -mark :17 -data 376 -package main - -import "github.com/codegangsta/cli" - -var ( - commandPull = cli.Command{ - Name: "pull", - Usage: "Pull and verify an image from a registry", - Action: imagePull, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "r,registry", - Value: "hub.docker.io", - Usage: "Registry to use (e.g.: localhost:5000)", - }, - }, - } -) - -func imagePull(c *cli.Context) { -} - -blob -mark :18 -data 361 -package main - -import "github.com/codegangsta/cli" - -var ( - commandPush = cli.Command{ - Name: "push", - Usage: "Push an image to a registry", - Action: imagePush, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "r,registry", - Value: "hub.docker.io", - Usage: "Registry to use (e.g.: localhost:5000)", - }, - }, - } -) - -func imagePush(*cli.Context) { -} - -blob -mark :19 -data 2293 -// registry-api-descriptor-template uses the APIDescriptor defined in the -// api/v2 package to execute templates passed to the command line. -// -// For example, to generate a new API specification, one would execute the -// following command from the repo root: -// -// $ registry-api-descriptor-template doc/spec/api.md.tmpl > doc/spec/api.md -// -// The templates are passed in the api/v2.APIDescriptor object. Please see the -// package documentation for fields available on that object. The template -// syntax is from Go's standard library text/template package. For information -// on Go's template syntax, please see golang.org/pkg/text/template. 
-package main - -import ( - "log" - "net/http" - "os" - "path/filepath" - "regexp" - "text/template" - - "github.com/docker/distribution/registry/api/v2" -) - -var spaceRegex = regexp.MustCompile(`\n\s*`) - -func main() { - - if len(os.Args) != 2 { - log.Fatalln("please specify a template to execute.") - } - - path := os.Args[1] - filename := filepath.Base(path) - - funcMap := template.FuncMap{ - "removenewlines": func(s string) string { - return spaceRegex.ReplaceAllString(s, " ") - }, - "statustext": http.StatusText, - "prettygorilla": prettyGorillaMuxPath, - } - - tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) - - if err := tmpl.Execute(os.Stdout, v2.APIDescriptor); err != nil { - log.Fatalln(err) - } -} - -// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux -// route string, making it suitable for documentation. -func prettyGorillaMuxPath(s string) string { - // Stateful parser that removes regular expressions from gorilla - // routes. It correctly handles balanced bracket pairs. - - var output string - var label string - var level int - -start: - if s[0] == '{' { - s = s[1:] - level++ - goto capture - } - - output += string(s[0]) - s = s[1:] - - goto end -capture: - switch s[0] { - case '{': - level++ - case '}': - level-- - - if level == 0 { - s = s[1:] - goto label - } - case ':': - s = s[1:] - goto skip - default: - label += string(s[0]) - } - s = s[1:] - goto capture -skip: - switch s[0] { - case '{': - level++ - case '}': - level-- - } - s = s[1:] - - if level == 0 { - goto label - } - - goto skip -label: - if label != "" { - output += "<" + label + ">" - label = "" - } -end: - if s != "" { - goto start - } - - return output - -} - -blob -mark :20 -data 673 -// +build ignore - -package main - -import ( - "encoding/json" - "os" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/storage/driver/azure" - "github.com/docker/distribution/registry/storage/driver/ipc" -) - -// An out-of-process Azure Storage driver, intended to be run by ipc.NewDriverClient -func main() { - parametersBytes := []byte(os.Args[1]) - var parameters map[string]interface{} - err := json.Unmarshal(parametersBytes, ¶meters) - if err != nil { - panic(err) - } - - driver, err := azure.FromParameters(parameters) - if err != nil { - panic(err) - } - - if err := ipc.StorageDriverServer(driver); err != nil { - log.Fatalln("driver error:", err) - } -} - -blob -mark :21 -data 601 -// +build ignore - -package main - -import ( - "encoding/json" - "os" - - "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/registry/storage/driver/filesystem" - "github.com/docker/distribution/registry/storage/driver/ipc" -) - -// An out-of-process filesystem driver, intended to be run by ipc.NewDriverClient -func main() { - parametersBytes := []byte(os.Args[1]) - var parameters map[string]string - err := json.Unmarshal(parametersBytes, ¶meters) - if err != nil { - panic(err) - } - - if err := ipc.StorageDriverServer(filesystem.FromParameters(parameters)); err != nil { - logrus.Fatalln(err) - } -} - -blob -mark :22 -data 447 -// +build ignore - -package main - -import ( - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/registry/storage/driver/ipc" -) - -// An out-of-process inmemory driver, intended to be run by ipc.NewDriverClient -// This exists primarily for example and testing purposes -func main() { - if err := 
ipc.StorageDriverServer(inmemory.New()); err != nil { - logrus.Fatalln(err) - } -} - -blob -mark :23 -data 634 -// +build ignore - -package main - -import ( - "encoding/json" - "os" - - "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/registry/storage/driver/ipc" - "github.com/docker/distribution/registry/storage/driver/s3" -) - -// An out-of-process S3 driver, intended to be run by ipc.NewDriverClient -func main() { - parametersBytes := []byte(os.Args[1]) - var parameters map[string]string - err := json.Unmarshal(parametersBytes, ¶meters) - if err != nil { - panic(err) - } - - driver, err := s3.FromParameters(parameters) - if err != nil { - panic(err) - } - - if err := ipc.StorageDriverServer(driver); err != nil { - logrus.Fatalln(err) - } -} - -blob -mark :24 -data 892 -version: 0.1 -log: - level: debug - fields: - service: registry - environment: development -storage: - cache: - layerinfo: inmemory - filesystem: - rootdirectory: /tmp/registry-dev -http: - addr: :5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 -redis: - addr: localhost:6379 - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms -notifications: - endpoints: - - name: local-8082 - url: http://localhost:5003/callback - headers: - Authorization: [Bearer ] - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - - name: local-8083 - url: http://localhost:8083/callback - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - -blob -mark :25 -data 7158 -package main - -import ( - "crypto/tls" - "crypto/x509" - _ "expvar" - "flag" - "fmt" - "io/ioutil" - "net/http" - _ "net/http/pprof" - "os" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/formatters/logstash" - "github.com/bugsnag/bugsnag-go" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - _ "github.com/docker/distribution/health" - _ "github.com/docker/distribution/registry/auth/silly" - _ "github.com/docker/distribution/registry/auth/token" - "github.com/docker/distribution/registry/handlers" - _ "github.com/docker/distribution/registry/storage/driver/azure" - _ "github.com/docker/distribution/registry/storage/driver/filesystem" - _ "github.com/docker/distribution/registry/storage/driver/inmemory" - _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" - _ "github.com/docker/distribution/registry/storage/driver/s3" - "github.com/docker/distribution/version" - gorhandlers "github.com/gorilla/handlers" - "github.com/yvasiyarov/gorelic" -) - -var showVersion bool - -func init() { - flag.BoolVar(&showVersion, "version", false, "show the version and exit") -} - -func main() { - flag.Usage = usage - flag.Parse() - - if showVersion { - version.PrintVersion() - return - } - - ctx := context.Background() - ctx = context.WithValue(ctx, "version", version.Version) - - config, err := resolveConfiguration() - if err != nil { - fatalf("configuration error: %v", err) - } - - ctx, err = configureLogging(ctx, config) - if err != nil { - fatalf("error configuring logger: %v", err) - } - - app := handlers.NewApp(ctx, *config) - handler := configureReporting(app) - handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) - - if config.HTTP.Debug.Addr != "" { - go debugServer(config.HTTP.Debug.Addr) - } - - if config.HTTP.TLS.Certificate == "" { - context.GetLogger(app).Infof("listening on %v", config.HTTP.Addr) - 
if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil { - context.GetLogger(app).Fatalln(err) - } - } else { - tlsConf := &tls.Config{ - ClientAuth: tls.NoClientCert, - } - - if len(config.HTTP.TLS.ClientCAs) != 0 { - pool := x509.NewCertPool() - - for _, ca := range config.HTTP.TLS.ClientCAs { - caPem, err := ioutil.ReadFile(ca) - if err != nil { - context.GetLogger(app).Fatalln(err) - } - - if ok := pool.AppendCertsFromPEM(caPem); !ok { - context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA to pool")) - } - } - - for _, subj := range pool.Subjects() { - context.GetLogger(app).Debugf("CA Subject: %s", string(subj)) - } - - tlsConf.ClientAuth = tls.RequireAndVerifyClientCert - tlsConf.ClientCAs = pool - } - - context.GetLogger(app).Infof("listening on %v, tls", config.HTTP.Addr) - server := &http.Server{ - Addr: config.HTTP.Addr, - Handler: handler, - TLSConfig: tlsConf, - } - - if err := server.ListenAndServeTLS(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key); err != nil { - context.GetLogger(app).Fatalln(err) - } - } -} - -func usage() { - fmt.Fprintln(os.Stderr, "usage:", os.Args[0], "") - flag.PrintDefaults() -} - -func fatalf(format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, format+"\n", args...) - usage() - os.Exit(1) -} - -func resolveConfiguration() (*configuration.Configuration, error) { - var configurationPath string - - if flag.NArg() > 0 { - configurationPath = flag.Arg(0) - } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { - configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") - } - - if configurationPath == "" { - return nil, fmt.Errorf("configuration path unspecified") - } - - fp, err := os.Open(configurationPath) - if err != nil { - return nil, err - } - - config, err := configuration.Parse(fp) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) - } - - return config, nil -} - -func configureReporting(app *handlers.App) http.Handler { - var handler http.Handler = app - - if app.Config.Reporting.Bugsnag.APIKey != "" { - bugsnagConfig := bugsnag.Configuration{ - APIKey: app.Config.Reporting.Bugsnag.APIKey, - // TODO(brianbland): provide the registry version here - // AppVersion: "2.0", - } - if app.Config.Reporting.Bugsnag.ReleaseStage != "" { - bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage - } - if app.Config.Reporting.Bugsnag.Endpoint != "" { - bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint - } - bugsnag.Configure(bugsnagConfig) - - handler = bugsnag.Handler(handler) - } - - if app.Config.Reporting.NewRelic.LicenseKey != "" { - agent := gorelic.NewAgent() - agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey - if app.Config.Reporting.NewRelic.Name != "" { - agent.NewrelicName = app.Config.Reporting.NewRelic.Name - } - agent.CollectHTTPStat = true - agent.Verbose = app.Config.Reporting.NewRelic.Verbose - agent.Run() - - handler = agent.WrapHTTPHandler(handler) - } - - return handler -} - -// configureLogging prepares the context with a logger using the -// configuration. -func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { - if config.Log.Level == "" && config.Log.Formatter == "" { - // If no config for logging is set, fallback to deprecated "Loglevel". 
- log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) - return ctx, nil - } - - log.SetLevel(logLevel(config.Log.Level)) - - formatter := config.Log.Formatter - if formatter == "" { - formatter = "text" // default formatter - } - - switch formatter { - case "json": - log.SetFormatter(&log.JSONFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "text": - log.SetFormatter(&log.TextFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "logstash": - log.SetFormatter(&logstash.LogstashFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - default: - // just let the library use default on empty string. - if config.Log.Formatter != "" { - return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) - } - } - - if config.Log.Formatter != "" { - log.Debugf("using %q logging formatter", config.Log.Formatter) - } - - // log the application version with messages - ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) - - if len(config.Log.Fields) > 0 { - // build up the static fields, if present. - var fields []interface{} - for k := range config.Log.Fields { - fields = append(fields, k) - } - - ctx = context.WithValues(ctx, config.Log.Fields) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) - } - - return ctx, nil -} - -func logLevel(level configuration.Loglevel) log.Level { - l, err := log.ParseLevel(string(level)) - if err != nil { - l = log.InfoLevel - log.Warnf("error parsing level %q: %v, using %q ", level, err, l) - } - - return l -} - -// debugServer starts the debug server with pprof, expvar among other -// endpoints. The addr should not be exposed externally. For most of these to -// work, tls cannot be enabled on the endpoint, so it is generally separate. -func debugServer(addr string) { - log.Infof("debug server listening %v", addr) - if err := http.ListenAndServe(addr, nil); err != nil { - log.Fatalf("error listening on debug interface: %v", err) - } -} - -blob -mark :26 -data 5231 -Docker-Registry Configuration -============================= - -This document describes the registry configuration model and how to specify a custom configuration with a configuration file and/or environment variables. - -Semantic-ish Versioning ------------------------ - -The configuration file is designed with versioning in mind, such that most upgrades will not require a change in configuration files, and such that configuration files can be "upgraded" from one version to another. - -The version is specified as a string of the form `MajorVersion.MinorVersion`, where MajorVersion and MinorVersion are both non-negative integer values. Much like [semantic versioning](http://semver.org/), minor version increases denote inherently backwards-compatible changes, such as the addition of optional fields, whereas major version increases denote a restructuring, such as renaming fields or adding required fields. Because of the explicit version definition in the configuration file, it should be possible to parse old configuration files and port them to the current configuration version, although this is not guaranteed for all future versions. 
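
To make the `MajorVersion.MinorVersion` check above concrete, here is a minimal, self-contained sketch of the validation it implies. This is illustrative only: the real implementation lives in the `Version` helpers of `parser.go` later in this diff, and `parseVersion` here is a hypothetical stand-in, not the library's API.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseVersion splits "Major.Minor" and validates both parts as
// non-negative integers, mirroring Version.major()/minor() below.
func parseVersion(v string) (major, minor uint64, err error) {
	parts := strings.Split(v, ".")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("version %q is not of the form Major.Minor", v)
	}
	if major, err = strconv.ParseUint(parts[0], 10, 0); err != nil {
		return 0, 0, err
	}
	if minor, err = strconv.ParseUint(parts[1], 10, 0); err != nil {
		return 0, 0, err
	}
	return major, minor, nil
}

func main() {
	major, minor, err := parseVersion("0.1")
	if err != nil {
		panic(err)
	}
	// A parser rejects a config whose major version it does not know;
	// a higher minor version is expected to remain parseable.
	fmt.Printf("major=%d minor=%d\n", major, minor)
}
```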
-
-File Structure (as of Version 0.1)
------------------------------------
-
-The configuration structure is defined by the `Configuration` struct in `configuration.go`, and is best described by the following two examples:
-
-```yaml
-version: 0.1
-loglevel: info
-storage:
-  s3:
-    region: us-east-1
-    bucket: my-bucket
-    rootdirectory: /registry
-    encrypt: true
-    secure: false
-    accesskey: SAMPLEACCESSKEY
-    secretkey: SUPERSECRET
-    host: ~
-    port: ~
-auth:
-  silly:
-    realm: test-realm
-    service: my-service
-reporting:
-  bugsnag:
-    apikey: mybugsnagapikey
-    releasestage: development
-  newrelic:
-    licensekey: mynewreliclicensekey
-    name: docker-distribution
-http:
-  addr: 0.0.0.0:5000
-  secret: mytokensecret
-```
-
-```yaml
-version: 0.1
-loglevel: debug
-storage: inmemory
-```
-
-### version
-The version is expected to remain a top-level field, so as to allow for a consistent version check before parsing the remainder of the configuration file.
-
-### loglevel
-This specifies the log level of the registry.
-
-Supported values:
-* `error`
-* `warn`
-* `info`
-* `debug`
-
-### storage
-This specifies the storage driver, and may be provided either as a string (only the driver type) or as a driver name with a parameters map, as seen in the first example above.
-
-The parameters map will be passed into the factory constructor of the given storage driver type.
-
-### auth
-This specifies the authorization method the registry will use, and is provided as an auth type with a parameters map.
-
-The parameters map will be passed into the factory constructor of the given auth type.
-
-### reporting
-This specifies metrics/error reporting systems to which the registry will forward information about stats and errors. There are currently two supported systems, which are documented below.
-
-#### bugsnag
-Reports http errors and panics to [bugsnag](https://bugsnag.com).
-
-##### apikey
-(Required for bugsnag use) Specifies the bugsnag API Key for authenticating to your account.
-
-##### releasestage
-(Optional) Tracks the stage at which the registry is deployed. For example: "production", "staging", "development".
-
-##### endpoint
-(Optional) Used for specifying an enterprise bugsnag endpoint other than https://bugsnag.com.
-
-#### newrelic
-Reports heap, goroutine, and http stats to [NewRelic](https://newrelic.com).
-
-##### licensekey
-(Required for newrelic use) Specifies the NewRelic License Key for authenticating to your account.
-
-##### name
-(Optional) Specifies the component name that is displayed in the NewRelic panel.
-
-### http
-This is used for HTTP transport-specific configuration options.
-
-#### addr
-Specifies the bind address for the registry instance. Example: 0.0.0.0:5000
-
-#### secret
-Specifies the secret key with which query-string HMAC tokens are generated.
-
-### Notes
-
-All keys in the configuration file **must** be provided as a string of lowercase letters and numbers only, and values must be string-like (booleans and numerical values are fine to parse as strings).
-
-Environment Variables
----------------------
-
-To support the workflow of running a docker registry from a standard container without having to modify configuration files, the registry configuration also supports environment variables for overriding fields.
-
-Any configuration field other than version can be replaced by providing an environment variable of the following form: `REGISTRY_<uppercase field>[_<uppercase subfield>]...`.
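
Before the document's own loglevel and s3 examples below, here is a hedged end-to-end sketch of that override scheme. It uses only calls that appear in `configuration.go` and `configuration_test.go` elsewhere in this diff (`configuration.Parse`, `Storage.Type`, `Storage.Parameters`); the yaml literal and the rootdirectory path are illustrative.

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/docker/distribution/configuration"
)

func main() {
	yml := "version: 0.1\nloglevel: info\nstorage: inmemory\n"

	// REGISTRY_LOGLEVEL overrides the top-level loglevel field, while
	// REGISTRY_STORAGE replaces the storage type (erasing its old
	// parameters) and REGISTRY_STORAGE_FILESYSTEM_* fills in new ones.
	os.Setenv("REGISTRY_LOGLEVEL", "error")
	os.Setenv("REGISTRY_STORAGE", "filesystem")
	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/registry")

	config, err := configuration.Parse(bytes.NewReader([]byte(yml)))
	if err != nil {
		panic(err)
	}
	// Expected: error filesystem map[rootdirectory:/tmp/registry]
	fmt.Println(config.Loglevel, config.Storage.Type(), config.Storage.Parameters())
}
```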
-
-For example, to change the loglevel to `error`, one can provide `REGISTRY_LOGLEVEL=error`, and to change the s3 storage driver's region parameter to `us-west-1`, one can provide `REGISTRY_STORAGE_S3_REGION=us-west-1`.
-
-### Notes
-If an environment variable changes a map value into a string, such as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then all sub-fields will be erased. As such, specifying the storage type in the environment will remove all parameters related to the old storage configuration.
-
-By restricting all keys in the configuration file to lowercase letters and numbers, we can avoid any potential environment variable mapping ambiguity.
-
-blob
-mark :27
-data 13432
-package configuration
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"reflect"
-	"strings"
-	"time"
-)
-
-// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
-// optionally modified by environment variables
-type Configuration struct {
-	// Version is the version which defines the format of the rest of the configuration
-	Version Version `yaml:"version"`
-
-	// Log supports setting various parameters related to the logging
-	// subsystem.
-	Log struct {
-		// Level is the granularity at which registry operations are logged.
-		Level Loglevel `yaml:"level"`
-
-		// Formatter overrides the default formatter with another. Options
-		// include "text", "json" and "logstash".
-		Formatter string `yaml:"formatter,omitempty"`
-
-		// Fields allows users to specify static string fields to include in
-		// the logger context.
-		Fields map[string]interface{} `yaml:"fields,omitempty"`
-	}
-
-	// Loglevel is the level at which registry operations are logged. This is
-	// deprecated. Please use Log.Level in the future.
-	Loglevel Loglevel `yaml:"loglevel,omitempty"`
-
-	// Storage is the configuration for the registry's storage driver
-	Storage Storage `yaml:"storage"`
-
-	// Auth allows configuration of various authorization methods that may be
-	// used to gate requests.
-	Auth Auth `yaml:"auth,omitempty"`
-
-	// Middleware lists all middlewares to be used by the registry.
-	Middleware map[string][]Middleware `yaml:"middleware,omitempty"`
-
-	// Reporting is the configuration for error reporting
-	Reporting Reporting `yaml:"reporting,omitempty"`
-
-	// HTTP contains configuration parameters for the registry's http
-	// interface.
-	HTTP struct {
-		// Addr specifies the bind address for the registry instance.
-		Addr string `yaml:"addr,omitempty"`
-
-		Prefix string `yaml:"prefix,omitempty"`
-
-		// Secret specifies the secret key which HMAC tokens are created with.
-		Secret string `yaml:"secret,omitempty"`
-
-		// TLS instructs the http server to listen with a TLS configuration.
-		// This only supports simple tls configuration with a cert and key.
-		// Mostly, this is useful for testing situations or simple deployments
-		// that require tls. If more complex configurations are required, use
-		// a proxy or make a proposal to add support here.
-		TLS struct {
-			// Certificate specifies the path to an x509 certificate file to
-			// be used for TLS.
-			Certificate string `yaml:"certificate,omitempty"`
-
-			// Key specifies the path to the x509 key file, which should
-			// contain the private portion for the file specified in
-			// Certificate.
-			Key string `yaml:"key,omitempty"`
-
-			// Specifies the CA certs for client authentication
-			// A file may contain multiple CA certificates encoded as PEM
-			ClientCAs []string `yaml:"clientcas,omitempty"`
-		} `yaml:"tls,omitempty"`
-
-		// Debug configures the http debug interface, if specified. This can
-		// include services such as pprof, expvar and other data that should
-		// not be exposed externally. Left disabled by default.
-		Debug struct {
-			// Addr specifies the bind address for the debug server.
-			Addr string `yaml:"addr,omitempty"`
-		} `yaml:"debug,omitempty"`
-	} `yaml:"http,omitempty"`
-
-	// Notifications specifies configuration about the various endpoints to
-	// which registry events are dispatched.
-	Notifications Notifications `yaml:"notifications,omitempty"`
-
-	// Redis configures the redis pool available to the registry webapp.
-	Redis struct {
-		// Addr specifies the redis instance available to the application.
-		Addr string `yaml:"addr,omitempty"`
-
-		// Password string to use when making a connection.
-		Password string `yaml:"password,omitempty"`
-
-		// DB specifies the database to connect to on the redis instance.
-		DB int `yaml:"db,omitempty"`
-
-		DialTimeout time.Duration `yaml:"dialtimeout,omitempty"`   // timeout for connect
-		ReadTimeout time.Duration `yaml:"readtimeout,omitempty"`   // timeout for reads of data
-		WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data
-
-		// Pool configures the behavior of the redis connection pool.
-		Pool struct {
-			// MaxIdle sets the maximum number of idle connections.
-			MaxIdle int `yaml:"maxidle,omitempty"`
-
-			// MaxActive sets the maximum number of connections that should be
-			// opened before blocking a connection request.
-			MaxActive int `yaml:"maxactive,omitempty"`
-
-			// IdleTimeout sets the amount of time to wait before closing
-			// inactive connections.
-			IdleTimeout time.Duration `yaml:"idletimeout,omitempty"`
-		} `yaml:"pool,omitempty"`
-	} `yaml:"redis,omitempty"`
-}
-
-// v0_1Configuration is a Version 0.1 Configuration struct
-// This is currently aliased to Configuration, as it is the current version
-type v0_1Configuration Configuration
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface
-// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints
-func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var versionString string
-	err := unmarshal(&versionString)
-	if err != nil {
-		return err
-	}
-
-	newVersion := Version(versionString)
-	if _, err := newVersion.major(); err != nil {
-		return err
-	}
-
-	if _, err := newVersion.minor(); err != nil {
-		return err
-	}
-
-	*version = newVersion
-	return nil
-}
-
-// CurrentVersion is the most recent Version that can be parsed
-var CurrentVersion = MajorMinorVersion(0, 1)
-
-// Loglevel is the level at which operations are logged
-// This can be error, warn, info, or debug
-type Loglevel string
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface
-// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
-// valid loglevel
-func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var loglevelString string
-	err := unmarshal(&loglevelString)
-	if err != nil {
-		return err
-	}
-
-	loglevelString = strings.ToLower(loglevelString)
-	switch loglevelString {
-	case "error", "warn", "info", "debug":
-	default:
-		return fmt.Errorf("Invalid loglevel %s. Must be one of [error, warn, info, debug]", loglevelString)
-	}
-
-	*loglevel = Loglevel(loglevelString)
-	return nil
-}
-
-// Parameters defines a key-value parameters mapping
-type Parameters map[string]interface{}
-
-// Storage defines the configuration for registry object storage
-type Storage map[string]Parameters
-
-// Type returns the storage driver type, such as filesystem or s3
-func (storage Storage) Type() string {
-	// Return the only non-cache key in this map
-	for k := range storage {
-		switch k {
-		case "cache":
-			// allow configuration of caching
-		default:
-			return k
-		}
-	}
-	return ""
-}
-
-// Parameters returns the Parameters map for a Storage configuration
-func (storage Storage) Parameters() Parameters {
-	return storage[storage.Type()]
-}
-
-// setParameter changes the parameter at the provided key to the new value
-func (storage Storage) setParameter(key string, value interface{}) {
-	storage[storage.Type()][key] = value
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface
-// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
-func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var storageMap map[string]Parameters
-	err := unmarshal(&storageMap)
-	if err == nil {
-		if len(storageMap) > 1 {
-			types := make([]string, 0, len(storageMap))
-			for k := range storageMap {
-				switch k {
-				case "cache":
-					// allow configuration of caching
-				default:
-					types = append(types, k)
-				}
-			}
-
-			if len(types) > 1 {
-				return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
-			}
-		}
-		*storage = storageMap
-		return nil
-	}
-
-	var storageType string
-	err = unmarshal(&storageType)
-	if err == nil {
-		*storage = Storage{storageType: Parameters{}}
-		return nil
-	}
-
-	return err
-}
-
-// MarshalYAML implements the yaml.Marshaler interface
-func (storage Storage) MarshalYAML() (interface{}, error) {
-	if storage.Parameters() == nil {
-		return storage.Type(), nil
-	}
-	return map[string]Parameters(storage), nil
-}
-
-// Auth defines the configuration for registry authorization.
-type Auth map[string]Parameters
-
-// Type returns the auth type, such as silly or token
-func (auth Auth) Type() string {
-	// Return the only key in this map
-	for k := range auth {
-		return k
-	}
-	return ""
-}
-
-// Parameters returns the Parameters map for an Auth configuration
-func (auth Auth) Parameters() Parameters {
-	return auth[auth.Type()]
-}
-
-// setParameter changes the parameter at the provided key to the new value
-func (auth Auth) setParameter(key string, value interface{}) {
-	auth[auth.Type()][key] = value
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface
-// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
-func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var m map[string]Parameters
-	err := unmarshal(&m)
-	if err == nil {
-		if len(m) > 1 {
-			types := make([]string, 0, len(m))
-			for k := range m {
-				types = append(types, k)
-			}
-
-			// TODO(stevvooe): May want to change this slightly for
-			// authorization to allow multiple challenges.
-			return fmt.Errorf("must provide exactly one type. Provided: %v", types)
-
-		}
-		*auth = m
-		return nil
-	}
-
-	var authType string
-	err = unmarshal(&authType)
-	if err == nil {
-		*auth = Auth{authType: Parameters{}}
-		return nil
-	}
-
-	return err
-}
-
-// MarshalYAML implements the yaml.Marshaler interface
-func (auth Auth) MarshalYAML() (interface{}, error) {
-	if auth.Parameters() == nil {
-		return auth.Type(), nil
-	}
-	return map[string]Parameters(auth), nil
-}
-
-// Notifications configures multiple http endpoints.
-type Notifications struct {
-	// Endpoints is a list of http configurations for endpoints that
-	// respond to webhook notifications. In the future, we may allow other
-	// kinds of endpoints, such as external queues.
-	Endpoints []Endpoint `yaml:"endpoints,omitempty"`
-}
-
-// Endpoint describes the configuration of an http webhook notification
-// endpoint.
-type Endpoint struct {
-	Name string `yaml:"name"`                // identifies the endpoint in the registry instance.
-	Disabled bool `yaml:"disabled"`          // disables the endpoint
-	URL string `yaml:"url"`                  // post url for the endpoint.
-	Headers http.Header `yaml:"headers"`     // static headers that should be added to all requests
-	Timeout time.Duration `yaml:"timeout"`   // HTTP timeout
-	Threshold int `yaml:"threshold"`         // circuit breaker threshold before backing off on failure
-	Backoff time.Duration `yaml:"backoff"`   // backoff duration
-}
-
-// Reporting defines error reporting methods.
-type Reporting struct {
-	// Bugsnag configures error reporting for Bugsnag (bugsnag.com).
-	Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"`
-	// NewRelic configures error reporting for NewRelic (newrelic.com)
-	NewRelic NewRelicReporting `yaml:"newrelic,omitempty"`
-}
-
-// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com).
-type BugsnagReporting struct {
-	// APIKey is the Bugsnag api key.
- APIKey string `yaml:"apikey,omitempty"` - // ReleaseStage tracks where the registry is deployed. - // Examples: production, staging, development - ReleaseStage string `yaml:"releasestage,omitempty"` - // Endpoint is used for specifying an enterprise Bugsnag endpoint. - Endpoint string `yaml:"endpoint,omitempty"` -} - -// NewRelicReporting configures error reporting for NewRelic (newrelic.com) -type NewRelicReporting struct { - // LicenseKey is the NewRelic user license key - LicenseKey string `yaml:"licensekey,omitempty"` - // Name is the component name of the registry in NewRelic - Name string `yaml:"name,omitempty"` - // Verbose configures debug output to STDOUT - Verbose bool `yaml:"verbose,omitempty"` -} - -// Middleware configures named middlewares to be applied at injection points. -type Middleware struct { - // Name the middleware registers itself as - Name string `yaml:"name"` - // Flag to disable middleware easily - Disabled bool `yaml:"disabled,omitempty"` - // Map of parameters that will be passed to the middleware's initialization function - Options Parameters `yaml:"options"` -} - -// Parse parses an input configuration yaml document into a Configuration struct -// This should generally be capable of handling old configuration format versions -// -// Environment variables may be used to override configuration parameters other than version, -// following the scheme below: -// Configuration.Abc may be replaced by the value of REGISTRY_ABC, -// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth -func Parse(rd io.Reader) (*Configuration, error) { - in, err := ioutil.ReadAll(rd) - if err != nil { - return nil, err - } - - p := NewParser("registry", []VersionedParseInfo{ - { - Version: MajorMinorVersion(0, 1), - ParseAs: reflect.TypeOf(v0_1Configuration{}), - ConversionFunc: func(c interface{}) (interface{}, error) { - if v0_1, ok := c.(*v0_1Configuration); ok { - if v0_1.Loglevel == Loglevel("") { - v0_1.Loglevel = Loglevel("info") - } - if v0_1.Storage.Type() == "" { - return nil, fmt.Errorf("No storage configuration provided") - } - return (*Configuration)(v0_1), nil - } - return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) - }, - }, - }) - - config := new(Configuration) - err = p.Parse(in, config) - if err != nil { - return nil, err - } - - return config, nil -} - -blob -mark :28 -data 12429 -package configuration - -import ( - "bytes" - "net/http" - "os" - "testing" - - . 
"gopkg.in/check.v1" - "gopkg.in/yaml.v2" -) - -// Hook up gocheck into the "go test" runner -func Test(t *testing.T) { TestingT(t) } - -// configStruct is a canonical example configuration, which should map to configYamlV0_1 -var configStruct = Configuration{ - Version: "0.1", - Log: struct { - Level Loglevel `yaml:"level"` - Formatter string `yaml:"formatter,omitempty"` - Fields map[string]interface{} `yaml:"fields,omitempty"` - }{ - Fields: map[string]interface{}{"environment": "test"}, - }, - Loglevel: "info", - Storage: Storage{ - "s3": Parameters{ - "region": "us-east-1", - "bucket": "my-bucket", - "rootdirectory": "/registry", - "encrypt": true, - "secure": false, - "accesskey": "SAMPLEACCESSKEY", - "secretkey": "SUPERSECRET", - "host": nil, - "port": 42, - }, - }, - Auth: Auth{ - "silly": Parameters{ - "realm": "silly", - "service": "silly", - }, - }, - Reporting: Reporting{ - Bugsnag: BugsnagReporting{ - APIKey: "BugsnagApiKey", - }, - }, - Notifications: Notifications{ - Endpoints: []Endpoint{ - { - Name: "endpoint-1", - URL: "http://example.com", - Headers: http.Header{ - "Authorization": []string{"Bearer "}, - }, - }, - }, - }, - HTTP: struct { - Addr string `yaml:"addr,omitempty"` - Prefix string `yaml:"prefix,omitempty"` - Secret string `yaml:"secret,omitempty"` - TLS struct { - Certificate string `yaml:"certificate,omitempty"` - Key string `yaml:"key,omitempty"` - ClientCAs []string `yaml:"clientcas,omitempty"` - } `yaml:"tls,omitempty"` - Debug struct { - Addr string `yaml:"addr,omitempty"` - } `yaml:"debug,omitempty"` - }{ - TLS: struct { - Certificate string `yaml:"certificate,omitempty"` - Key string `yaml:"key,omitempty"` - ClientCAs []string `yaml:"clientcas,omitempty"` - }{ - ClientCAs: []string{"/path/to/ca.pem"}, - }, - }, -} - -// configYamlV0_1 is a Version 0.1 yaml document representing configStruct -var configYamlV0_1 = ` -version: 0.1 -log: - fields: - environment: test -loglevel: info -storage: - s3: - region: us-east-1 - bucket: my-bucket - rootdirectory: /registry - encrypt: true - secure: false - accesskey: SAMPLEACCESSKEY - secretkey: SUPERSECRET - host: ~ - port: 42 -auth: - silly: - realm: silly - service: silly -notifications: - endpoints: - - name: endpoint-1 - url: http://example.com - headers: - Authorization: [Bearer ] -reporting: - bugsnag: - apikey: BugsnagApiKey -http: - clientcas: - - /path/to/ca.pem -` - -// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory -// storage driver with no parameters -var inmemoryConfigYamlV0_1 = ` -version: 0.1 -loglevel: info -storage: inmemory -auth: - silly: - realm: silly - service: silly -notifications: - endpoints: - - name: endpoint-1 - url: http://example.com - headers: - Authorization: [Bearer ] -` - -type ConfigSuite struct { - expectedConfig *Configuration -} - -var _ = Suite(new(ConfigSuite)) - -func (suite *ConfigSuite) SetUpTest(c *C) { - os.Clearenv() - suite.expectedConfig = copyConfig(configStruct) -} - -// TestMarshalRoundtrip validates that configStruct can be marshaled and -// unmarshaled without changing any parameters -func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { - configBytes, err := yaml.Marshal(suite.expectedConfig) - c.Assert(err, IsNil) - config, err := Parse(bytes.NewReader(configBytes)) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseSimple validates that configYamlV0_1 can be parsed into a struct -// matching configStruct -func (suite *ConfigSuite) TestParseSimple(c *C) { - config, err := 
Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseInmemory validates that configuration yaml with storage provided as -// a string can be parsed into a Configuration struct with no storage parameters -func (suite *ConfigSuite) TestParseInmemory(c *C) { - suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} - suite.expectedConfig.Reporting = Reporting{} - suite.expectedConfig.Log.Fields = nil - - config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseIncomplete validates that an incomplete yaml configuration cannot -// be parsed without providing environment variables to fill in the missing -// components. -func (suite *ConfigSuite) TestParseIncomplete(c *C) { - incompleteConfigYaml := "version: 0.1" - _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) - c.Assert(err, NotNil) - - suite.expectedConfig.Log.Fields = nil - suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} - suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} - suite.expectedConfig.Reporting = Reporting{} - suite.expectedConfig.Notifications = Notifications{} - - os.Setenv("REGISTRY_STORAGE", "filesystem") - os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") - os.Setenv("REGISTRY_AUTH", "silly") - os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly") - - config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithSameEnvStorage validates that providing environment variables -// that match the given storage type will only include environment-defined -// parameters and remove yaml-defined parameters -func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { - suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}} - - os.Setenv("REGISTRY_STORAGE", "s3") - os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change -// and add to the given storage parameters will change and add parameters to the parsed -// Configuration struct -func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { - suite.expectedConfig.Storage.setParameter("region", "us-west-1") - suite.expectedConfig.Storage.setParameter("secure", true) - suite.expectedConfig.Storage.setParameter("newparam", "some Value") - - os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") - os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") - os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithDifferentEnvStorageType validates that providing an environment variable that -// changes the storage type will be reflected in the parsed Configuration struct -func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { - suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} - - os.Setenv("REGISTRY_STORAGE", "inmemory") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, 
DeepEquals, suite.expectedConfig) -} - -// TestParseWithExtraneousEnvStorageParams validates that environment variables -// that change parameters out of the scope of the specified storage type are -// ignored. -func (suite *ConfigSuite) TestParseWithExtraneousEnvStorageParams(c *C) { - os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable -// that changes the storage type will be reflected in the parsed Configuration struct and that -// environment storage parameters will also be included -func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { - suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}} - suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot") - - os.Setenv("REGISTRY_STORAGE", "filesystem") - os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log -// level to the same as the one provided in the yaml will not change the parsed Configuration struct -func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { - os.Setenv("REGISTRY_LOGLEVEL", "info") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the -// log level will override the value provided in the yaml document -func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { - suite.expectedConfig.Loglevel = "error" - - os.Setenv("REGISTRY_LOGLEVEL", "error") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseInvalidLoglevel validates that the parser will fail to parse a -// configuration if the loglevel is malformed -func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { - invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory" - _, err := Parse(bytes.NewReader([]byte(invalidConfigYaml))) - c.Assert(err, NotNil) - - os.Setenv("REGISTRY_LOGLEVEL", "derp") - - _, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, NotNil) - -} - -// TestParseWithDifferentEnvReporting validates that environment variables -// properly override reporting parameters -func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { - suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" - suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" - suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey" - suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME" - - os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey") - os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") - os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") - os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, 
suite.expectedConfig) -} - -// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration -// version than the CurrentVersion -func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { - suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) - configBytes, err := yaml.Marshal(suite.expectedConfig) - c.Assert(err, IsNil) - _, err = Parse(bytes.NewReader(configBytes)) - c.Assert(err, NotNil) -} - -func copyConfig(config Configuration) *Configuration { - configCopy := new(Configuration) - - configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) - configCopy.Loglevel = config.Loglevel - configCopy.Log = config.Log - configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields)) - for k, v := range config.Log.Fields { - configCopy.Log.Fields[k] = v - } - - configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} - for k, v := range config.Storage.Parameters() { - configCopy.Storage.setParameter(k, v) - } - configCopy.Reporting = Reporting{ - Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, - NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose}, - } - - configCopy.Auth = Auth{config.Auth.Type(): Parameters{}} - for k, v := range config.Auth.Parameters() { - configCopy.Auth.setParameter(k, v) - } - - configCopy.Notifications = Notifications{Endpoints: []Endpoint{}} - for _, v := range config.Notifications.Endpoints { - configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v) - } - - return configCopy -} - -blob -mark :29 -data 5787 -package configuration - -import ( - "fmt" - "os" - "reflect" - "regexp" - "strconv" - "strings" - - "gopkg.in/yaml.v2" -) - -// Version is a major/minor version pair of the form Major.Minor -// Major version upgrades indicate structure or type changes -// Minor version upgrades should be strictly additive -type Version string - -// MajorMinorVersion constructs a Version from its Major and Minor components -func MajorMinorVersion(major, minor uint) Version { - return Version(fmt.Sprintf("%d.%d", major, minor)) -} - -func (version Version) major() (uint, error) { - majorPart := strings.Split(string(version), ".")[0] - major, err := strconv.ParseUint(majorPart, 10, 0) - return uint(major), err -} - -// Major returns the major version portion of a Version -func (version Version) Major() uint { - major, _ := version.major() - return major -} - -func (version Version) minor() (uint, error) { - minorPart := strings.Split(string(version), ".")[1] - minor, err := strconv.ParseUint(minorPart, 10, 0) - return uint(minor), err -} - -// Minor returns the minor version portion of a Version -func (version Version) Minor() uint { - minor, _ := version.minor() - return minor -} - -// VersionedParseInfo defines how a specific version of a configuration should -// be parsed into the current version -type VersionedParseInfo struct { - // Version is the version which this parsing information relates to - Version Version - // ParseAs defines the type which a configuration file of this version - // should be parsed into - ParseAs reflect.Type - // ConversionFunc defines a method for converting the parsed configuration - // (of type ParseAs) into the current configuration version - // Note: this method signature is very unclear with the absence of generics - ConversionFunc 
func(interface{}) (interface{}, error) -} - -// Parser can be used to parse a configuration file and environment of a defined -// version into a unified output structure -type Parser struct { - prefix string - mapping map[Version]VersionedParseInfo - env map[string]string -} - -// NewParser returns a *Parser with the given environment prefix which handles -// versioned configurations which match the given parseInfos -func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { - p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo), env: make(map[string]string)} - - for _, parseInfo := range parseInfos { - p.mapping[parseInfo.Version] = parseInfo - } - - for _, env := range os.Environ() { - envParts := strings.SplitN(env, "=", 2) - p.env[envParts[0]] = envParts[1] - } - - return &p -} - -// Parse reads in the given []byte and environment and writes the resulting -// configuration into the input v -// -// Environment variables may be used to override configuration parameters other -// than version, following the scheme below: -// v.Abc may be replaced by the value of PREFIX_ABC, -// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth -func (p *Parser) Parse(in []byte, v interface{}) error { - var versionedStruct struct { - Version Version - } - - if err := yaml.Unmarshal(in, &versionedStruct); err != nil { - return err - } - - parseInfo, ok := p.mapping[versionedStruct.Version] - if !ok { - return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) - } - - parseAs := reflect.New(parseInfo.ParseAs) - err := yaml.Unmarshal(in, parseAs.Interface()) - if err != nil { - return err - } - - err = p.overwriteFields(parseAs, p.prefix) - if err != nil { - return err - } - - c, err := parseInfo.ConversionFunc(parseAs.Interface()) - if err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) - return nil -} - -func (p *Parser) overwriteFields(v reflect.Value, prefix string) error { - for v.Kind() == reflect.Ptr { - v = reflect.Indirect(v) - } - switch v.Kind() { - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - sf := v.Type().Field(i) - fieldPrefix := strings.ToUpper(prefix + "_" + sf.Name) - if e, ok := p.env[fieldPrefix]; ok { - fieldVal := reflect.New(sf.Type) - err := yaml.Unmarshal([]byte(e), fieldVal.Interface()) - if err != nil { - return err - } - v.Field(i).Set(reflect.Indirect(fieldVal)) - } - err := p.overwriteFields(v.Field(i), fieldPrefix) - if err != nil { - return err - } - } - case reflect.Map: - p.overwriteMap(v, prefix) - } - return nil -} - -func (p *Parser) overwriteMap(m reflect.Value, prefix string) error { - switch m.Type().Elem().Kind() { - case reflect.Struct: - for _, k := range m.MapKeys() { - err := p.overwriteFields(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k))) - if err != nil { - return err - } - } - envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix))) - if err != nil { - return err - } - for key, val := range p.env { - if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil { - mapValue := reflect.New(m.Type().Elem()) - err := yaml.Unmarshal([]byte(val), mapValue.Interface()) - if err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue)) - } - } - case reflect.Map: - for _, k := range m.MapKeys() { - err := p.overwriteMap(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k))) - if err != nil { - return err 
-			}
-		}
-	default:
-		envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix)))
-		if err != nil {
-			return err
-		}
-
-		for key, val := range p.env {
-			if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil {
-				mapValue := reflect.New(m.Type().Elem())
-				err := yaml.Unmarshal([]byte(val), mapValue.Interface())
-				if err != nil {
-					return err
-				}
-				m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue))
-			}
-		}
-	}
-	return nil
-}
-
-blob
-mark :30
-data 1928
-package context
-
-import (
-	"code.google.com/p/go-uuid/uuid"
-	"golang.org/x/net/context"
-)
-
-// Context is a copy of Context from the golang.org/x/net/context package.
-type Context interface {
-	context.Context
-}
-
-// instanceContext is a context that provides only an instance id. It is
-// provided as the main background context.
-type instanceContext struct {
-	Context
-	id string // id of context, logged as "instance.id"
-}
-
-func (ic *instanceContext) Value(key interface{}) interface{} {
-	if key == "instance.id" {
-		return ic.id
-	}
-
-	return ic.Context.Value(key)
-}
-
-var background = &instanceContext{
-	Context: context.Background(),
-	id: uuid.New(),
-}
-
-// Background returns a non-nil, empty Context. The background context
-// provides a single key, "instance.id" that is globally unique to the
-// process.
-func Background() Context {
-	return background
-}
-
-// WithValue returns a copy of parent in which the value associated with key is
-// val. Use context Values only for request-scoped data that transits processes
-// and APIs, not for passing optional parameters to functions.
-func WithValue(parent Context, key, val interface{}) Context {
-	return context.WithValue(parent, key, val)
-}
-
-// stringMapContext is a simple context implementation that checks a map for a
-// key, falling back to a parent if not present.
-type stringMapContext struct {
-	context.Context
-	m map[string]interface{}
-}
-
-// WithValues returns a context that proxies lookups through a map. Only
-// supports string keys.
-func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
-	mo := make(map[string]interface{}, len(m)) // make our own copy.
-	for k, v := range m {
-		mo[k] = v
-	}
-
-	return stringMapContext{
-		Context: ctx,
-		m: mo,
-	}
-}
-
-func (smc stringMapContext) Value(key interface{}) interface{} {
-	if ks, ok := key.(string); ok {
-		if v, ok := smc.m[ks]; ok {
-			return v
-		}
-	}
-
-	return smc.Context.Value(key)
-}
-
-blob
-mark :31
-data 3483
-// Package context provides several utilities for working with
-// golang.org/x/net/context in http requests. Primarily, the focus is on
-// logging relevant request information but this package is not limited to
-// that purpose.
-//
-// Logging
-//
-// The most useful aspect of this package is GetLogger. This function takes
-// any context.Context interface and returns the current logger from the
-// context. Canonical usage looks like this:
-//
-// GetLogger(ctx).Infof("something interesting happened")
-//
-// GetLogger also takes optional key arguments. The keys will be looked up in
-// the context and reported with the logger. The following example would
-// return a logger that prints the version with each log message:
-//
-// ctx := context.WithValue(context.Background(), "version", version)
-// GetLogger(ctx, "version").Infof("this log message has a version field")
-//
-// The above would print out a log message like this:
-//
-// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
-//
-// When used with WithLogger, we gain the ability to decorate the context with
-// loggers that have information from disparate parts of the call stack.
-// Following from the version example, we can build a new context with the
-// configured logger such that we always print the version field:
-//
-// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
-//
-// Since the logger has been pushed to the context, we can now get the version
-// field for free with our log messages. Future calls to GetLogger on the new
-// context will have the version field:
-//
-// GetLogger(ctx).Infof("this log message has a version field")
-//
-// This becomes more powerful when we start stacking loggers. Let's say we
-// have the version logger from above but also want a request id. Using the
-// context above, in our request scoped function, we place another logger in
-// the context:
-//
-// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
-// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
-//
-// When GetLogger is called on the new context, "http.request.id" will be
-// included as a logger field, along with the original "version" field:
-//
-// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
-//
-// Note that this only affects the new context; the previous context, with the
-// version field, can be used independently. Put another way, the new logger,
-// added to the request context, is unique to that context and can have
-// request scoped variables.
-//
-// HTTP Requests
-//
-// This package also contains several methods for working with http requests.
-// The concepts are very similar to those described above. We simply place the
-// request in the context using WithRequest. This makes the request variables
-// available. GetRequestLogger can then be called to get request specific
-// variables in a log line:
-//
-// ctx = WithRequest(ctx, req)
-// GetRequestLogger(ctx).Infof("request variables")
-//
-// Like above, if we want to include the request data in all log messages in
-// the context, we push the logger to a new context and use that one:
-//
-// ctx = WithLogger(ctx, GetRequestLogger(ctx))
-//
-// The concept is fairly powerful and ensures that calls throughout the stack
-// can be traced in log messages. Using the fields like "http.request.id", one
-// can analyze call flow for a particular request with a simple grep of the
-// logs.
-package context
-
-blob
-mark :32
-data 8396
-package context
-
-import (
-	"errors"
-	"net"
-	"net/http"
-	"strings"
-	"sync"
-	"time"
-
-	"code.google.com/p/go-uuid/uuid"
-	log "github.com/Sirupsen/logrus"
-	"github.com/gorilla/mux"
-)
-
-// Common errors used with this package.
-var (
-	ErrNoRequestContext = errors.New("no http request in context")
-	ErrNoResponseWriterContext = errors.New("no http response in context")
-)
-
-func parseIP(ipStr string) net.IP {
-	ip := net.ParseIP(ipStr)
-	if ip == nil {
-		log.Warnf("invalid remote IP address: %q", ipStr)
-	}
-	return ip
-}
-
-// RemoteAddr extracts the remote address of the request, taking into
-// account proxy headers.
-func RemoteAddr(r *http.Request) string {
-	if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
-		proxies := strings.Split(prior, ",")
-		if len(proxies) > 0 {
-			remoteAddr := strings.Trim(proxies[0], " ")
-			if parseIP(remoteAddr) != nil {
-				return remoteAddr
-			}
-		}
-	}
-	// X-Real-Ip is less supported, but worth checking in the
-	// absence of X-Forwarded-For
-	if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
-		if parseIP(realIP) != nil {
-			return realIP
-		}
-	}
-
-	return r.RemoteAddr
-}
-
-// RemoteIP extracts the remote IP of the request, taking into
-// account proxy headers.
-func RemoteIP(r *http.Request) string {
-	addr := RemoteAddr(r)
-
-	// Try parsing it as "IP:port"
-	if ip, _, err := net.SplitHostPort(addr); err == nil {
-		return ip
-	}
-
-	return addr
-}
-
-// WithRequest places the request on the context. The context of the request
-// is assigned a unique id, available at "http.request.id". The request itself
-// is available at "http.request". Other common attributes are available under
-// the prefix "http.request.". If a request is already present on the context,
-// this method will panic.
-func WithRequest(ctx Context, r *http.Request) Context {
-	if ctx.Value("http.request") != nil {
-		// NOTE(stevvooe): This needs to be considered a programming error. It
-		// is unlikely that we'd want to have more than one request in
-		// context.
-		panic("only one request per context")
-	}
-
-	return &httpRequestContext{
-		Context: ctx,
-		startedAt: time.Now(),
-		id: uuid.New(), // assign the request a unique id.
-		r: r,
-	}
-}
-
-// GetRequest returns the http request in the given context. Returns
-// ErrNoRequestContext if the context does not have an http request associated
-// with it.
-func GetRequest(ctx Context) (*http.Request, error) {
-	if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
-		return r, nil
-	}
-	return nil, ErrNoRequestContext
-}
-
-// GetRequestID attempts to resolve the current request id, if possible. An
-// empty string is returned if it is not available on the context.
-func GetRequestID(ctx Context) string {
-	return GetStringValue(ctx, "http.request.id")
-}
-
-// WithResponseWriter returns a new context and response writer that makes
-// interesting response statistics available within the context.
-func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
-	irw := &instrumentedResponseWriter{
-		ResponseWriter: w,
-		Context: ctx,
-	}
-
-	return irw, irw
-}
-
-// GetResponseWriter returns the http.ResponseWriter from the provided
-// context. If not present, ErrNoResponseWriterContext is returned. The
-// returned instance provides instrumentation in the context.
-func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
-	v := ctx.Value("http.response")
-
-	rw, ok := v.(http.ResponseWriter)
-	if !ok || rw == nil {
-		return nil, ErrNoResponseWriterContext
-	}
-
-	return rw, nil
-}
-
-// getVarsFromRequest lets us change request vars implementation for testing
-// and maybe future changes.
-var getVarsFromRequest = mux.Vars - -// WithVars extracts gorilla/mux vars and makes them available on the returned -// context. Variables are available at keys with the prefix "vars.". For -// example, if looking for the variable "name", it can be accessed as -// "vars.name". Implementations that are accessing values need not know that -// the underlying context is implemented with gorilla/mux vars. -func WithVars(ctx Context, r *http.Request) Context { - return &muxVarsContext{ - Context: ctx, - vars: getVarsFromRequest(r), - } -} - -// GetRequestLogger returns a logger that contains fields from the request in -// the current context. If the request is not available in the context, no -// fields will display. Request loggers can safely be pushed onto the context. -func GetRequestLogger(ctx Context) Logger { - return GetLogger(ctx, - "http.request.id", - "http.request.method", - "http.request.host", - "http.request.uri", - "http.request.referer", - "http.request.useragent", - "http.request.remoteaddr", - "http.request.contenttype") -} - -// GetResponseLogger reads the current response stats and builds a logger. -// Because the values are read at call time, pushing a logger returned from -// this function on the context will lead to missing or invalid data. Only -// call this at the end of a request, after the response has been written. -func GetResponseLogger(ctx Context) Logger { - l := getLogrusLogger(ctx, - "http.response.written", - "http.response.status", - "http.response.contenttype") - - duration := Since(ctx, "http.request.startedat") - - if duration > 0 { - l = l.WithField("http.response.duration", duration.String()) - } - - return l -} - -// httpRequestContext makes information about a request available to context. -type httpRequestContext struct { - Context - - startedAt time.Time - id string - r *http.Request -} - -// Value returns a keyed element of the request for use in the context. To get -// the request itself, query "request". For other components, access them as -// "request.". For example, r.RequestURI -func (ctx *httpRequestContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.request" { - return ctx.r - } - - if !strings.HasPrefix(keyStr, "http.request.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - switch parts[2] { - case "uri": - return ctx.r.RequestURI - case "remoteaddr": - return RemoteAddr(ctx.r) - case "method": - return ctx.r.Method - case "host": - return ctx.r.Host - case "referer": - referer := ctx.r.Referer() - if referer != "" { - return referer - } - case "useragent": - return ctx.r.UserAgent() - case "id": - return ctx.id - case "startedat": - return ctx.startedAt - case "contenttype": - ct := ctx.r.Header.Get("Content-Type") - if ct != "" { - return ct - } - } - } - -fallback: - return ctx.Context.Value(key) -} - -type muxVarsContext struct { - Context - vars map[string]string -} - -func (ctx *muxVarsContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "vars" { - return ctx.vars - } - - if strings.HasPrefix(keyStr, "vars.") { - keyStr = strings.TrimPrefix(keyStr, "vars.") - } - - if v, ok := ctx.vars[keyStr]; ok { - return v - } - } - - return ctx.Context.Value(key) -} - -// instrumentedResponseWriter provides response writer information in a -// context. 
-type instrumentedResponseWriter struct { - http.ResponseWriter - Context - - mu sync.Mutex - status int - written int64 -} - -func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { - n, err = irw.ResponseWriter.Write(p) - - irw.mu.Lock() - irw.written += int64(n) - - // Guess the likely status if not set. - if irw.status == 0 { - irw.status = http.StatusOK - } - - irw.mu.Unlock() - - return -} - -func (irw *instrumentedResponseWriter) WriteHeader(status int) { - irw.ResponseWriter.WriteHeader(status) - - irw.mu.Lock() - irw.status = status - irw.mu.Unlock() -} - -func (irw *instrumentedResponseWriter) Flush() { - if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - -func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw.ResponseWriter - } - - if !strings.HasPrefix(keyStr, "http.response.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - irw.mu.Lock() - defer irw.mu.Unlock() - - switch parts[2] { - case "written": - return irw.written - case "status": - if irw.status != 0 { - return irw.status - } - case "contenttype": - contentType := irw.Header().Get("Content-Type") - if contentType != "" { - return contentType - } - } - } - -fallback: - return irw.Context.Value(key) -} - -blob -mark :33 -data 6090 -package context - -import ( - "net/http" - "net/http/httptest" - "net/http/httputil" - "net/url" - "reflect" - "testing" - "time" -) - -func TestWithRequest(t *testing.T) { - var req http.Request - - start := time.Now() - req.Method = "GET" - req.Host = "example.com" - req.RequestURI = "/test-test" - req.Header = make(http.Header) - req.Header.Set("Referer", "foo.com/referer") - req.Header.Set("User-Agent", "test/0.1") - - ctx := WithRequest(Background(), &req) - for _, testcase := range []struct { - key string - expected interface{} - }{ - { - key: "http.request", - expected: &req, - }, - { - key: "http.request.id", - }, - { - key: "http.request.method", - expected: req.Method, - }, - { - key: "http.request.host", - expected: req.Host, - }, - { - key: "http.request.uri", - expected: req.RequestURI, - }, - { - key: "http.request.referer", - expected: req.Referer(), - }, - { - key: "http.request.useragent", - expected: req.UserAgent(), - }, - { - key: "http.request.remoteaddr", - expected: req.RemoteAddr, - }, - { - key: "http.request.startedat", - }, - } { - v := ctx.Value(testcase.key) - - if v == nil { - t.Fatalf("value not found for %q", testcase.key) - } - - if testcase.expected != nil && v != testcase.expected { - t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) - } - - // Key specific checks! 
- switch testcase.key { - case "http.request.id": - if _, ok := v.(string); !ok { - t.Fatalf("request id not a string: %v", v) - } - case "http.request.startedat": - vt, ok := v.(time.Time) - if !ok { - t.Fatalf("value not a time: %v", v) - } - - now := time.Now() - if vt.After(now) { - t.Fatalf("time generated too late: %v > %v", vt, now) - } - - if vt.Before(start) { - t.Fatalf("time generated too early: %v < %v", vt, start) - } - } - } -} - -type testResponseWriter struct { - flushed bool - status int - written int64 - header http.Header -} - -func (trw *testResponseWriter) Header() http.Header { - if trw.header == nil { - trw.header = make(http.Header) - } - - return trw.header -} - -func (trw *testResponseWriter) Write(p []byte) (n int, err error) { - if trw.status == 0 { - trw.status = http.StatusOK - } - - n = len(p) - trw.written += int64(n) - return -} - -func (trw *testResponseWriter) WriteHeader(status int) { - trw.status = status -} - -func (trw *testResponseWriter) Flush() { - trw.flushed = true -} - -func TestWithResponseWriter(t *testing.T) { - trw := testResponseWriter{} - ctx, rw := WithResponseWriter(Background(), &trw) - - if ctx.Value("http.response") != &trw { - t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), &trw) - } - - if n, err := rw.Write(make([]byte, 1024)); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } else if n != 1024 { - t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) - } - - if ctx.Value("http.response.status") != http.StatusOK { - t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK) - } - - if ctx.Value("http.response.written") != int64(1024) { - t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) - } - - // Make sure flush propagates - rw.(http.Flusher).Flush() - - if !trw.flushed { - t.Fatalf("response writer not flushed") - } - - // Write another status and make sure context is correct. This normally - // wouldn't work except for in this contrived testcase. - rw.WriteHeader(http.StatusBadRequest) - - if ctx.Value("http.response.status") != http.StatusBadRequest { - t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest) - } -} - -func TestWithVars(t *testing.T) { - var req http.Request - vars := map[string]string{ - "foo": "asdf", - "bar": "qwer", - } - - getVarsFromRequest = func(r *http.Request) map[string]string { - if r != &req { - t.Fatalf("unexpected request: %v != %v", r, req) - } - - return vars - } - - ctx := WithVars(Background(), &req) - for _, testcase := range []struct { - key string - expected interface{} - }{ - { - key: "vars", - expected: vars, - }, - { - key: "vars.foo", - expected: "asdf", - }, - { - key: "vars.bar", - expected: "qwer", - }, - } { - v := ctx.Value(testcase.key) - - if !reflect.DeepEqual(v, testcase.expected) { - t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected) - } - } -} - -// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test -// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten -// at the transport layer to 127.0.0.1: . However, as the X-Forwarded-For header -// just contains the IP address, it is different enough for testing. 
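// The test below exercises both sources consulted for the remote address:
// the X-Forwarded-For header set by the reverse proxy and an explicitly
// injected X-Real-Ip header.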
-func TestRemoteAddr(t *testing.T) { - var expectedRemote string - backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - if r.RemoteAddr == expectedRemote { - t.Errorf("Unexpected matching remote addresses") - } - - actualRemote := RemoteAddr(r) - if expectedRemote != actualRemote { - t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) - } - - w.WriteHeader(200) - })) - - defer backend.Close() - backendURL, err := url.Parse(backend.URL) - if err != nil { - t.Fatal(err) - } - - proxy := httputil.NewSingleHostReverseProxy(backendURL) - frontend := httptest.NewServer(proxy) - defer frontend.Close() - - // X-Forwarded-For set by proxy - expectedRemote = "127.0.0.1" - proxyReq, err := http.NewRequest("GET", frontend.URL, nil) - if err != nil { - t.Fatal(err) - } - - _, err = http.DefaultClient.Do(proxyReq) - if err != nil { - t.Fatal(err) - } - - // RemoteAddr in X-Real-Ip - getReq, err := http.NewRequest("GET", backend.URL, nil) - if err != nil { - t.Fatal(err) - } - - expectedRemote = "1.2.3.4" - getReq.Header["X-Real-ip"] = []string{expectedRemote} - _, err = http.DefaultClient.Do(getReq) - if err != nil { - t.Fatal(err) - } - - // Valid X-Real-Ip and invalid X-Forwarded-For - getReq.Header["X-forwarded-for"] = []string{"1.2.3"} - _, err = http.DefaultClient.Do(getReq) - if err != nil { - t.Fatal(err) - } -} - -blob -mark :34 -data 3116 -package context - -import ( - "fmt" - - "github.com/Sirupsen/logrus" -) - -// Logger provides a leveled-logging interface. -type Logger interface { - // standard logger methods - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) - - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - - Panic(args ...interface{}) - Panicf(format string, args ...interface{}) - Panicln(args ...interface{}) - - // Leveled methods, from logrus - Debug(args ...interface{}) - Debugf(format string, args ...interface{}) - Debugln(args ...interface{}) - - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Errorln(args ...interface{}) - - Info(args ...interface{}) - Infof(format string, args ...interface{}) - Infoln(args ...interface{}) - - Warn(args ...interface{}) - Warnf(format string, args ...interface{}) - Warnln(args ...interface{}) -} - -// WithLogger creates a new context with provided logger. -func WithLogger(ctx Context, logger Logger) Context { - return WithValue(ctx, "logger", logger) -} - -// GetLoggerWithField returns a logger instance with the specified field key -// and value without affecting the context. Extra specified keys will be -// resolved from the context. -func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) -} - -// GetLoggerWithFields returns a logger instance with the specified fields -// without affecting the context. Extra specified keys will be resolved from -// the context. -func GetLoggerWithFields(ctx Context, fields map[string]interface{}, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...).WithFields(logrus.Fields(fields)) -} - -// GetLogger returns the logger from the current context, if present. If one -// or more keys are provided, they will be resolved on the context and -// included in the logger. 
While context.Value takes an interface, any key -// argument passed to GetLogger will be passed to fmt.Sprint when expanded as -// a logging key field. If context keys are integer constants, for example, -// its recommended that a String method is implemented. -func GetLogger(ctx Context, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...) -} - -// GetLogrusLogger returns the logrus logger for the context. If one more keys -// are provided, they will be resolved on the context and included in the -// logger. Only use this function if specific logrus functionality is -// required. -func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { - var logger *logrus.Entry - - // Get a logger, if it is present. - loggerInterface := ctx.Value("logger") - if loggerInterface != nil { - if lgr, ok := loggerInterface.(*logrus.Entry); ok { - logger = lgr - } - } - - if logger == nil { - // If no logger is found, just return the standard logger. - logger = logrus.NewEntry(logrus.StandardLogger()) - } - - fields := logrus.Fields{} - - for _, key := range keys { - v := ctx.Value(key) - if v != nil { - fields[fmt.Sprint(key)] = v - } - } - - return logger.WithFields(fields) -} - -blob -mark :35 -data 2850 -package context - -import ( - "runtime" - "time" - - "code.google.com/p/go-uuid/uuid" -) - -// WithTrace allocates a traced timing span in a new context. This allows a -// caller to track the time between calling WithTrace and the returned done -// function. When the done function is called, a log message is emitted with a -// "trace.duration" field, corresponding to the elapased time and a -// "trace.func" field, corresponding to the function that called WithTrace. -// -// The logging keys "trace.id" and "trace.parent.id" are provided to implement -// dapper-like tracing. This function should be complemented with a WithSpan -// method that could be used for tracing distributed RPC calls. -// -// The main benefit of this function is to post-process log messages or -// intercept them in a hook to provide timing data. Trace ids and parent ids -// can also be linked to provide call tracing, if so required. -// -// Here is an example of the usage: -// -// func timedOperation(ctx Context) { -// ctx, done := WithTrace(ctx) -// defer done("this will be the log message") -// // ... function body ... -// } -// -// If the function ran for roughly 1s, such a usage would emit a log message -// as follows: -// -// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... -// -// Notice that the function name is automatically resolved, along with the -// package and a trace id is emitted that can be linked with parent ids. -func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { - if ctx == nil { - ctx = Background() - } - - pc, file, line, _ := runtime.Caller(1) - f := runtime.FuncForPC(pc) - ctx = &traced{ - Context: ctx, - id: uuid.New(), - start: time.Now(), - parent: GetStringValue(ctx, "trace.id"), - fnname: f.Name(), - file: file, - line: line, - } - - return ctx, func(format string, a ...interface{}) { - GetLogger(ctx, "trace.duration", "trace.id", "trace.parent.id", - "trace.func", "trace.file", "trace.line"). - Infof(format, a...) // info may be too chatty. - } -} - -// traced represents a context that is traced for function call timing. It -// also provides fast lookup for the various attributes that are available on -// the trace. 
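// Lookups for the "trace."-prefixed keys are answered directly from the
// struct fields below; any other key falls through to the parent context.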
-type traced struct { - Context - id string - parent string - start time.Time - fnname string - file string - line int -} - -func (ts *traced) Value(key interface{}) interface{} { - switch key { - case "trace.start": - return ts.start - case "trace.duration": - return time.Since(ts.start) - case "trace.id": - return ts.id - case "trace.parent.id": - if ts.parent == "" { - return nil // must return nil to signal no parent. - } - - return ts.parent - case "trace.func": - return ts.fnname - case "trace.file": - return ts.file - case "trace.line": - return ts.line - } - - return ts.Context.Value(key) -} - -blob -mark :36 -data 1870 -package context - -import ( - "runtime" - "testing" - "time" -) - -// TestWithTrace ensures that tracing has the expected values in the context. -func TestWithTrace(t *testing.T) { - pc, file, _, _ := runtime.Caller(0) // get current caller. - f := runtime.FuncForPC(pc) - - base := []valueTestCase{ - { - key: "trace.id", - notnilorempty: true, - }, - - { - key: "trace.file", - expected: file, - notnilorempty: true, - }, - { - key: "trace.line", - notnilorempty: true, - }, - { - key: "trace.start", - notnilorempty: true, - }, - } - - ctx, done := WithTrace(Background()) - defer done("this will be emitted at end of test") - - checkContextForValues(t, ctx, append(base, valueTestCase{ - key: "trace.func", - expected: f.Name(), - })) - - traced := func() { - parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. - - pc, _, _, _ := runtime.Caller(0) // get current caller. - f := runtime.FuncForPC(pc) - ctx, done := WithTrace(ctx) - defer done("this should be subordinate to the other trace") - time.Sleep(time.Second) - checkContextForValues(t, ctx, append(base, valueTestCase{ - key: "trace.func", - expected: f.Name(), - }, valueTestCase{ - key: "trace.parent.id", - expected: parentID, - })) - } - traced() - - time.Sleep(time.Second) -} - -type valueTestCase struct { - key string - expected interface{} - notnilorempty bool // just check not empty/not nil -} - -func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { - - for _, testcase := range values { - v := ctx.Value(testcase.key) - if testcase.notnilorempty { - if v == nil || v == "" { - t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) - } - continue - } - - if v != testcase.expected { - t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) - } - } -} - -blob -mark :37 -data 789 -package context - -import ( - "time" -) - -// Since looks up key, which should be a time.Time, and returns the duration -// since that time. If the key is not found, the value returned will be zero. -// This is helpful when inferring metrics related to context execution times. -func Since(ctx Context, key interface{}) time.Duration { - startedAtI := ctx.Value(key) - if startedAtI != nil { - if startedAt, ok := startedAtI.(time.Time); ok { - return time.Since(startedAt) - } - } - - return 0 -} - -// GetStringValue returns a string value from the context. The empty string -// will be returned if not found. -func GetStringValue(ctx Context, key string) (value string) { - stringi := ctx.Value(key) - if stringi != nil { - if valuev, ok := stringi.(string); ok { - value = valuev - } - } - - return value -} - -blob -mark :38 -data 5022 -# Docker Compose V1 + V2 registry - -This compose configuration configures a `v1` and `v2` registry behind an `nginx` -proxy. By default, you can access the combined registry at `localhost:5000`. 
- -The configuration does not support pushing images to `v2` and pulling from `v1`. -If a `docker` client has a version less than 1.6, Nginx will route its requests -to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. - -### Install Docker Compose - -1. Open a new terminal on the host with your `distribution` source. - -2. Get the `docker-compose` binary. - - $ sudo wget https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` -O /usr/local/bin/docker-compose - - This command installs the binary in the `/usr/local/bin` directory. - -3. Add executable permissions to the binary. - - $ sudo chmod +x /usr/local/bin/docker-compose - -## Build and run with Compose - -1. In your terminal, navigate to the `distribution/contrib/compose` directory - - This directory includes a single `docker-compose.yml` configuration. - - nginx: - build: "nginx" - ports: - - "5000:5000" - links: - - registryv1:registryv1 - - registryv2:registryv2 - registryv1: - image: registry - ports: - - "5000" - registryv2: - build: "../../" - ports: - - "5000" - - This configuration builds a new `nginx` image as specified by the - `nginx/Dockerfile` file. The 1.0 registry comes from Docker's official - public image. Finally, the registry 2.0 image is built from the - `distribution/Dockerfile` you've used previously. - -2. Get a registry 1.0 image. - - $ docker pull registry:0.9.1 - - The Compose configuration looks for this image locally. If you don't do this - step, later steps can fail. - -3. Build `nginx`, the registry 2.0 image, and - - $ docker-compose build - registryv1 uses an image, skipping - Building registryv2... - Step 0 : FROM golang:1.4 - - ... - - Removing intermediate container 9f5f5068c3f3 - Step 4 : COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf - ---> 74acc70fa106 - Removing intermediate container edb84c2b40cb - Successfully built 74acc70fa106 - - The commmand outputs its progress until it completes. - -4. Start your configuration with compose. - - $ docker-compose up - Recreating compose_registryv1_1... - Recreating compose_registryv2_1... - Recreating compose_nginx_1... - Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 - ... - - -5. In another terminal, display the running configuration. - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 - 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 - aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 - -### Explore a bit - -1. Check for TLS on your `nginx` server. - - $ curl -v https://localhost:5000 - * Rebuilt URL to: https://localhost:5000/ - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 5000 (#0) - * successfully set certificate verify locations: - * CAfile: none - CApath: /etc/ssl/certs - * SSLv3, TLS handshake, Client hello (1): - * SSLv3, TLS handshake, Server hello (2): - * SSLv3, TLS handshake, CERT (11): - * SSLv3, TLS alert, Server hello (2): - * SSL certificate problem: self signed certificate - * Closing connection 0 - curl: (60) SSL certificate problem: self signed certificate - More details here: http://curl.haxx.se/docs/sslcerts.html - -2. Tag the `v1` registry image. 
- - $ docker tag registry:latest localhost:5000/registry_one:latest - -2. Push it to the localhost. - - $ docker push localhost:5000/registry_one:latest - - If you are using the 1.6 Docker client, this pushes the image the `v2 `registry. - -4. Use `curl` to list the image in the registry. - - $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 32777 (#0) - > GET /v2/registry1/tags/list HTTP/1.1 - > User-Agent: curl/7.36.0 - > Host: localhost:32777 - > Accept: */* - > - < HTTP/1.1 200 OK - < Content-Type: application/json; charset=utf-8 - < Docker-Distribution-Api-Version: registry/2.0 - < Date: Tue, 14 Apr 2015 22:34:13 GMT - < Content-Length: 39 - < - {"name":"registry1","tags":["latest"]} - * Connection #0 to host localhost left intact - - This example refers to the specific port assigned to the 2.0 registry. You saw - this port earlier, when you used `docker ps` to show your running containers. - - - -blob -mark :39 -data 220 -nginx: - build: "nginx" - ports: - - "5000:5000" - links: - - registryv1:registryv1 - - registryv2:registryv2 -registryv1: - image: registry - ports: - - "5000" -registryv2: - build: "../../" - ports: - - "5000" - -blob -mark :40 -data 227 -FROM nginx:1.7 - -COPY nginx.conf /etc/nginx/nginx.conf -COPY registry.conf /etc/nginx/conf.d/registry.conf -COPY docker-registry.conf /etc/nginx/docker-registry.conf -COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf - -blob -mark :41 -data 376 -proxy_pass http://docker-registry-v2; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_read_timeout 900; - -blob -mark :42 -data 472 -proxy_pass http://docker-registry; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_set_header Authorization ""; # see https://github.com/docker/docker-registry/issues/170 -proxy_read_timeout 900; - -blob -mark :43 -data 599 -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - - keepalive_timeout 65; - - include /etc/nginx/conf.d/*.conf; -} - - -blob -mark :44 -data 857 -# Docker registry proxy for api versions 1 and 2 - -upstream docker-registry { - server registryv1:5000; -} - -upstream docker-registry-v2 { - server registryv2:5000; -} - -# No client auth or TLS -server { - listen 5000; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if 
($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { - return 404; - } - - include docker-registry-v2.conf; - } - - location / { - include docker-registry.conf; - } -} - - -blob -mark :45 -data 4349 -package digest - -import ( - "bytes" - "fmt" - "hash" - "io" - "io/ioutil" - "regexp" - "strings" - - "github.com/docker/docker/pkg/tarsum" -) - -const ( - // DigestTarSumV1EmptyTar is the digest for the empty tar file. - DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - // DigestSha256EmptyTar is the canonical sha256 digest of empty data - DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string. -// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// More important for this code base, this type is compatible with tarsum -// digests. For example, the following would be a valid Digest: -// -// tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b -// -// This allows to abstract the digest behind this type and work only in those -// terms. -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg string, h hash.Hash) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) -} - -// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. -func NewDigestFromHex(alg, hex string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, hex)) -} - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) - -// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. -var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) - -var ( - // ErrDigestInvalidFormat returned when digest format invalid. - ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestUnsupported returned when the digest algorithm is unsupported. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// ParseDigest parses s and returns the validated digest object. An error will -// be returned if the format is invalid. -func ParseDigest(s string) (Digest, error) { - d := Digest(s) - - return d, d.Validate() -} - -// FromReader returns the most valid digest for the underlying content. -func FromReader(rd io.Reader) (Digest, error) { - digester := NewCanonicalDigester() - - if _, err := io.Copy(digester, rd); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// FromTarArchive produces a tarsum digest from reader rd. -func FromTarArchive(rd io.Reader) (Digest, error) { - ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1) - if err != nil { - return "", err - } - - if _, err := io.Copy(ioutil.Discard, ts); err != nil { - return "", err - } - - d, err := ParseDigest(ts.Sum(nil)) - if err != nil { - return "", err - } - - return d, nil -} - -// FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) (Digest, error) { - return FromReader(bytes.NewReader(p)) -} - -// Validate checks that the contents of d is a valid digest, returning an -// error if not. 
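// Tarsum digests are validated first via ParseTarSum; anything else must
// match DigestRegexpAnchored and use one of the supported sha algorithms
// (sha256, sha384 or sha512).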
-func (d Digest) Validate() error { - s := string(d) - // Common case will be tarsum - _, err := ParseTarSum(s) - if err == nil { - return nil - } - - // Continue on for general parser - - if !DigestRegexpAnchored.MatchString(s) { - return ErrDigestInvalidFormat - } - - i := strings.Index(s, ":") - if i < 0 { - return ErrDigestInvalidFormat - } - - // case: "sha256:" with no hex. - if i+1 == len(s) { - return ErrDigestInvalidFormat - } - - switch s[:i] { - case "sha256", "sha384", "sha512": - break - default: - return ErrDigestUnsupported - } - - return nil -} - -// Algorithm returns the algorithm portion of the digest. This will panic if -// the underlying digest is not in a valid format. -func (d Digest) Algorithm() string { - return string(d[:d.sepIndex()]) -} - -// Hex returns the hex digest portion of the digest. This will panic if the -// underlying digest is not in a valid format. -func (d Digest) Hex() string { - return string(d[d.sepIndex()+1:]) -} - -func (d Digest) String() string { - return string(d) -} - -func (d Digest) sepIndex() int { - i := strings.Index(string(d), ":") - - if i < 0 { - panic("could not find ':' in digest: " + d) - } - - return i -} - -blob -mark :46 -data 3274 -package digest - -import ( - "bytes" - "io" - "testing" -) - -func TestParseDigest(t *testing.T) { - for _, testcase := range []struct { - input string - err error - algorithm string - hex string - }{ - { - input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "tarsum+sha256", - hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "tarsum.dev+sha256", - hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - algorithm: "tarsum.v1+sha256", - hex: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - }, - { - input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "sha256", - hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - algorithm: "sha384", - hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", - }, - { - // empty hex - input: "sha256:", - err: ErrDigestInvalidFormat, - }, - { - // just hex - input: "d41d8cd98f00b204e9800998ecf8427e", - err: ErrDigestInvalidFormat, - }, - { - // not hex - input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", - err: ErrDigestInvalidFormat, - }, - { - input: "foo:d41d8cd98f00b204e9800998ecf8427e", - err: ErrDigestUnsupported, - }, - } { - digest, err := ParseDigest(testcase.input) - if err != testcase.err { - t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) - } - - if testcase.err != nil { - continue - } - - if digest.Algorithm() != testcase.algorithm { - t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm) - } - - if digest.Hex() != testcase.hex { - t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex) - } - - // Parse string return value and check equality - newParsed, err := ParseDigest(digest.String()) - - if err != nil { - t.Fatalf("unexpected error parsing input %q: %v", 
testcase.input, err) - } - - if newParsed != digest { - t.Fatalf("expected equal: %q != %q", newParsed, digest) - } - } -} - -// A few test cases used to fix behavior we expect in storage backend. - -func TestFromTarArchiveZeroLength(t *testing.T) { - checkTarsumDigest(t, "zero-length archive", bytes.NewReader([]byte{}), DigestTarSumV1EmptyTar) -} - -func TestFromTarArchiveEmptyTar(t *testing.T) { - // String of 1024 zeros is a valid, empty tar file. - checkTarsumDigest(t, "1024 zero bytes", bytes.NewReader(bytes.Repeat([]byte("\x00"), 1024)), DigestTarSumV1EmptyTar) -} - -func checkTarsumDigest(t *testing.T, msg string, rd io.Reader, expected Digest) { - dgst, err := FromTarArchive(rd) - if err != nil { - t.Fatalf("unexpected error digesting %s: %v", msg, err) - } - - if dgst != expected { - t.Fatalf("unexpected digest for %s: %q != %q", msg, dgst, expected) - } -} - -blob -mark :47 -data 1396 -package digest - -import ( - "crypto/sha256" - "hash" -) - -// Digester calculates the digest of written data. It is functionally -// equivalent to hash.Hash but provides methods for returning the Digest type -// rather than raw bytes. -type Digester struct { - alg string - hash.Hash -} - -// NewDigester create a new Digester with the given hashing algorithm and instance -// of that algo's hasher. -func NewDigester(alg string, h hash.Hash) Digester { - return Digester{ - alg: alg, - Hash: h, - } -} - -// NewCanonicalDigester is a convenience function to create a new Digester with -// our default settings. -func NewCanonicalDigester() Digester { - return NewDigester("sha256", sha256.New()) -} - -// Digest returns the current digest for this digester. -func (d *Digester) Digest() Digest { - return NewDigest(d.alg, d.Hash) -} - -// ResumableHash is the common interface implemented by all resumable hash -// functions. -type ResumableHash interface { - // ResumableHash is a superset of hash.Hash - hash.Hash - // Len returns the number of bytes written to the Hash so far. - Len() uint64 - // State returns a snapshot of the state of the Hash. - State() ([]byte, error) - // Restore resets the Hash to the given state. - Restore(state []byte) error -} - -// ResumableDigester is a digester that can export its internal state and be -// restored from saved state. -type ResumableDigester interface { - ResumableHash - Digest() Digest -} - -blob -mark :48 -data 1317 -// +build !noresumabledigest - -package digest - -import ( - "fmt" - - "github.com/jlhawn/go-crypto" - // For ResumableHash - _ "github.com/jlhawn/go-crypto/sha256" // For Resumable SHA256 - _ "github.com/jlhawn/go-crypto/sha512" // For Resumable SHA384, SHA512 -) - -// resumableDigester implements ResumableDigester. -type resumableDigester struct { - alg string - crypto.ResumableHash -} - -var resumableHashAlgs = map[string]crypto.Hash{ - "sha256": crypto.SHA256, - "sha384": crypto.SHA384, - "sha512": crypto.SHA512, -} - -// NewResumableDigester creates a new ResumableDigester with the given hashing -// algorithm. -func NewResumableDigester(alg string) (ResumableDigester, error) { - hash, supported := resumableHashAlgs[alg] - if !supported { - return resumableDigester{}, fmt.Errorf("unsupported resumable hash algorithm: %s", alg) - } - - return resumableDigester{ - alg: alg, - ResumableHash: hash.New(), - }, nil -} - -// NewCanonicalResumableDigester creates a ResumableDigester using the default -// digest algorithm. 
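// The canonical algorithm is sha256, mirroring NewCanonicalDigester in the
// non-resumable Digester implementation.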
-func NewCanonicalResumableDigester() ResumableDigester { - return resumableDigester{ - alg: "sha256", - ResumableHash: crypto.SHA256.New(), - } -} - -// Digest returns the current digest for this resumable digester. -func (d resumableDigester) Digest() Digest { - return NewDigest(d.alg, d.ResumableHash) -} - -blob -mark :49 -data 2176 -// Package digest provides a generalized type to opaquely represent message -// digests and their operations within the registry. The Digest type is -// designed to serve as a flexible identifier in a content-addressable system. -// More importantly, it provides tools and wrappers to work with tarsums and -// hash.Hash-based digests with little effort. -// -// Basics -// -// The format of a digest is simply a string with two parts, dubbed the -// "algorithm" and the "digest", separated by a colon: -// -// : -// -// An example of a sha256 digest representation follows: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// In this case, the string "sha256" is the algorithm and the hex bytes are -// the "digest". A tarsum example will be more illustrative of the use case -// involved in the registry: -// -// tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b -// -// For this, we consider the algorithm to be "tarsum+sha256". Prudent -// applications will favor the ParseDigest function to verify the format over -// using simple type casts. However, a normal string can be cast as a digest -// with a simple type conversion: -// -// Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b") -// -// Because the Digest type is simply a string, once a valid Digest is -// obtained, comparisons are cheap, quick and simple to express with the -// standard equality operator. -// -// Verification -// -// The main benefit of using the Digest type is simple verification against a -// given digest. The Verifier interface, modeled after the stdlib hash.Hash -// interface, provides a common write sink for digest verification. After -// writing is complete, calling the Verifier.Verified method will indicate -// whether or not the stream of bytes matches the target digest. -// -// Missing Features -// -// In addition to the above, we intend to add the following features to this -// package: -// -// 1. A Digester type that supports write sink digest calculation. -// -// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. -// -package digest - -blob -mark :50 -data 2055 -package digest - -import ( - "fmt" - - "regexp" -) - -// TarSumRegexp defines a reguler expression to match tarsum identifiers. -var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+") - -// TarsumRegexpCapturing defines a reguler expression to match tarsum identifiers with -// capture groups corresponding to each component. -var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)") - -// TarSumInfo contains information about a parsed tarsum. -type TarSumInfo struct { - // Version contains the version of the tarsum. - Version string - - // Algorithm contains the algorithm for the final digest - Algorithm string - - // Digest contains the hex-encoded digest. - Digest string -} - -// InvalidTarSumError provides informations about a TarSum that cannot be parsed -// by ParseTarSum. 
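// The error message reports the offending input verbatim.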
-type InvalidTarSumError string - -func (e InvalidTarSumError) Error() string { - return fmt.Sprintf("invalid tarsum: %q", string(e)) -} - -// ParseTarSum parses a tarsum string into its components of interest. For -// example, this method may receive the tarsum in the following format: -// -// tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e -// -// The function will return the following: -// -// TarSumInfo{ -// Version: "v1", -// Algorithm: "sha256", -// Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", -// } -// -func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) { - components := TarsumRegexpCapturing.FindStringSubmatch(tarSum) - - if len(components) != 1+TarsumRegexpCapturing.NumSubexp() { - return TarSumInfo{}, InvalidTarSumError(tarSum) - } - - return TarSumInfo{ - Version: components[3], - Algorithm: components[4], - Digest: components[5], - }, nil -} - -// String returns the valid, string representation of the tarsum info. -func (tsi TarSumInfo) String() string { - if tsi.Version == "" { - return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest) - } - - return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest) -} - -blob -mark :51 -data 1963 -package digest - -import ( - "reflect" - "testing" -) - -func TestParseTarSumComponents(t *testing.T) { - for _, testcase := range []struct { - input string - expected TarSumInfo - err error - }{ - { - input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - expected: TarSumInfo{ - Version: "v1", - Algorithm: "sha256", - Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - }, - }, - { - input: "", - err: InvalidTarSumError(""), - }, - { - input: "purejunk", - err: InvalidTarSumError("purejunk"), - }, - { - input: "tarsum.v23+test:12341234123412341effefefe", - expected: TarSumInfo{ - Version: "v23", - Algorithm: "test", - Digest: "12341234123412341effefefe", - }, - }, - - // The following test cases are ported from docker core - { - // Version 0 tarsum - input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - expected: TarSumInfo{ - Algorithm: "sha256", - Digest: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - }, - { - // Dev version tarsum - input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - expected: TarSumInfo{ - Version: "dev", - Algorithm: "sha256", - Digest: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - }, - } { - tsi, err := ParseTarSum(testcase.input) - if err != nil { - if testcase.err != nil && err == testcase.err { - continue // passes - } - - t.Fatalf("unexpected error parsing tarsum: %v", err) - } - - if testcase.err != nil { - t.Fatalf("expected error not encountered on %q: %v", testcase.input, testcase.err) - } - - if !reflect.DeepEqual(tsi, testcase.expected) { - t.Fatalf("expected tarsum info: %v != %v", tsi, testcase.expected) - } - - if testcase.input != tsi.String() { - t.Fatalf("input should equal output: %q != %q", tsi.String(), testcase.input) - } - } -} - -blob -mark :52 -data 2966 -package digest - -import ( - "crypto/sha256" - "crypto/sha512" - "hash" - "io" - "io/ioutil" - - "github.com/docker/docker/pkg/tarsum" -) - -// Verifier presents a general verification interface to be used with message -// digests and other byte stream verifications. 
Users instantiate a Verifier -// from one of the various methods, write the data under test to it then check -// the result with the Verified method. -type Verifier interface { - io.Writer - - // Verified will return true if the content written to Verifier matches - // the digest. - Verified() bool -} - -// NewDigestVerifier returns a verifier that compares the written bytes -// against a passed in digest. -func NewDigestVerifier(d Digest) (Verifier, error) { - if err := d.Validate(); err != nil { - return nil, err - } - - alg := d.Algorithm() - switch alg { - case "sha256", "sha384", "sha512": - return hashVerifier{ - hash: newHash(alg), - digest: d, - }, nil - default: - // Assume we have a tarsum. - version, err := tarsum.GetVersionFromTarsum(string(d)) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - - // TODO(stevvooe): We may actually want to ban the earlier versions of - // tarsum. That decision may not be the place of the verifier. - - ts, err := tarsum.NewTarSum(pr, true, version) - if err != nil { - return nil, err - } - - // TODO(sday): Ick! A goroutine per digest verification? We'll have to - // get the tarsum library to export an io.Writer variant. - go func() { - if _, err := io.Copy(ioutil.Discard, ts); err != nil { - pr.CloseWithError(err) - } else { - pr.Close() - } - }() - - return &tarsumVerifier{ - digest: d, - ts: ts, - pr: pr, - pw: pw, - }, nil - } -} - -// NewLengthVerifier returns a verifier that returns true when the number of -// read bytes equals the expected parameter. -func NewLengthVerifier(expected int64) Verifier { - return &lengthVerifier{ - expected: expected, - } -} - -type lengthVerifier struct { - expected int64 // expected bytes read - len int64 // bytes read -} - -func (lv *lengthVerifier) Write(p []byte) (n int, err error) { - n = len(p) - lv.len += int64(n) - return n, err -} - -func (lv *lengthVerifier) Verified() bool { - return lv.expected == lv.len -} - -func newHash(name string) hash.Hash { - switch name { - case "sha256": - return sha256.New() - case "sha384": - return sha512.New384() - case "sha512": - return sha512.New() - default: - panic("unsupport algorithm: " + name) - } -} - -type hashVerifier struct { - digest Digest - hash hash.Hash -} - -func (hv hashVerifier) Write(p []byte) (n int, err error) { - return hv.hash.Write(p) -} - -func (hv hashVerifier) Verified() bool { - return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) -} - -type tarsumVerifier struct { - digest Digest - ts tarsum.TarSum - pr *io.PipeReader - pw *io.PipeWriter -} - -func (tv *tarsumVerifier) Write(p []byte) (n int, err error) { - return tv.pw.Write(p) -} - -func (tv *tarsumVerifier) Verified() bool { - return tv.digest == Digest(tv.ts.Sum(nil)) -} - -blob -mark :53 -data 5511 -package digest - -import ( - "bytes" - "crypto/rand" - "encoding/base64" - "io" - "os" - "strings" - "testing" - - "github.com/docker/distribution/testutil" -) - -func TestDigestVerifier(t *testing.T) { - p := make([]byte, 1<<20) - rand.Read(p) - digest, err := FromBytes(p) - if err != nil { - t.Fatalf("unexpected error digesting bytes: %#v", err) - } - - verifier, err := NewDigestVerifier(digest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, bytes.NewReader(p)) - - if !verifier.Verified() { - t.Fatalf("bytes not verified") - } - - tf, tarSum, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating tarfile: %v", err) - } - - digest, err = FromTarArchive(tf) - if err != nil { 
- t.Fatalf("error digesting tarsum: %v", err) - } - - if digest.String() != tarSum { - t.Fatalf("unexpected digest: %q != %q", digest.String(), tarSum) - } - - expectedSize, _ := tf.Seek(0, os.SEEK_END) // Get tar file size - tf.Seek(0, os.SEEK_SET) // seek back - - // This is the most relevant example for the registry application. It's - // effectively a read through pipeline, where the final sink is the digest - // verifier. - verifier, err = NewDigestVerifier(digest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - lengthVerifier := NewLengthVerifier(expectedSize) - rd := io.TeeReader(tf, lengthVerifier) - io.Copy(verifier, rd) - - if !lengthVerifier.Verified() { - t.Fatalf("verifier detected incorrect length") - } - - if !verifier.Verified() { - t.Fatalf("bytes not verified") - } -} - -// TestVerifierUnsupportedDigest ensures that unsupported digest validation is -// flowing through verifier creation. -func TestVerifierUnsupportedDigest(t *testing.T) { - unsupported := Digest("bean:0123456789abcdef") - - _, err := NewDigestVerifier(unsupported) - if err == nil { - t.Fatalf("expected error when creating verifier") - } - - if err != ErrDigestUnsupported { - t.Fatalf("incorrect error for unsupported digest: %v", err) - } -} - -// TestJunkNoDeadlock ensures that junk input into a digest verifier properly -// returns errors from the tarsum library. Specifically, we pass in a file -// with a "bad header" and should see the error from the io.Copy to verifier. -// This has been seen with gzipped tarfiles, mishandled by the tarsum package, -// but also on junk input, such as html. -func TestJunkNoDeadlock(t *testing.T) { - expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473") - junk := bytes.Repeat([]byte{'a'}, 1024) - - verifier, err := NewDigestVerifier(expected) - if err != nil { - t.Fatalf("unexpected error creating verifier: %v", err) - } - - rd := bytes.NewReader(junk) - if _, err := io.Copy(verifier, rd); err == nil { - t.Fatalf("unexpected error verifying input data: %v", err) - } -} - -// TestBadTarNoDeadlock runs a tar with a "bad" tar header through digest -// verifier, ensuring that the verifier returns an error properly. -func TestBadTarNoDeadlock(t *testing.T) { - // TODO(stevvooe): This test is exposing a bug in tarsum where if we pass - // a gzipped tar file into tarsum, the library returns an error. This - // should actually work. When the tarsum package is fixed, this test will - // fail and we can remove this test or invert it. - - // This tarfile was causing deadlocks in verifiers due mishandled copy error. - // This is a gzipped tar, which we typically don't see but should handle. 
- // - // From https://registry-1.docker.io/v2/library/ubuntu/blobs/tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473 - const badTar = ` -H4sIAAAJbogA/0otSdZnoDEwMDAxMDc1BdJggE6D2YZGJobGBmbGRsZAdYYGBkZGDAqmtHYYCJQW -lyQWAZ1CqTnonhsiAAAAAP//AsV/YkEJTdMAGfFvZmA2Gv/0AAAAAAD//4LFf3F+aVFyarFeTmZx -CbXtAOVnMxMTXPFvbGpmjhb/xobmwPinSyCO8PgHAAAA///EVU9v2z4MvedTEMihl9a5/26/YTkU -yNKiTTDsKMt0rE0WDYmK628/ym7+bFmH2DksQACbIB/5+J7kObwiQsXc/LdYVGibLObRccw01Qv5 -19EZ7hbbZudVgWtiDFCSh4paYII4xOVxNgeHLXrYow+GXAAqgSuEQhzlTR5ZgtlsVmB+aKe8rswe -zzsOjwtoPGoTEGplHHhMCJqxSNUPwesbEGbzOXxR34VCHndQmjfhUKhEq/FURI0FqJKFR5q9NE5Z -qbaoBGoglAB+5TSK0sOh3c3UPkRKE25dEg8dDzzIWmqN2wG3BNY4qRL1VFFAoJJb5SXHU90n34nk -SUS8S0AeGwqGyXdZel1nn7KLGhPO0kDeluvN48ty9Q2269ft8/PTy2b5GfKuh9/2LBIWo6oz+N8G -uodmWLETg0mW4lMP4XYYCL4+rlawftpIO40SA+W6Yci9wRZE1MNOjmyGdhBQRy9OHpqOdOGh/wT7 -nZdOkHZ650uIK+WrVZdkgErJfnNEJysLnI5FSAj4xuiCQNpOIoNWmhyLByVHxEpLf3dkr+k9KMsV -xV0FhiVB21hgD3V5XwSqRdOmsUYr7oNtZXTVzyTHc2/kqokBy2ihRMVRTN+78goP5Ur/aMhz+KOJ -3h2UsK43kdwDo0Q9jfD7ie2RRur7MdpIrx1Z3X4j/Q1qCswN9r/EGCvXiUy0fI4xeSknnH/92T/+ -fgIAAP//GkWjYBSMXAAIAAD//2zZtzAAEgAA` - expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473") - - verifier, err := NewDigestVerifier(expected) - if err != nil { - t.Fatalf("unexpected error creating verifier: %v", err) - } - - rd := base64.NewDecoder(base64.StdEncoding, strings.NewReader(badTar)) - - if _, err := io.Copy(verifier, rd); err == nil { - t.Fatalf("unexpected error verifying input data: %v", err) - } - - if verifier.Verified() { - // For now, we expect an error, since tarsum library cannot handle - // compressed tars (!!!). - t.Fatalf("no error received after invalid tar") - } -} - -// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for -// DigestVerifier. We should be tarsum/gzip limited for common cases but we -// want to verify this. -// -// The relevant benchmarks for comparison can be run with the following -// commands: -// -// go test -bench . crypto/sha1 -// go test -bench . github.com/docker/docker/pkg/tarsum -// - -blob -mark :54 -data 310 -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are availalbe in the -// README.md. -package distribution - -blob -mark :55 -data 852 -FROM docs/base:latest -MAINTAINER Mary (@moxiegirl) - -# to get the git info for this repo -COPY . /src - -# Reset the /docs dir so we can replace the theme meta with the new repo's git info -RUN git reset --hard - -# -# RUN git describe --match 'v[0-9]*' --dirty='.m' --always > /docs/VERSION -# The above line causes a floating point error in our tools -# -RUN grep "VERSION =" /src/version/version.go | sed 's/.*"\(.*\)".*/\1/' > /docs/VERSION -COPY docs/* /docs/sources/distribution/ -COPY docs/images/* /docs/sources/distribution/images/ -COPY docs/spec/* /docs/sources/distribution/spec/ -COPY docs/spec/auth/* /docs/sources/distribution/spec/auth/ -COPY docs/storage-drivers/* /docs/sources/distribution/storage-drivers/ -COPY docs/mkdocs.yml /docs/mkdocs-distribution.yml - - -# Then build everything together, ready for mkdocs -RUN /docs/build.sh - -blob -mark :56 -data 2256 -# Architecture - -## Design -**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios. 
- -### Eventual Consistency - -> **NOTE:** This section belongs somewhere, perhaps in a design document. We -> are leaving this here so the information is not lost. - -Running the registry on eventually consistent backends has been part of the -design from the beginning. This section covers some of the approaches to -dealing with this reality. - -There are a few classes of issues that we need to worry about when -implementing something on top of the storage drivers: - -1. Read-After-Write consistency (see this [article on - s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)). -2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict). - -In reality, the registry must worry about these kinds of errors when doing the -following: - -1. Accepting data into a temporary upload file may not have latest data block - yet (read-after-write). -2. Moving uploaded data into its blob location (write-write race). -3. Modifying the "current" manifest for given tag (write-write race). -4. A whole slew of operations around deletes (read-after-write, delete-write - races, garbage collection, etc.). - -The backend path layout employs a few techniques to avoid these problems: - -1. Large writes are done to private upload directories. This alleviates most - of the corruption potential under multiple writers by avoiding multiple - writers. -2. Constraints in storage driver implementations, such as support for writing - after the end of a file to extend it. -3. Digest verification to avoid data corruption. -4. Manifest files are stored by digest and cannot change. -5. All other non-content files (links, hashes, etc.) are written as an atomic - unit. Anything that requires additions and deletions is broken out into - separate "files". Last writer still wins. - -Unfortunately, one must play this game when trying to build something like -this on top of eventually consistent storage systems. If we run into serious -problems, we can wrap the storagedrivers in a shared consistency layer but -that would increase complexity and hinder registry cluster performance. - -blob -mark :57 -data 4490 -page_title: Build the development environment -page_description: Explains how to build the distribution project -page_keywords: registry, service, images, repository - -# Build the development environment - -If a go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - -```sh -go get github.com/docker/distribution/cmd/registry -``` - -The above will install the source repository into the `GOPATH`. The `registry` -binary can then be run with the following: - -``` -$ $GOPATH/bin/registry -version -$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown -``` - -The registry can be run with the default config using the following -incantantation: - -``` -$ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config.yml -INFO[0000] endpoint local-8082 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] debug server listening localhost:5001 -``` - -If it is working, one should see the above log messages. 
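Beyond scanning the log output, the v2 API's ping endpoint offers a quick way to confirm the registry is answering. The following is a minimal sketch, assuming the registry started above is listening on `localhost:5000`; the `Docker-Distribution-Api-Version` header it checks is the same one shown in the `curl` output elsewhere in these docs.

```go
package main

import (
	"fmt"
	"net/http"
)

// Ping the v2 API base endpoint of a locally running registry and report
// the status line and the advertised API version.
func main() {
	resp, err := http.Get("http://localhost:5000/v2/")
	if err != nil {
		fmt.Println("registry not reachable:", err)
		return
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.Status)
	fmt.Println("api version:", resp.Header.Get("Docker-Distribution-Api-Version"))
}
```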
- -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - -``` -go get github.com/tools/godep github.com/golang/lint/golint -``` - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - -``` -$ GOPATH=`godep path`:$GOPATH make -+ clean -+ fmt -+ vet -+ lint -+ build -github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar -github.com/Sirupsen/logrus -github.com/docker/libtrust -... -github.com/yvasiyarov/gorelic -github.com/docker/distribution/registry/handlers -github.com/docker/distribution/cmd/registry -+ test -... -ok github.com/docker/distribution/digest 7.875s -ok github.com/docker/distribution/manifest 0.028s -ok github.com/docker/distribution/notifications 17.322s -? github.com/docker/distribution/registry [no test files] -ok github.com/docker/distribution/registry/api/v2 0.101s -? github.com/docker/distribution/registry/auth [no test files] -ok github.com/docker/distribution/registry/auth/silly 0.011s -... -+ /Users/sday/go/src/github.com/docker/distribution/bin/registry -+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template -+ /Users/sday/go/src/github.com/docker/distribution/bin/dist -+ binaries -``` - -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - -```sh -$ ./bin/registry -version -./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m -``` - -### Developing - -The above approaches are helpful for small experimentation. If more complex -tasks are at hand, it is recommended to employ the full power of `godep`. - -The Makefile is designed to have its `GOPATH` defined externally. This allows -one to experiment with various development environment setups. This is -primarily useful when testing upstream bugfixes, by modifying local code. This -can be demonstrated using `godep` to migrate the `GOPATH` to use the specified -dependencies. The `GOPATH` can be migrated to the current package versions -declared in `Godeps` with the following command: - -```sh -godep restore -``` - -> **WARNING:** This command will checkout versions of the code specified in -> Godeps/Godeps.json, modifying the contents of `GOPATH`. If this is -> undesired, it is recommended to create a workspace devoted to work on the -> _Distribution_ project. - -With a successful run of the above command, one can now use `make` without -specifying the `GOPATH`: - -```sh -$ make -``` - -If that is successful, standard `go` commands, such as `go test` should work, -per package, without issue. 
- -blob -mark :58 -data 25172 -page_title: Configure a Registry -page_description: Explains how to deploy a registry service -page_keywords: registry, service, images, repository - - -# Registry Configuration Reference - -You configure a registry server using a YAML file. This page explains the -configuration options and the values they can take. You'll also find examples of -middleware and development environment configurations. - -## List of configuration options - -This section lists all the registry configuration options. Some options in -the list are mutually exclusive. So, make sure to read the detailed reference -information about each option that appears later in this page. - -```yaml -version: 0.1 -log: - level: debug - formatter: text - fields: - service: registry - environment: staging -loglevel: debug # deprecated: use "log" -storage: - filesystem: - rootdirectory: /tmp/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - bucket: bucketname - encrypt: true - secure: true - v4auth: true - chunksize: 5242880 - rootdirectory: /s3/object/name/prefix - cache: - layerinfo: inmemory -auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle -middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000 -reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true -http: - addr: localhost:5000 - prefix: /my/nested/registry/ - secret: asecretforlocaldevelopment - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - debug: - addr: localhost:5001 -notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 500 - threshold: 5 - backoff: 1000 -redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s -``` - -In some instances a configuration option is **optional** but it contains child -options marked as **required**. This indicates that you can omit the parent with -all its children. However, if the parent is included, you must also include all -the children marked **required**. - -## version - -```yaml -version: 0.1 -``` - -The `version` option is **required**. It specifies the configuration's version. -It is expected to remain a top-level field, to allow for a consistent version -check before parsing the remainder of the configuration file. - -## log - -The `log` subsection configures the behavior of the logging system. The logging -system outputs everything to stdout. You can adjust the granularity and format -with this configuration section. - -```yaml -log: - level: debug - formatter: text - fields: - service: registry - environment: staging -``` - - - - - - - - - - - - - - - - - - - - - -
| Parameter   | Required | Description |
|-------------|----------|-------------|
| `level`     | no       | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. |
| `formatter` | no       | Selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json` or `logstash`. The default is `text`. |
| `fields`    | no       | A map of field names to values. These are added to every log line for the context. This is useful for identifying the source of log messages after they are mixed into other systems. |

## loglevel

> **DEPRECATED:** Please use [log](#log) instead.

```yaml
loglevel: debug
```

Permitted values are `error`, `warn`, `info` and `debug`. The default is
`info`.

## storage

```yaml
storage:
  filesystem:
    rootdirectory: /tmp/registry
  azure:
    accountname: accountname
    accountkey: base64encodedaccountkey
    container: containername
  s3:
    accesskey: awsaccesskey
    secretkey: awssecretkey
    region: us-west-1
    bucket: bucketname
    encrypt: true
    secure: true
    v4auth: true
    chunksize: 5242880
    rootdirectory: /s3/object/name/prefix
  cache:
    layerinfo: inmemory
```

The `storage` option is **required** and defines which storage backend is in
use. You must configure exactly one backend; if you configure more, the
registry returns an error.

### cache

Use the `cache` subsection to enable caching of data accessed in the storage
backend. Currently, the only available cache provides fast access to layer
metadata; it is configured through the `layerinfo` field.

You can set the `layerinfo` field to `redis` or `inmemory`. The `redis` value
uses a Redis pool to cache layer metadata. The `inmemory` value uses an
in-memory map.

### filesystem

The `filesystem` storage backend uses the local disk to store registry files.
It is ideal for development and may be appropriate for some small-scale
production applications.

This backend has a single, required `rootdirectory` parameter. The parameter
specifies the absolute path to a directory. The registry stores all its data
here, so make sure there is adequate space available.

### azure

This storage backend uses Microsoft's Azure Storage platform.

| Parameter     | Required | Description |
|---------------|----------|-------------|
| `accountname` | yes      | Azure account name. |
| `accountkey`  | yes      | Azure account key. |
| `container`   | yes      | Name of the Azure container into which to store data. |

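
Putting those parameters together, a minimal `azure` storage section might
look like the following sketch (the account and container values are
placeholders):

```yaml
storage:
  azure:
    accountname: accountname              # placeholder Azure account name
    accountkey: base64encodedaccountkey   # placeholder base64-encoded key
    container: containername              # placeholder container name
```
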
### S3

This storage backend uses Amazon's Simple Storage Service (S3).

| Parameter       | Required | Description |
|-----------------|----------|-------------|
| `accesskey`     | yes      | Your AWS Access Key. |
| `secretkey`     | yes      | Your AWS Secret Key. |
| `region`        | yes      | The AWS region in which your bucket exists. For the moment, the Go AWS library in use does not use the newer DNS-based bucket routing. |
| `bucket`        | yes      | The bucket name in which you want to store the registry's data. |
| `encrypt`       | no       | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is `false`. |
| `secure`        | no       | Indicates whether to use HTTPS instead of HTTP. A boolean value. The default is `false`. |
| `v4auth`        | no       | Indicates whether the registry uses Version 4 of AWS's authentication. Generally, you should set this to `true`. By default, this is `false`. |
| `chunksize`     | no       | The S3 API requires multipart upload chunks to be at least 5 MB. This value should be a number that is at least `5*1024*1024`. |
| `rootdirectory` | no       | A prefix applied to all S3 keys, allowing you to segment data in your bucket if necessary. |

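
As a minimal sketch, an `s3` storage section combining the required parameters
with the recommended `v4auth` setting might look like this (all credential,
region, and bucket values are placeholders):

```yaml
storage:
  s3:
    accesskey: awsaccesskey     # placeholder AWS access key
    secretkey: awssecretkey     # placeholder AWS secret key
    region: us-west-1
    bucket: bucketname          # placeholder bucket name
    v4auth: true                # generally recommended, per the table above
    chunksize: 5242880          # 5*1024*1024, the multipart minimum
    rootdirectory: /s3/object/name/prefix
```
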
## auth

```yaml
auth:
  silly:
    realm: silly-realm
    service: silly-service
  token:
    realm: token-realm
    service: token-service
    issuer: registry-token-issuer
    rootcertbundle: /root/certs/bundle
```

The `auth` option is **optional**, as there are use cases (e.g. a mirror that
only permits pulls) for which authentication may not be desired. There are
currently two possible auth providers, `silly` and `token`. You can configure
only one `auth` provider.

### silly

The `silly` auth is only for development purposes. It simply checks for the
existence of the `Authorization` header in the HTTP request; it has no regard
for the header's value. If the header does not exist, the `silly` auth
responds with a challenge response, echoing back the realm, service, and scope
for which access was denied.

The following values are used to configure the response:

| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm`   | yes      | The realm in which the registry server authenticates. |
| `service` | yes      | The service being authenticated. |

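
For example, a development-only `auth` section using `silly` might look like
this sketch (the realm and service names are arbitrary placeholders):

```yaml
auth:
  silly:
    realm: silly-realm       # echoed back in the challenge response
    service: silly-service   # echoed back in the challenge response
```
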
### token

Token-based authentication allows the authentication system to be decoupled
from the registry. It is a well-established authentication paradigm with a
high degree of security.

| Parameter        | Required | Description |
|------------------|----------|-------------|
| `realm`          | yes      | The realm in which the registry server authenticates. |
| `service`        | yes      | The service being authenticated. |
| `issuer`         | yes      | The name of the token issuer. The issuer inserts this into the token, so it must match the value configured for the issuer. |
| `rootcertbundle` | yes      | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates that are used to sign authentication tokens. |

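
A complete `token` section supplies all four required parameters, as in this
sketch (the values are placeholders; the certificate bundle must contain the
public part of the token-signing certificates):

```yaml
auth:
  token:
    realm: token-realm
    service: token-service
    issuer: registry-token-issuer       # must match the issuer named in the tokens
    rootcertbundle: /root/certs/bundle  # public certs used to verify token signatures
```
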
For more information about token-based authentication configuration, see the
[specification](spec/auth/token.md).

## middleware

The `middleware` option is **optional**. Use this option to inject middleware
at named hook points. All middleware must implement the same interface as the
object they're wrapping. This means a registry middleware must implement the
`distribution.Namespace` interface, repository middleware must implement
`distribution.Repository`, and storage middleware must implement
`driver.StorageDriver`.

Currently only one middleware, `cloudfront`, a storage middleware, is
supported in the registry implementation.

```yaml
middleware:
  registry:
    - name: ARegistryMiddleware
      options:
        foo: bar
  repository:
    - name: ARepositoryMiddleware
      options:
        foo: bar
  storage:
    - name: cloudfront
      options:
        baseurl: https://my.cloudfronted.domain.com/
        privatekey: /path/to/pem
        keypairid: cloudfrontkeypairid
        duration: 3000
```

Each middleware entry has `name` and `options` entries. The `name` must
correspond to the name under which the middleware registers itself. The
`options` field is a map that details custom configuration required to
initialize the middleware. It is treated as a `map[string]interface{}`. As
such, it supports any interesting structures desired, leaving it up to the
middleware initialization function to best determine how to handle the
specific interpretation of the options.

### cloudfront

| Parameter    | Required | Description |
|--------------|----------|-------------|
| `baseurl`    | yes      | `SCHEME://HOST[/PATH]` at which Cloudfront is served. |
| `privatekey` | yes      | Private key for Cloudfront, provided by AWS. |
| `keypairid`  | yes      | Key pair ID provided by AWS. |
| `duration`   | no       | Duration for which a signed URL should be valid. |

## reporting

```yaml
reporting:
  bugsnag:
    apikey: bugsnagapikey
    releasestage: bugsnagreleasestage
    endpoint: bugsnagendpoint
  newrelic:
    licensekey: newreliclicensekey
    name: newrelicname
    verbose: true
```

The `reporting` option is **optional** and configures error and metrics
reporting tools. At the moment only two services are supported, [New
Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com); a valid
configuration may contain both.

### bugsnag

| Parameter      | Required | Description |
|----------------|----------|-------------|
| `apikey`       | yes      | API key provided by Bugsnag. |
| `releasestage` | no       | Tracks where the registry is deployed, for example, `production`, `staging`, or `development`. |
| `endpoint`     | no       | Specifies the enterprise Bugsnag endpoint. |

### newrelic

| Parameter    | Required | Description |
|--------------|----------|-------------|
| `licensekey` | yes      | License key provided by New Relic. |
| `name`       | no       | New Relic application name. |
| `verbose`    | no       | Enable New Relic debugging output on stdout. |

## http

```yaml
http:
  addr: localhost:5000
  prefix: /my/nested/registry/
  secret: asecretforlocaldevelopment
  tls:
    certificate: /path/to/x509/public
    key: /path/to/x509/private
    clientcas:
      - /path/to/ca.pem
      - /path/to/another/ca.pem
  debug:
    addr: localhost:5001
```

The `http` option details the configuration for the HTTP server that hosts the
registry.

| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr`    | yes      | The `HOST:PORT` for which the server should accept connections. |
| `prefix`  | no       | If the server does not run at the root path, use this value to specify the prefix. The root path is the section before `v2`. It should have both preceding and trailing slashes, for example `/path/`. |
| `secret`  | yes      | A random piece of data used to sign state that may be stored with the client, to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. |

### tls

The `tls` struct within `http` is **optional**. Use this to configure TLS
for the server. If you already have a server such as Nginx or Apache running
on the same host as the registry, you may prefer to configure TLS termination
there and proxy connections to the registry server.

| Parameter     | Required | Description |
|---------------|----------|-------------|
| `certificate` | yes      | Absolute path to the x509 certificate file. |
| `key`         | yes      | Absolute path to the x509 private key file. |
| `clientcas`   | no       | An array of absolute paths to x509 CA files. |

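
For instance, a `tls` fragment with client-certificate verification enabled
might look like the following sketch (all paths are placeholders):

```yaml
http:
  addr: localhost:5000
  tls:
    certificate: /path/to/x509/public   # server certificate
    key: /path/to/x509/private          # server private key
    clientcas:                          # optional: CAs used to verify client certificates
      - /path/to/ca.pem
      - /path/to/another/ca.pem
```
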
### debug

The `debug` option is **optional**. Use it to configure a debug server that
can be helpful in diagnosing problems. Contributors to the distribution
repository should find the debug server useful. Docker recommends disabling it
in production environments.

The `debug` section takes a single, required `addr` parameter. This parameter
specifies the `HOST:PORT` on which the debug server should accept connections.

## notifications

```yaml
notifications:
  endpoints:
    - name: alistener
      disabled: false
      url: https://my.listener.com/event
      headers:
      timeout: 500
      threshold: 5
      backoff: 1000
```

The `notifications` option is **optional** and currently may contain a single
option, `endpoints`.

### endpoints

`endpoints` is a list of named services (URLs) that can accept event
notifications.

| Parameter   | Required | Description |
|-------------|----------|-------------|
| `name`      | yes      | A human-readable name for the service. |
| `disabled`  | no       | A boolean to enable/disable notifications for a service. |
| `url`       | yes      | The URL to which events should be published. |
| `headers`   | yes      | Static headers to add to each request. |
| `timeout`   | yes      | An HTTP timeout value. This field takes a positive integer and an optional suffix indicating the unit of time: `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes), or `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `threshold` | yes      | An integer specifying how many failures must occur before the endpoint backs off. |
| `backoff`   | yes      | How long the system backs off before retrying. This field takes a positive integer and an optional suffix with the same units as `timeout`. If you omit the suffix, the system interprets the value as nanoseconds. |

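
Because a bare integer is read as nanoseconds, it is usually clearer to write
`timeout` and `backoff` with explicit unit suffixes. A sketch of an endpoint
entry using suffixed durations (the listener name and URL are placeholders):

```yaml
notifications:
  endpoints:
    - name: alistener
      url: https://my.listener.com/event
      timeout: 500ms   # 500 milliseconds, not 500 nanoseconds
      threshold: 5     # failures tolerated before backing off
      backoff: 1s      # wait one second before retrying
```
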
## redis

```yaml
redis:
  addr: localhost:6379
  password: asecret
  db: 0
  dialtimeout: 10ms
  readtimeout: 10ms
  writetimeout: 10ms
  pool:
    maxidle: 16
    maxactive: 64
    idletimeout: 300s
```

Declare parameters for constructing the redis connections. Registry instances
may use the Redis instance for several applications. The current purpose is
caching information about immutable blobs. Most of the options below control
how the registry connects to redis. You can control the pool's behavior
with the [pool](#pool) subsection.

| Parameter      | Required | Description |
|----------------|----------|-------------|
| `addr`         | yes      | Address (host and port) of the redis instance. |
| `password`     | no       | A password used to authenticate to the redis instance. |
| `db`           | no       | Selects the db for each connection. |
| `dialtimeout`  | no       | Timeout for connecting to the redis instance. |
| `readtimeout`  | no       | Timeout for reading from redis connections. |
| `writetimeout` | no       | Timeout for writing to redis connections. |

### pool

```yaml
pool:
  maxidle: 16
  maxactive: 64
  idletimeout: 300s
```

Configure the behavior of the Redis connection pool.

| Parameter     | Required | Description |
|---------------|----------|-------------|
| `maxidle`     | no       | Sets the maximum number of idle connections. |
| `maxactive`   | no       | Sets the maximum number of connections that should be opened before blocking a connection request. |
| `idletimeout` | no       | Sets the amount of time to wait before closing inactive connections. |

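
To see how this section interacts with the rest of the configuration: enabling
the Redis layer-metadata cache described under [storage](#storage) requires
both a `cache` entry and a `redis` block, as in this sketch (the address and
password are placeholders):

```yaml
storage:
  cache:
    layerinfo: redis   # use the Redis pool below instead of an in-memory map
redis:
  addr: localhost:6379
  password: asecret    # placeholder
  pool:
    maxidle: 16
    maxactive: 64
    idletimeout: 300s
```
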
## Example: Development configuration

The following is a simple example you can use for local development:

```yaml
version: 0.1
log:
  level: debug
storage:
  filesystem:
    rootdirectory: /tmp/registry-dev
http:
  addr: localhost:5000
  secret: asecretforlocaldevelopment
  debug:
    addr: localhost:5001
```

The above configures the registry instance to run on port `5000`, binding to
`localhost`, with the `debug` server enabled. Registry data storage is in the
`/tmp/registry-dev` directory. Logging is in `debug` mode, which is the most
verbose.

A similar simple configuration is available at
[config.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config.yml).
Both are generally useful for local development.

## Example: Middleware configuration

This example illustrates how to configure storage middleware in a registry.
Middleware allows the registry to serve layers via a content delivery network
(CDN). This is useful for reducing requests to the storage layer.

Currently, the registry supports [Amazon
Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in
conjunction with the S3 storage driver.

| Parameter  | Description |
|------------|-------------|
| `name`     | The storage middleware name. Currently `cloudfront` is an accepted value. |
| `disabled` | Set to `false` to easily disable the middleware. |
| `options`  | A set of key/value options to configure the middleware: `baseurl` (the Cloudfront base URL), `privatekey` (the location of your AWS private key on the filesystem), `keypairid` (the ID of your Cloudfront keypair), and `duration` (the duration in minutes for which the URL is valid; the default is 20). |

The following example illustrates these values:

```
middleware:
  storage:
    - name: cloudfront
      disabled: false
      options:
        baseurl: http://d111111abcdef8.cloudfront.net
        privatekey: /path/to/asecret.pem
        keypairid: asecret
        duration: 60
```

> **Note**: Cloudfront keys exist separately from other AWS keys. See
> [the documentation on AWS credentials](http://docs.aws.amazon.com/AWSSecurityCredentials/1.0/AboutAWSCredentials.html#KeyPairs)
> for more information.


page_title: Deploying a registry service
page_description: Explains how to deploy a registry service
page_keywords: registry, service, images, repository

# Deploying a registry service

This section explains how to deploy a Docker Registry Service either privately
for your own company or publicly for other users. For example, your company
may require a private registry to support your continuous integration (CI)
system as it builds new releases or test servers. Alternatively, your company
may have a large number of products or services with images you wish to serve
in a branded manner.

Docker's public registry maintains a default `registry` image to assist you in
the deployment process. This registry image is sufficient for running local
tests but is insufficient for production. For production you should configure
and build your own custom registry image from the `docker/distribution` code.

> **Note**: The examples on this page were written and tested using Ubuntu 14.04.
> If you are running Docker in a different OS, you may need to "translate"
> the commands to meet the requirements of your own environment.

## Simple example with the official image

In this section, you create a container running Docker's official registry
image. You push an image to, and then pull the same image from, this registry.
This is a good exercise for understanding the basic interactions a client has
with a local registry.

1. Install Docker.

2. Run the `hello-world` image from the Docker public registry.

        $ docker run hello-world

    The `run` command automatically pulls a `hello-world` image from Docker's
    official images.

3. Start a registry service on your localhost.

        $ docker run -p 5000:5000 registry:2.0

    This starts a registry on your `DOCKER_HOST` running on port `5000`.

4. List your images.

        $ docker images
        REPOSITORY     TAG      IMAGE ID      CREATED       VIRTUAL SIZE
        registry       2.0      bbf0b6ffe923  3 days ago    545.1 MB
        golang         1.4      121a93c90463  5 days ago    514.9 MB
        hello-world    latest   e45a5af57b00  3 months ago  910 B

    Your list should include a `hello-world` image from the earlier run.

5. Retag the `hello-world` image for your local repository.

        $ docker tag hello-world:latest localhost:5000/hello-mine:latest

    The command labels a `hello-world:latest` image using a new tag in the
    `[REGISTRYHOST/]NAME[:TAG]` format. The `REGISTRYHOST` in this case is
    `localhost`. In a Mac OSX environment, you'd substitute `$(boot2docker
    ip):5000` for the `localhost`.

6. List your new image.

        $ docker images
        REPOSITORY                  TAG      IMAGE ID      CREATED       VIRTUAL SIZE
        registry                    2.0      bbf0b6ffe923  3 days ago    545.1 MB
        golang                      1.4      121a93c90463  5 days ago    514.9 MB
        hello-world                 latest   e45a5af57b00  3 months ago  910 B
        localhost:5000/hello-mine   latest   ef5a5gf57b01  3 months ago  910 B

    You should see your new image in your listing.

7. Push this new image to your local registry.

        $ docker push localhost:5000/hello-mine:latest
        The push refers to a repository [localhost:5000/hello-mine] (len: 1)
        e45a5af57b00: Image already exists
        31cbccb51277: Image successfully pushed
        511136ea3c5a: Image already exists
        Digest: sha256:a1b13bc01783882434593119198938b9b9ef2bd32a0a246f16ac99b01383ef7a

8. Use the `curl` command and the Docker Registry Service API v2 to list your
   image in the registry:

        $ curl -v -X GET http://localhost:5000/v2/hello-mine/tags/list
        * Hostname was NOT found in DNS cache
        *   Trying 127.0.0.1...
        * Connected to localhost (127.0.0.1) port 5000 (#0)
        > GET /v2/hello-mine/tags/list HTTP/1.1
        > User-Agent: curl/7.35.0
        > Host: localhost:5000
        > Accept: */*
        >
        < HTTP/1.1 200 OK
        < Content-Type: application/json; charset=utf-8
        < Docker-Distribution-Api-Version: registry/2.0
        < Date: Sun, 12 Apr 2015 01:29:47 GMT
        < Content-Length: 40
        <
        {"name":"hello-mine","tags":["latest"]}
        * Connection #0 to host localhost left intact

    You can also get this information by entering the
    `http://localhost:5000/v2/hello-mine/tags/list` address in your browser.

9. Remove all the unused images from your local environment:

        $ docker rmi -f $(docker images -q -a )

    This command is for illustrative purposes; removing the image forces any
    `run` to pull from a registry rather than a local cache. If you run
    `docker images` after this, you should not see any instance of
    `hello-world` or `hello-mine` in your images list.

        $ docker images
        REPOSITORY     TAG      IMAGE ID      CREATED       VIRTUAL SIZE
        registry       2.0      bbf0b6ffe923  3 days ago    545.1 MB
        golang         1.4      121a93c90463  5 days ago    514.9 MB

10. Try running `hello-mine`.

        $ docker run hello-mine
        Unable to find image 'hello-mine:latest' locally
        Pulling repository hello-mine
        FATA[0001] Error: image library/hello-mine:latest not found

    The `run` command fails because your new image doesn't exist in the
    Docker public registry.

11. Now, try running the image but specify the image's registry:

        $ docker run localhost:5000/hello-mine

    If you run `docker images` after this you'll find a `hello-mine` instance.

### Making Docker's official registry image production ready

Docker's official image is for simple tests or debugging. Its configuration is
unsuitable for most production instances. For example, any client with access
to the server's IP can push and pull images to it. See the next section for
information on making this image production ready.

## Understand production deployment

When deploying a registry for a production deployment you should consider
these factors (a configuration sketch follows the list below):

- **backend storage**: Where should you store the images?
- **access and/or authentication**: Should users have full or controlled
  access? This can depend on whether you are serving images to the public or
  internally to your company only.
- **debugging**: When problems or issues arise, do you have the means of
  solving them? Logs are useful, as is reporting, for seeing trends.
- **caching**: Quickly retrieving images can be crucial if you rely on images
  for tests, builds, or other automated systems.

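
As a rough sketch of how those factors map onto configuration options, the
following hypothetical fragment (placeholder values throughout; see the
[configuration reference](configuration.md) for each option) combines S3
backend storage, token authentication, a Redis-backed layer-metadata cache,
and the debug server:

```yaml
version: 0.1
storage:
  s3:                           # backend storage: keep image data off the host
    accesskey: awsaccesskey     # placeholder
    secretkey: awssecretkey     # placeholder
    region: us-west-1
    bucket: bucketname          # placeholder
  cache:
    layerinfo: redis            # caching: fast layer-metadata lookups
auth:
  token:                        # access control: decouple auth from the registry
    realm: token-realm
    service: token-service
    issuer: registry-token-issuer
    rootcertbundle: /root/certs/bundle
http:
  addr: :5000
  secret: areallysecuresecret   # placeholder; generate securely for production
  debug:
    addr: localhost:5001        # debugging: expose the debug server internally
redis:
  addr: localhost:6379
```
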
You can configure your registry features to adjust for these factors. You do
this by specifying options on the command line or, more typically, by writing
a registry configuration file. The configuration file is in YAML format.

Docker's official repository image is preconfigured using the following
configuration file:

```yaml
version: 0.1
log:
  level: debug
  fields:
    service: registry
    environment: development
storage:
  cache:
    layerinfo: inmemory
  filesystem:
    rootdirectory: /tmp/registry-dev
http:
  addr: :5000
  secret: asecretforlocaldevelopment
  debug:
    addr: localhost:5001
redis:
  addr: localhost:6379
  pool:
    maxidle: 16
    maxactive: 64
    idletimeout: 300s
  dialtimeout: 10ms
  readtimeout: 10ms
  writetimeout: 10ms
notifications:
  endpoints:
    - name: local-8082
      url: http://localhost:5003/callback
      headers:
        Authorization: [Bearer ]
      timeout: 1s
      threshold: 10
      backoff: 1s
      disabled: true
    - name: local-8083
      url: http://localhost:8083/callback
      timeout: 1s
      threshold: 10
      backoff: 1s
      disabled: true
```

This configuration is very basic and you can see it would present some
problems in production. For example, the `http` section details the
configuration for the HTTP server that hosts the registry. The server is not
using even the most minimal transport layer security (TLS). Let's configure
that in the next section.

## Configure TLS on a registry server

In this section, you configure TLS on the server to enable communication
through the `https` protocol. Enabling TLS on the server is the minimum layer
of security recommended for running a registry behind a corporate firewall.
One way to do this is to build your own registry image.

### Download the source and generate certificates

1. [Download the registry
   source](https://github.com/docker/distribution/releases/tag/v2.0.0).

    Alternatively, use the `git clone` command if you are more comfortable
    with that.

2. Unpack the downloaded package into a local directory.

    The package creates a `distribution` directory.

3. Change to the root of the new `distribution` directory.

        $ cd distribution

4. Make a `certs` subdirectory.

        $ mkdir certs

5. Use `openssl` to generate some self-signed certificates.

        $ openssl req \
                 -newkey rsa:2048 -nodes -keyout certs/domain.key \
                 -x509 -days 365 -out certs/domain.crt

    This command prompts you for basic information it needs to create the
    certificates.

6. List the contents of the `certs` directory.

        $ ls certs
        domain.crt  domain.key

    When you build this container, the `certs` directory and its contents
    are copied in automatically.

### Add TLS to the configuration

The `distribution` repo includes sample registry configurations in the `cmd`
subdirectory. In this section, you edit one of these configurations to add TLS
support.

1. Edit the `./cmd/registry/config.yml` file.

        $ vi ./cmd/registry/config.yml

2. Locate the `http` block.

        http:
            addr: :5000
            secret: asecretforlocaldevelopment
            debug:
                addr: localhost:5001

3. Add a `tls` block for the server's self-signed certificates:

        http:
            addr: :5000
            secret: asecretforlocaldevelopment
            debug:
                addr: localhost:5001
            tls:
                certificate: /go/src/github.com/docker/distribution/certs/domain.crt
                key: /go/src/github.com/docker/distribution/certs/domain.key

    You provide the paths to the certificates in the container.
    If you want two-way authentication across the layer, you can add an
    optional `clientcas` section.

4. Save and close the file.

### Build and run your registry image

1. Build your registry image.

        $ docker build -t secure_registry .

2. Run your new image.

        $ docker run -p 5000:5000 secure_registry:latest
        time="2015-04-12T03:06:18.616502588Z" level=info msg="endpoint local-8082 disabled, skipping" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry
        time="2015-04-12T03:06:18.617012948Z" level=info msg="endpoint local-8083 disabled, skipping" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry
        time="2015-04-12T03:06:18.617190113Z" level=info msg="using inmemory layerinfo cache" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry
        time="2015-04-12T03:06:18.617349067Z" level=info msg="listening on :5000, tls" environment=development instance.id=bf33c9dc-2564-406b-97c3-6ee69dc20ec6 service=registry
        time="2015-04-12T03:06:18.628589577Z" level=info msg="debug server listening localhost:5001"
        2015/04/12 03:06:28 http: TLS handshake error from 172.17.42.1:44261: remote error: unknown certificate authority

    Watch the messages at startup. You should see that `tls` is running.

3. Use `curl` to verify that you can connect over `https`.

        $ curl -v https://localhost:5000
        * Rebuilt URL to: https://localhost:5000/
        * Hostname was NOT found in DNS cache
        *   Trying 127.0.0.1...
        * Connected to localhost (127.0.0.1) port 5000 (#0)
        * successfully set certificate verify locations:
        *   CAfile: none
            CApath: /etc/ssl/certs
        * SSLv3, TLS handshake, Client hello (1):
        * SSLv3, TLS handshake, Server hello (2):
        * SSLv3, TLS handshake, CERT (11):
        * SSLv3, TLS alert, Server hello (2):
        * SSL certificate problem: self signed certificate
        * Closing connection 0
        curl: (60) SSL certificate problem: self signed certificate
        More details here: http://curl.haxx.se/docs/sslcerts.html

## Configure Nginx with a v1 and v2 registry

This section describes how to use `docker-compose` to run a combined version
1 and version 2.0 registry behind an `nginx` proxy. The combined registry is
accessed at `localhost:5000`. If a `docker` client has a version less than
1.6, Nginx routes its requests to the 1.0 registry. Requests from newer
clients route to the 2.0 registry.

This procedure uses the same `distribution` directory you created in the last
procedure. The directory includes an example `compose` configuration.

### Install Docker Compose

1. Open a new terminal on the host with your `distribution` directory.

2. Get the `docker-compose` binary.

        $ sudo wget https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` -O /usr/local/bin/docker-compose

    This command installs the binary in the `/usr/local/bin` directory.

3. Add executable permissions to the binary.

        $ sudo chmod +x /usr/local/bin/docker-compose

### Do some housekeeping

1. Remove any previous images.

        $ docker rmi -f $(docker images -q -a )

    This step is a housekeeping step. It prevents you from mistakenly picking
    up an old image as you work through this example.

2. Edit the `distribution/cmd/registry/config.yml` file and remove the `tls`
   block.

    If you worked through the previous example, you'll have a `tls` block.

3. Save any changes and close the file.

### Configure SSL
1. Change to the `distribution/contrib/compose/nginx` directory.

    This directory contains configuration files for Nginx and both registries.

2. Use `openssl` to generate some self-signed certificates.

        $ openssl req \
                 -newkey rsa:2048 -nodes -keyout domain.key \
                 -x509 -days 365 -out domain.crt

    This command prompts you for basic information it needs to create the
    certificates.

3. Edit the `Dockerfile` and add the following lines.

        COPY domain.crt /etc/nginx/domain.crt
        COPY domain.key /etc/nginx/domain.key

    When you are done, the file looks like the following.

        FROM nginx:1.7

        COPY nginx.conf /etc/nginx/nginx.conf
        COPY registry.conf /etc/nginx/conf.d/registry.conf
        COPY docker-registry.conf /etc/nginx/docker-registry.conf
        COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf
        COPY domain.crt /etc/nginx/domain.crt
        COPY domain.key /etc/nginx/domain.key

4. Save and close the `Dockerfile` file.

5. Edit the `registry.conf` file and add the following configuration.

        ssl on;
        ssl_certificate /etc/nginx/domain.crt;
        ssl_certificate_key /etc/nginx/domain.key;

    This is an `nginx` configuration file.

6. Save and close the `registry.conf` file.

### Build and run

1. Go up to the `distribution/contrib/compose` directory.

    This directory includes a single `docker-compose.yml` configuration.

        nginx:
            build: "nginx"
            ports:
                - "5000:5000"
            links:
                - registryv1:registryv1
                - registryv2:registryv2
        registryv1:
            image: registry
            ports:
                - "5000"
        registryv2:
            build: "../../"
            ports:
                - "5000"

    This configuration builds a new `nginx` image as specified by the
    `nginx/Dockerfile` file. The 1.0 registry comes from Docker's official
    public image. Finally, the registry 2.0 image is built from the
    `distribution/Dockerfile` you've used previously.

2. Get a registry 1.0 image.

        $ docker pull registry:0.9.1

    The Compose configuration looks for this image locally. If you don't do
    this step, later steps can fail.

3. Build the `nginx` and registry 2.0 images.

        $ docker-compose build
        registryv1 uses an image, skipping
        Building registryv2...
        Step 0 : FROM golang:1.4

        ...

        Removing intermediate container 9f5f5068c3f3
        Step 4 : COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf
         ---> 74acc70fa106
        Removing intermediate container edb84c2b40cb
        Successfully built 74acc70fa106

    The command outputs its progress until it completes.

4. Start your configuration with compose.

        $ docker-compose up
        Recreating compose_registryv1_1...
        Recreating compose_registryv2_1...
        Recreating compose_nginx_1...
        Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1
        ...

5. In another terminal, display the running configuration.

        $ docker ps
        CONTAINER ID        IMAGE                       COMMAND                CREATED             STATUS              PORTS                                     NAMES
        a81ad2557702        compose_nginx:latest        "nginx -g 'daemon of   8 minutes ago       Up 8 minutes        80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp   compose_nginx_1
        0618437450dd        compose_registryv2:latest   "registry cmd/regist   8 minutes ago       Up 8 minutes        0.0.0.0:32777->5000/tcp                   compose_registryv2_1
        aa82b1ed8e61        registry:latest             "docker-registry"      8 minutes ago       Up 8 minutes        0.0.0.0:32776->5000/tcp                   compose_registryv1_1

### Explore a bit

1. Check for TLS on your `nginx` server.

        $ curl -v https://localhost:5000
        * Rebuilt URL to: https://localhost:5000/
        * Hostname was NOT found in DNS cache
        *   Trying 127.0.0.1...
        * Connected to localhost (127.0.0.1) port 5000 (#0)
        * successfully set certificate verify locations:
        *   CAfile: none
            CApath: /etc/ssl/certs
        * SSLv3, TLS handshake, Client hello (1):
        * SSLv3, TLS handshake, Server hello (2):
        * SSLv3, TLS handshake, CERT (11):
        * SSLv3, TLS alert, Server hello (2):
        * SSL certificate problem: self signed certificate
        * Closing connection 0
        curl: (60) SSL certificate problem: self signed certificate
        More details here: http://curl.haxx.se/docs/sslcerts.html

2. Tag the `v1` registry image.

        $ docker tag registry:latest localhost:5000/registry_one:latest

3. Push it to the localhost.

        $ docker push localhost:5000/registry_one:latest

    If you are using the 1.6 Docker client, this pushes the image to the `v2`
    registry.

4. Use `curl` to list the image in the registry.

        $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list
        * Hostname was NOT found in DNS cache
        *   Trying 127.0.0.1...
        * Connected to localhost (127.0.0.1) port 32777 (#0)
        > GET /v2/registry1/tags/list HTTP/1.1
        > User-Agent: curl/7.36.0
        > Host: localhost:32777
        > Accept: */*
        >
        < HTTP/1.1 200 OK
        < Content-Type: application/json; charset=utf-8
        < Docker-Distribution-Api-Version: registry/2.0
        < Date: Tue, 14 Apr 2015 22:34:13 GMT
        < Content-Length: 39
        <
        {"name":"registry1","tags":["latest"]}
        * Connection #0 to host localhost left intact

    This example refers to the specific port assigned to the 2.0 registry.
    You saw this port earlier, when you used `docker ps` to show your running
    containers.


# Project

## Contents
- [Docker Registry Service 2.0](overview.md)
- [Architecture](architecture.md)
- [Build the development environment](building.md)
- [Configure a registry](configuration.md)
- [Deploying a registry service](deploying.md)
- [Microsoft Azure storage driver](storage-drivers/azure.md)
- [Filesystem storage driver](storage-drivers/filesystem.md)
- [In-memory storage driver](storage-drivers/inmemory.md)
- [S3 storage driver](storage-drivers/s3.md)
- [Notifications](notifications.md)
- [Docker Registry HTTP API V2](spec/api.md)
- [Docker Registry v2 authentication via central service](spec/auth/token.md)
- [Docker Distribution JSON Canonicalization](spec/json.md)
- [Docker-Registry Storage Driver](storagedrivers.md)


# Glossary

This page contains distribution related terms. For a complete Docker glossary,
see the [glossary in the full documentation set](http://docs.docker.com/reference/glossary/).

**Blob**

The primary unit of registry storage. A string of bytes identified by
content-address, known as a _digest_.

**Image**

An image is a collection of content from which a docker container can be
created.

**Layer**

A tar file representing the partial content of a filesystem. Several layers
can be "stacked" to make up the root filesystem.

**Manifest**

Describes a collection of layers that make up an image.

**Registry**

A registry is a service which serves repositories.

**Repository**

A repository is a collection of docker images, made up of manifests, tags
and layers. The base unit of these components is the blob.

**Tag**

A tag provides a common name to an image.

**Namespace**

A namespace is a collection of repositories with a common name prefix. The
namespace with an empty common prefix is considered the Global Namespace.

**Scope**

A common repository name prefix.

[Diagram (Gliffy source omitted): the registry notifications architecture. A
"Registry instance" box contains a request flowing through a repository to a
handler and its Listener; events then pass to a Broadcaster, which fans out to
Remote Endpoint_1 through Remote Endpoint_N.]

    http

    ","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":83,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":81,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":76,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":78,"magnitude":1},{"id":79,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":77,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":76,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":76,"magnitude":1},{"id":79,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":78,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

    retry

    ","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":79,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":77,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":72,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":33,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":74,"magnitude":1},{"id":75,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":73,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":72,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":72,"magnitude":1},{"id":75,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":74,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

    queue

    ","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":75,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":73,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":68,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":71,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

    Endpoint_1

    ","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":598.0,"y":379.5,"rotation":0.0,"id":102,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":87,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":88,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":90,"magnitude":1},{"id":91,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":89,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":88,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":88,"magnitude":1},{"id":91,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

    http

    ","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":91,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":89,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":92,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":94,"magnitude":1},{"id":95,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":93,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":92,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":92,"magnitude":1},{"id":95,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":94,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

    retry

    ","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":95,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":93,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":98,"magnitude":1},{"id":99,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":97,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":96,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":96,"magnitude":1},{"id":99,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":98,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

    queue

    ","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

    Endpoint_N

    ","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} -blob -mark :63 -data 37836 -�PNG - - IHDR�H_Z��sRGB���@IDATx� �U�����Ai�MnѠAѣL��y!=%zB���!�#�3� "D���QҠ��gu���;g�����{ϰ�9{��]u�Y{�����߽��v��e��H�H�H �Jf<� � � ���(�!� � ��GP�=R� � � P��H�H�H�#(��f�H�H�(�l$@$@$�e�T�A$@$@e�  ���G*��   �2� � � x�E�#�l� � �E�m�H�H�x�H�H���r�t��E֭[��ۺu��t�%��\ X�L�4IN<�DuC��駟�|P�~�����sϩa����8�ꪫT��@ǎ�0�[��0��{L��ٳ'|� q[=o=o��V�W�]�~}-�ɿ5b�0aB���k:�Ъ_��3��7.��(�c�9&��_^}�U���Z��~�I�QF� 8P�_|�E��رc���ꫯ4^pF��i�ͺ)�cѢE�H�H��!�W� ���#�T!��?��?�X�h��� ���V/R��j��B��PA�!�-9D�׸�-[����7�#N2 �Š� s3`���&�(���G���\��bh��5]#�k���V��2��IyDΘ�#G��gDy����<[����Çk�a7�������1cF7>p3��֭��jN�H�H��YƋ��޽{˵�^+6l�EM , ��������"X�c����Y�����7���` ��X� �yjK�u~XO��`���'���5kʩ��*W^y���׬Y�>-Q�o�Ac(n0��Z���`H��Xȅa�� ��۷o/%Kgʗ/b��ڭ �nt� ��֭���`dV����ˇ~(_��̝;W>��S���%�\�q� �|����z��q���0�ڵk�)P�`�c��t?~�.��"����5B�! H��gD9�(A�`�bs�����K�5�ҫW/�۵nR��V�hذ��+S��~�?F�ͱ�FZVoS�w!@�]v� -�ճ4^����1���}��5 ]d㈅l� n�޸.貆�_�~���2��n�w�������Ήc�:�� D�0�h�5��Y8묳�-� -��� �R�Ja����y  �� xJ�M/ũV��XC�*�X؄]k8[z�� -����.޲抵�k �job AAO Ƣ ��J�o�Q?f�D i�&�p�M�5īb��g赚�/ܓ1�'O�����:j���6mT�!�0e˖�o�5�g�l�EY=G�A���G��2@�� �1:�� c^y�ټy�9�7 � �� <��:�|�-�~��:̌��fFo+��R=��>[��T�^���v/^�C�ֆxCl1��^��@̱��Z4%�W��/�!��`�+���mFǁ�b2��W�����<��ec�e�7�o Ac��I�&��d3���x���_��̙3u���@����л�Lolp#�am�ܠ���Оy����F'p�a�����n$@$@��LO�3� ����G��bh�Z5,�~xW. �Z �th;uY �t���^zI�|1��G��<(z�0���?s��j�pr2��A|�v�*� Ҟ9��-H��"�s�9'ܻGf�`n����m���N��~X{2��i�w�y:���| x���0�����5�\��eE�~��Î^0����q��C�1�Ǭp��ys|`��嚞� � ��+JXمt%�#��+�n/!l b=��„c��w}��P3�E���!X�s�X�A-�@�����~� ��6nR�Ӈ�in2�v��&��1����-V>I�~I�H��#���r|�-�kg+�k����cB�]��,��Ɯta��]���āU��9�1B�G�X�e�%� �5j������H�����-���v  gxzN9�"b�wԨQ:'�2�{ы�<����t0��a����i1.  
[binary PNG data, 37836 bytes — a rendering of the same notifications diagram. Text recovered from the image bytes: Registry instance, Broadcaster, request, repository, handler, Listener, Endpoint_1 (queue, retry, http), Endpoint_N (queue, retry, http), Remote Endpoint_1, Remote Endpoint_N.]
blob
mark :65
data 16364
[machine-generated Gliffy diagram JSON ("application/gliffy+json") elided — apparently the registry architecture overview. Text labels recovered from this span: Registry; Docker Registry Service API V2; Authentication & Authorization; Notifications; Repositories (image_1 … image_n); Logging; Reporting; Storage.]
blob
mark :66
data 24298
[binary PNG data, 24298 bytes — a rendering of the registry architecture diagram.]
��x�r��w˻�k�ot��ӓ%y��O>�Ā��?�lT4���'�'[��{؉4o�q����)�����U�,_��D�������ʕ+�t~ꩧU j���4j�;H�Æ 3�My$n�lP��c��ɷ�{5��=z3Jt�� oO ��,3�lWҔE���������*U�j�D�E+j�Y:u�d~#R=�M3f�1��P������\��\�����������W�^%����o߾�����s�9) >܀|���B��lDԭI9�� -�Ꮑ -����U�V�r�ʙ:��A�����r@~�}�ݠ]�<Ա�&�H�J���}͚5�{$��Dۜ*P����k)�y�V�Pd����UX��̙#���zD#acy�~�Wj߾�<��s�:,l��ѤPD� -����Q� Y@ �ױ��o�uc�R�• ^M��@t8� ��PJ���w(�{g�V�P(���茅R�P(�s@A�;K�B�r@9x�s�=7:= @ɢE�L.. ��V�Z%���Ѧ�(����F��H^A>���"��r@9'D��ツ{�(4z�#�I9�H��=��3F*�%�o5��@��ޜH�B���f?)��r �P�Q�1� -!ڦT ]�_(�+�+��w�k�c�yy�,t�b|9� � � ���=����c�е��倂����||׿�<P�W�W���B�.Ɨ -� -� -��]���p@A^A^A> ]�_(�+�+��w�k�c�yy�,t�b|9� � � ���=����c�е��倂����||׿�<P�W�W���B�.Ɨ -� -� -��]���p@A^A^A> ]�_(�+�+��w�k�c�yy�,t�b|9� a���?d��Ų����;�\2K���o���d�����7��i��� -�N��ʁhs@A>� ��O?��,}���Ag�}��}��R�bEY�z�T�\YV�X![m��� S�wb�VD� -��Q�F���?/<��|��wҾ}{#�?��S�$���{ˌ3d�-�t�d -�N���ʁ�s@A>� ߧO9餓�Y�ff���/ҢE 9��#eРA8O�2EA>�%֯_?����+��~�Ɉ#�aÆ����?��;����寿���'�)��"�˗\�C=$˖-�+��2p�O(�G�x� #�#�o��ffv~���R�V-�׿�%�>��,X�@A>�u۩S'�s�����c��-��"K�,�*U�dP[�Yٰ����ߴ{�'�ܹs�\�r�+�����~�=��#p�O(�G���駟��^�H�{l��6��u��ɸG�뭨(Ћ�|��K�ƍKh�����0���� �ɟ{�W^yE&M�$�V���~X����O  �*U�o������� ��>iҤ�����Kz��m��N��s�5����o�z�2k�,���ٳ�k׮%�� �f��P��9ң�d�MJf����;�8s�|���m���I#�"蠃Z����Bo�n����7@�`�x�2g����e�ʕ��}��F-��k��fT*�#����ҥ�Q����̙����C����x�뮦�֭[�UW]e@�l��:�0���^.��"�Q0G�x&O�l�_�t��(R�O�5Ň -�y��뮻�H|,�M7�4'��4SȠ��\$z�;�c=��C��v�m'��J$k� �����|��w�$�V��d'L�`�I8q�V|�K9�� ���K�q���ԨQ��E'ȳXu ����O?m�@R�[��<��s� ��a �s1����ڵk�ڵk�#�����Z~�����o��F��3�P��(ȣ@jtH�P#:>� h���AR(��v[����\��N�̆�H�ԇ� p�?�tr�/ .4��} ]�>��#����pIn���81��ٳ���جI�͆�)͂<��?���_��H7m�Ԩ�8!aMu�m���,ȟp� f#HN���9眔��h��# -�v -�����{�ɽ��k;�!Rr�V��\��f�e�ę -t����۷�ل,P#c�r�g�>|�����w�r~��w7ߡ�G EB�߮];#�#�sWbUjl&�q����#��v�޽��q�r�Ǯ�1'$�7�x��ȣ[��7 �Ѐ�z�V�jN ���3�� U�v���Ż� -�����m�6��\�����*t�a��у'�u�[o�e�Gϫe���_��_�h��i��l"����9YP � �;6T8�' �\ps�{��Px��f�P.U� -��}�y�K8,,��s18nܸ�]���=1����/S?���%�\R"�C+|��`D�'�9� R�w~G_� 7�W_}e@�#yo�����|OB�F�φ3N$�<��c�2*6�O�_~����b��N��_|р8N�*'�oN l��;�S�?�|s��}�W����﭂|�A闣7j{a��S�������@Ł9��H�H������^bJ�B�ozSт� tT�v��&�e6�v��G-�E)�'_l���\&�m�ږ�g�Af��TP��I>�k?�v4oaq@A>� ���ʀt�Yg�K4tɾ-#|�&�͛77ҭ��C�GRN���v���7(��4(�4_�8� Q�G�mu��z��9�7�Aʗ_~���,\OU�M�|��N$H����A�j>�@�P��(��kR�M$yN �&�\.��X��$���Тe���yy3O�����%c*�=��\�a��KM.=]���+�|�8� � ����y�OP�;>���.jq �����+"�ؐs*H� ���|���U9`9� Q���G:8�J��D�MuT��`���Ϛg��"MNQx��^9G(�G�1A�>}z�^ 6���y�D�%J2гp!���w�TuMaJ�������T�$���~_��C!^��'�8V�� - 5�=Q鸾��E�ˢѲʁB |DA���x/DeC�{!�r}|��6jUi��g�X�$���t���7�v5�r�P9� Q�G��'>���W>�o��N�\8벉'�xG��|��&�C9e(�G��z�j&�/_}{�h���ٜP3q�k���y���4��i�7ȃ�ϸ$��|�k�d/��&�o��C� J<"Z:�1���5• � -�fMaף ��#^�u��ʭC���\@(��D��|O�"@/�>�/z-M�3ǡ�ھ}{�I��G<�r�@v�ׂ<�r����h]~9``�� �k� ��7s$�J�� -���w���T�\�UL�6MP/��㏗�-[��HG.IAޅ{�.� �O2��L� ȿ��+Ƣ�Z�j��X��&�?��sI� -���f�v ����{�;�c�-�Y 2��S% G�Ѐ���B�J�p���*�+�{��. 
����R����h����)&���O>1>� -ӄRA>� +�| -� -�^fv.A�K�'�|R�;�����r.-y)z�Ew�l�I�%y@��Νk���������UN���5��r�i�"ုj�d��|DA�N@3D�:�|\4j�Hʕ+����y�e�g��� - -���|�,6v�ɬ�:I�K� &H�:u�?�8.�7�C�ʕ+�f͚%daa��?��Xݸ$z]t�x.|в��˼�/����|�%�TS��{�H�Qy��KW�1�s�و�7��~�ͼ� �^���p�W�����B����|���� ���'�J*��kD��\p��:�m��TI�%�t����'/��7@�w}���.��$���м�p�wasRA^A��A_J����m�Vf͚%�n��������Gl*t�wa3IA�@��L.[_~�ec3���ozW�6����Ŋ&��m�٦(L(}��{�k��9�{�o�=��|�A�ע�~�1I\�j���0`�|��ס�gO5�x���?i�ڵ%Y�y�A;���T�w�|�˺̻�9� q�G�qꩧ -��y�F�oܸ�yM� �l_�ى��� �b≗L|���\� �ծ���va�Q��8�#����oH�@���[ �$�L�.A�.��2�ր[D�������n��&z�]���倂|p��΄_H�'O�A�I߾}��!�^z�,Z�(Ԙ��� e�2�ʦv��& ='�bx�N�ec -�44g9�"X��J����g�}�<2JLS�N��>�y� 6#.�o����h�~~����Q�Kr��e��H.4��Jˆ��9�z�y"A�W�\��&�KL�7!�v�a�Pr��ըQÉlz]���ӰŶ�˜��4���<�5G}��ks���2���n���n2q_]� h�j� �{��&�I�6m̦�y��OpM.��,8yב�gy�9�c -�y��C�F�m�z������waJ�eMB����^�=���˜U���F�$OP�Ϝ�.Ui�E�wM�uMb�qP��<�4Kc(�p9���MH�q����3_:Z" (�g��؁�}A����K�-�@����/��Qs5������˨�S�_�*�g�Ћ���|�#;��E�ƍ3�RSb�������hڴi&� ��]}ǧ� �. �e���2�Z6|��9_��J'�AE�z���'N��v��rM.k� �5�h\�����O�b_}jCM�4�u�S�w��e��L<�s�D�4op�s����?���r�y��g�}&�;w��^zɐ��C��� - -� !G"C���K��/������K�V�J��f���FA�e�v�by��r|h���N��w�}g����>j^�$��r/ -A8�8�'�|���'�X�$&���s�d�Z����,Y�,�z����J��)�{�5�ˎ;� {ԯ�[�*#�g�%��G �k���Ap�GBU���d�Z�d�]�I�� K���ّbyz�FzG-K�7(�y衇�3�`�~�7;�j6�t�L����[��.���OL����\� �. �E�p�9S^�}u�k2}�;���zHK9�y��e��8�wdʔ7��o� L�~�6�֭Z�v�� -\�5�˜smۖυ�����Ǐ7k���͚5�����}��'��pI.k��v#mB��k�w6����O�8� cs^�\9>���@�M~��W��ȫ��j._y�EB��xG_��f� z]���s�9����y����B�t�x����ϡL��W���^�OxR.Z�u���-G�&��t�s��SV�\�<��Q����_F7�.�w1`T� -�HwYk ��>�䲃W�H^잧�~�3C};z�Z�KH�rx[�R%� �B�˂s�x.4e�c�F��Y��f/5/��<��4q�n�LH� ->��Cy��Q%ƒKM�-g�y�7a�4Z\�K��������Tz��62��lJ>�g���V� o ���*��U��ꘊ���Ϝ9�ܸ��&庻ۺ\�e��L<��,��>$s���5p�3N?M�m�4p�\g|���#�6���K�s�T �r�s���{.@��{={n@w�)\pAP�R�sYk ��ܹs�|�wp�7;/^}�P� @�� � ꨗ4h�l��B�˂s�x.4�[-�/o�=-]��~��w/�W�nVe}��er��a�D�;��߾r�i�H|��2�|���x��ꫯ^ς��.k� A5MÆ M� �_�x���TQ�؜��W�Z�i.����\&� �e1k֬���ǝ�YV�Z�n+��_�����[��&�}�y���;��r���3.��˜ R�<�AޚP�z��|R%��e�$�㠌���_�Y�����Ķ0�<餓̽���� �. �e��\�"��…w.��G)|�J�O�"���29���k�_�<7S��2��$�o��M"� 2ĘLZc -���ypеkW/A��yڍ�C5�5��BXȉOg9����|��;?b ���9��o -�)W��˜���\�<��Q''��g1j6��R۵k'[l�����惹R�ʕ���E|K��K"�[�<7����3�4h�l��J,|Ӑ[]u�T}�e�]v�jժʟ�%+�I�̙���s��'I�f~��nTD�̝+>�H�"�5TL���1��o��.P����J\}U���d*V��<��3JW��D��T��M����c�i&.��6q4�߿�@>�N}����K^�f�H��lJ. 
�e�М�W��]7��@llذ���Y�b��}�f�o��{�_zYV���?��R����sztK����>��̚�~c��QG!o��,^�T*V�d^�n��2g�\���vI��d�w �7h&�9��t�r!ɣ��뮻�]�M#F�0Z�;C��V� o�����4ib��5m�ԙ����R1� i����݁�xM�$�����Y2���i36o~���f�6�Y��?�뮻Aa��yҲe �V��y5���_ˬk�M7����=��8H��XQf�?G�i-�������k�4_�JY�ɧ�ۚ5Һu+������HK� �w0/}�b���[�<��D�d IDAT�c?g�#��09u���ȳc^s�5r���^�+⢋.2/H��/q@ٌ�|�I�"��$��[f:���wٔ\���s�9����L~���|���,yc�T��ye9��%�-^,w�u��q��������] \{M�*�WBٷ���m���N;ʄ�OI��ݤa��Jʰ~��WҾm[����u5;�@9����_�\�/|K�`�ѿ;¦ɓ'6��KrYk��iu� ��N�#�T�^�07lwi �={������Jw������o�2��� `�,8���Bs*���s�������^Z�>�贇�r�����|�ݷ�t�}d��qiǥ�%}d���/mC�̀�0p��i��x��h�b�T����� �_x�%rZ�N2s�l�B����{�y��H�f&\�\&픕7 �/-.^�z��.�~����y�����S�N&�~��'�۷7�>��K�@\�������(��_�׮][Zz���O�81CW}�~���cƦ�B���xs�WZ��k���!i�;夎�p�"�Q��Ӿ��9 �x��F��d��2�C�����Ӷ�I�9�I;�y�­ -v�F�2�Q%w�q���fЬ$�'�>}��ǢW�\r���H��9.]�;�y���j_��J.����W^��X����#Ժա�O�s��.{%R>�Acm��3�t���g�ލ�����7�N��&���}���z���ղ�7h.��=�:9���d�w�O>)��]�Օ {�J�f&\�\&���q[�$饗�4��#�H�.]�� -]�Zi|���&Y'o;����\@3����:v�h��7�� c*��kX��Gzl����̴U޺��\�R>��S9�G�r/�����t��2o����G���s���uci ���OrͿ���nݺ��~M͆���Ǵ7e��.�kT7�?^y�մu��}���K���$C����`Fa*m�eŸTyc� �x�J�?|�O�G��@�"���+��R��w5�rݔ\������g�yV^��Z�᯽�v����̙ƁYժU� %�2-Zl�L�>=m=�+o.��+p�-�Xs������{��W�X��J�駟�����+���k~KK3�ߡ����4��q�E}����1�ƾ�����=�H9���^�9����|���­�SO=e�4�l����e��ƒ�J�D�a ��6��F��|윾(q�G���u„ R�N��C��\�uYp.υ�T����ӁL)�E̾M�1�����悒�@��4��@CQ�B9z�-e�eS���|�Q�FI^��e˖�-��8gQ�!�:�<�KN�������G��;�$�}��y��c�_~Y%�͙c�I��R���!H��y\�\�F�d��B��q���./�7~GM�*�����y� <�@cϫW&�!�"g�u��M�°a� ��$������s�uN�/t]�����c�i�q�]�K��ı�m�]d�����x����I{ԯ/={�$��<��w�|���u��q��b�M�o� .[y��� {K͚5� ��e��?H_ ���X̺�U�V�5�1�]��\�ZA�<0��ȑ#e��63�;�J0�K�?��O>Ȥs���. �e��\��u�.�Wv��dp�����r�A�wk��g�{A~�`3 ҧTyx8����f[��r.s�1�@�;�h�� -%��pD�� F�SN�s�=�<:@��U�0�au�ء'�?3h�d�eJC&�]�uYpQ�W^}U��%��o�>��������'�c�f2ƉyO;� �z���T�J�ʲ��K���w�s������&w���.��2s���5')�W���Z+(I���'�X����^\��G�λ��{�_A>�D�� >�}�4P���Ty't�Kݺ�N�eZ|���h���������Nu�+�2����wߒ<�\�-�^0f�X�aލ���;�$t`����`�v�f͚gdL��G�mV�$l7 . .� �\��֡�ڔ)Ro��D�W�x�|��F�������"!�'w��u�E�Ֆ[�Vի��[�q?�U�*��ϓCZ�̩o s�sw���AU쩧�j�Y �c7�� *|l=��N�������c('N,� �9����><���q�ԩF�gR�z��\�T�"H����[}Y��o��W_��֙�����ʠ�FJ=�n���}��W���*H��ե|�rƝu�M*�xk׭�_V��_�����Ν ����eΥ�A��s���`��͍E ��X���۷$�wP��󹬵��� ���[oɑG��q2\@3��/������=�z��i,�.���lǽ�� �. 
�e����0�o�M�!�gL��~��D�"��h���K���:0Ғ{��s);}� �6c�q��۴���A$q� LGGi��̹l�L.� ����{���� -zyN�={�t~��� -�N@� M�B0��50$��|�rs�c��ߎ��B�˂s�x.4����\�/�s�<����o�m�@�p=��c�i7��\�ZA��%��V�� zJ���%����\&� �.c�e��˜�E�o�_�v�q:7n�8�n����@-{�=�8?�rYk ��KW������X�8�'$`6��ȅ^��2�\h�fL�Lt8�2�|��7�s���Ǧu��%d�~x޼y�5Ӂ�&�#�x..�R�ɸS=�3��Gb��L�(4r��e�)�g2�4��˜��E� ]���T�GQp�����E~�x���e��$��C�� �l�ׯ�^?x��j�*�Ei6�4�jW6b �p�@�9��B�˂s�x�Ќ�e˖�@+QN�rd|���U(}���9�}���S/� {,��չsg�W�=��Z+H����xQ�3b�) -�6���yUr"x�+л���\&^&4����{���y�� ��o��<��뮻���3|�>�d�˜�E�O��k����F��.qYk ��� @��~�͢&��P�I��*�7�X/B��p��Ѕ^��2�2������T�(������I��pV��W]u�yq������Zpu�N�2�\�>��d��o���q�����oy�gL;$~# *�Ғ��b���e�ۊ s��}��� I�娏%و#�bŊr�q��<��$��O?ɀ ��k��Ƙ�B�u�E�1��Cb�^���L�H�*�����$�fS�THq�s�𴬼�@~���f�Ч�o�Yƌ�A����B]�ZA���� 5�R*��)qϕ[�L3�>��2�2����5��wxax뭷ʐ!C��a^� - +��cu�w�.$;y�ر����Y+��X�j�����_���5���[���Z)�׍������~xͱ�;&h�X���u�m۶& -�M7�d��>�}􏍐�#�{l��a@)�>�i�A�� AK�����˜��3>l���r� J�GY6�Ĥ�|��c(&-���M\Ȳ� ���;���ѤI��]u�bF����lAI�0��a�&�-�5����q%���}���.]j��/���<��&�2���!� 0GRŮ�86�����%Kg6 �X�� -@M=lhlF�?���{�_7`����8��28��郓 /��7k`G bcÜ/�>�>��J��x��T,&��Ĉ\6;N6�':� LR�O�rR��Tg��؟a*��c;R�K�D� �����U�|f ���-|c���� �pLTI0FH�9�N~�aHlXb�[��Î�-dN�yN\�q"���{o�1c���� ����C^b(���as�"|I���i�U�d�GN ���� �TCl�V���SL O�:N��KK�� :3�f��K��TvN{�A f&^c�"��� 9�N>sI�ȪV���fd�]"@O��l(ԏ� ���4�]1�X�po�flj�i�0��HK��d����ڔ2{����Ly౩b}��**,SJ;�]{��������Ӂ�5W�}H�� ,�П㥕�*T4�Z�j�w!�o60/EE��!Z��<&|�>��vb}#`` -��v��fm���1!c�1��#F��x�Ͱ���� $+7�B�{!���r����*�o��M���1�c��"��;���ÅY� �JY�Yr��20�%�������Hl�8���5��`n�]�oЄb<&4v��o��\� M�H��M��>���y����$��Lhм�p��@����>��S�o��<��ͻ�؁< �q��]�!輸D6lX��e &�#�H��t��Ll�����m_�]�p���'�k�,�A�~�*ɇ:/v o��Q���zp���� 6wH����2\ʠrr��� �'��ڛSFA>(�4(V��]� ���X;a�K�(��/4�,4z3�����GA>S��;����7>���O�#������A �ǗkBM(3�A�˲��6���� �=UӾ���0^��>�$ ��f�����A�,���;|��$;R9��$]Z��\ҋ��f�I��i������$�k <~&���;'��H�O>����ޝW�;��� -���A �D�+�l�oJ ����f�*� @w��Ce��7���,U�"���T�U3� 勞B�ErKT��'����;֞}$� �&$z�M��+&I>_�'eb6I8R{��2�b'�[fH�8b7e��'��r��4��A�ĝ�:�[�+�.���2�I��li/f���9�b�����Zɘ��|6t��xW�|?S� �k5 �&����b ��Q}p���X�]�v�r����CN��y,�C""��� �հ�I�IqBI -�b@1ܙ<0 5�v?�d�N��s�5普�����Ʃ��!_�6�#�|�51���r'�;�G��^�`H�j�Ċ���\�b/K fTM�|��WG��N� (H9�i�6����=�ǩ���,�|��ф���4��$ߠ�����n��D�¼�y���VTO.� R^XS<6e����q����@�KV��b*Ꮯ���.)v 8 4h���g��6� ��Y�p��m;*���蹐ż�%�ޔ\hɤ,�.&o�������š�f� ��UV^�:y_t�VO�@F�p�/��� X��4-���&C�� ٚzں|��ʿL�+�+�g:�_A^$���T�L )�����Q��$ߠi�0p���:ٙ��Ş�5|�oz]x�R6�ۇ5Q� /���^����˸F���|�A���X�����c�-��0۶m���{q���%4X����d�����QT�a"��2��M!��g��(�G�1�L�ѹsg@�O�>r�=��$::�a\�/�䤁{��\�r��w��|*�����L��G^����1��,c�WJ.h��Ά�����F������A!ש Q�G��M*/G�-[f⤒0]z��'�u��� 4����Ә1cJ‚%Ӈ_|�չ$_��А벥��E·�o�C.���7^2��η�^��z� -�~��<�GyQF4t^�.Y�D&M�� ���h�$���� ��&9�8-�xe��u�M���OvK-R�u�[�;_�X2��֚+(��3������ۛ;�|������t={�,���P�`U,��s� ˪U�b��L|Ӓkw������怂|DA�^lY��!��gľ@�Fo'� /��H���r� ����s�|+��Rm6Ȇ�cC��='�İx~)��� |DA�|�.]J�x��r6*��J K ���/��bsk���=�|mJ�thy�@�p@A>� ���l��&)�*�ƻz� ����"+��������F���P�ҩȖ -��l4�r�A��;v4�g��Z�d::�_9��yy3�|�:��x0CD(��N�}�j ʁl8� � ����e3!ӕ�}�Hמ��(t(�G� ���SO�����]&�o���0�w��7iҤ䅫��]FI�*�瀂|�A��� -*d?ҥ�� �/.5�����V�H������'��/����JH���t��K}פ]w�A9�7(�G�yAJ�����E�x@��O�O>�D�>�$���ƞ��Ą��8)� o�5�{�L�0A�;�<��q�ԩ���x���>���^�/��� -�eK� -��!v ˸x�q�K:餓dذa&�k�h���;�x�ڵ%���_~���rA� =�P��S�3|�g��/^eJ�o��w}��)u �<�z���9�^�W�v�T�V-�y�^~ߠ��_ -��وp�$|����OA�i��+� -򥵪 /�4���kvs3U��I�|���x�FES�R%���W�^�� /����!v���G�)=�E�)�ƨՙ-ȗ���E�_�Г �Ʉ{����y\t��G��yg�q� �{�1c�Ȏ;���Qߠ�l�2iժ� xb�8�P�oz}�պ|�����ʷ t)��Rfyb�K\p����=z�Ȍke���o���<���R�|y���L��:�x�̷�&[��c#X�/����UdQ�$��&k�mP06 
hb�Hx?��}��g�yu�b�^9J���V�XQO�ū����NA�%פ �y�Gd%��^dF�Z�モ9s��QGU2�D��ꪫ�v���B���}M� лM��l%q�I�Y]�a���o_"��z뭂{�d;�LGY���DZ؀|�$Ვ ���}��2�.�{������qk�裏�,\$d�~����&�%͠m�)�x���\����b��?���7(��/<�U������3�vk���NT$��-[�}��'{����� -�A���o��|ժU7(r�Yg��1T.KPD-��kr7"����� ��gc#饗�)��"����Y���[��]å� -��b �].�q�@�ۻ�� �)�b��*r[��|���{���OA�@?��c9���eʔ)���>�i͚5�goP�ti��l6�-��ҥ*�f�4{��2���bq"<�¾�+�� �5�M��g��S�/��L.3?��#c7��{���4]�„�W�7�|s Hg[_Pzs�X��9�r�A�w}a�ǥ}���w} -���e�]f�Q�F����ᅲ�o�]z���2?M٠��!���{�fd���Ӆ�R%�њK -Jo.� �a�UuM��{���OA�@`Ē��ޓڵk���1I����d�<� ����5�U&�R.K�"�5+�玿����� ������/ˍ7޸�L�E����;_�T2vi�JL@ ]h/d��/(��K�>}d�]v1��r�pD�뮻��d���t�V��UB �HN�_�1�Uv� t��C N��ʜ4�k�Y�|ק _ Ϣ�o<���n����E�-]�TjԨ�4y��f���~a�̙2t�P�8q�yf� ���5ݺu36��y��������>S>A��pxJ%�<�F�_|!]�t����Z0��u���.,�|�a%���}ק _ ���޽�<���ҦMY�j�&�%�\�^|�<���~�i�ٳ����#�,�J��ڕ����k�����W�� �»b��0ltX�q����Ý -VR�s�y睍;iN 7�t��X�B���/{キ��å5f���e�S�?���K� ���I7�R9<�ˉ^:��w���Ѳ|�rs��{ -����>��J��X�1w�u��{�N�:�*90�y3��O��.��A�L��s��?���{�ҵkW9�# �1�n<��5^�"��y��W����ᙂ|���;n 6�xct{�g��&e�����1N��X@,`��ʧ��\�<�8@9��� R�(�V�90>cǎ5���!� SX�9�o�� �x�]w��g@�f�ԉ,�p",S�'oi)� �*�g�)y��eޤI��fv衇������� I9����Ϧ��y�O��{�%��WތL�+���M� -��ƈ�����N0�q���+�8Y,X��3�/~K�:S�O� ��;�5\db�DBd(&1�Rq&�q��B���Ĥ���p��>��W� -�<�C�����2�����ʀ���Et�֭[�M�Kt�z!SGn�^FMy�Y�a�&��T�}l�v3�?lB٤�t�~�v� ���0$@G�p�����/�q�]�n]�o$tx�&GJ�)�1b�1F !�j�Sc�[�g�rz�5H� ̦��/�� ��G84CU ?.\h6'Nf�L�&�n ���)`�B����$���]&LP�L���t�Dɑ -0�@�����Z�̐�y�f7\��q�Ʋr�Js�lM\��ԛo�i݂}��D�G�G�lБ�cǎ��T�}���C*64�5�G���O?��� 0�"�79q�AI?qU O�Α��n�U�������8_}�U��"�6o"�s�BmV�V-S��T��Δ���Y�D��>$r�Z�Q���&��u�%�<��/ o绯����QI>���c�=f�鸈KN�p�|L ����B��Б� ]3� ��^_� -���X�D%��S&z�O�Νe�ԩF%���X�$ؠ�N�7 �VHw��6m�)�$� �m"p^|��)�G�9����� ��咕�5 �|���z��Q�� u ��e<�)0O��?�l�XuMi o�@R�W:�|й�k>m/1��|DA�J ��ىR��$ -��L��2�X���A�����L~�ТE c"��E"��V.YI�Ӑ�9����?��#]t���L��Ҩ!@1Y���,Uy,Q\�SK6�D�-M\v�_�`�D5c��&h��G'�<�\�s�AW����� ���FxY���ƥ?4p�d���˦Rȳ�J/ |$_�1Z�# �8�B�IN���K6�O����m����7_�������J���J9�1v��)����;����tDG���Q@�uN��Ӎ��O�F������1( Y�K,n�PJ定�4�tQ�x���*H*m�UNA�)+O��c*�# -�O<�Qs`m���D���uu���HU>j ��F�\�!��{Ǘ���q*�t�gܸq�8�/��I���j#� �L �x�Tl��xk�/k���I�< -�A���<��x�1 ^i��,��& ��6g!�<@�‹_$}L����a7��uD6/\]˗6Ba�w#�|T�W�73;]���M��jV�ό_��V�τ[��U��/U��U��AA3Pey���0Uؐ�&�s�VSi��Q%y��U����.���s7\ -��y��|p^�T2TY2�7�E�6� ���U%� ٪ �!��e -������A�͕ �|�S�)N���C��QuMQ�k�.ͧ���sY%��<�(GP�8�Js�����!+�j�@ �$��|QJ�f�fRĀ -� -� -�1X����r@A^A^A>��_{(�+�+��`�k��yy����y 8� � � ���]�/�������1�������| �v1�P�W�W����מǀ -� -� -�1X����r@A^A^A>��_{(�+�+��`�k��yy����y 8� � � ���]�/�������1�������| �v1�P���7mڴ fջ�k�,4z ��Jd�8�n�:����d�ڵ�馛J� -"E��>�9���#�[��{J6r����r����o���寿��ڵkK�J�<լ���y+�ӹ�sѢE���v�-���B9y�Zq����o�C�����ĉ��̸-P��*�& �j�@�s@A��Ǹ�*��w��1 |�;�� -��{�y�8� ��V���`k�������J��{�yL8��?�������c����x� �W�^Lz��T��9�(b`:��G�m��&�F����:JƎ++V,�^k�9� ��A9P$��ʕ+����~�I���Y�t����[��s��N;�$C��#�8�Hz���� \�<ʁ$��X�f��������/�4�I���{��?B�y�ނ����o>HV���E�#��/_.Ç�g�yF�͛'[o��y1��6�`�߰���ʕ��Q�ԩS'㣝L�y�ʁ�X�j��}��2l�0��ڵ����;2 ��-fP��ـkw��5C<�㌓�k������Q�Fvj)�@�8� �'Fk3�́իW�ȑ#�_�~����G M-S؜T����|s<���X�B�$�ҾO̷p�Œ9��t�J|�?��SfϞ-x.�"��τ�'�9��~�2I�w|�''ڣ���怂|q����Y��� �ĹX��Ȳ�D���sÆ ��)���n'o �76�tK�{/���5�i{ �##e����Or -�I6Ҧo)��k��F~�a0`�\|��iy�I� ��T�a�ꔔ��n�����fBgr^ ����7c �up��� -��yZP5�'O6j�t*��$9�MA����_bs �Q�I��c��9a�{����+��:蠬6���P�/�Q̲�ƍ3���$�Tf���"geɎ2���3�{��!��' Na��m֬Y&Uk^�P����B�jڴiF���m/}_&?���믿�C=$/����u�Yұc�tEb�;�?w�\���i���)Uv�eR;� �GfG�)^l>���(mA��=�Rso��/�W��"-{��zr��_�*�X�Ų�e$3���b\�����]�527&٭��^Σ�����,P�oR(ȇ��P[� o/�0�Lw�f��K3���M!ԡͺ�� �D˪�K� �@��� �JA>�aq.� 
���¬��|����c�)鄵�aQ[k��L������>��9��+��2Hۅ���V)���� �-d�qWkIe�e��Iжs�-��y� d��&��|n��ZK��O4�KeZg˺�G�W�&��&�m7�L�d�ß, -��A(d��Z��Z��i� -�HEW����k�lo�� ���NK�8�(Ȼ� �� -��<,�� �� %� 佰ѩy'�na���B�\A>��R� B�@A>�ǮQ���\A>�1��P��F��r���  -�Ca{�U�����P(P����kTA>�!W� B�@A>�ǮQ���\A>�1��P��F��r���  -�������?�"ϳMA>� Oќ�|�c -qy��w�y2a„>?��sҶm[��{�g���we��Q��GXA>�1��8���u���SO�m��F��� - e�����u��|��u��� e��Q��GXA>�1��8�<�>;קO�ԩS �{�1�pٲer��Ǜ�}��馛 �W�REy��С�����ҭ[7�a��oxW�^=9���eʔ)&������O6u���+ҥK}�С��[o�w�!�*U�aÆɥ�^jʌ3��D�$�C� -�y`r�&���P(���ؑ#G�nժ��v�ir��ʞ{�ix��?��ѣ��p� r�W -��I�&�Gu� �w�!�HӦM ����_��g9�����.3�@��˂ L�(�;v���^�Q�۝w�iB�m�l�R�|�MiѢE(s �*���e�� ��BA\@���;��Ȓ�֭+��Y���~��)�aÆ�g҃>h�y@uΈ#�&��/�Ȝ9s��+���.]*�v�r6�*�5����H�֭M8<�8p��<ʕ+'7�p� ���n e�Q�|pYA>|.G��8�<�/��Bv�yg�X��H�g�}� dq�)�H߾} ����5k�T�ZՌ�O_LƄ�l���^���8&��G]A>�1��ܱ�̙3�5O� J,rr�btkV�l���P(P����kTA>�!W� B�@A>�ǮQ���\A>�1��P��F��r���  -�Ca{�U�����P(P����kTA>�!W� B�@A>�ǮQ���\A>�1��P��F��r���  -�Ca{�U�����P(����?l��7n��|p¥I9�ʁ5k���D��C,��v���d�-�~k?�p0'�a���˒ϊ+J �.N�pɬ)(ȇ��ȴʂ�.\hlY �g�"�'�gO�tS���fD�m[Ȃ7�@Nb�$y�F����Gl1:ϙ����7�Ot3��������"�S�N���++q����_(�M2�f�g� -0.-%�sr�qe÷�=�ٓ`&}м�倂|n�[T�[PH�d�p��bZuƂ�%9�o�!'_@]�%� -���v�4I�,)�RD5��7ٖ�/g��'�l�V�.���NK�r� kL��]�bI�r@9�H�y���r��9� _ă�]S(� -�:���@s@A��W��P(�u(�ʁ"怂|�vM9�P(��P(�E��"\�r@9�P��9�P(�� -�E<��5�r@9� �s@9�P1�xp�k��r@A^�r@9�(b(���jה�倂���r@9P�P�/��ծ)��y���r��9� _ă�]S(��!�ryzUIEND�B`� -blob -mark :67 -data 20840 -Storageimage_1image_n. . .RepositoriesRegistryReportingLoggingDocker Registry Service API V2Authentication & AuthorizationNotifications -blob -mark :68 -data 1036 -# Migrating a 1.0 registry to 2.0 - -TODO: This needs to be revised in light of Olivier's work - -A few thoughts here: - -There was no "1.0". There was an implementation of the Registry API V1 but only a version 0.9 of the service was released. -The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool. -One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry. - ------ - -The Docker Registry Service 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process: - -1. Configure and test a 2.0 registry image in a sandbox environment. - -2. Back up up your production image storage. - - Your production image storage should reside on a volume or storage backend. - Make sure you have a backup of its contents. - -3. Stop your existing registry service. - -4. Restart your registry with your tested 2.0 image. 
-blob
-mark :69
-data 611
- ['distribution/overview.md', 'Reference', 'Docker Registry Service 2.0']
- ['distribution/deploying.md', 'Reference', '-- Deploy a registry']
- ['distribution/configuration.md', 'Reference', '-- Configure a registry']
- ['distribution/storagedrivers.md', 'Reference', '-- Storage driver model']
- ['distribution/notifications.md', 'Reference', '-- Work with notifications']
- ['distribution/spec/api.md', 'Reference', '-- Registry Service API v2']
- ['distribution/spec/json.md', 'Reference', '-- JSON format']
- ['distribution/spec/auth/token.md', 'Reference', '-- Authenticate via central service']

-blob
-mark :70
-data 10274
page_title: Work with Notifications
page_description: Explains how to work with registry notifications
page_keywords: registry, service, images, repository

# Notifications

The Registry supports sending webhook notifications in response to events happening within the registry. Notifications are sent in response to manifest pushes and pulls and layer pushes and pulls. These actions are serialized into events. The events are fed into a registry-internal broadcast system, which queues and dispatches them to [_Endpoints_](#endpoints).

![](../images/notifications.png)

## Endpoints

Notifications are sent to _endpoints_ via HTTP requests. Each configured endpoint has isolated queues, retry configuration and HTTP targets within each instance of a registry. When an action happens within the registry, it is converted into an event which is dropped into an in-memory queue. When the event reaches the end of the queue, an HTTP request is made to the endpoint until the request succeeds. The events are sent serially to each endpoint, but order is not guaranteed.

## Configuration

To set up a registry instance to send notifications to endpoints, one must add them to the configuration. A simple example follows:

```yaml
notifications:
  endpoints:
    - name: alistener
      url: https://mylistener.example.com/event
      headers:
        Authorization: [Bearer <token>]
      timeout: 500ms
      threshold: 5
      backoff: 1s
```

The above would configure the registry with an endpoint to send events to "https://mylistener.example.com/event", with the header "Authorization: Bearer <token>". The request would time out after 500 milliseconds. If 5 failures happen consecutively, the registry will back off for 1 second before trying again.

For details on the fields, please see the [configuration documentation](configuration.md#notifications).

A properly configured endpoint should lead to a log message from the registry upon startup:

```
INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer <token>]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry
```

## Events

Events have a well-defined JSON structure and are sent as the body of notification requests. One or more events are sent in a structure called an envelope. Each event has a unique id that can be used to de-duplicate incoming requests, if required. Along with that, an _action_ is provided with a _target_, identifying the object mutated during the event.

The fields available in an event are described in detail in the [godoc](http://godoc.org/github.com/docker/distribution/notifications#Event).

**TODO:** Let's break out the fields here rather than rely on the godoc.

The following is an example of a JSON event, sent in response to the push of a manifest:

```json
{
   "id": "asdf-asdf-asdf-asdf-0",
   "timestamp": "2006-01-02T15:04:05Z",
   "action": "push",
   "target": {
      "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
      "length": 1,
      "digest": "sha256:0123456789abcdef0",
      "repository": "library/test",
      "url": "http://example.com/v2/library/test/manifests/latest"
   },
   "request": {
      "id": "asdfasdf",
      "addr": "client.local",
      "host": "registrycluster.local",
      "method": "PUT",
      "useragent": "test/0.1"
   },
   "actor": {
      "name": "test-actor"
   },
   "source": {
      "addr": "hostname.local:port"
   }
}
```
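To make the event structure concrete, here is a minimal Go sketch that decodes the example above. The struct is illustrative only; the field names follow the JSON shown, and the authoritative types live in the `github.com/docker/distribution/notifications` package:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Event mirrors the fields shown in the example above. This is an
// illustrative sketch, not the notifications package's own definition.
type Event struct {
	ID        string `json:"id"`
	Timestamp string `json:"timestamp"`
	Action    string `json:"action"`
	Target    struct {
		MediaType  string `json:"mediaType"`
		Length     int64  `json:"length"`
		Digest     string `json:"digest"`
		Repository string `json:"repository"`
		URL        string `json:"url"`
	} `json:"target"`
	Request struct {
		ID        string `json:"id"`
		Addr      string `json:"addr"`
		Host      string `json:"host"`
		Method    string `json:"method"`
		UserAgent string `json:"useragent"`
	} `json:"request"`
	Actor struct {
		Name string `json:"name"`
	} `json:"actor"`
	Source struct {
		Addr string `json:"addr"`
	} `json:"source"`
}

func main() {
	payload := `{"id": "asdf-asdf-asdf-asdf-0", "action": "push",
		"target": {"repository": "library/test", "digest": "sha256:0123456789abcdef0"}}`

	var ev Event
	if err := json.Unmarshal([]byte(payload), &ev); err != nil {
		panic(err)
	}
	// Prints: push library/test sha256:0123456789abcdef0
	fmt.Println(ev.Action, ev.Target.Repository, ev.Target.Digest)
}
```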
## Envelope

The envelope contains one or more events, with the following JSON structure:

```json
{
   "events": [ ... ]
}
```

While events may be sent in the same envelope, the set of events within that envelope have no implied relationship. For example, the registry may choose to group unrelated events and send them in the same envelope to reduce the total number of requests.

The full package has the media type "application/vnd.docker.distribution.events.v1+json", which will be set on the request coming to an endpoint.

An example of a full envelope may look as follows:

```
POST /callback HTTP/1.1
Host: mylistener.example.com
Authorization: Bearer <token>
Content-Type: application/vnd.docker.distribution.events.v1+json

{
   "events": [
      {
         "id": "asdf-asdf-asdf-asdf-0",
         "timestamp": "2006-01-02T15:04:05Z",
         "action": "push",
         "target": {
            "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
            "length": 1,
            "digest": "sha256:0123456789abcdef0",
            "repository": "library/test",
            "url": "http://example.com/v2/library/test/manifests/latest"
         },
         "request": {
            "id": "asdfasdf",
            "addr": "client.local",
            "host": "registrycluster.local",
            "method": "PUT",
            "useragent": "test/0.1"
         },
         "actor": {
            "name": "test-actor"
         },
         "source": {
            "addr": "hostname.local:port"
         }
      },
      {
         "id": "asdf-asdf-asdf-asdf-1",
         "timestamp": "2006-01-02T15:04:05Z",
         "action": "push",
         "target": {
            "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
            "length": 2,
            "digest": "tarsum.v2+sha256:0123456789abcdef1",
            "repository": "library/test",
            "url": "http://example.com/v2/library/test/manifests/latest"
         },
         "request": {
            "id": "asdfasdf",
            "addr": "client.local",
            "host": "registrycluster.local",
            "method": "PUT",
            "useragent": "test/0.1"
         },
         "actor": {
            "name": "test-actor"
         },
         "source": {
            "addr": "hostname.local:port"
         }
      },
      {
         "id": "asdf-asdf-asdf-asdf-2",
         "timestamp": "2006-01-02T15:04:05Z",
         "action": "push",
         "target": {
            "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
            "length": 3,
            "digest": "tarsum.v2+sha256:0123456789abcdef2",
            "repository": "library/test",
            "url": "http://example.com/v2/library/test/manifests/latest"
         },
         "request": {
            "id": "asdfasdf",
            "addr": "client.local",
            "host": "registrycluster.local",
            "method": "PUT",
            "useragent": "test/0.1"
         },
         "actor": {
            "name": "test-actor"
         },
         "source": {
            "addr": "hostname.local:port"
         }
      }
   ]
}
```
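A listener endpoint can be sketched in a few lines of Go. This is a hypothetical receiver, not part of the registry: it checks the media type above, decodes the envelope into a minimal inline type, and returns a 2xx so the registry considers the envelope delivered:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

const eventsMediaType = "application/vnd.docker.distribution.events.v1+json"

// Envelope mirrors just the fields this sketch needs from the JSON above.
type Envelope struct {
	Events []struct {
		Action string `json:"action"`
		Target struct {
			Repository string `json:"repository"`
			Digest     string `json:"digest"`
		} `json:"target"`
	} `json:"events"`
}

func callback(w http.ResponseWriter, r *http.Request) {
	if r.Header.Get("Content-Type") != eventsMediaType {
		http.Error(w, "unexpected media type", http.StatusBadRequest)
		return
	}
	var env Envelope
	if err := json.NewDecoder(r.Body).Decode(&env); err != nil {
		// Rejecting envelopes causes the registry to retry, which can
		// back up its in-memory queue; see Responses below.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	for _, ev := range env.Events {
		log.Printf("%s %s %s", ev.Action, ev.Target.Repository, ev.Target.Digest)
	}
	w.WriteHeader(http.StatusOK) // any 2xx marks the envelope delivered
}

func main() {
	http.HandleFunc("/callback", callback)
	log.Fatal(http.ListenAndServe(":5003", nil))
}
```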
## Responses

The registry is fairly accepting of the response codes from endpoints. If an endpoint responds with any 2xx or 3xx response code (after following redirects), the message will be considered delivered and discarded.

In turn, it is recommended that endpoints are accepting of incoming responses as well. While the format of event envelopes is standardized by media type, any "pickiness" about validation may cause the queue to back up on the registry.

## Monitoring

The state of the endpoints is reported via the debug/vars HTTP interface, usually configured to "http://localhost:5001/debug/vars". Information such as configuration and metrics is available by endpoint.

The following provides an example of a few endpoints that have experienced several failures and have since recovered:

```json
"notifications":{
   "endpoints":[
      {
         "name":"local-8082",
         "url":"http://localhost:5003/callback",
         "Headers":{
            "Authorization":[
               "Bearer \u003can example token\u003e"
            ]
         },
         "Timeout":1000000000,
         "Threshold":10,
         "Backoff":1000000000,
         "Metrics":{
            "Pending":76,
            "Events":76,
            "Successes":0,
            "Failures":0,
            "Errors":46,
            "Statuses":{
            }
         }
      },
      {
         "name":"local-8083",
         "url":"http://localhost:8083/callback",
         "Headers":null,
         "Timeout":1000000000,
         "Threshold":10,
         "Backoff":1000000000,
         "Metrics":{
            "Pending":0,
            "Events":76,
            "Successes":76,
            "Failures":0,
            "Errors":28,
            "Statuses":{
               "202 Accepted":76
            }
         }
      }
   ]
}
```

If using notifications as part of a larger application, it is _critical_ to monitor the size ("Pending" above) of the endpoint queues. If failures or queue sizes are increasing, it can indicate a larger problem.
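Assuming the default debug address shown above, a small Go program can poll `debug/vars` and watch the `Pending` counters. The struct below mirrors only the fragment of the output shown in the example:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// vars mirrors just the part of the debug/vars output shown above.
type vars struct {
	Notifications struct {
		Endpoints []struct {
			Name    string
			Metrics struct {
				Pending int
				Errors  int
			}
		} `json:"endpoints"`
	} `json:"notifications"`
}

func main() {
	resp, err := http.Get("http://localhost:5001/debug/vars")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var v vars
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		log.Fatal(err)
	}
	for _, ep := range v.Notifications.Endpoints {
		// A growing Pending count means the endpoint is not keeping up.
		fmt.Printf("%s: pending=%d errors=%d\n", ep.Name, ep.Metrics.Pending, ep.Metrics.Errors)
	}
}
```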
The logs are also a valuable resource for monitoring problems. A failing endpoint will lead to messages similar to the following:

```
ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying
WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off
```

The above indicates that several errors have led to a backoff and the registry will wait before retrying.

## Considerations

Currently, the queues are in-memory, so endpoints should be _reasonably reliable_. They are designed to make a best effort to send the messages, but if an instance is lost, messages may be dropped. If an endpoint goes down, care should be taken to ensure that the registry instance is not terminated before the endpoint comes back up, or messages will be lost.

This can be mitigated by running endpoints in close proximity to the registry instances. One could run an endpoint that pages to disk and then forwards a request to provide better durability.

The notification system is designed around a series of interchangeable _sinks_ which can be wired up to achieve interesting behavior. If this system doesn't provide acceptable guarantees, adding a transactional `Sink` to the registry is a possibility, although it may have an effect on request service time. Please see the [godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink) for more information.

-blob
-mark :71
-data 3753
page_title: Docker Registry 2.0
page_description: Introduces the Docker Registry
page_keywords: registry, images, repository

# Docker Registry 2.0

Docker Registry stores and distributes images centrally. It's where you push images to and pull them from; Docker Registry gives team members the ability to share images and deploy them to testing, staging and production environments.

Docker provides a hosted registry as part of [Docker Hub](https://hub.docker.com). Docker Hub is a cloud service that securely manages your images. It features organization accounts, automated builds, and much more.

Docker Registry is the core technology behind Docker Hub. You can run your own registry instance if you want to host your images privately. Docker Registry features:

 - **Pluggable storage drivers**: Images can be stored in Amazon S3, Microsoft Azure or the local filesystem.
 - **Webhook notifications**: When an image is pushed to your registry, webhooks can fire off to launch CI builds, send notifications to IRC, etc.

To get started with your own Docker Registry, head over to the instructions on how to [deploy a registry](deploying.md).

## Understanding the registry

A registry is, at its heart, a collection of repositories. In turn, a repository is a collection of images. Users interact with the registry by pushing images to or pulling images from the registry. The Docker Registry includes several optional features that you can configure according to your needs.

![](../images/registry.png)

The architecture supports a configurable storage backend. You can store images on a file system or on a service such as Amazon S3 or Microsoft Azure. The default storage system is the local disk; this is suitable for development or some small deployments.

Securing access to images is a concern for even the simplest deployment. The registry service supports transport layer security (TLS) natively. You must configure it in your instance to make use of it. You can also use a proxy server such as Nginx and basic authentication to extend the security of a deployment.

The registry repository includes reference implementations for additional authentication and authorization support. Only very large or public registry deployments are expected to extend the registry in this way.

Docker Registry architecture includes a robust notification system. This system sends webhook notifications in response to registry activity. The registry also includes features for logging and reporting. Reporting is useful for large installations that want to collect metrics. Currently, the feature supports both New Relic and Bugsnag.

## Getting help

Docker Registry is an open source project and is under active development. If you need help, would like to contribute, or simply want to talk about the project with like-minded individuals, we have a number of open channels for communication.

- To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/distribution/issues).
- To talk about the project please post a message to the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) or join the `#docker-distribution` channel on IRC.
- To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/distribution/pulls).

For more information and resources, please visit the [Getting Help project page](https://docs.docker.com/project/get-help/).
## Registry documentation

 - [Deploying a registry](deploying.md)
 - [Configure a registry](configuration.md)
 - [Storage driver model](storagedrivers.md)
 - [Working with notifications](notifications.md)
 - [Registry API v2](spec/api.md)

-blob
-mark :72
-data 80486
# Docker Registry HTTP API V2

## Introduction

The _Docker Registry HTTP API_ is the protocol to facilitate distribution of images to the docker engine. It interacts with instances of the docker registry, which is a service to manage information about docker images and enable their distribution. The specification covers the operation of version 2 of this API, known as _Docker Registry HTTP API V2_.

While the V1 registry protocol is usable, there are several problems with the architecture that have led to this new version. The main driver of this specification is a set of changes to the docker image format, covered in docker/docker#8093. The new, self-contained image manifest simplifies image definition and improves security. This specification will build on that work, leveraging new properties of the manifest format to improve performance, reduce bandwidth usage and decrease the likelihood of backend corruption.

For relevant details and history leading up to this specification, please see the following issues:

- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)

### Scope

This specification covers the URL layout and protocols of the interaction between docker registry and docker core. This will affect the docker core registry API and the rewrite of docker-registry. Docker registry implementations may implement other API endpoints, but they are not covered by this specification.

This includes the following features:

- Namespace-oriented URI Layout
- PUSH/PULL registry server for V2 image manifest format
- Resumable layer PUSH support
- V2 Client library implementation

While authentication and authorization support will influence this specification, details of the protocol will be left to a future specification. Relevant header definitions and error codes are present to provide an indication of what a client may encounter.

#### Future

There are features that have been discussed during the process of cutting this specification. The following is an incomplete list:

- Immutable image references
- Multiple architecture support
- Migration from v2compatibility representation

These may represent features that are either out of the scope of this specification, the purview of another specification, or have been deferred to a future version.

### Use Cases

For the most part, the use cases of the former registry API apply to the new version. Differentiating use cases are covered below.

#### Image Verification

A docker engine instance would like to run a verified image named "library/ubuntu", with the tag "latest". The engine contacts the registry, requesting the manifest for "library/ubuntu:latest". An untrusted registry returns a manifest. Before proceeding to download the individual layers, the engine verifies the manifest's signature, ensuring that the content was produced from a trusted source and no tampering has occurred. After each layer is downloaded, the engine verifies the digest of the layer, ensuring that the content matches that specified by the manifest.
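As a rough sketch of that per-layer check, the following Go snippet verifies downloaded content against an expected digest. The `sha256:` form and the sample digest are illustrative assumptions; manifests in this version of the specification actually carry tarsum digests:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// verifyLayer streams layer content and checks it against the digest the
// manifest declared for that layer. Illustrative only: this is not the
// engine's own verification code.
func verifyLayer(content io.Reader, expected string) error {
	h := sha256.New()
	if _, err := io.Copy(h, content); err != nil {
		return err
	}
	actual := fmt.Sprintf("sha256:%x", h.Sum(nil))
	if actual != expected {
		return fmt.Errorf("digest mismatch: got %s, want %s", actual, expected)
	}
	return nil
}

func main() {
	layer := strings.NewReader("example layer bytes")
	err := verifyLayer(layer, "sha256:0123456789abcdef0")
	fmt.Println(err) // mismatch, since the expected digest is made up
}
```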
#### Resumable Push

Company X's build servers lose connectivity to the docker registry before completing an image layer transfer. After connectivity returns, the build server attempts to re-upload the image. The registry notifies the build server that the upload has already been partially attempted. The build server responds by only sending the remaining data to complete the image file.

#### Resumable Pull

Company X is having more connectivity problems, but this time in their deployment datacenter. When downloading an image, the connection is interrupted before completion. The client keeps the partial data and uses http `Range` requests to avoid downloading repeated data.

#### Layer Upload De-duplication

Company Y's build system creates two identical docker layers from build processes A and B. Build process A completes uploading the layer before B. When process B attempts to upload the layer, the registry indicates that it's not necessary because the layer is already known.

If process A and B upload the same layer at the same time, both operations will proceed and the first to complete will be stored in the registry (Note: we may modify this to prevent dogpile with some locking mechanism).

### Changes

The V2 specification has been written to work as a living document, specifying only what is certain and leaving what is not specified open or to future changes. Only non-conflicting additions should be made to the API, and accepted changes should avoid preventing future changes from happening.

This section should be updated when changes are made to the specification, indicating what is different. Optionally, we may start marking parts of the specification to correspond with the versions enumerated here.
2.0.1

- Added support for immutable manifest references in manifest endpoints.
- Deleting a manifest by tag has been deprecated.
- Specified `Docker-Content-Digest` header for appropriate entities.
- Added error code for unsupported operations.

2.0

- This is the baseline specification.
## Overview

This section covers client flows and details of the API endpoints. The URI layout of the new API is structured to support a rich authentication and authorization model by leveraging namespaces. All endpoints will be prefixed by the API version and the repository name:

    /v2/<name>/

For example, for an API endpoint that works with the `library/ubuntu` repository, the URI prefix will be:

    /v2/library/ubuntu/

This scheme provides rich access control over various operations and methods using the URI prefix and http methods that can be controlled in a variety of ways.

Classically, repository names have always been two path components, where each path component is less than 30 characters. The V2 registry API does not enforce this. The rules for a repository name are as follows:

1. A repository name is broken up into _path components_. A component of a repository name must be at least two lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*` and the matched result must be 2 or more characters in length.
2. The name of a repository must have at least two path components, separated by a forward slash.
3. The total length of a repository name, including slashes, must be less than 256 characters.

These name requirements _only_ apply to the registry API and should accept a superset of what is supported by other docker ecosystem components. A sketch of a client-side check for these rules follows.
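The following Go sketch applies the three rules above. It is illustrative, not the registry's own validation code:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// component follows rule 1 above: lowercase alpha-numeric runs, optionally
// separated by periods, dashes or underscores.
var component = regexp.MustCompile(`^[a-z0-9]+(?:[._-][a-z0-9]+)*$`)

// validRepositoryName applies the three rules from this section.
func validRepositoryName(name string) bool {
	if len(name) >= 256 { // rule 3: total length, including slashes
		return false
	}
	parts := strings.Split(name, "/")
	if len(parts) < 2 { // rule 2: at least two path components
		return false
	}
	for _, p := range parts {
		if len(p) < 2 || !component.MatchString(p) { // rule 1
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validRepositoryName("library/ubuntu")) // true
	fmt.Println(validRepositoryName("ubuntu"))         // false: one component
	fmt.Println(validRepositoryName("library/UBUNTU")) // false: uppercase
}
```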
All endpoints should support aggressive http caching, compression and range headers, where appropriate. The new API attempts to leverage HTTP semantics where possible but may break from standards to implement targeted features.

For detail on individual endpoints, please see the [_Detail_](#detail) section.

### Errors

Actionable failure conditions, covered in detail in their relevant sections, are reported as part of 4xx responses, in a json response body. One or more errors will be returned in the following format:

    {
        "errors": [{
                "code": <error identifier>,
                "message": <message describing condition>,
                "detail": <unstructured>
            },
            ...
        ]
    }

The `code` field will be a unique identifier, all caps with underscores by convention. The `message` field will be a human readable string. The optional `detail` field may contain arbitrary json data providing information the client can use to resolve the issue.

While the client can take action on certain error codes, the registry may add new error codes over time. All client implementations should treat unknown error codes as `UNKNOWN`, allowing future error codes to be added without breaking API compatibility. For the purposes of the specification, error codes will only be added and never removed.

For a complete account of all error codes, please see the _Detail_ section.
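A client can decode this format and remain forward compatible with a few lines of Go. The set of "known" codes below is a hypothetical subset chosen for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// errorResponse mirrors the 4xx body format above.
type errorResponse struct {
	Errors []struct {
		Code    string          `json:"code"`
		Message string          `json:"message"`
		Detail  json.RawMessage `json:"detail"`
	} `json:"errors"`
}

// knownCodes holds the codes this hypothetical client acts on; anything
// else is treated as UNKNOWN, as the specification requires.
var knownCodes = map[string]bool{
	"BLOB_UNKNOWN": true, "DIGEST_INVALID": true, "NAME_INVALID": true,
}

func main() {
	body := `{"errors": [{"code": "SOME_FUTURE_CODE", "message": "new condition"}]}`

	var resp errorResponse
	if err := json.Unmarshal([]byte(body), &resp); err != nil {
		panic(err)
	}
	for _, e := range resp.Errors {
		code := e.Code
		if !knownCodes[code] {
			code = "UNKNOWN" // forward compatibility: never fail hard on new codes
		}
		fmt.Printf("%s: %s\n", code, e.Message)
	}
}
```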
### API Version Check

A minimal endpoint, mounted at `/v2/`, will provide version support information based on its response statuses. The request format is as follows:

    GET /v2/

If a `200 OK` response is returned, the registry implements the V2(.1) registry API and the client may proceed safely with other V2 operations. Optionally, the response may contain information about the supported paths in the response body. The client should be prepared to ignore this data.

If a `401 Unauthorized` response is returned, the client should take action based on the contents of the "WWW-Authenticate" header and try the endpoint again. Depending on access control setup, the client may still have to authenticate against different resources, even if this check succeeds.

If a `404 Not Found` response status, or other unexpected status, is returned, the client should proceed with the assumption that the registry does not implement V2 of the API.
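A minimal version-check client, assuming a registry at `http://localhost:5000`, might look as follows:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// checkV2 performs the version check described above against a registry
// base URL. The base URL is an assumption for this sketch.
func checkV2(base string) (bool, error) {
	resp, err := http.Get(base + "/v2/")
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		return true, nil // registry implements the V2(.1) API
	case http.StatusUnauthorized:
		// The API is V2-capable; act on the challenge and retry.
		return true, fmt.Errorf("authenticate first: %s", resp.Header.Get("WWW-Authenticate"))
	default:
		return false, nil // 404 or anything unexpected: assume no V2 support
	}
}

func main() {
	ok, err := checkV2("http://localhost:5000")
	if err != nil {
		log.Println(err)
	}
	fmt.Println("V2 supported:", ok)
}
```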
### Pulling An Image

An "image" is a combination of a JSON manifest and individual layer files. The process of pulling an image centers around retrieving these two components.

The first step in pulling an image is to retrieve the manifest. For reference, the relevant manifest fields for the registry are the following:

| field     | description                                    |
|-----------|------------------------------------------------|
| name      | The name of the image.                         |
| tag       | The tag for this version of the image.         |
| fsLayers  | A list of layer descriptors (including tarsum) |
| signature | A JWS used to verify the manifest content      |

For more information about the manifest format, please see [docker/docker#8093](https://github.com/docker/docker/issues/8093).

When the manifest is in hand, the client must verify the signature to ensure the names and layers are valid. Once confirmed, the client will then use the tarsums to download the individual layers. Layers are stored as blobs in the V2 registry API, keyed by their tarsum digest.

#### Pulling an Image Manifest

The image manifest can be fetched with the following url:

```
GET /v2/<name>/manifests/<reference>
```

The `name` and `reference` parameters identify the image and are required. The reference may include a tag or digest.

A `404 Not Found` response will be returned if the image is unknown to the registry. If the image exists and the response is successful, the image manifest will be returned, with the following format (see docker/docker#8093 for details):

    {
       "name": <name>,
       "tag": <tag>,
       "fsLayers": [
          {
             "blobSum": <tarsum>
          },
          ...
       ],
       "history": <v1 images>,
       "signature": <JWS>
    }

The client should verify the returned manifest signature for authenticity before fetching layers.
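A bare-bones manifest fetch in Go might look like the sketch below. The registry address and repository are assumptions, and JWS signature verification is omitted here, though a real client must perform it before trusting the manifest:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// manifest carries only the fields this sketch inspects.
type manifest struct {
	Name     string `json:"name"`
	Tag      string `json:"tag"`
	FSLayers []struct {
		BlobSum string `json:"blobSum"`
	} `json:"fsLayers"`
}

func main() {
	url := "http://localhost:5000/v2/library/test/manifests/latest"
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		log.Fatal("image unknown to registry")
	}
	var m manifest
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		log.Fatal(err)
	}
	for _, l := range m.FSLayers {
		fmt.Println("layer blob:", l.BlobSum) // each is fetched via /blobs/<tarsum>
	}
}
```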
#### Pulling a Layer

Layers are stored in the blob portion of the registry, keyed by tarsum digest. Pulling a layer is carried out by a standard http request. The URL is as follows:

    GET /v2/<name>/blobs/<tarsum>

Access to a layer will be gated by the `name` of the repository but is identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an opaque field, to be interpreted by the tarsum library.

This endpoint may issue a 307 (302 for HTTP/1.0 clients) redirect to another service for downloading the layer, and clients should be prepared to handle redirects.

### Pushing An Image

Pushing an image works in the opposite order as a pull: the individual layers are pushed first, and the image manifest is uploaded last, once all of its layers are available in the registry.

#### Pushing a Layer

Layer uploads are resumable and are started with a POST request that returns an upload url, which is then used to push data and check upload status.

##### Starting An Upload

To begin the process, a POST request should be issued in the following format:

```
POST /v2/<name>/blobs/uploads/
```

The parameters of this request are the image namespace under which the layer will be linked. Responses to this request are covered below.

##### Existing Layers

The existence of a layer can be checked via a `HEAD` request to the blob store API. The request should be formatted as follows:

```
HEAD /v2/<name>/blobs/<digest>
```

If the layer with the tarsum specified in `digest` is available, a 200 OK response will be received, with no actual body content (this is according to http specification). The response will look as follows:

```
200 OK
Content-Length: <length of blob>
Docker-Content-Digest: <digest>
```

When this response is received, the client can assume that the layer is already available in the registry under the given name and should take no further action to upload the layer. Note that the binary digests may differ for the existing registry layer, but the tarsums will be guaranteed to match.

##### Uploading the Layer

If the POST request is successful, a `202 Accepted` response will be returned with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The rest of the upload process can be carried out with the returned url, called the "Upload URL", from the `Location` header. All responses to the upload url, whether sending data or getting status, will be in this format. Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location` header is specified, clients should treat it as an opaque url and should never try to assemble it. While the `uuid` parameter may be an actual UUID, this proposal imposes no constraints on the format and clients should never impose any.

If clients need to correlate local upload state with remote upload state, the contents of the `Docker-Upload-UUID` header should be used. Such an id can be used to key the last used location header when implementing resumable uploads.

##### Upload Progress

The progress and chunk coordination of the upload process will be coordinated through the `Range` header. While this is a non-standard use of the `Range` header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. For an upload that just started, for an example with a 1000 byte layer file, the `Range` header would be as follows:

```
Range: bytes=0-0
```

To get the status of an upload, issue a GET request to the upload URL:

```
GET /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
```

The response will be similar to the above, except it will return a 204 status:

```
204 No Content
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Docker-Upload-UUID: <uuid>
```

Note that the HTTP `Range` header byte ranges are inclusive and that will be honored, even in non-standard use cases.

##### Monolithic Upload

A monolithic upload is simply a chunked upload with a single chunk and may be favored by clients that would like to avoid the complexity of chunking. To carry out a "monolithic" upload, one can simply put the entire content blob to the provided URL:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

The "digest" parameter must be included with the PUT request. Please see the _Completed Upload_ section for details on the parameters and expected responses.

Additionally, the upload can be completed with a single `POST` request to the uploads endpoint, including the "size" and "digest" parameters:

```
POST /v2/<name>/blobs/uploads/?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

On the registry service, this should allocate an upload, accept and verify the data, and return the same response as the final chunk of an upload. If the POST request fails collecting the data in any way, the registry should attempt to return an error response to the client with the `Location` header providing a place to continue the upload.

The single `POST` method is provided for convenience, and most clients should implement `POST` + `PUT` to support reliable resume of uploads.

##### Chunked Upload

To carry out an upload of a chunk, the client can specify a range header and only include that part of the layer file:

```
PATCH /v2/<name>/blobs/uploads/<uuid>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Layer Chunk Binary Data>
```

There is no enforcement on layer chunk splits other than that the server must receive them in order. The server may enforce a minimum chunk size. If the server cannot accept the chunk, a `416 Requested Range Not Satisfiable` response will be returned and will include a `Range` header indicating the current status:

```
416 Requested Range Not Satisfiable
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-<last valid range>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

If this response is received, the client should resume from the "last valid range" and upload the subsequent chunk. A 416 will be returned under the following conditions:

- Invalid Content-Range header format
- Out of order chunk: the range of the next chunk must start immediately after the "last valid range" from the previous response.

When a chunk is accepted as part of the upload, a `202 Accepted` response will be returned, including a `Range` header with the current upload status:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

##### Completed Upload

For an upload to be considered complete, the client must submit a `PUT` request on the upload endpoint with a digest parameter. If it is not provided, the upload will not be considered complete. The format for the final chunk will be as follows:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Last Layer Chunk Binary Data>
```

Optionally, if all chunks have already been uploaded, a `PUT` request with a `digest` parameter and zero-length body may be sent to complete and validate the upload. Multiple "digest" parameters may be provided with different digests. The server may verify none or all of them, but _must_ notify the client if the content is rejected.

When the last chunk is received and the layer has been validated, the client will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<tarsum>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the accepted layer file. The `Docker-Content-Digest` header returns the canonical digest of the uploaded blob, which may differ from the provided digest. Most clients may ignore the value, but if it is used, the client should verify the value against the uploaded blob data.

###### Digest Parameter

The "digest" parameter is designed as an opaque parameter to support verification of a successful transfer. The initial version of the registry API will support a tarsum digest, in the standard tarsum format. For example, an HTTP URI parameter might be as follows:

```
tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Given this parameter, the registry will verify that the provided content does result in this tarsum. Optionally, the registry can support other digest parameters for non-tarfile content stored as a layer. A regular hash digest might be specified as follows:

```
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Such a parameter would be used to verify that the binary content (as opposed to the tar content) matches the provided digest at the end of the upload process.

For the initial version, registry servers are only required to support the tarsum format.
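Tying the chunked flow together, the Go sketch below sends one chunk and reacts to the 202 and 416 statuses described above. The upload URL is an assumption; a real client obtains it from the `202 Accepted` response to `POST /v2/<name>/blobs/uploads/` and treats it as opaque:

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

// uploadChunk PATCHes one chunk to an upload URL, per the chunked upload
// flow above, and returns the Location header to use for the next chunk.
func uploadChunk(client *http.Client, uploadURL string, start int, chunk []byte) (string, error) {
	req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(chunk))
	if err != nil {
		return "", err
	}
	end := start + len(chunk) - 1 // byte ranges are inclusive
	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end))
	req.Header.Set("Content-Type", "application/octet-stream")
	req.ContentLength = int64(len(chunk))

	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusAccepted:
		// Continue from the Location header, keyed by Docker-Upload-UUID.
		return resp.Header.Get("Location"), nil
	case http.StatusRequestedRangeNotSatisfiable:
		return "", fmt.Errorf("resume from last valid range: %s", resp.Header.Get("Range"))
	default:
		return "", fmt.Errorf("unexpected status: %s", resp.Status)
	}
}

func main() {
	next, err := uploadChunk(http.DefaultClient,
		"http://localhost:5000/v2/library/test/blobs/uploads/some-uuid", 0, []byte("chunk-0"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("continue at:", next)
}
```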
#### Listing Image Tags

It may be necessary to list all of the tags under a given repository. The tags for an image repository can be retrieved with the following request:

    GET /v2/<name>/tags/list

The response will be in the following format:

    200 OK
    Content-Type: application/json

    {
        "name": <name>,
        "tags": [
            <tag>,
            ...
        ]
    }

For repositories with a large number of tags, this response may be quite large, so care should be taken by the client when parsing the response to reduce copying.

### Deleting an Image

An image may be deleted from the registry via its `name` and `reference`. A delete may be issued with the following request format:

    DELETE /v2/<name>/manifests/<reference>

For deletes, `reference` *must* be a digest or the delete will fail. If the image exists and has been successfully deleted, the following response will be issued:

    202 Accepted
    Content-Length: None

If the image had already been deleted or did not exist, a `404 Not Found` response will be issued instead.

## Detail

> **Note**: This section is still under construction. For the purposes of
> implementation, if any details below differ from the described request flows
> above, the section below should be corrected. When they match, this note
> should be removed.

The behavior of the endpoints is covered in detail in this section, organized by route and entity. All aspects of the request and responses are covered, including headers, parameters and body formats. Examples of requests and their corresponding responses, with success and failure, are enumerated.

> **Note**: The sections on endpoint detail are arranged with an example
> request, a description of the request, followed by information about that
> request.

A list of methods and URIs is covered in the table below:

|Method|Path|Entity|Description|
|------|----|------|-----------|
| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. |
| GET | `/v2/<name>/tags/list` | Tags | Fetch the tags under the repository identified by `name`. |
| GET | `/v2/<name>/manifests/<reference>` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
| PUT | `/v2/<name>/manifests/<reference>` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
| DELETE | `/v2/<name>/manifests/<reference>` | Manifest | Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
| GET | `/v2/<name>/blobs/<digest>` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
| POST | `/v2/<name>/blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. |
| GET | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. |
| PATCH | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Upload a chunk of data for the specified upload. |
| PUT | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. |
| DELETE | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. |

The detail for each endpoint is covered in the following sections.

### Errors

The error codes encountered via the API are enumerated in the following table:

|Code|Message|Description|
|----|-------|-----------|
| `UNKNOWN` | unknown error | Generic error returned when the error does not have an API classification. |
| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
| `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. |
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information about the failed validation. |
| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. |
| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
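Because the registry may add error codes over time, a client should normalize anything it does not recognize to `UNKNOWN`. A minimal Go sketch of that defensive decoding, assuming the fixed `"errors"` envelope shown earlier, might look like this:

```
package registry

import (
	"encoding/json"
	"io"
)

// knownCodes mirrors the table above; clients must treat anything else as UNKNOWN.
var knownCodes = map[string]bool{
	"UNKNOWN": true, "UNSUPPORTED": true, "UNAUTHORIZED": true,
	"DIGEST_INVALID": true, "SIZE_INVALID": true, "NAME_INVALID": true,
	"TAG_INVALID": true, "NAME_UNKNOWN": true, "MANIFEST_UNKNOWN": true,
	"MANIFEST_INVALID": true, "MANIFEST_UNVERIFIED": true,
	"BLOB_UNKNOWN": true, "BLOB_UPLOAD_UNKNOWN": true, "BLOB_UPLOAD_INVALID": true,
}

// Error is one entry of the error envelope returned with 4xx responses.
type Error struct {
	Code    string          `json:"code"`
	Message string          `json:"message"`
	Detail  json.RawMessage `json:"detail,omitempty"`
}

// decodeErrors parses an error response body, normalizing unrecognized codes
// to UNKNOWN so that new server-side codes do not break the client.
func decodeErrors(body io.Reader) ([]Error, error) {
	var envelope struct {
		Errors []Error `json:"errors"`
	}
	if err := json.NewDecoder(body).Decode(&envelope); err != nil {
		return nil, err
	}
	for i := range envelope.Errors {
		if !knownCodes[envelope.Errors[i].Code] {
			envelope.Errors[i].Code = "UNKNOWN"
		}
	}
	return envelope.Errors, nil
}
```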
### Base

Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authorization.

#### GET Base

Check that the endpoint implements Docker Registry API V2.

```
GET /v2/
Host: <registry host>
Authorization: <scheme> <token>
```

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|

###### On Success: OK

```
200 OK
```

The API implements the V2 protocol and is accessible.

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client is not authorized to access the registry.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Not Found

```
404 Not Found
```

The registry does not implement the V2 API.
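The three outcomes above can be handled with a short probe. This Go sketch assumes an unauthenticated client and a hypothetical `base` URL; it surfaces the `WWW-Authenticate` challenge rather than acting on it.

```
package registry

import (
	"fmt"
	"net/http"
)

// checkV2 probes the base endpoint and reports whether the registry speaks
// the V2 API. A 401 means V2 is supported but the client must authenticate
// first, using the challenge in WWW-Authenticate.
func checkV2(base string) error {
	resp, err := http.Get(base + "/v2/")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		return nil
	case http.StatusUnauthorized:
		return fmt.Errorf("authentication required: %s", resp.Header.Get("WWW-Authenticate"))
	case http.StatusNotFound:
		return fmt.Errorf("registry does not implement the V2 API")
	default:
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
}
```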
### Tags

Retrieve information about tags.

#### GET Tags

Fetch the tags under the repository identified by `name`.

```
GET /v2/<name>/tags/list
Host: <registry host>
Authorization: <scheme> <token>
```

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`name`|path|Name of the target repository.|

###### On Success: OK

```
200 OK
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "name": <name>,
    "tags": [
        <tag>,
        ...
    ]
}
```

A list of tags for the named repository.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|

###### On Failure: Not Found

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The repository is not known to the registry.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |

###### On Failure: Unauthorized

```
401 Unauthorized
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to the repository.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
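A minimal Go sketch of the tag listing, under the same hypothetical `base` URL and no-auth assumptions as earlier examples; it uses a streaming decoder since the body can be large for busy repositories.

```
package registry

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type tagList struct {
	Name string   `json:"name"`
	Tags []string `json:"tags"`
}

// listTags fetches the tag list for a repository.
func listTags(base, name string) ([]string, error) {
	resp, err := http.Get(fmt.Sprintf("%s/v2/%s/tags/list", base, name))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("listing tags: %s", resp.Status)
	}
	var tl tagList
	if err := json.NewDecoder(resp.Body).Decode(&tl); err != nil {
		return nil, err
	}
	return tl.Tags, nil
}
```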
### Manifest

Create, update and retrieve manifests.

#### GET Manifest

Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.

```
GET /v2/<name>/manifests/<reference>
Host: <registry host>
Authorization: <scheme> <token>
```

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`name`|path|Name of the target repository.|
|`tag`|path|Tag of the target manifest.|

###### On Success: OK

```
200 OK
Docker-Content-Digest: <digest>
Content-Type: application/json; charset=utf-8

{
   "name": <name>,
   "tag": <tag>,
   "fsLayers": [
      {
         "blobSum": <tarsum>
      },
      ...
   ],
   "history": <v1 images>,
   "signature": <JWS>
}
```

The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Docker-Content-Digest`|Digest of the targeted content for the request.|

###### On Failure: Bad Request

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The name or reference was invalid.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |

###### On Failure: Unauthorized

```
401 Unauthorized
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to the repository.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Not Found

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The named manifest is not known to the registry.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
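As a hedged illustration of using `Docker-Content-Digest`, the sketch below fetches a manifest and cross-checks the header against a sha256 of the payload exactly as returned. This assumes an sha256-style digest (the header may instead carry a tarsum), and a real client must also verify the manifest signature, which is out of scope here.

```
package registry

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
)

// fetchManifest retrieves a manifest and, when the registry supplies an
// sha256 Docker-Content-Digest, verifies it against the returned bytes.
func fetchManifest(base, name, reference string) ([]byte, error) {
	resp, err := http.Get(fmt.Sprintf("%s/v2/%s/manifests/%s", base, name, reference))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("fetching manifest: %s", resp.Status)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	sum := sha256.Sum256(body)
	got := "sha256:" + hex.EncodeToString(sum[:])
	if want := resp.Header.Get("Docker-Content-Digest"); want != "" && want != got {
		return nil, fmt.Errorf("digest mismatch: header %s, body %s", want, got)
	}
	return body, nil
}
```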
#### PUT Manifest

Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.

```
PUT /v2/<name>/manifests/<reference>
Host: <registry host>
Authorization: <scheme> <token>
Content-Type: application/json; charset=utf-8

{
   "name": <name>,
   "tag": <tag>,
   "fsLayers": [
      {
         "blobSum": <tarsum>
      },
      ...
   ],
   "history": <v1 images>,
   "signature": <JWS>
}
```

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`name`|path|Name of the target repository.|
|`tag`|path|Tag of the target manifest.|

###### On Success: Accepted

```
202 Accepted
Location: <url>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Location`|The canonical location url of the uploaded manifest.|
|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
|`Docker-Content-Digest`|Digest of the targeted content for the request.|

###### On Failure: Invalid Manifest

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |
| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information about the failed validation. |
| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. |
| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |

###### On Failure: Unauthorized

```
401 Unauthorized
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have permission to push to the repository.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Missing Layer(s)

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [{
            "code": "BLOB_UNKNOWN",
            "message": "blob unknown to registry",
            "detail": {
                "digest": <tarsum>
            }
        },
        ...
    ]
}
```

One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

#### DELETE Manifest

Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest.

```
DELETE /v2/<name>/manifests/<reference>
Host: <registry host>
Authorization: <scheme> <token>
```

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`name`|path|Name of the target repository.|
|`tag`|path|Tag of the target manifest.|

###### On Success: Accepted

```
202 Accepted
```

###### On Failure: Invalid Name or Tag

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The specified `name` or `tag` was invalid and the delete was unable to proceed.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Unknown Manifest

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
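A small Go sketch of the delete, under the usual hypothetical `base` and no-auth assumptions. Per the request-flow section, `reference` must be a digest for deletes, and a 404 can be treated as "already deleted".

```
package registry

import (
	"fmt"
	"net/http"
)

// deleteManifest removes a manifest by digest.
func deleteManifest(base, name, digest string) error {
	req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/v2/%s/manifests/%s", base, name, digest), nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusAccepted, http.StatusNotFound:
		return nil // deleted, or already gone
	default:
		return fmt.Errorf("deleting manifest: %s", resp.Status)
	}
}
```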
### Blob

Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.

#### GET Blob

Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.

##### Fetch Blob

```
GET /v2/<name>/blobs/<digest>
Host: <registry host>
Authorization: <scheme> <token>
```

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`name`|path|Name of the target repository.|
|`digest`|path|Digest of desired blob.|

###### On Success: OK

```
200 OK
Content-Length: <length>
Docker-Content-Digest: <digest>
Content-Type: application/octet-stream

<blob binary data>
```

The blob identified by `digest` is available. The blob content will be present in the body of the response.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Content-Length`|The length of the requested blob content.|
|`Docker-Content-Digest`|Digest of the targeted content for the request.|

###### On Success: Temporary Redirect

```
307 Temporary Redirect
Location: <blob location>
Docker-Content-Digest: <digest>
```

The blob identified by `digest` is available at the provided location.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Location`|The location where the layer should be accessible.|
|`Docker-Content-Digest`|Digest of the targeted content for the request.|

###### On Failure: Bad Request

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `digest`.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": "UNAUTHORIZED",
            "message": "access to the requested resource is not authorized",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Not Found

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The blob, identified by `name` and `digest`, is unknown to the registry.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
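The sketch below downloads a blob and verifies its digest while streaming; Go's `net/http` client follows the 307 redirect to the blob location automatically, so no special handling is needed. As before, this is illustrative and assumes an sha256 digest rather than a tarsum.

```
package registry

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
)

// fetchBlob downloads a blob into dst and verifies its content digest.
func fetchBlob(base, name, digest string, dst io.Writer) error {
	resp, err := http.Get(fmt.Sprintf("%s/v2/%s/blobs/%s", base, name, digest))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("fetching blob: %s", resp.Status)
	}
	h := sha256.New()
	// Tee the body into both the destination and the hash.
	if _, err := io.Copy(io.MultiWriter(dst, h), resp.Body); err != nil {
		return err
	}
	if got := "sha256:" + hex.EncodeToString(h.Sum(nil)); got != digest {
		return fmt.Errorf("digest mismatch: want %s, got %s", digest, got)
	}
	return nil
}
```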
##### Fetch Blob Part

```
GET /v2/<name>/blobs/<digest>
Host: <registry host>
Authorization: <scheme> <token>
Range: bytes=<start>-<end>
```

This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Ranges: bytes` is returned, range requests can be used to fetch partial content.

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`Range`|header|HTTP Range header specifying blob chunk.|
|`name`|path|Name of the target repository.|
|`digest`|path|Digest of desired blob.|

###### On Success: Partial Content

```
206 Partial Content
Content-Length: <length>
Content-Range: bytes <start>-<end>/<size>
Content-Type: application/octet-stream

<blob binary data>
```

The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the response.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Content-Length`|The length of the requested blob chunk.|
|`Content-Range`|Content range of blob chunk.|

###### On Failure: Bad Request

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `digest`.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": "UNAUTHORIZED",
            "message": "access to the requested resource is not authorized",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Not Found

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The blob, identified by `name` and `digest`, is unknown to the registry.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |

###### On Failure: Requested Range Not Satisfiable

```
416 Requested Range Not Satisfiable
```

The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.
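Range requests are what make the "Resumable Pull" use case work. A hedged Go sketch of resuming a download from a known offset, using an open-ended RFC7233 range, might look like this; `blobURL` is a hypothetical, fully resolved blob URL, and the caller should first confirm range support via a HEAD request.

```
package registry

import (
	"fmt"
	"io"
	"net/http"
)

// resumeBlob re-requests a blob starting at offset and appends it to dst.
func resumeBlob(blobURL string, offset int64, dst io.Writer) error {
	req, err := http.NewRequest("GET", blobURL, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Anything other than 206 means the server ignored or rejected the range.
	if resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("range request not honored: %s", resp.Status)
	}
	_, err = io.Copy(dst, resp.Body)
	return err
}
```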
### Initiate Blob Upload

Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.

#### POST Initiate Blob Upload

Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.

##### Initiate Monolithic Blob Upload

```
POST /v2/<name>/blobs/uploads/?digest=<digest>
Host: <registry host>
Authorization: <scheme> <token>
Content-Length: <length of blob>
Content-Type: application/octet-stream

<binary data>
```

Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`Content-Length`|header|Length of the blob being uploaded, corresponding to the length of the request body.|
|`name`|path|Name of the target repository.|
|`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.|

###### On Success: Created

```
201 Created
Location: <blob location>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The blob has been created in the registry and is available at the provided location.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Location`|The canonical location of the created blob.|
|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|

###### On Failure: Invalid Name or Digest

```
400 Bad Request
```

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": "UNAUTHORIZED",
            "message": "access to the requested resource is not authorized",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to push to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
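For small blobs, the single-request form is the simplest client path. A minimal Go sketch, under the same hypothetical `base` and no-auth assumptions as earlier examples:

```
package registry

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

// pushBlobMonolithic uploads an entire blob in one POST, completing the
// upload in a single request via the digest query parameter.
func pushBlobMonolithic(base, name, digest string, blob []byte) error {
	u := fmt.Sprintf("%s/v2/%s/blobs/uploads/?digest=%s", base, name, url.QueryEscape(digest))
	resp, err := http.Post(u, "application/octet-stream", bytes.NewReader(blob))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("monolithic upload failed: %s", resp.Status)
	}
	return nil
}
```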
##### Initiate Resumable Blob Upload

```
POST /v2/<name>/blobs/uploads/
Host: <registry host>
Authorization: <scheme> <token>
Content-Length: 0
```

Initiate a resumable blob upload with an empty request body.

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.|
|`name`|path|Name of the target repository.|

###### On Success: Accepted

```
202 Accepted
Content-Length: 0
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-0
Docker-Upload-UUID: <uuid>
```

The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.|
|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|

###### On Failure: Invalid Name or Digest

```
400 Bad Request
```

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": "UNAUTHORIZED",
            "message": "access to the requested resource is not authorized",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to push to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
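Initiating the resumable flow amounts to a bodiless POST followed by capturing the `Location` header. In the Go sketch below, `resp.Location()` resolves a relative `Location` against the request URL; per the section that follows, clients should otherwise treat the returned URL as opaque.

```
package registry

import (
	"fmt"
	"net/http"
)

// startUpload initiates a resumable upload and returns the upload URL taken
// from the Location header.
func startUpload(base, name string) (string, error) {
	resp, err := http.Post(fmt.Sprintf("%s/v2/%s/blobs/uploads/", base, name), "", nil)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return "", fmt.Errorf("initiating upload: %s", resp.Status)
	}
	loc, err := resp.Location()
	if err != nil {
		return "", err
	}
	return loc.String(), nil
}
```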
### Blob Upload

Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only obtain them through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.

#### GET Blob Upload

Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.

```
GET /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
Authorization: <scheme> <token>
```

Retrieve the progress of the current upload, as reported by the `Range` header.

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`name`|path|Name of the target repository.|
|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|

###### On Success: Upload Progress

```
204 No Content
Range: 0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The upload is known and in progress. The last received offset is available in the `Range` header.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Range`|Range indicating the current progress of the upload.|
|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|

###### On Failure: Bad Request

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

There was an error processing the upload and it must be restarted.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": "UNAUTHORIZED",
            "message": "access to the requested resource is not authorized",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Not Found

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The upload is unknown to the registry. The upload must be restarted.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
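A status probe is the natural first step when resuming a push. The Go sketch below assumes the `Range` header always takes the `0-<offset>` form shown above; a defensive client would handle other shapes as well.

```
package registry

import (
	"fmt"
	"net/http"
)

// uploadOffset asks the registry how much of an upload it has received, by
// parsing the "0-<offset>" Range header of the 204 status response.
func uploadOffset(uploadURL string) (int64, error) {
	resp, err := http.Get(uploadURL)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return 0, fmt.Errorf("upload status: %s", resp.Status)
	}
	var start, end int64
	if _, err := fmt.Sscanf(resp.Header.Get("Range"), "%d-%d", &start, &end); err != nil {
		return 0, fmt.Errorf("unparseable Range %q: %v", resp.Header.Get("Range"), err)
	}
	return end, nil
}
```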
#### PATCH Blob Upload

Upload a chunk of data for the specified upload.

```
PATCH /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
Authorization: <scheme> <token>
Content-Range: <start of range>-<end of range>
Content-Length: <length of chunk>
Content-Type: application/octet-stream

<binary chunk>
```

Upload a chunk of data to the specified upload without completing the upload.

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.|
|`Content-Length`|header|Length of the chunk being uploaded, corresponding to the length of the request body.|
|`name`|path|Name of the target repository.|
|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|

###### On Success: Chunk Accepted

```
204 No Content
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
|`Range`|Range indicating the current progress of the upload.|
|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.|

###### On Failure: Bad Request

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

There was an error processing the upload and it must be restarted.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": "UNAUTHORIZED",
            "message": "access to the requested resource is not authorized",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to push to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Not Found

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The upload is unknown to the registry. The upload must be restarted.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |

###### On Failure: Requested Range Not Satisfiable

```
416 Requested Range Not Satisfiable
```

The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.

#### PUT Blob Upload

Complete the upload specified by `uuid`, optionally appending the body as the final chunk.

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Host: <registry host>
Authorization: <scheme> <token>
Content-Range: <start of range>-<end of range>
Content-Length: <length of data>
Content-Type: application/octet-stream

<binary data>
```

Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`Content-Range`|header|Range of bytes identifying the block of content represented by the body. Start must match the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.|
|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.|
|`name`|path|Name of the target repository.|
|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|
|`digest`|query|Digest of uploaded blob.|

###### On Success: Upload Complete

```
204 No Content
Location: <blob location>
Content-Range: <start of range>-<end of range>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Location`|The canonical location of the blob for retrieval.|
|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.|
|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|
|`Docker-Content-Digest`|Digest of the targeted content for the request.|

###### On Failure: Bad Request

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

There was an error processing the upload and it must be restarted.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": "UNAUTHORIZED",
            "message": "access to the requested resource is not authorized",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to push to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Not Found

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The upload is unknown to the registry. The upload must be restarted.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |

###### On Failure: Requested Range Not Satisfiable

```
416 Requested Range Not Satisfiable
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-<last valid range>
```

The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.|
|`Range`|Range indicating the current progress of the upload.|

#### DELETE Blob Upload

Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.

```
DELETE /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
Authorization: <scheme> <token>
Content-Length: 0
```

Cancel the upload specified by `uuid`.

The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|
|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.|
|`name`|path|Name of the target repository.|
|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.|

###### On Success: Upload Deleted

```
204 No Content
Content-Length: 0
```

The upload has been successfully deleted.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.|

###### On Failure: Bad Request

```
400 Bad Request
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

An error was encountered processing the delete. The client may ignore this error.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |

###### On Failure: Unauthorized

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": "UNAUTHORIZED",
            "message": "access to the requested resource is not authorized",
            "detail": ...
        },
        ...
    ]
}
```

The client does not have access to the repository.

The following headers will be returned on the response:

|Name|Description|
|----|-----------|
|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
|`Content-Length`|Length of the JSON error response body.|

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |

###### On Failure: Not Found

```
404 Not Found
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.

The error codes that may be included in the response body are enumerated below:

|Code|Message|Description|
|----|-------|-----------|
| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to push to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Requested Range Not Satisfiable - -``` -416 Requested Range Not Satisfiable -Location: /v2//blobs/uploads/ -Range: 0- -``` - -The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range indicating the current progress of the upload.| - - - - -#### DELETE Blob Upload - -Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. - - - -``` -DELETE /v2//blobs/uploads/ -Host: -Authorization: -Content-Length: 0 -``` - -Cancel the upload specified by `uuid`. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Upload Deleted - -``` -204 No Content -Content-Length: 0 -``` - -The upload has been successfully deleted. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -An error was encountered processing the delete. The client may ignore this error. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| --------|----|------|------------ -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - - - - -blob -mark :73 -data 28492 -# Docker Registry HTTP API V2 - -## Introduction - -The _Docker Registry HTTP API_ is the protocol to facilitate distribution of -images to the docker engine. It interacts with instances of the docker -registry, which is a service to manage information about docker images and -enable their distribution. The specification covers the operation of version 2 -of this API, known as _Docker Registry HTTP API V2_. - -While the V1 registry protocol is usable, there are several problems with the -architecture that have led to this new version. 
The main driver of this -specification these changes to the docker the image format, covered in -docker/docker#8093. The new, self-contained image manifest simplifies image -definition and improves security. This specification will build on that work, -leveraging new properties of the manifest format to improve performance, -reduce bandwidth usage and decrease the likelihood of backend corruption. - -For relevant details and history leading up to this specification, please see -the following issues: - -- [docker/docker#8093](https://github.com/docker/docker/issues/8093) -- [docker/docker#9015](https://github.com/docker/docker/issues/9015) -- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) - -### Scope - -This specification covers the URL layout and protocols of the interaction -between docker registry and docker core. This will affect the docker core -registry API and the rewrite of docker-registry. Docker registry -implementations may implement other API endpoints, but they are not covered by -this specification. - -This includes the following features: - -- Namespace-oriented URI Layout -- PUSH/PULL registry server for V2 image manifest format -- Resumable layer PUSH support -- V2 Client library implementation - -While authentication and authorization support will influence this -specification, details of the protocol will be left to a future specification. -Relevant header definitions and error codes are present to provide an -indication of what a client may encounter. - -#### Future - -There are features that have been discussed during the process of cutting this -specification. The following is an incomplete list: - -- Immutable image references -- Multiple architecture support -- Migration from v2compatibility representation - -These may represent features that are either out of the scope of this -specification, the purview of another specification or have been deferred to a -future version. - -### Use Cases - -For the most part, the use cases of the former registry API apply to the new -version. Differentiating use cases are covered below. - -#### Image Verification - -A docker engine instance would like to run verified image named -"library/ubuntu", with the tag "latest". The engine contacts the registry, -requesting the manifest for "library/ubuntu:latest". An untrusted registry -returns a manifest. Before proceeding to download the individual layers, the -engine verifies the manifest's signature, ensuring that the content was -produced from a trusted source and no tampering has occured. After each layer -is downloaded, the engine verifies the digest of the layer, ensuring that the -content matches that specified by the manifest. - -#### Resumable Push - -Company X's build servers lose connectivity to docker registry before -completing an image layer transfer. After connectivity returns, the build -server attempts to re-upload the image. The registry notifies the build server -that the upload has already been partially attempted. The build server -responds by only sending the remaining data to complete the image file. - -#### Resumable Pull - -Company X is having more connectivity problems but this time in their -deployment datacenter. When downloading an image, the connection is -interrupted before completion. The client keeps the partial data and uses http -`Range` requests to avoid downloading repeated data. 
#### Layer Upload De-duplication

Company Y's build system creates two identical docker layers from build
processes A and B. Build process A completes uploading the layer before B.
When process B attempts to upload the layer, the registry indicates that it's
not necessary because the layer is already known.

If process A and B upload the same layer at the same time, both operations
will proceed and the first to complete will be stored in the registry (Note:
we may modify this to prevent dogpile with some locking mechanism).

### Changes

The V2 specification has been written to work as a living document, specifying
only what is certain and leaving what is not specified open or to future
changes. Only non-conflicting additions should be made to the API and accepted
changes should avoid preventing future changes from happening.

This section should be updated when changes are made to the specification,
indicating what is different. Optionally, we may start marking parts of the
specification to correspond with the versions enumerated here.
- 2.0.1
  - Added support for immutable manifest references in manifest endpoints.
  - Deleting a manifest by tag has been deprecated.
  - Specified `Docker-Content-Digest` header for appropriate entities.
  - Added error code for unsupported operations.
- 2.0
  - This is the baseline specification.
    - -## Overview - -This section covers client flows and details of the API endpoints. The URI -layout of the new API is structured to support a rich authentication and -authorization model by leveraging namespaces. All endpoints will be prefixed -by the API version and the repository name: - - /v2// - -For example, an API endpoint that will work with the `library/ubuntu` -repository, the URI prefix will be: - - /v2/library/ubuntu/ - -This scheme provides rich access control over various operations and methods -using the URI prefix and http methods that can be controlled in variety of -ways. - -Classically, repository names have always been two path components where each -path component is less than 30 characters. The V2 registry API does not -enforce this. The rules for a repository name are as follows: - -1. A repository name is broken up into _path components_. A component of a - repository name must be at least two lowercase, alpha-numeric characters, - optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*` and the - matched result must be 2 or more characters in length. -2. The name of a repository must have at least two path components, separated - by a forward slash. -3. The total length of a repository name, including slashes, must be less the - 256 characters. - -These name requirements _only_ apply to the registry API and should accept a -superset of what is supported by other docker ecosystem components. - -All endpoints should support aggressive http caching, compression and range -headers, where appropriate. The new API attempts to leverage HTTP semantics -where possible but may break from standards to implement targeted features. - -For detail on individual endpoints, please see the [_Detail_](#detail) -section. - -### Errors - -Actionable failure conditions, covered in detail in their relevant sections, -are reported as part of 4xx responses, in a json response body. One or more -errors will be returned in the following format: - - { - "errors:" [{ - "code": , - "message": , - "detail": - }, - ... - ] - } - -The `code` field will be a unique identifier, all caps with underscores by -convention. The `message` field will be a human readable string. The optional -`detail` field may contain arbitrary json data providing information the -client can use to resolve the issue. - -While the client can take action on certain error codes, the registry may add -new error codes over time. All client implementations should treat unknown -error codes as `UNKNOWN`, allowing future error codes to be added without -breaking API compatibility. For the purposes of the specification error codes -will only be added and never removed. - -For a complete account of all error codes, please see the _Detail_ section. - -### API Version Check - -A minimal endpoint, mounted at `/v2/` will provide version support information -based on its response statuses. The request format is as follows: - - GET /v2/ - -If a `200 OK` response is returned, the registry implements the V2(.1) -registry API and the client may proceed safely with other V2 operations. -Optionally, the response may contain information about the supported paths in -the response body. The client should be prepared to ignore this data. - -If a `401 Unauthorized` response is returned, the client should take action -based on the contents of the "WWW-Authenticate" header and try the endpoint -again. 
Depending on access control setup, the client may still have to -authenticate against different resources, even if this check succeeds. - -If `404 Not Found` response status, or other unexpected status, is returned, -the client should proceed with the assumption that the registry does not -implement V2 of the API. - -### Pulling An Image - -An "image" is a combination of a JSON manifest and individual layer files. The -process of pulling an image centers around retrieving these two components. - -The first step in pulling an image is to retrieve the manifest. For reference, -the relevant manifest fields for the registry are the following: - - field | description | -----------|------------------------------------------------| -name | The name of the image. | -tag | The tag for this version of the image. | -fsLayers | A list of layer descriptors (including tarsum) | -signature | A JWS used to verify the manifest content | - -For more information about the manifest format, please see -[docker/docker#8093](https://github.com/docker/docker/issues/8093). - -When the manifest is in hand, the client must verify the signature to ensure -the names and layers are valid. Once confirmed, the client will then use the -tarsums to download the individual layers. Layers are stored in as blobs in -the V2 registry API, keyed by their tarsum digest. - -#### Pulling an Image Manifest - -The image manifest can be fetched with the following url: - -``` -GET /v2//manifests/ -``` - -The `name` and `reference` parameter identify the image and are required. The -reference may include a tag or digest. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful, the image -manifest will be returned, with the following format (see docker/docker#8093 -for details): - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": - } - -The client should verify the returned manifest signature for authenticity -before fetching layers. - -#### Pulling a Layer - -Layers are stored in the blob portion of the registry, keyed by tarsum digest. -Pulling a layer is carried out by a standard http request. The URL is as -follows: - - GET /v2//blobs/ - -Access to a layer will be gated by the `name` of the repository but is -identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an -opaque field, to be interpreted by the tarsum library. - -This endpoint may issue a 307 (302 for /blobs/uploads/ -``` - -The parameters of this request are the image namespace under which the layer -will be linked. Responses to this request are covered below. - -##### Existing Layers - -The existence of a layer can be checked via a `HEAD` request to the blob store -API. The request should be formatted as follows: - -``` -HEAD /v2//blobs/ -``` - -If the layer with the tarsum specified in `digest` is available, a 200 OK -response will be received, with no actual body content (this is according to -http specification). The response will look as follows: - -``` -200 OK -Content-Length: -Docker-Content-Digest: -``` - -When this response is received, the client can assume that the layer is -already available in the registry under the given name and should take no -further action to upload the layer. Note that the binary digests may differ -for the existing registry layer, but the tarsums will be guaranteed to match. 
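As a sketch of the existence check described above (assuming hypothetical
`registry`, `name` and `digest` values supplied by the caller):

```go
import (
	"fmt"
	"net/http"
)

// layerExists issues a HEAD request against the blob endpoint and reports
// whether the registry already knows the layer under the given name.
func layerExists(registry, name, digest string) (bool, error) {
	resp, err := http.Head(fmt.Sprintf("%s/v2/%s/blobs/%s", registry, name, digest))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		return true, nil // layer known: no upload necessary
	case http.StatusNotFound:
		return false, nil // layer must be uploaded
	default:
		return false, fmt.Errorf("unexpected status: %s", resp.Status)
	}
}
```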
##### Uploading the Layer

If the POST request is successful, a `202 Accepted` response will be returned
with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The rest of the upload process can be carried out with the returned url,
called the "Upload URL" from the `Location` header. All responses to the
upload url, whether sending data or getting status, will be in this format.
Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location`
header is specified, clients should treat it as an opaque url and should never
try to assemble it. While the `uuid` parameter may be an actual UUID, this
proposal imposes no constraints on the format and clients should never impose
any.

If clients need to correlate local upload state with remote upload state, the
contents of the `Docker-Upload-UUID` header should be used. Such an id can be
used to key the last used location header when implementing resumable uploads.

##### Upload Progress

The progress and chunk coordination of the upload process will be coordinated
through the `Range` header. While this is a non-standard use of the `Range`
header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use.
For an upload that just started, for an example with a 1000 byte layer file,
the `Range` header would be as follows:

```
Range: bytes=0-0
```

To get the status of an upload, issue a GET request to the upload URL:

```
GET /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
```

The response will be similar to the above, except will return 204 status:

```
204 No Content
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Docker-Upload-UUID: <uuid>
```

Note that the HTTP `Range` header byte ranges are inclusive and that will be
honored, even in non-standard use cases.

##### Monolithic Upload

A monolithic upload is simply a chunked upload with a single chunk and may be
favored by clients that would like to avoid the complexity of chunking. To
carry out a "monolithic" upload, one can simply put the entire content blob to
the provided URL:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

The "digest" parameter must be included with the PUT request. Please see the
_Completed Upload_ section for details on the parameters and expected
responses.

Additionally, the upload can be completed with a single `POST` request to
the uploads endpoint, including the "size" and "digest" parameters:

```
POST /v2/<name>/blobs/uploads/?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

On the registry service, this should allocate a download, accept and verify
the data and return the same response as the final chunk of an upload. If the
POST request fails collecting the data in any way, the registry should attempt
to return an error response to the client with the `Location` header providing
a place to continue the download.

The single `POST` method is provided for convenience and most clients should
implement `POST` + `PUT` to support reliable resume of uploads; a sketch of
this two-step flow follows.
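A minimal sketch of that `POST` + `PUT` flow, assuming the blob is small
enough to hold in memory and that the returned `Location` header is an
absolute URL (clients should otherwise resolve it against the request URL):

```go
import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

// pushBlob starts an upload with POST and completes it monolithically with a
// single PUT carrying the digest parameter.
func pushBlob(registry, name, digest string, blob []byte) error {
	resp, err := http.Post(registry+"/v2/"+name+"/blobs/uploads/", "application/octet-stream", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("upload not started: %s", resp.Status)
	}

	// Treat the upload URL as opaque; only append the digest parameter.
	u, err := url.Parse(resp.Header.Get("Location"))
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", digest)
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	put, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer put.Body.Close()
	if put.StatusCode != http.StatusCreated {
		return fmt.Errorf("upload not completed: %s", put.Status)
	}
	return nil
}
```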
##### Chunked Upload

To carry out an upload of a chunk, the client can specify a range header and
only include that part of the layer file:

```
PATCH /v2/<name>/blobs/uploads/<uuid>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Layer Chunk Binary Data>
```

There is no enforcement on layer chunk splits other than that the server must
receive them in order. The server may enforce a minimum chunk size. If the
server cannot accept the chunk, a `416 Requested Range Not Satisfiable`
response will be returned and will include a `Range` header indicating the
current status:

```
416 Requested Range Not Satisfiable
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-<last valid range>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

If this response is received, the client should resume from the "last valid
range" and upload the subsequent chunk. A 416 will be returned under the
following conditions:

- Invalid Content-Range header format
- Out of order chunk: the range of the next chunk must start immediately after
  the "last valid range" from the previous response.

When a chunk is accepted as part of the upload, a `202 Accepted` response will
be returned, including a `Range` header with the current upload status:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

##### Completed Upload

For an upload to be considered complete, the client must submit a `PUT`
request on the upload endpoint with a digest parameter. If it is not provided,
the upload will not be considered complete. The format for the final chunk
will be as follows:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Last Layer Chunk Binary Data>
```

Optionally, if all chunks have already been uploaded, a `PUT` request with a
`digest` parameter and zero-length body may be sent to complete and validate
the upload. Multiple "digest" parameters may be provided with different
digests. The server may verify none or all of them but _must_ notify the
client if the content is rejected.

When the last chunk is received and the layer has been validated, the client
will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<tarsum>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the accepted
layer file. The `Docker-Content-Digest` header returns the canonical digest of
the uploaded blob which may differ from the provided digest. Most clients may
ignore the value but if it is used, the client should verify the value against
the uploaded blob data.

###### Digest Parameter

The "digest" parameter is designed as an opaque parameter to support
verification of a successful transfer. The initial version of the registry API
will support a tarsum digest, in the standard tarsum format. For example, an
HTTP URI parameter might be as follows:

```
tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Given this parameter, the registry will verify that the provided content does
result in this tarsum. Optionally, the registry can support other digest
parameters for non-tarfile content stored as a layer. A regular hash digest
might be specified as follows:

```
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Such a parameter would be used to verify the binary content (as opposed to the
tar content) at the end of the upload process.
- -For the initial version, registry servers are only required to support the -tarsum format. - -##### Canceling an Upload - -An upload can be cancelled by issuing a DELETE request to the upload endpoint. -The format will be as follows: - -``` -DELETE /v2//blobs/uploads/ -``` - -After this request is issued, the upload uuid will no longer be valid and the -registry server will dump all intermediate data. While uploads will time out -if not completed, clients should issue this request if they encounter a fatal -error but still have the ability to issue an http request. - -##### Errors - -If an 502, 503 or 504 error is received, the client should assume that the -download can proceed due to a temporary condition, honoring the appropriate -retry mechanism. Other 5xx errors should be treated as terminal. - -If there is a problem with the upload, a 4xx error will be returned indicating -the problem. After receiving a 4xx response (except 416, as called out above), -the upload will be considered failed and the client should take appropriate -action. - -Note that the upload url will not be available forever. If the upload uuid is -unknown to the registry, a `404 Not Found` response will be returned and the -client must restart the upload process. - -#### Pushing an Image Manifest - -Once all of the layers for an image are uploaded, the client can upload the -image manifest. An image can be pushed using the following request format: - - PUT /v2//manifests/ - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": , - ... - } - -The `name` and `reference` fields of the response body must match those specified in -the URL. The `reference` field may be a "tag" or a "digest". - -If there is a problem with pushing the manifest, a relevant 4xx response will -be returned with a JSON error message. Please see the _PUT Manifest section -for details on possible error codes that may be returned. - -If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are -returned. The `detail` field of the error response will have a `digest` field -identifying the missing blob, which will be a tarsum. An error is returned for -each unknown blob. The response format is as follows: - - { - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": - } - }, - ... - ] - } - -#### Listing Image Tags - -It may be necessary to list all of the tags under a given repository. The tags -for an image repository can be retrieved with the following request: - - GET /v2//tags/list - -The response will be in the following format: - - 200 OK - Content-Type: application/json - - { - "name": , - "tags": [ - , - ... - ] - } - -For repositories with a large number of tags, this response may be quite -large, so care should be taken by the client when parsing the response to -reduce copying. - -### Deleting an Image - -An image may be deleted from the registry via its `name` and `reference`. A -delete may be issued with the following request format: - - DELETE /v2//manifests/ - -For deletes, `reference` *must* be a digest or the delete will fail. If the -image exists and has been successfully deleted, the following response will be -issued: - - 202 Accepted - Content-Length: None - -If the image had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -## Detail - -> **Note**: This section is still under construction. 
For the purposes of -> implementation, if any details below differ from the described request flows -> above, the section below should be corrected. When they match, this note -> should be removed. - -The behavior of the endpoints are covered in detail in this section, organized -by route and entity. All aspects of the request and responses are covered, -including headers, parameters and body formats. Examples of requests and their -corresponding responses, with success and failure, are enumerated. - -> **Note**: The sections on endpoint detail are arranged with an example -> request, a description of the request, followed by information about that -> request. - -A list of methods and URIs are covered in the table below: - -|Method|Path|Entity|Description| --------|----|------|------------ -{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} | -{{end}}{{end}} - -The detail for each endpoint is covered in the following sections. - -### Errors - -The error codes encountered via the API are enumerated in the following table: - -|Code|Message|Description| --------|----|------|------------ -{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} -{{end}} - -{{range $route := .RouteDescriptors}} -### {{.Entity}} - -{{.Description}} - -{{range $method := $route.Methods}} - -#### {{.Method}} {{$route.Entity}} - -{{.Description}} - -{{if .Requests}}{{range .Requests}}{{if .Name}} -##### {{.Name}}{{end}} - -``` -{{$method.Method}} {{$route.Path|prettygorilla}}{{if .QueryParameters}}?{{range .QueryParameters}}{{.Name}}={{.Format}}{{end}}{{end}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} - -{{if or .Headers .PathParameters .QueryParameters}} -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -{{range .Headers}}|`{{.Name}}`|header|{{.Description}}| -{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| -{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| -{{end}}{{end}} - -{{if .Successes}} -{{range .Successes}} -###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} - -{{if .Headers}}The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{end}}{{end}} - -{{if .Failures}} -{{range .Failures}} -###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Headers}} -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}} - -{{if .ErrorCodes}} -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| 
--------|----|------|------------ -{{range $err := .ErrorCodes}}| `{{$err}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | -{{end}} - -{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} - -{{end}} - -blob -mark :74 -data 17523 -# Docker Registry v2 authentication via central service - -Today a Docker Registry can run in standalone mode in which there are no -authorization checks. While adding your own HTTP authorization requirements in -a proxy placed between the client and the registry can give you greater access -control, we'd like a native authorization mechanism that's public key based -with access control lists managed separately with the ability to have fine -granularity in access control on a by-key, by-user, by-namespace, and -by-repository basis. In v1 this can be configured by specifying an -`index_endpoint` in the registry's config. Clients present tokens generated by -the index and tokens are validated on-line by the registry with every request. -This results in a complex authentication and authorization loop that occurs -with every registry operation. Some people are very familiar with this image: - -![index auth](https://docs.docker.com/static_files/docker_pull_chart.png) - -The above image outlines the 6-step process in accessing the Official Docker -Registry. - -1. Contact the Docker Hub to know where I should download “samalba/busybox” -2. Docker Hub replies: - a. samalba/busybox is on Registry A - b. here are the checksums for samalba/busybox (for all layers) - c. token -3. Contact Registry A to receive the layers for samalba/busybox (all of them to - the base image). Registry A is authoritative for “samalba/busybox” but keeps - a copy of all inherited layers and serve them all from the same location. -4. Registry contacts Docker Hub to verify if token/user is allowed to download - images. -5. Docker Hub returns true/false lettings registry know if it should proceed or - error out. -6. Get the payload for all layers. - -The goal of this document is to outline a way to eliminate steps 4 and 5 from -the above process by using cryptographically signed tokens and no longer -require the client to authenticate each request with a username and password -stored locally in plain text. - -The new registry workflow is more like this: - -![v2 registry auth](https://docs.google.com/drawings/d/1EHZU9uBLmcH0kytDClBv6jv6WR4xZjE8RKEUw1mARJA/pub?w=480&h=360) - -1. Attempt to begin a push/pull operation with the registry. -2. If the registry requires authorization it will return a `401 Unauthorized` - HTTP response with information on how to authenticate. -3. The registry client makes a request to the authorization service for a - signed JSON Web Token. -4. The authorization service returns a token. -5. The client retries the original request with the token embedded in the - request header. -6. The Registry authorizes the client and begins the push/pull session as - usual. - -## Requirements - -- Registry Clients capable of generating key pairs which can be used to - authenticate to an authorization server. -- An authorization server capable of managing user accounts, their public keys, - and access controls to their resources hosted by any given service (such as - repositories in a Docker Registry). -- A Docker Registry capable of trusting the authorization server to sign tokens - which clients can use for authorization and the ability to verify these - tokens for single use or for use during a sufficiently short period of time. 
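As an illustrative sketch of steps 2 through 5 of this workflow, not a
normative client: the challenge parsing that derives `tokenURL` from the
`WWW-Authenticate` header is elided, and the request is assumed to carry no
body so that it can be retried safely.

```go
import (
	"encoding/json"
	"net/http"
)

// doWithToken issues the request once and, on a 401, fetches a Bearer token
// from tokenURL before retrying with an Authorization header.
func doWithToken(req *http.Request, tokenURL string) (*http.Response, error) {
	resp, err := http.DefaultClient.Do(req)
	if err != nil || resp.StatusCode != http.StatusUnauthorized {
		return resp, err
	}
	resp.Body.Close()

	// Steps 3 and 4: ask the authorization service for a signed JSON Web Token.
	tokResp, err := http.Get(tokenURL)
	if err != nil {
		return nil, err
	}
	defer tokResp.Body.Close()

	var payload struct {
		Token string `json:"token"`
	}
	if err := json.NewDecoder(tokResp.Body).Decode(&payload); err != nil {
		return nil, err
	}

	// Step 5: retry the original request with the token embedded.
	req.Header.Set("Authorization", "Bearer "+payload.Token)
	return http.DefaultClient.Do(req)
}
```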
## Authorization Server Endpoint Descriptions

This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).

The described server is meant to serve as a user account and key manager and a
centralized access control list for resources hosted by other services which
wish to authenticate and manage authorizations using this service's accounts
and their public keys.

Such a service could be used by the official docker registry to authenticate
clients and verify their authorization to docker image repositories.

Docker will need to be updated to interact with an authorization server to get
an authorization token.

## How to authenticate

Today, registry clients first contact the index to initiate a push or pull.
For v2, clients should contact the registry first. If the registry server
requires authentication it will return a `401 Unauthorized` response with a
`WWW-Authenticate` header detailing how to authenticate to this registry.

For example, say I (username `jlhawn`) am attempting to push an image to the
repository `samalba/my-app`. For the registry to authorize this, I either need
`push` access to the `samalba/my-app` repository or `push` access to the whole
`samalba` namespace in general. The registry will first return this response:

```
HTTP/1.1 401 Unauthorized
WWW-Authenticate: Bearer realm="https://auth.docker.com/v2/token/",service="registry.docker.com",scope="repository:samalba/my-app:push"
```

This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3)

The client will then know to make a `GET` request to the URL
`https://auth.docker.com/v2/token/` using the `service` and `scope` values from
the `WWW-Authenticate` header.

## Requesting a Token

#### Query Parameters
- `service`: The name of the service which hosts the resource.
- `scope`: The resource in question, formatted as one of the space-delimited
  entries from the `scope` parameters from the `WWW-Authenticate` header shown
  above. This query parameter should be specified multiple times if there is
  more than one `scope` entry from the `WWW-Authenticate` header. The above
  example would be specified as: `scope=repository:samalba/my-app:push` (see
  the sketch after this list).
- `account`: The name of the account which the client is acting as. Optional
  if it can be inferred from client authentication.
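As referenced in the `scope` entry above, a small sketch of assembling the
token request URL; the `realm`, `service`, `account` and `scopes` values come
from the challenge, and the function name is illustrative only:

```go
import "net/url"

// tokenRequestURL builds the query for the token endpoint; scope is added
// once per entry so that it may repeat in the query string.
func tokenRequestURL(realm, service, account string, scopes []string) string {
	v := url.Values{}
	v.Set("service", service)
	if account != "" {
		v.Set("account", account)
	}
	for _, scope := range scopes {
		v.Add("scope", scope)
	}
	return realm + "?" + v.Encode()
}
```

For the challenge above, `tokenRequestURL("https://auth.docker.com/v2/token/",
"registry.docker.com", "jlhawn", []string{"repository:samalba/my-app:push"})`
yields a URL equivalent to the example request shown below (parameter order
and percent-escaping may differ).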
    - -#### Description - -Requests an authorization token for access to a specific resource hosted by a -specific service provider. Requires the client to authenticate either using a -TLS client certificate or using basic authentication (or any other kind of -digest/challenge/response authentication scheme if the client doesn't support -TLS client certs). If the key in the client certificate is linked to an account -then the token is issued for that account key. If the key in the certificate is -linked to multiple accounts then the client must specify the `account` query -parameter. The returned token is in JWT (JSON Web Token) format, signed using -the authorization server's private key. - -#### Example - -For this example, the client makes an HTTP request to the following endpoint -over TLS using a client certificate with the server being configured to allow a -non-verified issuer during the handshake (i.e., a self-signed client cert is -okay). - -``` -GET /v2/token/?service=registry.docker.com&scope=repository:samalba/my-app:push&account=jlhawn HTTP/1.1 -Host: auth.docker.com -``` - -The server first inspects the client certificate to extract the subject key and -lookup which account it is associated with. The client is now authenticated -using that account. - -The server next searches its access control list for the account's access to -the repository `samalba/my-app` hosted by the service `registry.docker.com`. - -The server will now construct a JSON Web Token to sign and return. A JSON Web -Token has 3 main parts: - -1. Headers - - The header of a JSON Web Token is a standard JOSE header. The "typ" field - will be "JWT" and it will also contain the "alg" which identifies the - signing algorithm used to produce the signature. It will also usually have - a "kid" field, the ID of the key which was used to sign the token. - - Here is an example JOSE Header for a JSON Web Token (formatted with - whitespace for readability): - - ``` - { - "typ": "JWT", - "alg": "ES256", - "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6" - } - ``` - - It specifies that this object is going to be a JSON Web token signed using - the key with the given ID using the Elliptic Curve signature algorithm - using a SHA256 hash. - -2. Claim Set - - The Claim Set is a JSON struct containing these standard registered claim - name fields: - -
   - `iss` (Issuer): The issuer of the token, typically the fqdn of the
     authorization server.
   - `sub` (Subject): The subject of the token; the id of the client which
     requested it.
   - `aud` (Audience): The intended audience of the token; the id of the
     service which will verify the token to authorize the client/subject.
   - `exp` (Expiration): The token should only be considered valid up to this
     specified date and time.
   - `nbf` (Not Before): The token should not be considered valid before this
     specified date and time.
   - `iat` (Issued At): Specifies the date and time which the Authorization
     server generated this token.
   - `jti` (JWT ID): A unique identifier for this token. Can be used by the
     intended audience to prevent replays of the token.

   The Claim Set will also contain a private claim name unique to this
   authorization server specification:

   - `access`: An array of access entry objects with the following fields:
     - `type`: The type of resource hosted by the service.
     - `name`: The name of the resource of the given type hosted by the
       service.
     - `actions`: An array of strings which give the actions authorized on
       this resource.
    - - Here is an example of such a JWT Claim Set (formatted with whitespace for - readability): - - ``` - { - "iss": "auth.docker.com", - "sub": "jlhawn", - "aud": "registry.docker.com", - "exp": 1415387315, - "nbf": 1415387015, - "iat": 1415387015, - "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws", - "access": [ - { - "type": "repository", - "name": "samalba/my-app", - "actions": [ - "push" - ] - } - ] - } - ``` - -3. Signature - - The authorization server will produce a JOSE header and Claim Set with no - extraneous whitespace, i.e., the JOSE Header from above would be - - ``` - {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"} - ``` - - and the Claim Set from above would be - - ``` - {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push"]}]} - ``` - - The utf-8 representation of this JOSE header and Claim Set are then - url-safe base64 encoded (sans trailing '=' buffer), producing: - - ``` - eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0 - ``` - - for the JOSE Header and - - ``` - eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 - ``` - - for the Claim Set. These two are concatenated using a '.' character, - yielding the string: - - ``` - eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 - ``` - - This is then used as the payload to a the `ES256` signature algorithm - specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA) - draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4) - - This example signature will use the following ECDSA key for the server: - - ``` - { - "kty": "EC", - "crv": "P-256", - "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6", - "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA", - "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q", - "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc" - } - ``` - - A resulting signature of the above payload using this key is: - - ``` - QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w - ``` - - Concatenating all of these together with a `.` character gives the - resulting JWT: - - ``` - 
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w - ``` - -This can now be placed in an HTTP response and returned to the client to use to -authenticate to the audience service: - - -``` -HTTP/1.1 200 OK -Content-Type: application/json - -{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} -``` - -## Using the signed token - -Once the client has a token, it will try the registry request again with the -token placed in the HTTP `Authorization` header like so: - -``` -Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw -``` - -This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) - -## Verifying the token - -The registry must now verify the token presented by the user by inspecting the -claim set within. The registry will: - -- Ensure that the issuer (`iss` claim) is an authority it trusts. -- Ensure that the registry identifies as the audience (`aud` claim). -- Check that the current time is between the `nbf` and `exp` claim times. -- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has - not been seen before. - - To enforce this, the registry may keep a record of `jti`s it has seen for - up to the `exp` time of the token to prevent token replays. -- Check the `access` claim value and use the identified resources and the list - of actions authorized to determine whether the token grants the required - level of access for the operation the client is attempting to perform. -- Verify that the signature of the token is valid. - -At no point in this process should the registry need to call back to -the authorization server. If anything, it would only need to update a list of -trusted public keys for verifying token signatures or use a separate API -(still to be spec'd) to add/update resource records on the authorization -server. 
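To make the offline nature of these checks concrete, here is a hedged Go
sketch covering only the issuer, audience and validity-window checks;
signature verification and `jti` replay tracking are deliberately elided since
they require key material and state. The struct and function names are
illustrative, not part of any specification.

```go
import (
	"fmt"
	"time"
)

// ClaimSet mirrors the registered claims described above.
type ClaimSet struct {
	Issuer     string `json:"iss"`
	Subject    string `json:"sub"`
	Audience   string `json:"aud"`
	Expiration int64  `json:"exp"`
	NotBefore  int64  `json:"nbf"`
	IssuedAt   int64  `json:"iat"`
	JWTID      string `json:"jti"`
}

// verifyClaims applies the issuer, audience and validity-window checks.
func verifyClaims(c ClaimSet, trustedIssuer, registryID string, now time.Time) error {
	if c.Issuer != trustedIssuer {
		return fmt.Errorf("untrusted issuer: %q", c.Issuer)
	}
	if c.Audience != registryID {
		return fmt.Errorf("token not intended for this audience: %q", c.Audience)
	}
	if t := now.Unix(); t < c.NotBefore || t > c.Expiration {
		return fmt.Errorf("token not valid at %v", now)
	}
	return nil
}
```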
- -blob -mark :75 -data 2220 -# Docker Distribution JSON Canonicalization - -To provide consistent content hashing of JSON objects throughout Docker -Distribution APIs, the specification defines a canonical JSON format. Adopting -such a canonicalization also aids in caching JSON responses. - -## Rules - -Compliant JSON should conform to the following rules: - -1. All generated JSON should comply with [RFC - 7159](http://www.ietf.org/rfc/rfc7159.txt). -2. Resulting "JSON text" shall always be encoded in UTF-8. -3. Unless a canonical key order is defined for a particular schema, object - keys shall always appear in lexically sorted order. -4. All whitespace between tokens should be removed. -5. No "trailing commas" are allowed in object or array definitions. - -## Examples - -The following is a simple example of a canonicalized JSON string: - -```json -{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]} -``` - -## Reference - -### Other Canonicalizations - -The OLPC project specifies [Canonical -JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in -[TUF](http://theupdateframework.com/), which may be used with other -distribution-related protocols, this alternative format has been proposed in -case the original source changes. Specifications complying with either this -specification or an alternative should explicitly call out the -canonicalization format. Except for key ordering, this specification is mostly -compatible. - -### Go - -In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library -will emit canonical JSON by default. Simply using `json.Marshal` will suffice -in most cases: - -```go -incoming := map[string]interface{}{ - "asdf": 1, - "qwer": []interface{}{}, - "zxcv": []interface{}{ - map[string]interface{}{}, - true, - int(1e9), - "tyui", - }, -} - -canonical, err := json.Marshal(incoming) -if err != nil { - // ... handle error -} -``` - -To apply canonical JSON format spacing to an existing serialized JSON buffer, one -can use -[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65) -with the following arguments: - -```go -incoming := getBytes() -var canonical bytes.Buffer -if err := json.Indent(&canonical, incoming, "", ""); err != nil { - // ... handle error -} -``` - -blob -mark :76 -data 7938 -# Image Manifest Version 2, Schema 1 - -This document outlines the format of of the V2 image manifest. Image manifests -describe the various constituents of a docker image. Image manifests can be - serialized to JSON format with the following media types: - -Manifest Type | Media Type -------------- | ------------- -manifest | "application/vnd.docker.distribution.manifest.v1+json" -signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws" - -*Note that "application/json" will also be accepted for schema 1.* - -References: - - - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015) - - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093) - -## *Manifest* Field Descriptions - -Manifest provides the base accessible fields for working with V2 image format - in the registry. - -- **`name`** *string* - - name is the name of the image's repository - -- **`tag`** *string* - - tag is the tag of the image - -- **`architecture`** *string* - - architecture is the host architecture on which this image is intended to - run. 
This is for information purposes and not currently used by the engine - -- **`fsLayers`** *array* - - fsLayers is a list of filesystem layer blob sums contained in this image. - - An fsLayer is a struct consisting of the following fields - - **`blobSum`** *digest.Digest* - - blobSum is the digest of the referenced filesystem image layer. A - digest can be a tarsum or sha256 hash. - - -- **`history`** *array* - - history is a list of unstructured historical data for v1 compatibility. - - history is a struct consisting of the following fields - - **`v1Compatibility`** string - - V1Compatibility is the raw V1 compatibility information. This will - contain the JSON object describing the V1 of this image. - -- **`schemaVersion`** *int* - - SchemaVersion is the image manifest schema that this image follows. - -## Signed Manifests - -Signed manifests provides an envelope for a signed image manifest. A signed -manifest consists of an image manifest along with an additional field -containing the signature of the manifest. - -The docker client can verify signed manifests and displays a message to the user. - -### Signing Manifests - -Image manifests can be signed in two different ways: with a *libtrust* private - key or an x509 certificate chain. When signing with an x509 certificate chain, - the public key of the first element in the chain must be the public key - corresponding with the sign key. - -### Signed Manifest Field Description - -Signed manifests include an image manifest and and a list of signatures generated -by *libtrust*. A signature consists of the following fields: - - -- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)* - - A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html) - -- **`signature`** *string* - - A signature for the image manifest, signed by a *libtrust* private key - -- **`protected`** *string* - - The signed protected header - -## Example Manifest - -*Example showing the official 'hello-world' image manifest.* - -``` -{ - "name": "hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - ], - "schemaVersion": 1, - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", - "kty": "EC", - "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", - "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" - }, - "alg": "ES256" - }, - "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", - "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" - } - ] -} - -``` - -blob -mark :77 -data 1002 -# Microsoft Azure storage driver - - -An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage. 
## Parameters

The following parameters must be used to authenticate and configure the storage driver (case-sensitive):

* `accountname`: Name of the Azure Storage Account.
* `accountkey`: Primary or Secondary Key for the Storage Account.
* `container`: Name of the root storage container in which all registry data will be stored. Must comply with the storage container name [requirements][create-container-api].
* `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example, the realm for "Azure in China" would be `core.chinacloudapi.cn`, and the realm for "Azure Government" would be `core.usgovcloudapi.net`.


[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/
[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
blob
mark :78
data 276
# Filesystem storage driver

An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.

## Parameters

`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/tmp/registry/storage`.

blob
mark :79
data 265
# In-memory storage driver

An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage.

**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing.

## Parameters

None

blob
mark :80
data 1891
# S3 storage driver

An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage.

## Parameters

`accesskey`: Your aws access key.

`secretkey`: Your aws secret key.

**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials.

`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html

`bucket`: The name of your s3 bucket where you wish to store objects (needs to already be created prior to driver initialization).

`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).

`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.

`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false).

`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to s3. The default is 10 MB. Keep in mind that the minimum part size for s3 is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to s3.

`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
- -blob -mark :81 -data 5119 -Docker-Registry Storage Driver -============================== - -This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers. - -Provided Drivers -================ - -This storage driver package comes bundled with several drivers: - -- [inmemory](storage-drivers/inmemory): A temporary storage driver using a local inmemory map. This exists solely for reference and testing. -- [filesystem](storage-drivers/filesystem): A local storage driver configured to use a directory tree in the local filesystem. -- [s3](storage-drivers/s3): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket. -- [azure](storage-drivers/azure): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). - -Storage Driver API -================== - -The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems. - -Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key. - -Storage drivers are intended (but not required) to be written in go, providing compile-time validation of the `storagedriver.StorageDriver` interface, although an IPC driver wrapper means that it is not required for drivers to be included in the compiled registry. The `storagedriver/ipc` package provides a client/server protocol for running storage drivers provided in external executables as a managed child server process. - -Driver Selection and Configuration -================================== - -The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package. - -Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the executable name "registry-storage-\" and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`. - -Driver Contribution -=================== - -## Writing new storage drivers -To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system and as a distributable IPC server executable. - -### In-process drivers -Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase. 
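A minimal sketch of this factory flow, assuming the `storagedriver/factory`
package layout described in this document and a parameters map of
`map[string]interface{}`; treat the import paths and the exact `Create`
signature as illustrative. The s3 parameter names come from the storage driver
docs above, and the values are placeholders.

```go
import (
	"log"

	"github.com/docker/distribution/storagedriver/factory"
	// Imported for its side effect: the driver's init calls factory.Register.
	_ "github.com/docker/distribution/storagedriver/s3"
)

func newS3Driver() {
	params := map[string]interface{}{
		"accesskey":     "<your aws access key>", // placeholder credentials
		"secretkey":     "<your aws secret key>",
		"region":        "us-east-1",
		"bucket":        "my-registry-bucket",
		"rootdirectory": "", // store under the bucket root
	}

	driver, err := factory.Create("s3", params)
	if err != nil {
		log.Fatal(err) // e.g. an InvalidStorageDriverError for an unknown name
	}
	_ = driver // use as a storagedriver.StorageDriver
}
```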
- -### Out-of-process drivers -As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `storagedriver/filesystem/registry-storage-filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver. - -Out-of-process drivers must also implement the `ipc.IPCStorageDriver` interface, which exposes a `Version` check for the storage driver. This is used to validate storage driver api compatibility at driver load-time. - -## Testing -Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively. - -## Drivers written in other languages -Although storage drivers are strongly recommended to be written in go for consistency, compile-time validation, and support, the IPC framework allows for a level of language-agnosticism. Non-go drivers must implement the storage driver protocol by mimicing StorageDriverServer in `storagedriver/ipc/server.go`. As the IPC framework is a layer on top of [docker/libchan](https://github.com/docker/libchan), this currently limits language support to Java via [ndeloof/chan](https://github.com/ndeloof/jchan) and Javascript via [GraftJS/jschan](https://github.com/GraftJS/jschan), although contributions to the libchan project are welcome. - -blob -mark :82 -data 3065 -package distribution - -import ( - "fmt" - "strings" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -var ( - // ErrLayerExists returned when layer already exists - ErrLayerExists = fmt.Errorf("layer exists") - - // ErrLayerTarSumVersionUnsupported when tarsum is unsupported version. - ErrLayerTarSumVersionUnsupported = fmt.Errorf("unsupported tarsum version") - - // ErrLayerUploadUnknown returned when upload is not found. - ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown") - - // ErrLayerClosed returned when an operation is attempted on a closed - // Layer or LayerUpload. - ErrLayerClosed = fmt.Errorf("layer closed") -) - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown respository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. Reason may set, indicating the cause of invalidity. -type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. -type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrUnknownManifestRevision is returned when a manifest cannot be found by -// revision within a repository. 
-type ErrUnknownManifestRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrUnknownManifestRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. -type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return fmt.Sprintf("unverified manifest") -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - -// ErrUnknownLayer is returned when a layer cannot be found. -type ErrUnknownLayer struct { - FSLayer manifest.FSLayer -} - -func (err ErrUnknownLayer) Error() string { - return fmt.Sprintf("unknown layer %v", err.FSLayer.BlobSum) -} - -// ErrLayerInvalidDigest is returned when the tarsum check fails. -type ErrLayerInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrLayerInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -blob -mark :83 -data 848 -package api - -import ( - "errors" - "net/http" - - "github.com/docker/distribution/health" -) - -var ( - updater = health.NewStatusUpdater() -) - -// DownHandler sets the manual_http_status check to an error state, taking the service out of rotation -func DownHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - updater.Update(errors.New("Manual Check")) - } else { - w.WriteHeader(http.StatusNotFound) - } -} - -// UpHandler clears the manual_http_status check, putting the service back into rotation -func UpHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - updater.Update(nil) - } else { - w.WriteHeader(http.StatusNotFound) - } -} - -// init sets up the two endpoints to bring the service up and down -func init() { - health.Register("manual_http_status", updater) - http.HandleFunc("/debug/health/down", DownHandler) - http.HandleFunc("/debug/health/up", UpHandler) -} - -blob -mark :84 -data 2200 -package api - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution/health" -) - -// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint -// /debug/health/down with METHOD GET returns a 404 -func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - DownHandler(recorder, req) - - if recorder.Code != 404 { - t.Errorf("Did not get a 404.") - } -} - -// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint -// /debug/health/up with METHOD GET returns a 404 -func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - UpHandler(recorder, req) - - if recorder.Code != 404 { - t.Errorf("Did not get a 404.") - } -} - -// TestPOSTDownHandlerChangeStatus ensures the endpoint
/debug/health/down changes -// the status code of the response to 503 -// This test is order dependent, and should come before TestPOSTUpHandlerChangeStatus -func TestPOSTDownHandlerChangeStatus(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - DownHandler(recorder, req) - - if recorder.Code != 200 { - t.Errorf("Did not get a 200.") - } - - if len(health.CheckStatus()) != 1 { - t.Errorf("DownHandler didn't add an error check.") - } -} - -// TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes -// the status code of the response to 200 -func TestPOSTUpHandlerChangeStatus(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - UpHandler(recorder, req) - - if recorder.Code != 200 { - t.Errorf("Did not get a 200.") - } - - if len(health.CheckStatus()) != 0 { - t.Errorf("UpHandler didn't remove the error check.") - } -} - -blob -mark :85 -data 916 -package checks - -import ( - "errors" - "net/http" - "os" - "strconv" - - "github.com/docker/distribution/health" -) - -// FileChecker checks the existence of a file and returns an error -// if the file exists, taking the application out of rotation -func FileChecker(f string) health.Checker { - return health.CheckFunc(func() error { - if _, err := os.Stat(f); err == nil { - return errors.New("file exists") - } - return nil - }) -} - -// HTTPChecker does a HEAD request and verifies that the HTTP status -// code returned is 200, taking the application out of rotation -// otherwise -func HTTPChecker(r string) health.Checker { - return health.CheckFunc(func() error { - response, err := http.Head(r) - if err != nil { - return errors.New("error while checking: " + r) - } - if response.StatusCode != http.StatusOK { - return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode)) - } - return nil - }) -} - -blob -mark :86 -data 5416 -// Package health provides a generic health checking framework. -// The health package works expvar style. By importing the package the debug -// server gets a "/debug/health" endpoint that returns the current -// status of the application. -// If there are no errors, "/debug/health" will return an HTTP 200 status, -// together with an empty JSON reply "{}". If there are any checks -// with errors, the JSON reply will include all the failed checks, and the -// response will have an HTTP 503 status. -// -// A Check can be run either synchronously or asynchronously. We recommend -// that most checks are registered as asynchronous checks, so a call to the -// "/debug/health" endpoint always returns immediately. This pattern is -// particularly useful for checks that verify upstream connectivity or -// database status, since they might take a long time to return/timeout. -// -// Installing -// -// To install health, just import it in your application: -// -// import "github.com/docker/distribution/health" -// -// You can also (optionally) import "health/api" that will add two convenience -// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add -// "manual" checks that allow the service to quickly be brought in/out of -// rotation.
- -// import _ "github.com/docker/distribution/registry/health/api" -// -// # curl localhost:5001/debug/health -// {} -// # curl -X POST localhost:5001/debug/health/down -// # curl localhost:5001/debug/health -// {"manual_http_status":"Manual Check"} -// -// After importing these packages to your main application, you can start -// registering checks. -// -// Registering Checks -// -// The recommended way of registering checks is using a periodic Check. -// PeriodicChecks run on a certain schedule and asynchronously update the -// status of the check. This allows "CheckStatus()" to return without blocking -// on an expensive check. -// -// A trivial example of a check that runs every 5 seconds and takes our -// server out of rotation if the current minute is even, could be added as follows: -// -// func currentMinuteEvenCheck() error { -// m := time.Now().Minute() -// if m%2 == 0 { -// return errors.New("Current minute is even!") -// } -// return nil -// } -// -// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5) -// -// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to -// implement the exact same check, but add a threshold of failures after which -// the check will be unhealthy. This is particularly useful for flaky Checks, -// ensuring some stability of the service when handling them. -// -// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4) -// -// The lowest-level way to interact with the health package is calling -// "Register" directly. Register allows you to pass in an arbitrary string and -// something that implements "Checker" and runs your check. If your check -// returns nil, it is considered healthy; otherwise it -// will make the health check endpoint "/debug/health" start returning a 503 -// and list the specific check that failed. -// -// Assuming you wish to register a method called "currentMinuteEvenCheck() -// error" you could do that by doing: -// -// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck)) -// -// CheckFunc is a convenience type that implements Checker. -// -// Another way of registering a check could be by using an anonymous function -// and the convenience method RegisterFunc. An example that makes the status -// endpoint always return an error: -// -// health.RegisterFunc("my_check", func() error { -// return errors.New("This is an error!") -// }) -// -// Examples -// -// You could also use the health checker mechanism to ensure your application -// only comes up if certain conditions are met, or to allow the developer to -// take the service out of rotation immediately. An example that checks -// database connectivity and immediately takes the server out of rotation on -// err: -// -// updater = health.NewStatusUpdater() -// health.RegisterFunc("database_check", func() error { -// return updater.Check() -// }) -// -// conn, err := Connect(...) // database call here -// if err != nil { -// updater.Update(errors.New("Error connecting to the database: " + err.Error())) -// } -// -// You can also use the predefined Checkers that come included with the health -// package. First, import the checks: -// -// import "github.com/docker/distribution/health/checks" -// -// After that you can make use of any of the provided checks.
An example of -// using a `FileChecker` to take the application out of rotation if a certain -// file exists can be done as follows: -// -// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5)) -// -// After registering the check, it is trivial to take an application out of -// rotation from the console: -// -// # curl localhost:5001/debug/health -// {} -// # touch /tmp/disable -// # curl localhost:5001/debug/health -// {"fileChecker":"file exists"} -// -// You could also test the connectivity to a downstream service by using an -// "HTTPChecker", but ensure that you only mark the test unhealthy if there -// are a minimum of two failures in a row: -// -// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2)) -package health - -blob -mark :87 -data 5345 -package health - -import ( - "encoding/json" - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - registeredChecks = make(map[string]Checker) -) - -// Checker is the interface for a Health Checker -type Checker interface { - // Check returns nil if the service is okay. - Check() error -} - -// CheckFunc is a convenience type to create functions that implement -// the Checker interface -type CheckFunc func() error - -// Check implements the Checker interface to allow for any func() error method -// to be passed as a Checker -func (cf CheckFunc) Check() error { - return cf() -} - -// Updater implements a health check that is explicitly set. -type Updater interface { - Checker - - // Update updates the current status of the health check. - Update(status error) -} - -// updater implements Checker and Updater, providing an asynchronous Update -// method. -// This allows us to have a Checker whose Check() call returns immediately, -// not blocking on a potentially expensive check. -type updater struct { - mu sync.Mutex - status error -} - -// Check implements the Checker interface -func (u *updater) Check() error { - u.mu.Lock() - defer u.mu.Unlock() - - return u.status -} - -// Update implements the Updater interface, allowing asynchronous access to -// the status of a Checker. -func (u *updater) Update(status error) { - u.mu.Lock() - defer u.mu.Unlock() - - u.status = status -} - -// NewStatusUpdater returns a new updater -func NewStatusUpdater() Updater { - return &updater{} -} - -// thresholdUpdater implements Checker and Updater, providing an asynchronous Update -// method. -// This allows us to have a Checker whose Check() call returns immediately, -// not blocking on a potentially expensive check. -type thresholdUpdater struct { - mu sync.Mutex - status error - threshold int - count int -} - -// Check implements the Checker interface -func (tu *thresholdUpdater) Check() error { - tu.mu.Lock() - defer tu.mu.Unlock() - - if tu.count >= tu.threshold { - return tu.status - } - - return nil -} - -// Update implements the Updater interface, allowing asynchronous -// access to the status of a Checker.
-func (tu *thresholdUpdater) Update(status error) { - tu.mu.Lock() - defer tu.mu.Unlock() - - if status == nil { - tu.count = 0 - } else if tu.count < tu.threshold { - tu.count++ - } - - tu.status = status -} - -// NewThresholdStatusUpdater returns a new thresholdUpdater -func NewThresholdStatusUpdater(t int) Updater { - return &thresholdUpdater{threshold: t} -} - -// PeriodicChecker wraps an updater to provide a periodic checker -func PeriodicChecker(check Checker, period time.Duration) Checker { - u := NewStatusUpdater() - go func() { - t := time.NewTicker(period) - for { - <-t.C - u.Update(check.Check()) - } - }() - - return u -} - -// PeriodicThresholdChecker wraps an updater to provide a periodic checker that -// uses a threshold before it changes status -func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker { - tu := NewThresholdStatusUpdater(threshold) - go func() { - t := time.NewTicker(period) - for { - <-t.C - tu.Update(check.Check()) - } - }() - - return tu -} - -// CheckStatus returns a map with all the current health check errors -func CheckStatus() map[string]string { - mutex.RLock() - defer mutex.RUnlock() - statusKeys := make(map[string]string) - for k, v := range registeredChecks { - err := v.Check() - if err != nil { - statusKeys[k] = err.Error() - } - } - - return statusKeys -} - -// Register associates the checker with the provided name. It panics if a -// check is already registered under that name. -func Register(name string, check Checker) { - mutex.Lock() - defer mutex.Unlock() - _, ok := registeredChecks[name] - if ok { - panic("Check already exists: " + name) - } - registeredChecks[name] = check -} - -// RegisterFunc allows the convenience of registering a checker directly -// from an arbitrary func() error -func RegisterFunc(name string, check func() error) { - Register(name, CheckFunc(check)) -} - -// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker -// from an arbitrary func() error -func RegisterPeriodicFunc(name string, check func() error, period time.Duration) { - Register(name, PeriodicChecker(CheckFunc(check), period)) -} - -// RegisterPeriodicThresholdFunc allows the convenience of registering a -// PeriodicChecker from an arbitrary func() error -func RegisterPeriodicThresholdFunc(name string, check func() error, period time.Duration, threshold int) { - Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) -} - -// StatusHandler returns a JSON blob with all the currently registered Health Checks -// and their corresponding status. -// Returns 503 if any Error status exists, 200 otherwise -func StatusHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - checksStatus := CheckStatus() - // If there is an error, return 503 - if len(checksStatus) != 0 { - w.WriteHeader(http.StatusServiceUnavailable) - } - err := json.NewEncoder(w).Encode(checksStatus) - - // Encoding the JSON failed.
Return a generic error message - if err != nil { - w.Write([]byte("{\"server_error\": \"could not encode health check status\"}")) - } - } else { - w.WriteHeader(http.StatusNotFound) - } -} - -// init registers the global /debug/health API endpoint -func init() { - http.HandleFunc("/debug/health", StatusHandler) -} - -blob -mark :88 -data 1151 -package health - -import ( - "errors" - "net/http" - "net/http/httptest" - "testing" -) - -// TestReturns200IfThereAreNoChecks ensures that the result code of the health -// endpoint is 200 if there are no currently registered checks. -func TestReturns200IfThereAreNoChecks(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - StatusHandler(recorder, req) - - if recorder.Code != 200 { - t.Errorf("Did not get a 200.") - } -} - -// TestReturns503IfThereAreErrorChecks ensures that the result code of the -// health endpoint is 503 if there are health checks with errors -func TestReturns503IfThereAreErrorChecks(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - // Create a manual error - Register("some_check", CheckFunc(func() error { - return errors.New("This Check did not succeed") - })) - - StatusHandler(recorder, req) - - if recorder.Code != 503 { - t.Errorf("Did not get a 503.") - } -} - -blob -mark :89 -data 3867 -package manifest - -import ( - "encoding/json" - - "github.com/docker/distribution/digest" - "github.com/docker/libtrust" -) - -// TODO(stevvooe): When we rev the manifest format, the contents of this -// package should be moved to manifest/v1. - -const ( - // ManifestMediaType specifies the mediaType for the current version. Note - // that for schema version 1, the media type is optionally - // "application/json". - ManifestMediaType = "application/vnd.docker.distribution.manifest.v1+json" -) - -// Versioned provides a struct with just the manifest schemaVersion. Incoming -// content with unknown schema version can be decoded against this struct to -// check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` -} - -// Manifest provides the base accessible fields for working with V2 image -// format in the registry. -type Manifest struct { - Versioned - - // Name is the name of the image's repository - Name string `json:"name"` - - // Tag is the tag of the image specified by this manifest - Tag string `json:"tag"` - - // Architecture is the host architecture on which this image is intended to - // run - Architecture string `json:"architecture"` - - // FSLayers is a list of filesystem layer blobSums contained in this image - FSLayers []FSLayer `json:"fsLayers"` - - // History is a list of unstructured historical data for v1 compatibility - History []History `json:"history"` -} - -// SignedManifest provides an envelope for a signed image manifest, including -// the format sensitive raw bytes. -type SignedManifest struct { - Manifest - - // Raw is the byte representation of the SignedManifest, used for signature - // verification. The value of Raw must be used directly during - // serialization, or the signature check will fail. The manifest byte - // representation cannot change or it will have to be re-signed.
- Raw []byte `json:"-"` -} - -// UnmarshalJSON populates a new SignedManifest struct from JSON data. -func (sm *SignedManifest) UnmarshalJSON(b []byte) error { - var manifest Manifest - if err := json.Unmarshal(b, &manifest); err != nil { - return err - } - - sm.Manifest = manifest - sm.Raw = make([]byte, len(b), len(b)) - copy(sm.Raw, b) - - return nil -} - -// Payload returns the raw, signed content of the signed manifest. The -// contents can be used to calculate the content identifier. -func (sm *SignedManifest) Payload() ([]byte, error) { - jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - return nil, err - } - - // Resolve the payload in the manifest. - return jsig.Payload() -} - -// Signatures returns the signatures as provided by -// (*libtrust.JSONSignature).Signatures. The byte slices are opaque JWS -// signatures. -func (sm *SignedManifest) Signatures() ([][]byte, error) { - jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - return nil, err - } - - // Resolve the payload in the manifest. - return jsig.Signatures() -} - -// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner -// contents. Applications requiring a marshaled signed manifest should simply -// use Raw directly, since the content produced by json.Marshal will be -// compacted and will fail signature checks. -func (sm *SignedManifest) MarshalJSON() ([]byte, error) { - if len(sm.Raw) > 0 { - return sm.Raw, nil - } - - // If the raw data is not available, just dump the inner content. - return json.Marshal(&sm.Manifest) -} - -// FSLayer is a container struct for BlobSums defined in an image manifest -type FSLayer struct { - // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum digest.Digest `json:"blobSum"` -} - -// History stores unstructured v1 compatibility information -type History struct { - // V1Compatibility is the raw v1 compatibility information - V1Compatibility string `json:"v1Compatibility"` -} - -blob -mark :90 -data 2174 -package manifest - -import ( - "bytes" - "encoding/json" - "reflect" - "testing" - - "github.com/docker/libtrust" -) - -type testEnv struct { - name, tag string - manifest *Manifest - signed *SignedManifest - pk libtrust.PrivateKey -} - -func TestManifestMarshaling(t *testing.T) { - env := genEnv(t) - - // Check that the Raw field is the same as json.MarshalIndent with these - // parameters. - p, err := json.MarshalIndent(env.signed, "", " ") - if err != nil { - t.Fatalf("error marshaling manifest: %v", err) - } - - if !bytes.Equal(p, env.signed.Raw) { - t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.Raw), string(p)) - } -} - -func TestManifestUnmarshaling(t *testing.T) { - env := genEnv(t) - - var signed SignedManifest - if err := json.Unmarshal(env.signed.Raw, &signed); err != nil { - t.Fatalf("error unmarshaling signed manifest: %v", err) - } - - if !reflect.DeepEqual(&signed, env.signed) { - t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed) - } -} - -func TestManifestVerification(t *testing.T) { - env := genEnv(t) - - publicKeys, err := Verify(env.signed) - if err != nil { - t.Fatalf("error verifying manifest: %v", err) - } - - if len(publicKeys) == 0 { - t.Fatalf("no public keys found in signature") - } - - var found bool - publicKey := env.pk.PublicKey() - // ensure that one of the extracted public keys matches the private key.
- for _, candidate := range publicKeys { - if candidate.KeyID() == publicKey.KeyID() { - found = true - break - } - } - - if !found { - t.Fatalf("expected public key, %v, not found in verified keys: %v", publicKey, publicKeys) - } -} - -func genEnv(t *testing.T) *testEnv { - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("error generating test key: %v", err) - } - - name, tag := "foo/bar", "test" - - m := Manifest{ - Versioned: Versioned{ - SchemaVersion: 1, - }, - Name: name, - Tag: tag, - FSLayers: []FSLayer{ - { - BlobSum: "asdf", - }, - { - BlobSum: "qwer", - }, - }, - } - - sm, err := Sign(&m, pk) - if err != nil { - t.Fatalf("error signing manifest: %v", err) - } - - return &testEnv{ - name: name, - tag: tag, - manifest: &m, - signed: sm, - pk: pk, - } -} - -blob -mark :91 -data 1410 -package manifest - -import ( - "crypto/x509" - "encoding/json" - - "github.com/docker/libtrust" -) - -// Sign signs the manifest with the provided private key, returning a -// SignedManifest. This typically won't be used within the registry, except -// for testing. -func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { - p, err := json.MarshalIndent(m, "", " ") - if err != nil { - return nil, err - } - - js, err := libtrust.NewJSONSignature(p) - if err != nil { - return nil, err - } - - if err := js.Sign(pk); err != nil { - return nil, err - } - - pretty, err := js.PrettySignature("signatures") - if err != nil { - return nil, err - } - - return &SignedManifest{ - Manifest: *m, - Raw: pretty, - }, nil -} - -// SignWithChain signs the manifest with the given private key and x509 chain. -// The public key of the first element in the chain must be the public key -// corresponding to the signing key. -func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { - p, err := json.MarshalIndent(m, "", " ") - if err != nil { - return nil, err - } - - js, err := libtrust.NewJSONSignature(p) - if err != nil { - return nil, err - } - - if err := js.SignWithChain(key, chain); err != nil { - return nil, err - } - - pretty, err := js.PrettySignature("signatures") - if err != nil { - return nil, err - } - - return &SignedManifest{ - Manifest: *m, - Raw: pretty, - }, nil -} - -blob -mark :92 -data 877 -package manifest - -import ( - "crypto/x509" - - "github.com/Sirupsen/logrus" - "github.com/docker/libtrust" -) - -// Verify verifies the signature of the signed manifest, returning the public -// keys used during signing. -func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { - js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") - return nil, err - } - - return js.Verify() -} - -// VerifyChains verifies the signature of the signed manifest against the -// certificate pool, returning the list of verified chains. Signatures without -// an x509 chain are not checked.
-func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { - js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - return nil, err - } - - return js.VerifyChains(ca) -} - -blob -mark :93 -data 4569 -package notifications - -import ( - "net/http" - "time" - - "code.google.com/p/go-uuid/uuid" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -type bridge struct { - ub URLBuilder - actor ActorRecord - source SourceRecord - request RequestRecord - sink Sink -} - -var _ Listener = &bridge{} - -// URLBuilder defines a subset of url builder to be used by the event listener. -type URLBuilder interface { - BuildManifestURL(name, tag string) (string, error) - BuildBlobURL(name string, dgst digest.Digest) (string, error) -} - -// NewBridge returns a notification listener that writes records to sink, -// using the actor and source. Any URLs populated in the events created by -// this bridge will be created using the URLBuilder. -// TODO(stevvooe): Update this to simply take a context.Context object. -func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { - return &bridge{ - ub: ub, - actor: actor, - source: source, - request: request, - sink: sink, - } -} - -// NewRequestRecord builds a RequestRecord for use in NewBridge from an -// http.Request, associating it with a request id. -func NewRequestRecord(id string, r *http.Request) RequestRecord { - return RequestRecord{ - ID: id, - Addr: context.RemoteAddr(r), - Host: r.Host, - Method: r.Method, - UserAgent: r.UserAgent(), - } -} - -func (b *bridge) ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionPush, repo, sm) -} - -func (b *bridge) ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionPull, repo, sm) -} - -func (b *bridge) ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionDelete, repo, sm) -} - -func (b *bridge) LayerPushed(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionPush, repo, layer) -} - -func (b *bridge) LayerPulled(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionPull, repo, layer) -} - -func (b *bridge) LayerDeleted(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionDelete, repo, layer) -} - -func (b *bridge) createManifestEventAndWrite(action string, repo distribution.Repository, sm *manifest.SignedManifest) error { - manifestEvent, err := b.createManifestEvent(action, repo, sm) - if err != nil { - return err - } - - return b.sink.Write(*manifestEvent) -} - -func (b *bridge) createManifestEvent(action string, repo distribution.Repository, sm *manifest.SignedManifest) (*Event, error) { - event := b.createEvent(action) - event.Target.MediaType = manifest.ManifestMediaType - event.Target.Repository = repo.Name() - - p, err := sm.Payload() - if err != nil { - return nil, err - } - - event.Target.Length = int64(len(p)) - - event.Target.Digest, err = digest.FromBytes(p) - if err != nil { - return nil, err - } - - // TODO(stevvooe): Currently, this is the "tag" URL: once the
digest URL is -// implemented, this should be replaced. - event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, sm.Tag) - if err != nil { - return nil, err - } - - return event, nil -} - -func (b *bridge) createLayerEventAndWrite(action string, repo distribution.Repository, layer distribution.Layer) error { - event, err := b.createLayerEvent(action, repo, layer) - if err != nil { - return err - } - - return b.sink.Write(*event) -} - -func (b *bridge) createLayerEvent(action string, repo distribution.Repository, layer distribution.Layer) (*Event, error) { - event := b.createEvent(action) - event.Target.MediaType = layerMediaType - event.Target.Repository = repo.Name() - - event.Target.Length = layer.Length() - - dgst := layer.Digest() - event.Target.Digest = dgst - - var err error - event.Target.URL, err = b.ub.BuildBlobURL(repo.Name(), dgst) - if err != nil { - return nil, err - } - - return event, nil -} - -// createEvent creates an event with actor and source populated. -func (b *bridge) createEvent(action string) *Event { - event := createEvent(action) - event.Source = b.source - event.Actor = b.actor - event.Request = b.request - - return event -} - -// createEvent returns a new event, timestamped, with the specified action. -func createEvent(action string) *Event { - return &Event{ - ID: uuid.New(), - Timestamp: time.Now(), - Action: action, - } -} - -blob -mark :94 -data 2085 -package notifications - -import ( - "net/http" - "time" -) - -// EndpointConfig covers the optional configuration parameters for an active -// endpoint. -type EndpointConfig struct { - Headers http.Header - Timeout time.Duration - Threshold int - Backoff time.Duration -} - -// defaults sets any zero-valued fields to a reasonable default. -func (ec *EndpointConfig) defaults() { - if ec.Timeout <= 0 { - ec.Timeout = time.Second - } - - if ec.Threshold <= 0 { - ec.Threshold = 10 - } - - if ec.Backoff <= 0 { - ec.Backoff = time.Second - } -} - -// Endpoint is a reliable, queued, thread-safe sink that notifies external HTTP -// services when events are written. Writes are non-blocking and always -// succeed for callers, but events may be queued internally. -type Endpoint struct { - Sink - url string - name string - - EndpointConfig - - metrics *safeMetrics -} - -// NewEndpoint returns a running endpoint, ready to receive events. -func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { - var endpoint Endpoint - endpoint.name = name - endpoint.url = url - endpoint.EndpointConfig = config - endpoint.defaults() - endpoint.metrics = newSafeMetrics() - - // Configures the inmemory queue, retry, http pipeline. - endpoint.Sink = newHTTPSink( - endpoint.url, endpoint.Timeout, endpoint.Headers, - endpoint.metrics.httpStatusListener()) - endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) - endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) - - register(&endpoint) - return &endpoint -} - -// Name returns the name of the endpoint, generally used for debugging. -func (e *Endpoint) Name() string { - return e.name -} - -// URL returns the url of the endpoint. -func (e *Endpoint) URL() string { - return e.url -} - -// ReadMetrics populates em with metrics from the endpoint. -func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { - e.metrics.Lock() - defer e.metrics.Unlock() - - *em = e.metrics.EndpointMetrics - // The map still needs to be copied in a threadsafe manner.
- em.Statuses = make(map[string]int) - for k, v := range e.metrics.Statuses { - em.Statuses[k] = v - } -} - -blob -mark :95 -data 5257 -package notifications - -import ( - "fmt" - "time" - - "github.com/docker/distribution" -) - -// EventAction constants used in action field of Event. -const ( - EventActionPull = "pull" - EventActionPush = "push" - EventActionDelete = "delete" -) - -const ( - // EventsMediaType is the media type for the JSON event envelope. If the - // Event, ActorRecord, SourceRecord or Envelope structs change, the version - // number should be incremented. - EventsMediaType = "application/vnd.docker.distribution.events.v1+json" - // layerMediaType is the media type for image rootfs diffs (aka "layers") - // used by Docker. We don't expect this to change for quite a while. - layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar" -) - -// Envelope defines the fields of a JSON event envelope message that can hold -// one or more events. -type Envelope struct { - // Events make up the contents of the envelope. Events present in a single - // envelope are not necessarily related. - Events []Event `json:"events,omitempty"` -} - -// TODO(stevvooe): The event type should be separate from the json format. It -// should be defined as an interface. Leaving as is for now since we don't -// need that at this time. If we make this change, the struct below would be -// called "EventRecord". - -// Event provides the fields required to describe a registry event. -type Event struct { - // ID provides a unique identifier for the event. - ID string `json:"id,omitempty"` - - // Timestamp is the time at which the event occurred. - Timestamp time.Time `json:"timestamp,omitempty"` - - // Action indicates what action encompasses the provided event. - Action string `json:"action,omitempty"` - - // Target uniquely describes the target of the event. - Target struct { - // TODO(stevvooe): Use http.DetectContentType for layers, maybe. - - distribution.Descriptor - - // Repository identifies the named repository. - Repository string `json:"repository,omitempty"` - - // URL provides a direct link to the content. - URL string `json:"url,omitempty"` - } `json:"target,omitempty"` - - // Request covers the request that generated the event. - Request RequestRecord `json:"request,omitempty"` - - // Actor specifies the agent that initiated the event. For most - // situations, this could be from the authorization context of the request. - Actor ActorRecord `json:"actor,omitempty"` - - // Source identifies the registry node that generated the event. Put - // differently, while the actor "initiates" the event, the source - // "generates" it. - Source SourceRecord `json:"source,omitempty"` -} - -// ActorRecord specifies the agent that initiated the event. For most -// situations, this could be from the authorization context of the request. -// Data in this record can refer to both the initiating client and the -// generating request. -type ActorRecord struct { - // Name corresponds to the subject or username associated with the - // request context that generated the event. - Name string `json:"name,omitempty"` - - // TODO(stevvooe): Look into setting a session cookie to get this - // without docker daemon. - // SessionID - - // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and - // get the actual command. - // Command -} - -// RequestRecord covers the request that generated the event.
-type RequestRecord struct { - // ID uniquely identifies the request that initiated the event. - ID string `json:"id"` - - // Addr contains the ip or hostname and possibly port of the client - // connection that initiated the event. This is the RemoteAddr from - // the standard http request. - Addr string `json:"addr,omitempty"` - - // Host is the externally accessible host name of the registry instance, - // as specified by the http host header on incoming requests. - Host string `json:"host,omitempty"` - - // Method has the request method that generated the event. - Method string `json:"method"` - - // UserAgent contains the user agent header of the request. - UserAgent string `json:"useragent"` -} - -// SourceRecord identifies the registry node that generated the event. Put -// differently, while the actor "initiates" the event, the source "generates" -// it. -type SourceRecord struct { - // Addr contains the ip or hostname and the port of the registry node - // that generated the event. Generally, this will be resolved by - // os.Hostname() along with the running port. - Addr string `json:"addr,omitempty"` - - // InstanceID identifies a running instance of an application. Changes - // after each restart. - InstanceID string `json:"instanceID,omitempty"` -} - -var ( - // ErrSinkClosed is returned if a write is issued to a sink that has been - // closed. If encountered, the error should be considered terminal and - // retries will not be successful. - ErrSinkClosed = fmt.Errorf("sink: closed") -) - -// Sink accepts and sends events. -type Sink interface { - // Write writes one or more events to the sink. If no error is returned, - // the caller will assume that all events have been committed and will not - // try to send them again. If an error is received, the caller may retry - // sending the event. The caller should cede the slice of memory to the - // sink and not modify it after calling this method. - Write(events ...Event) error - - // Close the sink, possibly waiting for pending events to flush. - Close() error -} - -blob -mark :96 -data 4733 -package notifications - -import ( - "encoding/json" - "strings" - "testing" - "time" - - "github.com/docker/distribution/manifest" -) - -// TestEventEnvelopeJSONFormat provides a silly test to detect if the event format or -// envelope has changed. If this code fails, the revision of the protocol may -// need to be incremented.
-func TestEventEnvelopeJSONFormat(t *testing.T) { - var expected = strings.TrimSpace(` -{ - "events": [ - { - "id": "asdf-asdf-asdf-asdf-0", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "length": 1, - "digest": "sha256:0123456789abcdef0", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-1", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "length": 2, - "digest": "tarsum.v2+sha256:0123456789abcdef1", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-2", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "length": 3, - "digest": "tarsum.v2+sha256:0123456789abcdef2", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - } - ] -} - `) - - tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - t.Fatalf("error creating time: %v", err) - } - - var prototype Event - prototype.Action = EventActionPush - prototype.Timestamp = tm - prototype.Actor.Name = "test-actor" - prototype.Request.ID = "asdfasdf" - prototype.Request.Addr = "client.local" - prototype.Request.Host = "registrycluster.local" - prototype.Request.Method = "PUT" - prototype.Request.UserAgent = "test/0.1" - prototype.Source.Addr = "hostname.local:port" - - var manifestPush Event - manifestPush = prototype - manifestPush.ID = "asdf-asdf-asdf-asdf-0" - manifestPush.Target.Digest = "sha256:0123456789abcdef0" - manifestPush.Target.Length = int64(1) - manifestPush.Target.MediaType = manifest.ManifestMediaType - manifestPush.Target.Repository = "library/test" - manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" - - var layerPush0 Event - layerPush0 = prototype - layerPush0.ID = "asdf-asdf-asdf-asdf-1" - layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1" - layerPush0.Target.Length = 2 - layerPush0.Target.MediaType = layerMediaType - layerPush0.Target.Repository = "library/test" - layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" - - var layerPush1 Event - layerPush1 = prototype - layerPush1.ID = "asdf-asdf-asdf-asdf-2" - layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2" - layerPush1.Target.Length = 3 - layerPush1.Target.MediaType = layerMediaType - layerPush1.Target.Repository = "library/test" - layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest" - - var envelope Envelope - 
envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1) - - p, err := json.MarshalIndent(envelope, "", " ") - if err != nil { - t.Fatalf("unexpected error marshaling envelope: %v", err) - } - if string(p) != expected { - t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected) - } -} - -blob -mark :97 -data 3709 -package notifications - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "sync" - "time" -) - -// httpSink implements a single-flight, http notification endpoint. This is -// very lightweight in that it only makes an attempt at an http request. -// Reliability should be provided by the caller. -type httpSink struct { - url string - - mu sync.Mutex - closed bool - client *http.Client - listeners []httpStatusListener - - // TODO(stevvooe): Allow one to configure the media type accepted by this - // sink and choose the serialization based on that. -} - -// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other -// sinks for increased reliability. -func newHTTPSink(u string, timeout time.Duration, headers http.Header, listeners ...httpStatusListener) *httpSink { - return &httpSink{ - url: u, - listeners: listeners, - client: &http.Client{ - Transport: &headerRoundTripper{ - Transport: http.DefaultTransport.(*http.Transport), - headers: headers, - }, - Timeout: timeout, - }, - } -} - -// httpStatusListener is called on various outcomes of sending notifications. -type httpStatusListener interface { - success(status int, events ...Event) - failure(status int, events ...Event) - err(err error, events ...Event) -} - -// Write makes an attempt to notify the endpoint, returning an error if it -// fails. It is the caller's responsibility to retry on error. The events are -// accepted or rejected as a group. -func (hs *httpSink) Write(events ...Event) error { - hs.mu.Lock() - defer hs.mu.Unlock() - - if hs.closed { - return ErrSinkClosed - } - - envelope := Envelope{ - Events: events, - } - - // TODO(stevvooe): It is not ideal to keep re-encoding the request body on - // retry but we are going to do it to keep the code simple. It is likely - // we could change the event struct to manage its own buffer. - - p, err := json.MarshalIndent(envelope, "", " ") - if err != nil { - for _, listener := range hs.listeners { - listener.err(err, events...) - } - return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) - } - - body := bytes.NewReader(p) - resp, err := hs.client.Post(hs.url, EventsMediaType, body) - if err != nil { - for _, listener := range hs.listeners { - listener.err(err, events...) - } - - return fmt.Errorf("%v: error posting: %v", hs, err) - } - - // The notifier will treat any 2xx or 3xx response as accepted by the - // endpoint. - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: - for _, listener := range hs.listeners { - listener.success(resp.StatusCode, events...) - } - - // TODO(stevvooe): This is a little accepting: we may want to support - // unsupported media type responses with retries using the correct - // media type. There may also be cases that will never work. - - return nil - default: - for _, listener := range hs.listeners { - listener.failure(resp.StatusCode, events...)
- } - return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) - } -} - -// Close the endpoint -func (hs *httpSink) Close() error { - hs.mu.Lock() - defer hs.mu.Unlock() - - if hs.closed { - return fmt.Errorf("httpsink: already closed") - } - - hs.closed = true - return nil -} - -func (hs *httpSink) String() string { - return fmt.Sprintf("httpSink{%s}", hs.url) -} - -type headerRoundTripper struct { - *http.Transport // must be transport to support CancelRequest - headers http.Header -} - -func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var nreq http.Request - nreq = *req - nreq.Header = make(http.Header) - - merge := func(headers http.Header) { - for k, v := range headers { - nreq.Header[k] = append(nreq.Header[k], v...) - } - } - - merge(req.Header) - merge(hrt.headers) - - return hrt.Transport.RoundTrip(&nreq) -} - -blob -mark :98 -data 3964 -package notifications - -import ( - "encoding/json" - "fmt" - "mime" - "net/http" - "net/http/httptest" - "reflect" - "strconv" - "testing" - - "github.com/docker/distribution/manifest" -) - -// TestHTTPSink mocks out an http endpoint and notifies it under a couple of -// conditions, ensuring correct behavior. -func TestHTTPSink(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - if r.Method != "POST" { - w.WriteHeader(http.StatusMethodNotAllowed) - t.Fatalf("unexpected request method: %v", r.Method) - return - } - - // Extract the content type and make sure it matches - contentType := r.Header.Get("Content-Type") - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) - return - } - - if mediaType != EventsMediaType { - w.WriteHeader(http.StatusUnsupportedMediaType) - t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) - return - } - - var envelope Envelope - dec := json.NewDecoder(r.Body) - if err := dec.Decode(&envelope); err != nil { - w.WriteHeader(http.StatusBadRequest) - t.Fatalf("error decoding request body: %v", err) - return - } - - // Let caller choose the status - status, err := strconv.Atoi(r.FormValue("status")) - if err != nil { - t.Logf("error parsing status: %v", err) - - // May just be empty, set status to 200 - status = http.StatusOK - } - - w.WriteHeader(status) - })) - - metrics := newSafeMetrics() - sink := newHTTPSink(server.URL, 0, nil, - &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) - - var expectedMetrics EndpointMetrics - expectedMetrics.Statuses = make(map[string]int) - - for _, tc := range []struct { - events []Event // events to send - url string - failure bool // true if there should be a failure. - statusCode int // if not set, no status code should be incremented. - }{ - { - statusCode: http.StatusOK, - events: []Event{ - createTestEvent("push", "library/test", manifest.ManifestMediaType)}, - }, - { - statusCode: http.StatusOK, - events: []Event{ - createTestEvent("push", "library/test", manifest.ManifestMediaType), - createTestEvent("push", "library/test", layerMediaType), - createTestEvent("push", "library/test", layerMediaType), - }, - }, - { - statusCode: http.StatusTemporaryRedirect, - }, - { - statusCode: http.StatusBadRequest, - failure: true, - }, - { - // Case where connection never goes through. 
- url: "http://shoudlntresolve/", - failure: true, - }, - } { - - if tc.failure { - expectedMetrics.Failures += len(tc.events) - } else { - expectedMetrics.Successes += len(tc.events) - } - - if tc.statusCode > 0 { - expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events) - } - - url := tc.url - if url == "" { - url = server.URL + "/" - } - // setup endpoint to respond with expected status code. - url += fmt.Sprintf("?status=%v", tc.statusCode) - sink.url = url - - t.Logf("testcase: %v, fail=%v", url, tc.failure) - // Try a simple event emission. - err := sink.Write(tc.events...) - - if !tc.failure { - if err != nil { - t.Fatalf("unexpected error send event: %v", err) - } - } else { - if err == nil { - t.Fatalf("the endpoint should have rejected the request") - } - } - - if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) { - t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics) - } - } - - if err := sink.Close(); err != nil { - t.Fatalf("unexpected error closing http sink: %v", err) - } - - // double close returns error - if err := sink.Close(); err == nil { - t.Fatalf("second close should have returned error: %v", err) - } - -} - -func createTestEvent(action, repo, typ string) Event { - event := createEvent(action) - - event.Target.MediaType = typ - event.Target.Repository = repo - - return *event -} - -blob -mark :99 -data 4530 -package notifications - -import ( - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "golang.org/x/net/context" -) - -// ManifestListener describes a set of methods for listening to events related to manifests. -type ManifestListener interface { - ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error - ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error -} - -// LayerListener describes a listener that can respond to layer related events. -type LayerListener interface { - LayerPushed(repo distribution.Repository, layer distribution.Layer) error - LayerPulled(repo distribution.Repository, layer distribution.Layer) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - LayerDeleted(repo distribution.Repository, layer distribution.Layer) error -} - -// Listener combines all repository events into a single interface. -type Listener interface { - ManifestListener - LayerListener -} - -type repositoryListener struct { - distribution.Repository - listener Listener -} - -// Listen dispatches events on the repository to the listener. 
-func Listen(repo distribution.Repository, listener Listener) distribution.Repository { - return &repositoryListener{ - Repository: repo, - listener: listener, - } -} - -func (rl *repositoryListener) Manifests() distribution.ManifestService { - return &manifestServiceListener{ - ManifestService: rl.Repository.Manifests(), - parent: rl, - } -} - -func (rl *repositoryListener) Layers() distribution.LayerService { - return &layerServiceListener{ - LayerService: rl.Repository.Layers(), - parent: rl, - } -} - -type manifestServiceListener struct { - distribution.ManifestService - parent *repositoryListener -} - -func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) { - sm, err := msl.ManifestService.Get(ctx, dgst) - if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository, sm); err != nil { - logrus.Errorf("error dispatching manifest pull to listener: %v", err) - } - } - - return sm, err -} - -func (msl *manifestServiceListener) Put(ctx context.Context, sm *manifest.SignedManifest) error { - err := msl.ManifestService.Put(ctx, sm) - - if err == nil { - if err := msl.parent.listener.ManifestPushed(msl.parent.Repository, sm); err != nil { - logrus.Errorf("error dispatching manifest push to listener: %v", err) - } - } - - return err -} - -func (msl *manifestServiceListener) GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) { - sm, err := msl.ManifestService.GetByTag(ctx, tag) - if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository, sm); err != nil { - logrus.Errorf("error dispatching manifest pull to listener: %v", err) - } - } - - return sm, err -} - -type layerServiceListener struct { - distribution.LayerService - parent *repositoryListener -} - -func (lsl *layerServiceListener) Fetch(dgst digest.Digest) (distribution.Layer, error) { - layer, err := lsl.LayerService.Fetch(dgst) - if err == nil { - if err := lsl.parent.listener.LayerPulled(lsl.parent.Repository, layer); err != nil { - logrus.Errorf("error dispatching layer pull to listener: %v", err) - } - } - - return layer, err -} - -func (lsl *layerServiceListener) Upload() (distribution.LayerUpload, error) { - lu, err := lsl.LayerService.Upload() - return lsl.decorateUpload(lu), err -} - -func (lsl *layerServiceListener) Resume(uuid string) (distribution.LayerUpload, error) { - lu, err := lsl.LayerService.Resume(uuid) - return lsl.decorateUpload(lu), err -} - -func (lsl *layerServiceListener) decorateUpload(lu distribution.LayerUpload) distribution.LayerUpload { - return &layerUploadListener{ - LayerUpload: lu, - parent: lsl, - } -} - -type layerUploadListener struct { - distribution.LayerUpload - parent *layerServiceListener -} - -func (lul *layerUploadListener) Finish(dgst digest.Digest) (distribution.Layer, error) { - layer, err := lul.LayerUpload.Finish(dgst) - if err == nil { - if err := lul.parent.parent.listener.LayerPushed(lul.parent.parent.Repository, layer); err != nil { - logrus.Errorf("error dispatching layer push to listener: %v", err) - } - } - - return layer, err -} - -blob -mark :100 -data 4738 -package notifications - -import ( - "io" - "reflect" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/driver/inmemory" - 
"github.com/docker/distribution/testutil" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -func TestListener(t *testing.T) { - registry := storage.NewRegistryWithDriver(inmemory.New(), cache.NewInMemoryLayerInfoCache()) - tl := &testListener{ - ops: make(map[string]int), - } - ctx := context.Background() - repository, err := registry.Repository(ctx, "foo/bar") - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - repository = Listen(repository, tl) - - // Now take the registry through a number of operations - checkExerciseRepository(t, ctx, repository) - - expectedOps := map[string]int{ - "manifest:push": 1, - "manifest:pull": 2, - // "manifest:delete": 0, // deletes not supported for now - "layer:push": 2, - "layer:pull": 2, - // "layer:delete": 0, // deletes not supported for now - } - - if !reflect.DeepEqual(tl.ops, expectedOps) { - t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) - } - -} - -type testListener struct { - ops map[string]int -} - -func (tl *testListener) ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error { - tl.ops["manifest:push"]++ - - return nil -} - -func (tl *testListener) ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error { - tl.ops["manifest:pull"]++ - return nil -} - -func (tl *testListener) ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error { - tl.ops["manifest:delete"]++ - return nil -} - -func (tl *testListener) LayerPushed(repo distribution.Repository, layer distribution.Layer) error { - tl.ops["layer:push"]++ - return nil -} - -func (tl *testListener) LayerPulled(repo distribution.Repository, layer distribution.Layer) error { - tl.ops["layer:pull"]++ - return nil -} - -func (tl *testListener) LayerDeleted(repo distribution.Repository, layer distribution.Layer) error { - tl.ops["layer:delete"]++ - return nil -} - -// checkExerciseRegistry takes the registry through all of its operations, -// carrying out generic checks. -func checkExerciseRepository(t *testing.T, ctx context.Context, repository distribution.Repository) { - // TODO(stevvooe): This would be a nice testutil function. Basically, it - // takes the registry through a common set of operations. This could be - // used to make cross-cutting updates by changing internals that affect - // update counts. Basically, it would make writing tests a lot easier. - - tag := "thetag" - m := manifest.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: repository.Name(), - Tag: tag, - } - - layers := repository.Layers() - for i := 0; i < 2; i++ { - rs, ds, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating test layer: %v", err) - } - dgst := digest.Digest(ds) - upload, err := layers.Upload() - if err != nil { - t.Fatalf("error creating layer upload: %v", err) - } - - // Use the resumes, as well! 
- upload, err = layers.Resume(upload.UUID())
- if err != nil {
- t.Fatalf("error resuming layer upload: %v", err)
- }
-
- if _, err := io.Copy(upload, rs); err != nil {
- t.Fatalf("error copying layer content: %v", err)
- }
-
- if _, err := upload.Finish(dgst); err != nil {
- t.Fatalf("unexpected error finishing upload: %v", err)
- }
-
- m.FSLayers = append(m.FSLayers, manifest.FSLayer{
- BlobSum: dgst,
- })
-
- // Then fetch the layers
- if _, err := layers.Fetch(dgst); err != nil {
- t.Fatalf("error fetching layer: %v", err)
- }
- }
-
- pk, err := libtrust.GenerateECP256PrivateKey()
- if err != nil {
- t.Fatalf("unexpected error generating key: %v", err)
- }
-
- sm, err := manifest.Sign(&m, pk)
- if err != nil {
- t.Fatalf("unexpected error signing manifest: %v", err)
- }
-
- manifests := repository.Manifests()
-
- if err := manifests.Put(ctx, sm); err != nil {
- t.Fatalf("unexpected error putting the manifest: %v", err)
- }
-
- p, err := sm.Payload()
- if err != nil {
- t.Fatalf("unexpected error getting manifest payload: %v", err)
- }
-
- dgst, err := digest.FromBytes(p)
- if err != nil {
- t.Fatalf("unexpected error digesting manifest payload: %v", err)
- }
-
- fetchedByManifest, err := manifests.Get(ctx, dgst)
- if err != nil {
- t.Fatalf("unexpected error fetching manifest: %v", err)
- }
-
- if fetchedByManifest.Tag != sm.Tag {
- t.Fatalf("retrieved unexpected manifest: tag %q != %q", fetchedByManifest.Tag, sm.Tag)
- }
-
- fetched, err := manifests.GetByTag(ctx, tag)
- if err != nil {
- t.Fatalf("unexpected error fetching manifest: %v", err)
- }
-
- if fetched.Tag != fetchedByManifest.Tag {
- t.Fatalf("retrieved unexpected manifest: tag %q != %q", fetched.Tag, fetchedByManifest.Tag)
- }
-}
-
-blob
-mark :101
-data 4133
-package notifications
-
-import (
- "expvar"
- "fmt"
- "net/http"
- "sync"
-)
-
-// EndpointMetrics tracks various actions taken by the endpoint, typically by
-// number of events. The goal of this is to export it via expvar, but we may
-// find some other future solution to be better.
-type EndpointMetrics struct {
- Pending int // events pending in queue
- Events int // total events incoming
- Successes int // total events written successfully
- Failures int // total events failed
- Errors int // total events errored
- Statuses map[string]int // status code histogram, per call event
-}
-
-// safeMetrics guards the metrics implementation with a lock and provides a
-// safe update function.
-type safeMetrics struct {
- EndpointMetrics
- sync.Mutex // protects statuses map
-}
-
-// newSafeMetrics returns safeMetrics with map allocated.
-func newSafeMetrics() *safeMetrics {
- var sm safeMetrics
- sm.Statuses = make(map[string]int)
- return &sm
-}
-
-// httpStatusListener returns the listener for the http sink that updates the
-// relevant counters.
-func (sm *safeMetrics) httpStatusListener() httpStatusListener {
- return &endpointMetricsHTTPStatusListener{
- safeMetrics: sm,
- }
-}
-
-// eventQueueListener returns a listener that maintains queue related counters.
-func (sm *safeMetrics) eventQueueListener() eventQueueListener {
- return &endpointMetricsEventQueueListener{
- safeMetrics: sm,
- }
-}
-
-// endpointMetricsHTTPStatusListener increments counters related to http sinks
-// for the relevant events.
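Because `init` (later in this blob) publishes these counters under the "registry" expvar map, they can be read as JSON from any mux that exposes expvar's `/debug/vars` handler. A hypothetical scrape helper, assuming the registry process serves `http.DefaultServeMux` on `debugAddr` (both the helper and the address are mine, not part of the package):

```go
package example

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// fetchNotificationMetrics reads the expvar JSON exposed by the registry and
// returns the "registry.notifications" subtree, which carries the endpoint
// metrics registered by the notifications package.
func fetchNotificationMetrics(debugAddr string) (interface{}, error) {
	resp, err := http.Get(fmt.Sprintf("http://%s/debug/vars", debugAddr))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var vars struct {
		Registry struct {
			Notifications interface{} `json:"notifications"`
		} `json:"registry"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		return nil, err
	}
	return vars.Registry.Notifications, nil
}
```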
-type endpointMetricsHTTPStatusListener struct { - *safeMetrics -} - -var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} - -func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) - emsl.Successes += len(events) -} - -func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) - emsl.Failures += len(events) -} - -func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Errors += len(events) -} - -// endpointMetricsEventQueueListener maintains the incoming events counter and -// the queues pending count. -type endpointMetricsEventQueueListener struct { - *safeMetrics -} - -func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { - eqc.Lock() - defer eqc.Unlock() - eqc.Events += len(events) - eqc.Pending += len(events) -} - -func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { - eqc.Lock() - defer eqc.Unlock() - eqc.Pending -= len(events) -} - -// endpoints is global registry of endpoints used to report metrics to expvar -var endpoints struct { - registered []*Endpoint - mu sync.Mutex -} - -// register places the endpoint into expvar so that stats are tracked. -func register(e *Endpoint) { - endpoints.mu.Lock() - defer endpoints.mu.Unlock() - - endpoints.registered = append(endpoints.registered, e) -} - -func init() { - // NOTE(stevvooe): Setup registry metrics structure to report to expvar. - // Ideally, we do more metrics through logging but we need some nice - // realtime metrics for queue state for now. - - registry := expvar.Get("registry") - - if registry == nil { - registry = expvar.NewMap("registry") - } - - var notifications expvar.Map - notifications.Init() - notifications.Set("endpoints", expvar.Func(func() interface{} { - endpoints.mu.Lock() - defer endpoints.mu.Unlock() - - var names []interface{} - for _, v := range endpoints.registered { - var epjson struct { - Name string `json:"name"` - URL string `json:"url"` - EndpointConfig - - Metrics EndpointMetrics - } - - epjson.Name = v.Name() - epjson.URL = v.URL() - epjson.EndpointConfig = v.EndpointConfig - - v.ReadMetrics(&epjson.Metrics) - - names = append(names, epjson) - } - - return names - })) - - registry.(*expvar.Map).Set("notifications", ¬ifications) -} - -blob -mark :102 -data 8562 -package notifications - -import ( - "container/list" - "fmt" - "sync" - "time" - - "github.com/Sirupsen/logrus" -) - -// NOTE(stevvooe): This file contains definitions for several utility sinks. -// Typically, the broadcaster is the only sink that should be required -// externally, but others are suitable for export if the need arises. Albeit, -// the tight integration with endpoint metrics should be removed. - -// Broadcaster sends events to multiple, reliable Sinks. The goal of this -// component is to dispatch events to configured endpoints. Reliability can be -// provided by wrapping incoming sinks. -type Broadcaster struct { - sinks []Sink - events chan []Event - closed chan chan struct{} -} - -// NewBroadcaster ... -// Add appends one or more sinks to the list of sinks. The broadcaster -// behavior will be affected by the properties of the sink. 
Generally, the -// sink should accept all messages and deal with reliability on its own. Use -// of EventQueue and RetryingSink should be used here. -func NewBroadcaster(sinks ...Sink) *Broadcaster { - b := Broadcaster{ - sinks: sinks, - events: make(chan []Event), - closed: make(chan chan struct{}), - } - - // Start the broadcaster - go b.run() - - return &b -} - -// Write accepts a block of events to be dispatched to all sinks. This method -// will never fail and should never block (hopefully!). The caller cedes the -// slice memory to the broadcaster and should not modify it after calling -// write. -func (b *Broadcaster) Write(events ...Event) error { - select { - case b.events <- events: - case <-b.closed: - return ErrSinkClosed - } - return nil -} - -// Close the broadcaster, ensuring that all messages are flushed to the -// underlying sink before returning. -func (b *Broadcaster) Close() error { - logrus.Infof("broadcaster: closing") - select { - case <-b.closed: - // already closed - return fmt.Errorf("broadcaster: already closed") - default: - // do a little chan handoff dance to synchronize closing - closed := make(chan struct{}) - b.closed <- closed - close(b.closed) - <-closed - return nil - } -} - -// run is the main broadcast loop, started when the broadcaster is created. -// Under normal conditions, it waits for events on the event channel. After -// Close is called, this goroutine will exit. -func (b *Broadcaster) run() { - for { - select { - case block := <-b.events: - for _, sink := range b.sinks { - if err := sink.Write(block...); err != nil { - logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) - } - } - case closing := <-b.closed: - - // close all the underlying sinks - for _, sink := range b.sinks { - if err := sink.Close(); err != nil { - logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) - } - } - closing <- struct{}{} - - logrus.Debugf("broadcaster: closed") - return - } - } -} - -// eventQueue accepts all messages into a queue for asynchronous consumption -// by a sink. It is unbounded and thread safe but the sink must be reliable or -// events will be dropped. -type eventQueue struct { - sink Sink - events *list.List - listeners []eventQueueListener - cond *sync.Cond - mu sync.Mutex - closed bool -} - -// eventQueueListener is called when various events happen on the queue. -type eventQueueListener interface { - ingress(events ...Event) - egress(events ...Event) -} - -// newEventQueue returns a queue to the provided sink. If the updater is non- -// nil, it will be called to update pending metrics on ingress and egress. -func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { - eq := eventQueue{ - sink: sink, - events: list.New(), - listeners: listeners, - } - - eq.cond = sync.NewCond(&eq.mu) - go eq.run() - return &eq -} - -// Write accepts the events into the queue, only failing if the queue has -// beend closed. -func (eq *eventQueue) Write(events ...Event) error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return ErrSinkClosed - } - - for _, listener := range eq.listeners { - listener.ingress(events...) 
- }
- eq.events.PushBack(events)
- eq.cond.Signal() // signal waiters
-
- return nil
-}
-
-// Close shuts down the event queue, flushing any pending events to the sink
-// before closing it.
-func (eq *eventQueue) Close() error {
- eq.mu.Lock()
- defer eq.mu.Unlock()
-
- if eq.closed {
- return fmt.Errorf("eventqueue: already closed")
- }
-
- // set closed flag
- eq.closed = true
- eq.cond.Signal() // signal flushes queue
- eq.cond.Wait() // wait for signal from last flush
-
- return eq.sink.Close()
-}
-
-// run is the main goroutine to flush events to the target sink.
-func (eq *eventQueue) run() {
- for {
- block := eq.next()
-
- if block == nil {
- return // nil block means event queue is closed.
- }
-
- if err := eq.sink.Write(block...); err != nil {
- logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err)
- }
-
- for _, listener := range eq.listeners {
- listener.egress(block...)
- }
- }
-}
-
-// next encompasses the critical section of the run loop. When the queue is
-// empty, it will block on the condition. If new data arrives, it will wake
-// and return a block. When closed, a nil slice will be returned.
-func (eq *eventQueue) next() []Event {
- eq.mu.Lock()
- defer eq.mu.Unlock()
-
- for eq.events.Len() < 1 {
- if eq.closed {
- eq.cond.Broadcast()
- return nil
- }
-
- eq.cond.Wait()
- }
-
- front := eq.events.Front()
- block := front.Value.([]Event)
- eq.events.Remove(front)
-
- return block
-}
-
-// retryingSink retries the write until success or an ErrSinkClosed is
-// returned. The underlying sink must have a probability p > 0 of succeeding,
-// or the sink will block. Internally, a circuit breaker is used to manage
-// retries and resets. Concurrent calls to a retrying sink are serialized
-// through the sink, meaning that if one is in-flight, another will not
-// proceed.
-type retryingSink struct {
- mu sync.Mutex
- sink Sink
- closed bool
-
- // circuit breaker heuristics
- failures struct {
- threshold int
- recent int
- last time.Time
- backoff time.Duration // time after which we retry after failure.
- }
-}
-
-type retryingSinkListener interface {
- active(events ...Event)
- retry(events ...Event)
-}
-
-// TODO(stevvooe): We are using circuit breaking here, which actually doesn't
-// make a whole lot of sense for this use case, since we always retry. Move
-// this to use bounded exponential backoff.
-
-// newRetryingSink returns a sink that will retry writes to a sink, backing
-// off on failure. Parameters threshold and backoff adjust the behavior of the
-// circuit breaker.
-func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink {
- rs := &retryingSink{
- sink: sink,
- }
- rs.failures.threshold = threshold
- rs.failures.backoff = backoff
-
- return rs
-}
-
-// Write attempts to flush the events to the downstream sink until it succeeds
-// or the sink is closed.
-func (rs *retryingSink) Write(events ...Event) error {
- rs.mu.Lock()
- defer rs.mu.Unlock()
-
-retry:
-
- if rs.closed {
- return ErrSinkClosed
- }
-
- if !rs.proceed() {
- logrus.Warnf("%v encountered too many errors, backing off", rs.sink)
- rs.wait(rs.failures.backoff)
- goto retry
- }
-
- if err := rs.write(events...); err != nil {
- if err == ErrSinkClosed {
- // terminal!
- return err
- }
-
- logrus.Errorf("retryingsink: error writing events: %v, retrying", err)
- goto retry
- }
-
- return nil
-}
-
-// Close closes the retrying sink and the underlying sink.
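Before the `Close` implementation below, it is worth spelling out the failure heuristic: writes proceed while recent consecutive failures stay under `threshold`, or once `backoff` has elapsed since the last failure; each failure bumps the counter and each success resets it. With, say, a threshold of 3 and a backoff of 10ms, the third consecutive failure causes `Write` to sleep in 10ms increments until a retry succeeds. A standalone restatement of just that state machine (illustrative; the names are mine):

```go
package example

import "time"

// breaker mirrors the failure bookkeeping inside retryingSink.
type breaker struct {
	threshold int           // consecutive failures tolerated
	backoff   time.Duration // pause before retrying after tripping
	recent    int
	last      time.Time
}

// proceed reports whether a write should be attempted now: either the
// breaker has not tripped, or the backoff window has already elapsed.
func (b *breaker) proceed() bool {
	return b.recent < b.threshold ||
		time.Now().UTC().After(b.last.Add(b.backoff))
}

func (b *breaker) failure() { b.recent++; b.last = time.Now().UTC() }

func (b *breaker) success() { b.recent = 0; b.last = time.Time{} }
```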
-func (rs *retryingSink) Close() error {
- rs.mu.Lock()
- defer rs.mu.Unlock()
-
- if rs.closed {
- return fmt.Errorf("retryingsink: already closed")
- }
-
- rs.closed = true
- return rs.sink.Close()
-}
-
-// write provides a helper that dispatches failure and success properly. Used
-// by Write as the single-flight write call.
-func (rs *retryingSink) write(events ...Event) error {
- if err := rs.sink.Write(events...); err != nil {
- rs.failure()
- return err
- }
-
- rs.reset()
- return nil
-}
-
-// wait sleeps for the backoff period, unlocking so others can proceed. Should
-// only be called by methods that currently hold the mutex.
-func (rs *retryingSink) wait(backoff time.Duration) {
- rs.mu.Unlock()
- defer rs.mu.Lock()
-
- // backoff here
- time.Sleep(backoff)
-}
-
-// reset marks a successful call.
-func (rs *retryingSink) reset() {
- rs.failures.recent = 0
- rs.failures.last = time.Time{}
-}
-
-// failure records a failure.
-func (rs *retryingSink) failure() {
- rs.failures.recent++
- rs.failures.last = time.Now().UTC()
-}
-
-// proceed returns true if the call should proceed based on circuit breaker
-// heuristics.
-func (rs *retryingSink) proceed() bool {
- return rs.failures.recent < rs.failures.threshold ||
- time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff))
-}
-
-blob
-mark :103
-data 4505
-package notifications
-
-import (
- "fmt"
- "math/rand"
- "sync"
- "time"
-
- "github.com/Sirupsen/logrus"
-
- "testing"
-)
-
-func TestBroadcaster(t *testing.T) {
- const nEvents = 1000
- var sinks []Sink
-
- for i := 0; i < 10; i++ {
- sinks = append(sinks, &testSink{})
- }
-
- b := NewBroadcaster(sinks...)
-
- var block []Event
- var wg sync.WaitGroup
- for i := 1; i <= nEvents; i++ {
- block = append(block, createTestEvent("push", "library/test", "blob"))
-
- if i%10 == 0 && i > 0 {
- wg.Add(1)
- go func(block ...Event) {
- if err := b.Write(block...); err != nil {
- t.Fatalf("error writing block of length %d: %v", len(block), err)
- }
- wg.Done()
- }(block...)
-
- block = nil
- }
- }
-
- wg.Wait() // Wait until writes complete
- checkClose(t, b)
-
- // Iterate through the sinks and check that they all have the expected length.
- for _, sink := range sinks {
- ts := sink.(*testSink)
- ts.mu.Lock()
- defer ts.mu.Unlock()
-
- if len(ts.events) != nEvents {
- t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents)
- }
-
- if !ts.closed {
- t.Fatalf("sink should have been closed")
- }
- }
-
-}
-
-func TestEventQueue(t *testing.T) {
- const nevents = 1000
- var ts testSink
- metrics := newSafeMetrics()
- eq := newEventQueue(
- // delayed sink simulates a destination slower than channel comms
- &delayedSink{
- Sink: &ts,
- delay: time.Millisecond * 1,
- }, metrics.eventQueueListener())
-
- var wg sync.WaitGroup
- var block []Event
- for i := 1; i <= nevents; i++ {
- block = append(block, createTestEvent("push", "library/test", "blob"))
- if i%10 == 0 && i > 0 {
- wg.Add(1)
- go func(block ...Event) {
- if err := eq.Write(block...); err != nil {
- t.Fatalf("error writing event block: %v", err)
- }
- wg.Done()
- }(block...)
- - block = nil - } - } - - wg.Wait() - checkClose(t, eq) - - ts.mu.Lock() - defer ts.mu.Unlock() - metrics.Lock() - defer metrics.Unlock() - - if len(ts.events) != nevents { - t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), 1000) - } - - if !ts.closed { - t.Fatalf("sink should have been closed") - } - - if metrics.Events != nevents { - t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents) - } - - if metrics.Pending != 0 { - t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0) - } -} - -func TestRetryingSink(t *testing.T) { - - // Make a sync that fails most of the time, ensuring that all the events - // make it through. - var ts testSink - flaky := &flakySink{ - rate: 1.0, // start out always failing. - Sink: &ts, - } - s := newRetryingSink(flaky, 3, 10*time.Millisecond) - - var wg sync.WaitGroup - var block []Event - for i := 1; i <= 100; i++ { - block = append(block, createTestEvent("push", "library/test", "blob")) - - // Above 50, set the failure rate lower - if i > 50 { - s.mu.Lock() - flaky.rate = 0.90 - s.mu.Unlock() - } - - if i%10 == 0 && i > 0 { - wg.Add(1) - go func(block ...Event) { - defer wg.Done() - if err := s.Write(block...); err != nil { - t.Fatalf("error writing event block: %v", err) - } - }(block...) - - block = nil - } - } - - wg.Wait() - checkClose(t, s) - - ts.mu.Lock() - defer ts.mu.Unlock() - - if len(ts.events) != 100 { - t.Fatalf("events not propagated: %d != %d", len(ts.events), 100) - } -} - -type testSink struct { - events []Event - mu sync.Mutex - closed bool -} - -func (ts *testSink) Write(events ...Event) error { - ts.mu.Lock() - defer ts.mu.Unlock() - ts.events = append(ts.events, events...) - return nil -} - -func (ts *testSink) Close() error { - ts.mu.Lock() - defer ts.mu.Unlock() - ts.closed = true - - logrus.Infof("closing testSink") - return nil -} - -type delayedSink struct { - Sink - delay time.Duration -} - -func (ds *delayedSink) Write(events ...Event) error { - time.Sleep(ds.delay) - return ds.Sink.Write(events...) -} - -type flakySink struct { - Sink - rate float64 -} - -func (fs *flakySink) Write(events ...Event) error { - if rand.Float64() < fs.rate { - return fmt.Errorf("error writing %d events", len(events)) - } - - return fs.Sink.Write(events...) -} - -func checkClose(t *testing.T, sink Sink) { - if err := sink.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - // second close should not crash but should return an error. 
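checkClose pins down the Sink contract shared by every implementation in this file: Close is terminal, a second Close returns an error rather than panicking, and any Write after Close fails with ErrSinkClosed, exactly as asserted just below. A minimal sink satisfying that contract might look like the sketch here (illustrative only; `nullSink` is not part of the package, and the stand-in `Event` type and `errSinkClosed` sentinel mirror the package's own):

```go
package example

import "fmt"

type Event struct{} // stand-in for notifications.Event

// errSinkClosed mirrors the package's ErrSinkClosed sentinel.
var errSinkClosed = fmt.Errorf("sink: closed")

// nullSink accepts and discards events until closed, then follows the
// contract exercised by checkClose.
type nullSink struct{ closed bool }

func (ns *nullSink) Write(events ...Event) error {
	if ns.closed {
		return errSinkClosed
	}
	return nil // events are deliberately dropped
}

func (ns *nullSink) Close() error {
	if ns.closed {
		return fmt.Errorf("nullsink: already closed")
	}
	ns.closed = true
	return nil
}
```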
- if err := sink.Close(); err == nil { - t.Fatalf("no error on double close") - } - - // Write after closed should be an error - if err := sink.Write([]Event{}...); err == nil { - t.Fatalf("write after closed did not have an error") - } else if err != ErrSinkClosed { - t.Fatalf("error should be ErrSinkClosed") - } -} - -blob -mark :104 -data 599 -FROM ubuntu:14.04 - -ENV GOLANG_VERSION 1.4rc1 -ENV GOPATH /var/cache/drone -ENV GOROOT /usr/local/go -ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin - -ENV LANG C -ENV LC_ALL C - -RUN apt-get update && apt-get install -y \ - wget ca-certificates git mercurial bzr \ - --no-install-recommends \ - && rm -rf /var/lib/apt/lists/* - -RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ - tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ - rm go${GOLANG_VERSION}.linux-amd64.tar.gz - -RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint - -blob -mark :105 -data 637 -Git Hooks -========= - -To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. - -As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. -blob -mark :106 -data 413 -#!/bin/sh - -cd $(dirname $0) - -REPO_ROOT=$(git rev-parse --show-toplevel) -RESOLVE_REPO_ROOT_STATUS=$? -if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then - echo -e "Unable to resolve repository root. Error:\n$REPO_ROOT" > /dev/stderr - exit $RESOLVE_REPO_ROOT_STATUS -fi - -set -e -set -x - -# Just in case the directory doesn't exist -mkdir -p $REPO_ROOT/.git/hooks - -ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit -blob -mark :107 -data 888 -#!/bin/sh - -REPO_ROOT=$(git rev-parse --show-toplevel) -RESOLVE_REPO_ROOT_STATUS=$? -if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then - printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr - exit $RESOLVE_REPO_ROOT_STATUS -fi - -cd $REPO_ROOT - -GOFMT_ERRORS=$(gofmt -s -l . 2>&1) -if [ -n "$GOFMT_ERRORS" ]; then - printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr - exit 1 -fi - -GOLINT_ERRORS=$(golint ./... 2>&1) -if [ -n "$GOLINT_ERRORS" ]; then - printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr - exit 1 -fi - -GOVET_ERRORS=$(go vet ./... 2>&1) -GOVET_STATUS=$? -if [ "$GOVET_STATUS" -ne "0" ]; then - printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr - exit $GOVET_STATUS -fi - -blob -mark :108 -data 6642 -package distribution - -import ( - "io" - "net/http" - "time" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "golang.org/x/net/context" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. 
- Contains(name string) bool
-}
-
-type fullScope struct{}
-
-func (f fullScope) Contains(string) bool {
- return true
-}
-
-// GlobalScope represents the full namespace scope which contains
-// all other scopes.
-var GlobalScope = Scope(fullScope{})
-
-// Namespace represents a collection of repositories, addressable by name.
-// Generally, a namespace is backed by a set of one or more services,
-// providing facilities such as registry access, trust, and indexing.
-type Namespace interface {
- // Scope describes the names that can be used with this Namespace. The
- // global namespace will have a scope that matches all names. The scope
- // effectively provides an identity for the namespace.
- Scope() Scope
-
- // Repository should return a reference to the named repository. The
- // registry may or may not have the repository but should always return a
- // reference.
- Repository(ctx context.Context, name string) (Repository, error)
-}
-
-// Repository is a named collection of manifests and layers.
-type Repository interface {
- // Name returns the name of the repository.
- Name() string
-
- // Manifests returns a reference to this repository's manifest service.
- Manifests() ManifestService
-
- // Layers returns a reference to this repository's layers service.
- Layers() LayerService
-
- // Signatures returns a reference to this repository's signatures service.
- Signatures() SignatureService
-}
-
-// TODO(stevvooe): Must add close methods to all these. May want to change the
-// way instances are created to better reflect internal dependency
-// relationships.
-
-// ManifestService provides operations on image manifests.
-type ManifestService interface {
- // Exists returns true if the manifest exists.
- Exists(ctx context.Context, dgst digest.Digest) (bool, error)
-
- // Get retrieves the manifest identified by the digest, if it exists.
- Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error)
-
- // Delete removes the manifest, if it exists.
- Delete(ctx context.Context, dgst digest.Digest) error
-
- // Put creates or updates the manifest.
- Put(ctx context.Context, manifest *manifest.SignedManifest) error
-
- // TODO(stevvooe): The methods after this message should be moved to a
- // discrete TagService, per active proposals.
-
- // Tags lists the tags under the named repository.
- Tags(ctx context.Context) ([]string, error)
-
- // ExistsByTag returns true if a manifest exists for the given tag.
- ExistsByTag(ctx context.Context, tag string) (bool, error)
-
- // GetByTag retrieves the named manifest, if it exists.
- GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error)
-
- // TODO(stevvooe): There are several changes that need to be done to this
- // interface:
- //
- // 1. Allow explicit tagging with Tag(digest digest.Digest, tag string)
- // 2. Support reading tags with a re-entrant reader to avoid large
- // allocations in the registry.
- // 3. Long-term: Provide All() method that lets one scroll through all of
- // the manifest entries.
- // 4. Long-term: break out concept of signing from manifests. This is
- // really a part of the distribution sprint.
- // 5. Long-term: Manifest should be an interface. This code shouldn't
- // really be concerned with the storage format.
-}
-
-// LayerService provides operations on layer files in a backend storage.
-type LayerService interface {
- // Exists returns true if the layer exists.
- Exists(digest digest.Digest) (bool, error)
-
- // Fetch the layer identified by TarSum.
- Fetch(digest digest.Digest) (Layer, error) - - // Upload begins a layer upload to repository identified by name, - // returning a handle. - Upload() (LayerUpload, error) - - // Resume continues an in progress layer upload, returning a handle to the - // upload. The caller should seek to the latest desired upload location - // before proceeding. - Resume(uuid string) (LayerUpload, error) -} - -// Layer provides a readable and seekable layer object. Typically, -// implementations are *not* goroutine safe. -type Layer interface { - // http.ServeContent requires an efficient implementation of - // ReadSeeker.Seek(0, os.SEEK_END). - io.ReadSeeker - io.Closer - - // Digest returns the unique digest of the blob. - Digest() digest.Digest - - // Length returns the length in bytes of the blob. - Length() int64 - - // CreatedAt returns the time this layer was created. - CreatedAt() time.Time - - // Handler returns an HTTP handler which serves the layer content, whether - // by providing a redirect directly to the content, or by serving the - // content itself. - Handler(r *http.Request) (http.Handler, error) -} - -// LayerUpload provides a handle for working with in-progress uploads. -// Instances can be obtained from the LayerService.Upload and -// LayerService.Resume. -type LayerUpload interface { - io.WriteSeeker - io.ReaderFrom - io.Closer - - // UUID returns the identifier for this upload. - UUID() string - - // StartedAt returns the time this layer upload was started. - StartedAt() time.Time - - // Finish marks the upload as completed, returning a valid handle to the - // uploaded layer. The digest is validated against the contents of the - // uploaded layer. - Finish(digest digest.Digest) (Layer, error) - - // Cancel the layer upload process. - Cancel() error -} - -// SignatureService provides operations on signatures. -type SignatureService interface { - // Get retrieves all of the signature blobs for the specified digest. - Get(dgst digest.Digest) ([][]byte, error) - - // Put stores the signature for the provided digest. - Put(dgst digest.Digest, signatures ...[]byte) error -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Length in bytes of content. - Length int64 `json:"length,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. 
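Taken together, the interfaces above describe the registry's write path: open an upload, stream the blob, seal it against a digest, then put the manifest that references it. A sketch of that flow against these interfaces (only the interface methods come from the source; the wrapper function itself is mine):

```go
package example

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	"golang.org/x/net/context"
)

// pushLayerAndManifest uploads one layer and then records the signed
// manifest that references it.
func pushLayerAndManifest(ctx context.Context, repo distribution.Repository, content io.Reader, dgst digest.Digest, sm *manifest.SignedManifest) error {
	upload, err := repo.Layers().Upload()
	if err != nil {
		return err
	}
	if _, err := io.Copy(upload, content); err != nil {
		upload.Cancel() // release the partial upload
		return err
	}
	// Finish validates the uploaded bytes against the expected digest.
	if _, err := upload.Finish(dgst); err != nil {
		return err
	}
	return repo.Manifests().Put(ctx, sm)
}
```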
-} - -blob -mark :109 -data 48428 -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/digest" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: RepositoryNameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - tagParameterDescriptor = ParameterDescriptor{ - Name: "tag", - Type: "string", - Format: TagNameRegexp.String(), - Required: true, - Description: `Tag of the target manifiest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - unauthorizedResponse = ResponseDescriptor{ - Description: "The client does not have access to the repository.", - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON error response body.", - Format: "", - }, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: unauthorizedErrorsBody, - }, - } - - unauthorizedResponsePush = ResponseDescriptor{ - Description: "The client does not have access to push to the repository.", - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON error response body.", - Format: "", - }, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: unauthorizedErrorsBody, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... 
- ],
- "history": <v1 images>,
- "signature": <JWS>
-}`
-
- errorsBody = `{
- "errors:" [
- {
- "code": <error code>,
- "message": "<error message>",
- "detail": ...
- },
- ...
- ]
-}`
-
- unauthorizedErrorsBody = `{
- "errors:" [
- {
- "code": "UNAUTHORIZED",
- "message": "access to the requested resource is not authorized",
- "detail": ...
- },
- ...
- ]
-}`
-)
-
-// APIDescriptor exports descriptions of the layout of the v2 registry API.
-var APIDescriptor = struct {
- // RouteDescriptors provides a list of the routes available in the API.
- RouteDescriptors []RouteDescriptor
-
- // ErrorDescriptors provides a list of the error codes and their
- // associated documentation and metadata.
- ErrorDescriptors []ErrorDescriptor
-}{
- RouteDescriptors: routeDescriptors,
- ErrorDescriptors: errorDescriptors,
-}
-
-// RouteDescriptor describes a route specified by name.
-type RouteDescriptor struct {
- // Name is the name of the route, as specified in RouteNameXXX exports.
- // These names should be considered a unique reference for a route. If
- // the route is registered with gorilla, this is the name that will be
- // used.
- Name string
-
- // Path is a gorilla/mux-compatible regexp that can be used to match the
- // route. For any incoming method and path, only one route descriptor
- // should match.
- Path string
-
- // Entity should be a short, human-readable description of the object
- // targeted by the endpoint.
- Entity string
-
- // Description should provide an accurate overview of the functionality
- // provided by the route.
- Description string
-
- // Methods should describe the various HTTP methods that may be used on
- // this route, including request and response formats.
- Methods []MethodDescriptor
-}
-
-// MethodDescriptor provides a description of the requests that may be
-// conducted with the target method.
-type MethodDescriptor struct {
-
- // Method is an HTTP method, such as GET, PUT or POST.
- Method string
-
- // Description should provide an overview of the functionality provided by
- // the covered method, suitable for use in documentation. Use of markdown
- // here is encouraged.
- Description string
-
- // Requests is a slice of request descriptors enumerating how this
- // endpoint may be used.
- Requests []RequestDescriptor
-}
-
-// RequestDescriptor covers a particular set of headers and parameters that
-// can be used with the parent method. It's most helpful to have one
-// RequestDescriptor per API use case.
-type RequestDescriptor struct {
- // Name provides a short identifier for the request, usable as a title or
- // to provide quick context for the particular request.
- Name string
-
- // Description should cover the request's purpose, covering any details for
- // this particular use case.
- Description string
-
- // Headers describes headers that must be used with the HTTP request.
- Headers []ParameterDescriptor
-
- // PathParameters enumerate the parameterized path components for the
- // given request, as defined in the route's regular expression.
- PathParameters []ParameterDescriptor
-
- // QueryParameters provides a list of query parameters for the given
- // request.
- QueryParameters []ParameterDescriptor
-
- // Body describes the format of the request body.
- Body BodyDescriptor
-
- // Successes enumerates the possible responses that are considered to be
- // the result of a successful request.
- Successes []ResponseDescriptor
-
- // Failures covers the possible failures from this particular request.
- Failures []ResponseDescriptor
-}
-
-// ResponseDescriptor describes the components of an API response.
-type ResponseDescriptor struct {
- // Name provides a short identifier for the response, usable as a title or
- // to provide quick context for the particular response.
- Name string
-
- // Description should provide a brief overview of the role of the
- // response.
- Description string
-
- // StatusCode specifies the status received by this particular response.
- StatusCode int
-
- // Headers covers any headers that may be returned from the response.
- Headers []ParameterDescriptor
-
- // ErrorCodes enumerates the error codes that may be returned along with
- // the response.
- ErrorCodes []ErrorCode
-
- // Body describes the body of the response, if any.
- Body BodyDescriptor
-}
-
-// BodyDescriptor describes a request body and its expected content type. For
-// the most part, it should be example json or some placeholder for body
-// data in documentation.
-type BodyDescriptor struct {
- ContentType string
- Format string
-}
-
-// ParameterDescriptor describes the format of a request parameter, which may
-// be a header, path parameter or query parameter.
-type ParameterDescriptor struct {
- // Name is the name of the parameter, either of the path component or
- // query parameter.
- Name string
-
- // Type specifies the type of the parameter, such as string, integer, etc.
- Type string
-
- // Description provides a human-readable description of the parameter.
- Description string
-
- // Required means the field is required when set.
- Required bool
-
- // Format specifies the string format accepted by this parameter.
- Format string
-
- // Regexp is a compiled regular expression that can be used to validate
- // the contents of the parameter.
- Regexp *regexp.Regexp
-
- // Examples provides multiple examples for the values that might be valid
- // for this parameter.
- Examples []string
-}
-
-// ErrorDescriptor provides relevant information about a given error code.
-type ErrorDescriptor struct {
- // Code is the error code that this descriptor describes.
- Code ErrorCode
-
- // Value provides a unique, string key, often capitalized with
- // underscores, to identify the error code. This value is used as the
- // keyed value when serializing API errors.
- Value string
-
- // Message is a short, human-readable description of the error condition
- // included in API responses.
- Message string
-
- // Description provides a complete account of the error's purpose, suitable
- // for use in documentation.
- Description string
-
- // HTTPStatusCodes provides a list of statuses under which this error
- // condition may arise. If it is empty, the error condition may be seen
- // for any status code.
- HTTPStatusCodes []int
-}
-
-var routeDescriptors = []RouteDescriptor{
- {
- Name: RouteNameBase,
- Path: "/v2/",
- Entity: "Base",
- Description: `Base V2 API route.
Typically, this can be used for lightweight version checks and to validate registry authorization.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The client is not authorized to access the registry.", - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, - }, - }, - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeNameUnknown, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have access to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, - }, - }, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - tagParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest idenfied by `name` and `reference`. 
The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have access to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, - }, - }, - { - Description: "The named manifest is not known to the registry.", - StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - tagParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have permission to push to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, - }, - }, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": - } - }, - ... 
- ] -}`, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON error response body.", - Format: "", - }, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - tagParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Tag", - Description: "The specified `name` or `tag` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON error response body.", - Format: "", - }, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.", - Methods: []MethodDescriptor{ - - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponse, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponse, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - }, - }, - }, - }, - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/", - Entity: "Intiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponsePush, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponsePush, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponse, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Description: "Upload a chunk of data to specified upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. 
Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding to the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponsePush, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - // TODO(stevvooe): Break this down into three separate requests: - // 1. Complete an upload where all data has already been sent. - // 2. Complete an upload where the entire body is in the PUT. - // 3. Complete an upload where the final, partial chunk is the body. - - Description: "Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the block of content represented by the body. Start must be the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding to the length of the request body.
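Completing an upload is a PUT against the upload `Location` with the blob's digest as a query parameter, optionally carrying the final chunk. A minimal sketch, assuming a `net/http` client and a placeholder digest value (the package name and helper are illustrative):

```go
package registryclient

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

// completeUpload finishes an upload by PUTting the final (possibly
// empty) chunk with the blob digest attached as the "digest" query
// parameter. uploadURL is the Location previously returned by the
// registry.
func completeUpload(uploadURL, dgst string, finalChunk []byte) error {
	u, err := url.Parse(uploadURL)
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", dgst)
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(finalChunk))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Per the descriptor, 204 No Content plus a Location header naming
	// the canonical blob URL signals a completed upload.
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	fmt.Println("blob at:", resp.Header.Get("Location"))
	return nil
}
```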
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponsePush, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. 
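Cancelling an upload, as the DELETE method above describes, releases registry resources early rather than waiting for the timeout. A brief sketch under the same illustrative assumptions as the earlier helpers:

```go
package registryclient

import (
	"fmt"
	"net/http"
)

// cancelUpload issues the DELETE described above against the upload's
// Location URL. Uncancelled uploads simply time out eventually.
func cancelUpload(uploadURL string) error {
	req, err := http.NewRequest("DELETE", uploadURL, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// 204 No Content confirms deletion; a 404 (BLOB_UPLOAD_UNKNOWN) may
	// be treated as already deleted, per the failure descriptor.
	if resp.StatusCode == http.StatusNoContent || resp.StatusCode == http.StatusNotFound {
		return nil
	}
	return fmt.Errorf("unexpected status: %s", resp.Status)
}
```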
The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponse, - { - Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - }, - }, - }, - }, - }, - }, -} - -// ErrorDescriptors provides a list of HTTP API Error codes that may be -// encountered when interacting with the registry API. -var errorDescriptors = []ErrorDescriptor{ - { - Code: ErrorCodeUnknown, - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - }, - { - Code: ErrorCodeUnsupported, - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - }, - { - Code: ErrorCodeUnauthorized, - Value: "UNAUTHORIZED", - Message: "access to the requested resource is not authorized", - Description: `The access controller denied access for the operation on - a resource. Often this will be accompanied by a 401 Unauthorized - response status.`, - }, - { - Code: ErrorCodeDigestInvalid, - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeSizeInvalid, - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. 
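All of the failure responses in these descriptors share one JSON envelope (the `errorsBody` format they reference): an `errors` array of code/message/detail objects. A decoding sketch whose sample body matches the shape asserted in the package's own tests; the struct names are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// apiError and apiErrors mirror the error envelope carried by 4xx
// responses: {"errors":[{"code":...,"message":...,"detail":...}]}.
type apiError struct {
	Code    string          `json:"code"`
	Message string          `json:"message"`
	Detail  json.RawMessage `json:"detail,omitempty"`
}

type apiErrors struct {
	Errors []apiError `json:"errors"`
}

func main() {
	body := `{"errors":[{"code":"DIGEST_INVALID","message":"provided digest did not match uploaded content"}]}`

	var errs apiErrors
	if err := json.Unmarshal([]byte(body), &errs); err != nil {
		log.Fatal(err)
	}
	for _, e := range errs.Errors {
		fmt.Printf("%s: %s\n", e.Code, e.Message)
	}
}
```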
If they do not match, this error - will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeNameInvalid, - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeTagInvalid, - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeNameUnknown, - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeManifestUnknown, - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeManifestInvalid, - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeManifestUnverified, - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeBlobUnknown, - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. 
This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - - { - Code: ErrorCodeBlobUploadUnknown, - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeBlobUploadInvalid, - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, -} - -var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor -var idToDescriptors map[string]ErrorDescriptor -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(errorDescriptors)) - idToDescriptors = make(map[string]ErrorDescriptor, len(errorDescriptors)) - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range errorDescriptors { - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - } - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} - -blob -mark :110 -data 477 -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. -package v2 - -blob -mark :111 -data 5066 -package v2 - -import ( - "fmt" - "strings" -) - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -const ( - // ErrorCodeUnknown is a catch-all for errors not defined below. - ErrorCodeUnknown ErrorCode = iota - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported - - // ErrorCodeUnauthorized is returned if a request is not authorized. - ErrorCodeUnauthorized - - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - // size does not match the content length. - ErrorCodeSizeInvalid - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. 
- ErrorCodeManifestInvalid - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid -) - -// ParseErrorCode attempts to parse the error code string, returning -// ErrorCodeUnknown if the error is not known. -func ParseErrorCode(s string) ErrorCode { - desc, ok := idToDescriptors[s] - - if !ok { - return ErrorCodeUnknown - } - - return desc.Code -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returns the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail interface{} `json:"detail,omitempty"` -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors struct { - Errors []Error `json:"errors,omitempty"` -} - -// Push pushes an error onto the error stack, with the optional detail -// argument. It is a programming error (i.e. panic) to push more than one -// detail at a time. -func (errs *Errors) Push(code ErrorCode, details ...interface{}) { - if len(details) > 1 { - panic("please specify zero or one detail items for this error") - } - - var detail interface{} - if len(details) > 0 { - detail = details[0] - } - - if err, ok := detail.(error); ok { - detail = err.Error() - } - - errs.PushErr(Error{ - Code: code, - Message: code.Message(), - Detail: detail, - }) -} - -// PushErr pushes an error interface onto the error stack. -func (errs *Errors) PushErr(err error) { - switch err.(type) { - case Error: - errs.Errors = append(errs.Errors, err.(Error)) - default: - errs.Errors = append(errs.Errors, Error{Message: err.Error()}) - } -} - -func (errs *Errors) Error() string { - switch errs.Len() { - case 0: - return "" - case 1: - return errs.Errors[0].Error() - default: - msg := "errors:\n" - for _, err := range errs.Errors { - msg += err.Error() + "\n" - } - return msg - } -} - -// Clear clears the errors.
-func (errs *Errors) Clear() { - errs.Errors = errs.Errors[:0] -} - -// Len returns the current number of errors. -func (errs *Errors) Len() int { - return len(errs.Errors) -} - -blob -mark :112 -data 4623 -package v2 - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/docker/distribution/digest" -) - -// TestErrorCodes ensures that error code format, mappings and -// marshaling/unmarshaling round trips are stable. -func TestErrorCodes(t *testing.T) { - for _, desc := range errorDescriptors { - if desc.Code.String() != desc.Value { - t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value) - } - - if desc.Code.Message() != desc.Message { - t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message) - } - - // Serialize the error code using the json library to ensure that we - // get a string and it works round trip. - p, err := json.Marshal(desc.Code) - - if err != nil { - t.Fatalf("error marshaling error code %v: %v", desc.Code, err) - } - - if len(p) <= 0 { - t.Fatalf("expected content in marshaled form for error code %v", desc.Code) - } - - // First, unmarshal to interface and ensure we have a string. - var ecUnspecified interface{} - if err := json.Unmarshal(p, &ecUnspecified); err != nil { - t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err) - } - - if _, ok := ecUnspecified.(string); !ok { - t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified) - } - - // Now, unmarshal with the error code type and ensure they are equal - var ecUnmarshaled ErrorCode - if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { - t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err) - } - - if ecUnmarshaled != desc.Code { - t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code) - } - } -} - -// TestErrorsManagement does a quick check of the Errors type to ensure that -// members are properly pushed and marshaled. -func TestErrorsManagement(t *testing.T) { - var errs Errors - - errs.Push(ErrorCodeDigestInvalid) - errs.Push(ErrorCodeBlobUnknown, - map[string]digest.Digest{"digest": "sometestblobsumdoesntmatter"}) - - p, err := json.Marshal(errs) - - if err != nil { - t.Fatalf("error marshaling errors: %v", err) - } - - expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}" - - if string(p) != expectedJSON { - t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) - } - - errs.Clear() - errs.Push(ErrorCodeUnknown) - expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" - p, err = json.Marshal(errs) - - if err != nil { - t.Fatalf("error marshaling errors: %v", err) - } - - if string(p) != expectedJSON { - t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) - } -} - -// TestMarshalUnmarshal ensures that api errors can round trip through json -// without losing information.
-func TestMarshalUnmarshal(t *testing.T) { - - var errors Errors - - for _, testcase := range []struct { - description string - err Error - }{ - { - description: "unknown error", - err: Error{ - - Code: ErrorCodeUnknown, - Message: ErrorCodeUnknown.Descriptor().Message, - }, - }, - { - description: "unknown manifest", - err: Error{ - Code: ErrorCodeManifestUnknown, - Message: ErrorCodeManifestUnknown.Descriptor().Message, - }, - }, - { - description: "unknown manifest", - err: Error{ - Code: ErrorCodeBlobUnknown, - Message: ErrorCodeBlobUnknown.Descriptor().Message, - Detail: map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"}, - }, - }, - } { - fatalf := func(format string, args ...interface{}) { - t.Fatalf(testcase.description+": "+format, args...) - } - - unexpectedErr := func(err error) { - fatalf("unexpected error: %v", err) - } - - p, err := json.Marshal(testcase.err) - if err != nil { - unexpectedErr(err) - } - - var unmarshaled Error - if err := json.Unmarshal(p, &unmarshaled); err != nil { - unexpectedErr(err) - } - - if !reflect.DeepEqual(unmarshaled, testcase.err) { - fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err) - } - - // Roll everything up into an error response envelope. - errors.PushErr(testcase.err) - } - - p, err := json.Marshal(errors) - if err != nil { - t.Fatalf("unexpected error marshaling error envelope: %v", err) - } - - var unmarshaled Errors - if err := json.Unmarshal(p, &unmarshaled); err != nil { - t.Fatalf("unexpected error unmarshaling error envelope: %v", err) - } - - if !reflect.DeepEqual(unmarshaled, errors) { - t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors) - } -} - -blob -mark :113 -data 3965 -package v2 - -import ( - "fmt" - "regexp" - "strings" -) - -// TODO(stevvooe): Move these definitions back to an exported package. While -// they are used with v2 definitions, their relevance expands beyond. -// "distribution/names" is a candidate package. - -const ( - // RepositoryNameComponentMinLength is the minimum number of characters in a - // single repository name slash-delimited component - RepositoryNameComponentMinLength = 2 - - // RepositoryNameMinComponents is the minimum number of slash-delimited - // components that a repository name must have - RepositoryNameMinComponents = 1 - - // RepositoryNameTotalLengthMax is the maximum total number of characters in - // a repository name - RepositoryNameTotalLengthMax = 255 -) - -// RepositoryNameComponentRegexp restricts registry path component names to -// start with at least one letter or number, with following parts able to -// be separated by one period, dash or underscore. -var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) - -// RepositoryNameComponentAnchoredRegexp is the version of -// RepositoryNameComponentRegexp which must completely match the content -var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`) - -// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow -// multiple path components, separated by a forward slash. -var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/)*` + RepositoryNameComponentRegexp.String()) - -// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. -var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) - -// TODO(stevvooe): Contribute these exports back to core, so they are shared. 
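The regexps and length constants above feed the validator defined just below. A short usage sketch, assuming the canonical `registry/api/v2` import path for this vendored package; the sample names come from the package's own test cases:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	// "a" fails the 2-character component minimum; "Foo/Bar" fails the
	// lowercase component regexp; "library/ubuntu" is valid.
	for _, name := range []string{"library/ubuntu", "a", "Foo/Bar"} {
		if err := v2.ValidateRespositoryName(name); err != nil {
			fmt.Printf("%-15s -> invalid: %v\n", name, err)
			continue
		}
		fmt.Printf("%-15s -> ok\n", name)
	}
}
```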
- -var ( - // ErrRepositoryNameComponentShort is returned when a repository name - // contains a component which is shorter than - // RepositoryNameComponentMinLength - ErrRepositoryNameComponentShort = fmt.Errorf("respository name component must be %v or more characters", RepositoryNameComponentMinLength) - - // ErrRepositoryNameMissingComponents is returned when a repository name - // contains fewer than RepositoryNameMinComponents components - ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents) - - // ErrRepositoryNameLong is returned when a repository name is longer than - // RepositoryNameTotalLengthMax - ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) - - // ErrRepositoryNameComponentInvalid is returned when a repository name does - // not match RepositoryNameComponentRegexp - ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String()) -) - -// ValidateRespositoryName ensures the repository name is valid for use in the -// registry. This function accepts a superset of what might be accepted by -// docker core or docker hub. If the name does not pass validation, an error, -// describing the conditions, is returned. -// -// Effectively, the name should comply with the following grammar: -// -// alpha-numeric := /[a-z0-9]+/ -// separator := /[._-]/ -// component := alpha-numeric [separator alpha-numeric]* -// namespace := component ['/' component]* -// -// The result of the production, known as the "namespace", should be limited -// to 255 characters. -func ValidateRespositoryName(name string) error { - if len(name) > RepositoryNameTotalLengthMax { - return ErrRepositoryNameLong - } - - components := strings.Split(name, "/") - - if len(components) < RepositoryNameMinComponents { - return ErrRepositoryNameMissingComponents - } - - for _, component := range components { - if len(component) < RepositoryNameComponentMinLength { - return ErrRepositoryNameComponentShort - } - - if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { - return ErrRepositoryNameComponentInvalid - } - } - - return nil -} - -blob -mark :114 -data 1766 -package v2 - -import ( - "strings" - "testing" -) - -func TestRepositoryNameRegexp(t *testing.T) { - for _, testcase := range []struct { - input string - err error - }{ - { - input: "short", - }, - { - input: "simple/name", - }, - { - input: "library/ubuntu", - }, - { - input: "docker/stevvooe/app", - }, - { - input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - }, - { - input: "aa/aa/bb/bb/bb", - }, - { - input: "a/a/a/b/b", - err: ErrRepositoryNameComponentShort, - }, - { - input: "a/a/a/a/", - err: ErrRepositoryNameComponentShort, - }, - { - input: "foo.com/bar/baz", - }, - { - input: "blog.foo.com/bar/baz", - }, - { - input: "asdf", - }, - { - input: "asdf$$^/aa", - err: ErrRepositoryNameComponentInvalid, - }, - { - input: "aa-a/aa", - }, - { - input: "aa/aa", - }, - { - input: "a-a/a-a", - }, - { - input: "a", - err: ErrRepositoryNameComponentShort, - }, - { - input: "a-/a/a/a", - err: ErrRepositoryNameComponentInvalid, - }, - { - input: strings.Repeat("a", 255), - }, - { - input: strings.Repeat("a", 256), - err: ErrRepositoryNameLong, - }, - } { - - failf := func(format string, v ...interface{}) { - t.Logf(testcase.input+": "+format, v...) 
- t.Fail() - } - - if err := ValidateRespositoryName(testcase.input); err != testcase.err { - if testcase.err != nil { - if err != nil { - failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) - } else { - failf("expected invalid repository: %v", testcase.err) - } - } else { - if err != nil { - // Wrong error returned. - failf("unexpected error validating repository name: %v, expected %v", err, testcase.err) - } else { - failf("unexpected error validating repository name: %v", err) - } - } - } - } -} - -blob -mark :115 -data 1209 -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" -) - -var allEndpoints = []string{ - RouteNameManifest, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. -func RouterWithPrefix(prefix string) *mux.Router { - rootRouter := mux.NewRouter() - router := rootRouter - if prefix != "" { - router = router.PathPrefix(prefix).Subrouter() - } - - router.StrictSlash(true) - - for _, descriptor := range routeDescriptors { - router.Path(descriptor.Path).Name(descriptor.Name) - } - - return rootRouter -} - -blob -mark :116 -data 9255 -package v2 - -import ( - "encoding/json" - "fmt" - "math/rand" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - "time" - - "github.com/gorilla/mux" -) - -type routeTestCase struct { - RequestURI string - ExpectedURI string - Vars map[string]string - RouteName string - StatusCode int -} - -// TestRouter registers a test handler with all the routes and ensures that -// each route returns the expected path variables. Not method verification is -// present. This not meant to be exhaustive but as check to ensure that the -// expected variables are extracted. -// -// This may go away as the application structure comes together. 
-func TestRouter(t *testing.T) { - testCases := []routeTestCase{ - { - RouteName: RouteNameBase, - RequestURI: "/v2/", - Vars: map[string]string{}, - }, - { - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/manifests/bar", - Vars: map[string]string{ - "name": "foo", - "reference": "bar", - }, - }, - { - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/bar/manifests/tag", - Vars: map[string]string{ - "name": "foo/bar", - "reference": "tag", - }, - }, - { - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", - Vars: map[string]string{ - "name": "foo/bar", - "reference": "sha256:abcdef01234567890", - }, - }, - { - RouteName: RouteNameTags, - RequestURI: "/v2/foo/bar/tags/list", - Vars: map[string]string{ - "name": "foo/bar", - }, - }, - { - RouteName: RouteNameBlob, - RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", - Vars: map[string]string{ - "name": "foo/bar", - "digest": "tarsum.dev+foo:abcdef0919234", - }, - }, - { - RouteName: RouteNameBlob, - RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", - Vars: map[string]string{ - "name": "foo/bar", - "digest": "sha256:abcdef0919234", - }, - }, - { - RouteName: RouteNameBlobUpload, - RequestURI: "/v2/foo/bar/blobs/uploads/", - Vars: map[string]string{ - "name": "foo/bar", - }, - }, - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/uuid", - Vars: map[string]string{ - "name": "foo/bar", - "uuid": "uuid", - }, - }, - { - // support uuid proper - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - Vars: map[string]string{ - "name": "foo/bar", - "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - }, - }, - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", - Vars: map[string]string{ - "name": "foo/bar", - "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", - }, - }, - { - // supports urlsafe base64 - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", - Vars: map[string]string{ - "name": "foo/bar", - "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", - }, - }, - { - // does not match - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", - StatusCode: http.StatusNotFound, - }, - { - // Check ambiguity: ensure we can distinguish between tags for - // "foo/bar/image/image" and image for "foo/bar/image" with tag - // "tags" - RouteName: RouteNameManifest, - RequestURI: "/v2/foo/bar/manifests/manifests/tags", - Vars: map[string]string{ - "name": "foo/bar/manifests", - "reference": "tags", - }, - }, - { - // This case presents an ambiguity between foo/bar with tag="tags" - // and list tags for "foo/bar/manifest" - RouteName: RouteNameTags, - RequestURI: "/v2/foo/bar/manifests/tags/list", - Vars: map[string]string{ - "name": "foo/bar/manifests", - }, - }, - } - - checkTestRouter(t, testCases, "", true) - checkTestRouter(t, testCases, "/prefix/", true) -} - -func TestRouterWithPathTraversals(t *testing.T) { - testCases := []routeTestCase{ - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - StatusCode: http.StatusNotFound, - }, - { - // Testing for path traversal attack handling - RouteName: RouteNameTags, - 
RequestURI: "/v2/foo/../bar/baz/tags/list", - ExpectedURI: "/v2/bar/baz/tags/list", - Vars: map[string]string{ - "name": "bar/baz", - }, - }, - } - checkTestRouter(t, testCases, "", false) -} - -func TestRouterWithBadCharacters(t *testing.T) { - if testing.Short() { - testCases := []routeTestCase{ - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", - StatusCode: http.StatusNotFound, - }, - { - // Testing for path traversal attack handling - RouteName: RouteNameTags, - RequestURI: "/v2/foo/不bar/tags/list", - StatusCode: http.StatusNotFound, - }, - } - checkTestRouter(t, testCases, "", true) - } else { - // in the long version we're going to fuzz the router - // with random UTF8 characters not in the 128 bit ASCII range. - // These are not valid characters for the router and we expect - // 404s on every test. - rand.Seed(time.Now().UTC().UnixNano()) - testCases := make([]routeTestCase, 1000) - for idx := range testCases { - testCases[idx] = routeTestCase{ - RouteName: RouteNameTags, - RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), - StatusCode: http.StatusNotFound, - } - } - checkTestRouter(t, testCases, "", true) - } -} - -func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { - router := RouterWithPrefix(prefix) - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - testCase := routeTestCase{ - RequestURI: r.RequestURI, - Vars: mux.Vars(r), - RouteName: mux.CurrentRoute(r).GetName(), - } - - enc := json.NewEncoder(w) - - if err := enc.Encode(testCase); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - }) - - // Startup test server - server := httptest.NewServer(router) - - for _, testcase := range testCases { - testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI - // Register the endpoint - route := router.GetRoute(testcase.RouteName) - if route == nil { - t.Fatalf("route for name %q not found", testcase.RouteName) - } - - route.Handler(testHandler) - - u := server.URL + testcase.RequestURI - - resp, err := http.Get(u) - - if err != nil { - t.Fatalf("error issuing get request: %v", err) - } - - if testcase.StatusCode == 0 { - // Override default, zero-value - testcase.StatusCode = http.StatusOK - } - if testcase.ExpectedURI == "" { - // Override default, zero-value - testcase.ExpectedURI = testcase.RequestURI - } - - if resp.StatusCode != testcase.StatusCode { - t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) - } - - if testcase.StatusCode != http.StatusOK { - // We don't care about json response. - continue - } - - dec := json.NewDecoder(resp.Body) - - var actualRouteInfo routeTestCase - if err := dec.Decode(&actualRouteInfo); err != nil { - t.Fatalf("error reading json response: %v", err) - } - // Needs to be set out of band - actualRouteInfo.StatusCode = resp.StatusCode - - if actualRouteInfo.RequestURI != testcase.ExpectedURI { - t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI) - } - - if actualRouteInfo.RouteName != testcase.RouteName { - t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) - } - - // when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want - // that to make the comparison fail. 
We're otherwise done with the testcase so empty the - // testcase.ExpectedURI - testcase.ExpectedURI = "" - if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { - t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) - } - } - -} - -// -------------- START LICENSED CODE -------------- -// The following code is derivative of https://github.com/google/gofuzz -// gofuzz is licensed under the Apache License, Version 2.0, January 2004, -// a copy of which can be found in the LICENSE file at the root of this -// repository. - -// These functions allow us to generate strings containing only multibyte -// characters that are invalid in our URLs. They are used above for fuzzing -// to ensure we always get 404s on these invalid strings -type charRange struct { - first, last rune -} - -// choose returns a random unicode character from the given range, using the -// given randomness source. -func (r *charRange) choose() rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) -} - -var unicodeRanges = []charRange{ - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -func randomString(length int) string { - runes := make([]rune, length) - for i := range runes { - runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose() - } - return string(runes) -} - -// -------------- END LICENSED CODE -------------- - -blob -mark :117 -data 5603 -package v2 - -import ( - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/digest" - "github.com/gorilla/mux" -) - -// URLBuilder creates registry API urls from a single base endpoint. It can be -// used to create urls for use in a registry client or server. -// -// All urls will be created from the given base, including the api version. -// For example, if a root of "/foo/" is provided, urls generated will fall -// under "/foo/v2/...". Most applications will only provide a scheme, host and -// port, such as "https://localhost:5000/". -type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router -} - -// NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL) *URLBuilder { - return &URLBuilder{ - root: root, - router: Router(), - } -} - -// NewURLBuilderFromString works identically to NewURLBuilder except it takes -// a string argument for the root, returning an error if it is not a valid -// url. -func NewURLBuilderFromString(root string) (*URLBuilder, error) { - u, err := url.Parse(root) - if err != nil { - return nil, err - } - - return NewURLBuilder(u), nil -} - -// NewURLBuilderFromRequest uses information from an *http.Request to -// construct the root url. -func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { - var scheme string - - forwardedProto := r.Header.Get("X-Forwarded-Proto") - - switch { - case len(forwardedProto) > 0: - scheme = forwardedProto - case r.TLS != nil: - scheme = "https" - case len(r.URL.Scheme) > 0: - scheme = r.URL.Scheme - default: - scheme = "http" - } - - host := r.Host - forwardedHost := r.Header.Get("X-Forwarded-Host") - if len(forwardedHost) > 0 { - host = forwardedHost - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B.
index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". -func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - manifestURL, err := route.URL("name", name, "reference", reference) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", name, "digest", dgst.String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name, "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// clondedRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation. -func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root} -} - -type clonedRoute struct { - *mux.Route - root *url.URL -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) - if err != nil { - return nil, err - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - return cr.root.ResolveReference(routeURL), nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) 
- } - } - - u.RawQuery = merged.Encode() - return u -} - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. -func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} - -blob -mark :118 -data 5788 -package v2 - -import ( - "net/http" - "net/url" - "testing" -) - -type urlBuilderTestCase struct { - description string - expectedPath string - build func() (string, error) -} - -func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { - return []urlBuilderTestCase{ - { - description: "test base url", - expectedPath: "/v2/", - build: urlBuilder.BuildBaseURL, - }, - { - description: "test tags url", - expectedPath: "/v2/foo/bar/tags/list", - build: func() (string, error) { - return urlBuilder.BuildTagsURL("foo/bar") - }, - }, - { - description: "test manifest url", - expectedPath: "/v2/foo/bar/manifests/tag", - build: func() (string, error) { - return urlBuilder.BuildManifestURL("foo/bar", "tag") - }, - }, - { - description: "build blob url", - expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", - build: func() (string, error) { - return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") - }, - }, - { - description: "build blob upload url", - expectedPath: "/v2/foo/bar/blobs/uploads/", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar") - }, - }, - { - description: "build blob upload url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ - "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, - }) - }, - }, - { - description: "build blob upload chunk url", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") - }, - }, - { - description: "build blob upload chunk url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ - "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, - }) - }, - }, - } -} - -// TestURLBuilder tests the various url building functions, ensuring they are -// returning the expected values. 
-func TestURLBuilder(t *testing.T) { - roots := []string{ - "http://example.com", - "https://example.com", - "http://localhost:5000", - "https://localhost:5443", - } - - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - expectedURL := root + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } -} - -func TestURLBuilderWithPrefix(t *testing.T) { - roots := []string{ - "http://example.com/prefix/", - "https://example.com/prefix/", - "http://localhost:5000/prefix/", - "https://localhost:5443/prefix/", - } - - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - expectedURL := root[0:len(root)-1] + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } -} - -type builderFromRequestTestCase struct { - request *http.Request - base string -} - -func TestBuilderFromRequest(t *testing.T) { - u, err := url.Parse("http://example.com") - if err != nil { - t.Fatal(err) - } - - forwardedProtoHeader := make(http.Header, 1) - forwardedProtoHeader.Set("X-Forwarded-Proto", "https") - - testRequests := []struct { - request *http.Request - base string - }{ - { - request: &http.Request{URL: u, Host: u.Host}, - base: "http://example.com", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "https://example.com", - }, - } - - for _, tr := range testRequests { - builder := NewURLBuilderFromRequest(tr.request) - - for _, testCase := range makeURLBuilderTestCases(builder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - expectedURL := tr.base + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } -} - -func TestBuilderFromRequestWithPrefix(t *testing.T) { - u, err := url.Parse("http://example.com/prefix/v2/") - if err != nil { - t.Fatal(err) - } - - forwardedProtoHeader := make(http.Header, 1) - forwardedProtoHeader.Set("X-Forwarded-Proto", "https") - - testRequests := []struct { - request *http.Request - base string - }{ - { - request: &http.Request{URL: u, Host: u.Host}, - base: "http://example.com/prefix/", - }, - { - request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, - base: "https://example.com/prefix/", - }, - } - - for _, tr := range testRequests { - builder := NewURLBuilderFromRequest(tr.request) - - for _, testCase := range makeURLBuilderTestCases(builder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - expectedURL := tr.base[0:len(tr.base)-1] + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } -} - -blob -mark :119 -data 4768 -// Package auth defines a standard interface for request access 
controllers. -// -// An access controller has a simple interface with a single `Authorized` -// method which checks that a given request is authorized to perform one or -// more actions on one or more resources. This method should return a non-nil -// error if the request is not authorized. -// -// An implementation registers its access controller by name with a constructor -// which accepts an options map for configuring the access controller. -// -// options := map[string]interface{}{"sillySecret": "whysosilly?"} -// accessController, _ := auth.GetAccessController("silly", options) -// -// This `accessController` can then be used in a request handler like so: -// -// func updateOrder(w http.ResponseWriter, r *http.Request) { -// orderNumber := r.FormValue("orderNumber") -// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} -// access := auth.Access{Resource: resource, Action: "update"} -// -// if ctx, err := accessController.Authorized(ctx, access); err != nil { -// if challenge, ok := err.(auth.Challenge) { -// // Let the challenge write the response. -// challenge.ServeHTTP(w, r) -// } else { -// // Some other error. -// } -// } -// } -// -package auth - -import ( - "fmt" - "net/http" - - "golang.org/x/net/context" -) - -// UserInfo carries information about -// an authenticated/authorized client. -type UserInfo struct { - Name string -} - -// Resource describes a resource by type and name. -type Resource struct { - Type string - Name string -} - -// Access describes a specific action that is -// requested or allowed for a given resource. -type Access struct { - Resource - Action string -} - -// Challenge is a special error type which is used for HTTP 401 Unauthorized -// responses and is able to write the response with WWW-Authenticate challenge -// header values based on the error. -type Challenge interface { - error - // ServeHTTP prepares the request to conduct the appropriate challenge - // response. For most implementations, simply calling ServeHTTP should be - // sufficient. Because no body is written, users may write a custom body after - // calling ServeHTTP, but any headers must be written before the call and may - // be overwritten. - ServeHTTP(w http.ResponseWriter, r *http.Request) -} - -// AccessController controls access to registry resources based on a request -// and required access levels for a request. Implementations can support both -// complete denial and http authorization challenges. -type AccessController interface { - // Authorized returns a new authorized context if the context is granted - // access, and a non-nil error otherwise. If one or more Access structs are - // provided, the requested access will be compared with what is available - // to the context. The given context will contain a "http.request" key with - // a `*http.Request` value. If the error is non-nil, access should always - // be denied. The error may be of type Challenge, in which case the caller - // may have the Challenge handle the request or choose what action to take - // based on the Challenge header or response status. The returned context - // object should have an "auth.user" value set to a UserInfo struct. - Authorized(ctx context.Context, access ...Access) (context.Context, error) -} - -// WithUser returns a context with the authorized user info.
-func WithUser(ctx context.Context, user UserInfo) context.Context { - return userInfoContext{ - Context: ctx, - user: user, - } -} - -type userInfoContext struct { - context.Context - user UserInfo -} - -func (uic userInfoContext) Value(key interface{}) interface{} { - switch key { - case "auth.user": - return uic.user - case "auth.user.name": - return uic.user.Name - } - - return uic.Context.Value(key) -} - -// InitFunc is the type of an AccessController factory function and is used -// to register the constructor for different AccesController backends. -type InitFunc func(options map[string]interface{}) (AccessController, error) - -var accessControllers map[string]InitFunc - -func init() { - accessControllers = make(map[string]InitFunc) -} - -// Register is used to register an InitFunc for -// an AccessController backend with the given name. -func Register(name string, initFunc InitFunc) error { - if _, exists := accessControllers[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - accessControllers[name] = initFunc - - return nil -} - -// GetAccessController constructs an AccessController -// with the given options using the named backend. -func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { - if initFunc, exists := accessControllers[name]; exists { - return initFunc(options) - } - - return nil, fmt.Errorf("no access controller registered with name: %s", name) -} - -blob -mark :120 -data 2741 -// Package silly provides a simple authentication scheme that checks for the -// existence of an Authorization header and issues access if is present and -// non-empty. -// -// This package is present as an example implementation of a minimal -// auth.AccessController and for testing. This is not suitable for any kind of -// production security. -package silly - -import ( - "fmt" - "net/http" - "strings" - - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" -) - -// accessController provides a simple implementation of auth.AccessController -// that simply checks for a non-empty Authorization header. It is useful for -// demonstration and testing. -type accessController struct { - realm string - service string -} - -var _ auth.AccessController = &accessController{} - -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - realm, present := options["realm"] - if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for silly access controller`) - } - - service, present := options["service"] - if _, ok := service.(string); !present || !ok { - return nil, fmt.Errorf(`"service" must be set for silly access controller`) - } - - return &accessController{realm: realm.(string), service: service.(string)}, nil -} - -// Authorized simply checks for the existence of the authorization header, -// responding with a bearer challenge if it doesn't exist. 
-func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := ctxu.GetRequest(ctx) - if err != nil { - return nil, err - } - - if req.Header.Get("Authorization") == "" { - challenge := challenge{ - realm: ac.realm, - service: ac.service, - } - - if len(accessRecords) > 0 { - var scopes []string - for _, access := range accessRecords { - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) - } - challenge.scope = strings.Join(scopes, " ") - } - - return nil, &challenge - } - - return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil -} - -type challenge struct { - realm string - service string - scope string -} - -func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { - header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) - - if ch.scope != "" { - header = fmt.Sprintf("%s,scope=%q", header, ch.scope) - } - - w.Header().Set("WWW-Authenticate", header) - w.WriteHeader(http.StatusUnauthorized) -} - -func (ch *challenge) Error() string { - return fmt.Sprintf("silly authentication challenge: %#v", ch) -} - -// init registers the silly auth backend. -func init() { - auth.Register("silly", auth.InitFunc(newAccessController)) -} - -blob -mark :121 -data 1765 -package silly - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" -) - -func TestSillyAccessController(t *testing.T) { - ac := &accessController{ - realm: "test-realm", - service: "test-service", - } - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithValue(nil, "http.request", r) - authCtx, err := ac.Authorized(ctx) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - err.ServeHTTP(w, r) - return - default: - t.Fatalf("unexpected error authorizing request: %v", err) - } - } - - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) - if !ok { - t.Fatal("silly accessController did not set auth.user context") - } - - if userInfo.Name != "silly" { - t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) - } - - w.WriteHeader(http.StatusNoContent) - })) - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should not be authorized - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) - } - - req, err := http.NewRequest("GET", server.URL, nil) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - req.Header.Set("Authorization", "seriously, anything") - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should be authorized - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) - } -} - -blob -mark :122 -data 7491 -package token - -import ( - "crypto" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -// accessSet maps a typed, named resource to -// a set of actions requested or
authorized. -type accessSet map[auth.Resource]actionSet - -// newAccessSet constructs an accessSet from -// a variable number of auth.Access items. -func newAccessSet(accessItems ...auth.Access) accessSet { - accessSet := make(accessSet, len(accessItems)) - - for _, access := range accessItems { - resource := auth.Resource{ - Type: access.Type, - Name: access.Name, - } - - set, exists := accessSet[resource] - if !exists { - set = newActionSet() - accessSet[resource] = set - } - - set.add(access.Action) - } - - return accessSet -} - -// contains returns whether or not the given access is in this accessSet. -func (s accessSet) contains(access auth.Access) bool { - actionSet, ok := s[access.Resource] - if ok { - return actionSet.contains(access.Action) - } - - return false -} - -// scopeParam returns a collection of scopes which can -// be used for a WWW-Authenticate challenge parameter. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (s accessSet) scopeParam() string { - scopes := make([]string, 0, len(s)) - - for resource, actionSet := range s { - actions := strings.Join(actionSet.keys(), ",") - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) - } - - return strings.Join(scopes, " ") -} - -// Errors used and exported by this package. -var ( - ErrInsufficientScope = errors.New("insufficient scope") - ErrTokenRequired = errors.New("authorization token required") -) - -// authChallenge implements the auth.Challenge interface. -type authChallenge struct { - err error - realm string - service string - accessSet accessSet -} - -// Error returns the internal error string for this authChallenge. -func (ac *authChallenge) Error() string { - return ac.err.Error() -} - -// Status returns the HTTP Response Status Code for this authChallenge. -func (ac *authChallenge) Status() int { - return http.StatusUnauthorized -} - -// challengeParams constructs the value to be used in -// the WWW-Authenticate response challenge header. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (ac *authChallenge) challengeParams() string { - str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) - - if scope := ac.accessSet.scopeParam(); scope != "" { - str = fmt.Sprintf("%s,scope=%q", str, scope) - } - - if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { - str = fmt.Sprintf("%s,error=%q", str, "invalid_token") - } else if ac.err == ErrInsufficientScope { - str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") - } - - return str -} - -// SetHeader sets the WWW-Authenticate value for the given header. -func (ac *authChallenge) SetHeader(header http.Header) { - header.Add("WWW-Authenticate", ac.challengeParams()) -} - -// ServeHTTP handles writing the challenge response -// by setting the challenge header and status code. -func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ac.SetHeader(w.Header()) - w.WriteHeader(ac.Status()) -} - -// accessController implements the auth.AccessController interface. -type accessController struct { - realm string - issuer string - service string - rootCerts *x509.CertPool - trustedKeys map[string]libtrust.PublicKey -} - -// tokenAccessOptions is a convenience type for handling -// options to the constructor of an accessController. -type tokenAccessOptions struct { - realm string - issuer string - service string - rootCertBundle string -} - -// checkOptions gathers the necessary options -// for an accessController from the given map.
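-// -// A sketch of the expected shape, mirroring the values used in the tests -// below (the rootcertbundle path is a placeholder): -// -// options := map[string]interface{}{ -// "realm": "https://auth.example.com/token/", -// "issuer": "test-issuer.example.com", -// "service": "test-service.example.com", -// "rootcertbundle": "/path/to/root-bundle.crt", -// }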
-func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { - var opts tokenAccessOptions - - keys := []string{"realm", "issuer", "service", "rootcertbundle"} - vals := make([]string, 0, len(keys)) - for _, key := range keys { - val, ok := options[key].(string) - if !ok { - return opts, fmt.Errorf("token auth requires a valid option string: %q", key) - } - vals = append(vals, val) - } - - opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] - - return opts, nil -} - -// newAccessController creates an accessController using the given options. -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - config, err := checkOptions(options) - if err != nil { - return nil, err - } - - fp, err := os.Open(config.rootCertBundle) - if err != nil { - return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) - } - defer fp.Close() - - rawCertBundle, err := ioutil.ReadAll(fp) - if err != nil { - return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) - } - - var rootCerts []*x509.Certificate - pemBlock, rawCertBundle := pem.Decode(rawCertBundle) - for pemBlock != nil { - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) - } - - rootCerts = append(rootCerts, cert) - - pemBlock, rawCertBundle = pem.Decode(rawCertBundle) - } - - if len(rootCerts) == 0 { - return nil, errors.New("token auth requires at least one token signing root certificate") - } - - rootPool := x509.NewCertPool() - trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) - for _, rootCert := range rootCerts { - rootPool.AddCert(rootCert) - pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) - if err != nil { - return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) - } - trustedKeys[pubKey.KeyID()] = pubKey - } - - return &accessController{ - realm: config.realm, - issuer: config.issuer, - service: config.service, - rootCerts: rootPool, - trustedKeys: trustedKeys, - }, nil -} - -// Authorized handles checking whether the given request is authorized -// for actions on resources described by the given access items. 
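-// -// Callers are expected to have placed the incoming *http.Request on the -// context under the "http.request" key (which is what ctxu.GetRequest reads), -// e.g.: -// -// ctx := context.WithValue(parent, "http.request", req) -// authedCtx, err := ac.Authorized(ctx, accessItems...)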
-func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { - challenge := &authChallenge{ - realm: ac.realm, - service: ac.service, - accessSet: newAccessSet(accessItems...), - } - - req, err := ctxu.GetRequest(ctx) - if err != nil { - return nil, err - } - - parts := strings.Split(req.Header.Get("Authorization"), " ") - - if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { - challenge.err = ErrTokenRequired - return nil, challenge - } - - rawToken := parts[1] - - token, err := NewToken(rawToken) - if err != nil { - challenge.err = err - return nil, challenge - } - - verifyOpts := VerifyOptions{ - TrustedIssuers: []string{ac.issuer}, - AcceptedAudiences: []string{ac.service}, - Roots: ac.rootCerts, - TrustedKeys: ac.trustedKeys, - } - - if err = token.Verify(verifyOpts); err != nil { - challenge.err = err - return nil, challenge - } - - accessSet := token.accessSet() - for _, access := range accessItems { - if !accessSet.contains(access) { - challenge.err = ErrInsufficientScope - return nil, challenge - } - } - - return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil -} - -// init handles registering the token auth backend. -func init() { - auth.Register("token", auth.InitFunc(newAccessController)) -} - -blob -mark :123 -data 773 -package token - -// StringSet is a useful type for looking up strings. -type stringSet map[string]struct{} - -// NewStringSet creates a new StringSet with the given strings. -func newStringSet(keys ...string) stringSet { - ss := make(stringSet, len(keys)) - ss.add(keys...) - return ss -} - -// Add inserts the given keys into this StringSet. -func (ss stringSet) add(keys ...string) { - for _, key := range keys { - ss[key] = struct{}{} - } -} - -// Contains returns whether the given key is in this StringSet. -func (ss stringSet) contains(key string) bool { - _, ok := ss[key] - return ok -} - -// Keys returns a slice of all keys in this StringSet. -func (ss stringSet) keys() []string { - keys := make([]string, 0, len(ss)) - - for key := range ss { - keys = append(keys, key) - } - - return keys -} - -blob -mark :124 -data 10150 -package token - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/libtrust" - - "github.com/docker/distribution/registry/auth" -) - -const ( - // TokenSeparator is the value which separates the header, claims, and - // signature in the compact serialization of a JSON Web Token. - TokenSeparator = "." -) - -// Errors used by token parsing and verification. -var ( - ErrMalformedToken = errors.New("malformed token") - ErrInvalidToken = errors.New("invalid token") -) - -// ResourceActions stores allowed actions on a named and typed resource. -type ResourceActions struct { - Type string `json:"type"` - Name string `json:"name"` - Actions []string `json:"actions"` -} - -// ClaimSet describes the main section of a JSON Web Token. -type ClaimSet struct { - // Public claims - Issuer string `json:"iss"` - Subject string `json:"sub"` - Audience string `json:"aud"` - Expiration int64 `json:"exp"` - NotBefore int64 `json:"nbf"` - IssuedAt int64 `json:"iat"` - JWTID string `json:"jti"` - - // Private claims - Access []*ResourceActions `json:"access"` -} - -// Header describes the header section of a JSON Web Token. 
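-// -// A serialized header might look roughly like the following (values are -// illustrative; see makeTestToken in the tests for a concrete instance): -// -// {"typ": "JWT", "alg": "ES256", "jwk": {...}}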
-type Header struct { - Type string `json:"typ"` - SigningAlg string `json:"alg"` - KeyID string `json:"kid,omitempty"` - X5c []string `json:"x5c,omitempty"` - RawJWK json.RawMessage `json:"jwk,omitempty"` -} - -// Token describes a JSON Web Token. -type Token struct { - Raw string - Header *Header - Claims *ClaimSet - Signature []byte -} - -// VerifyOptions is used to specify -// options when verifying a JSON Web Token. -type VerifyOptions struct { - TrustedIssuers []string - AcceptedAudiences []string - Roots *x509.CertPool - TrustedKeys map[string]libtrust.PublicKey -} - -// NewToken parses the given raw token string -// and constructs an unverified JSON Web Token. -func NewToken(rawToken string) (*Token, error) { - parts := strings.Split(rawToken, TokenSeparator) - if len(parts) != 3 { - return nil, ErrMalformedToken - } - - var ( - rawHeader, rawClaims = parts[0], parts[1] - headerJSON, claimsJSON []byte - err error - ) - - defer func() { - if err != nil { - log.Errorf("error while unmarshalling raw token: %s", err) - } - }() - - if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { - err = fmt.Errorf("unable to decode header: %s", err) - return nil, ErrMalformedToken - } - - if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { - err = fmt.Errorf("unable to decode claims: %s", err) - return nil, ErrMalformedToken - } - - token := new(Token) - token.Header = new(Header) - token.Claims = new(ClaimSet) - - token.Raw = strings.Join(parts[:2], TokenSeparator) - if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { - err = fmt.Errorf("unable to decode signature: %s", err) - return nil, ErrMalformedToken - } - - if err = json.Unmarshal(headerJSON, token.Header); err != nil { - return nil, ErrMalformedToken - } - - if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { - return nil, ErrMalformedToken - } - - return token, nil -} - -// Verify attempts to verify this token using the given options. -// Returns a nil error if the token is valid. -func (t *Token) Verify(verifyOpts VerifyOptions) error { - // Verify that the Issuer claim is a trusted authority. - if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { - log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) - return ErrInvalidToken - } - - // Verify that the Audience claim is allowed. - if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { - log.Errorf("token intended for another audience: %q", t.Claims.Audience) - return ErrInvalidToken - } - - // Verify that the token is currently usable and not expired. - currentUnixTime := time.Now().Unix() - if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { - log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) - return ErrInvalidToken - } - - // Verify the token signature. - if len(t.Signature) == 0 { - log.Error("token has no signature") - return ErrInvalidToken - } - - // Verify that the signing key is trusted. - signingKey, err := t.VerifySigningKey(verifyOpts) - if err != nil { - log.Error(err) - return ErrInvalidToken - } - - // Finally, verify the signature of the token using the key which signed it. - if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { - log.Errorf("unable to verify token signature: %s", err) - return ErrInvalidToken - } - - return nil -} - -// VerifySigningKey attempts to get the key which was used to sign this token. 
-// The token header should contain one of these three fields: -// `x5c` - The x509 certificate chain for the signing key. Needs to be -// verified. -// `jwk` - The JSON Web Key representation of the signing key. -// May contain its own `x5c` field which needs to be verified. -// `kid` - The unique identifier for the key. This library interprets it -// as a libtrust fingerprint. The key itself can be looked up in -// the trustedKeys field of the given verify options. -// Each of these methods is tried in that order of preference until the -// signing key is found or an error is returned. -func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { - // First attempt to get an x509 certificate chain from the header. - var ( - x5c = t.Header.X5c - rawJWK = t.Header.RawJWK - keyID = t.Header.KeyID - ) - - switch { - case len(x5c) > 0: - signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) - case len(rawJWK) > 0: - signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) - case len(keyID) > 0: - signingKey = verifyOpts.TrustedKeys[keyID] - if signingKey == nil { - err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) - } - default: - err = errors.New("unable to get token signing key") - } - - return -} - -func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { - if len(x5c) == 0 { - return nil, errors.New("empty x509 certificate chain") - } - - // Ensure the first element is encoded correctly. - leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) - if err != nil { - return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) - } - - // And that it is a valid x509 certificate. - leafCert, err := x509.ParseCertificate(leafCertDer) - if err != nil { - return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) - } - - // The rest of the certificate chain consists of intermediate certificates. - intermediates := x509.NewCertPool() - for i := 1; i < len(x5c); i++ { - intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) - } - - intermediateCert, err := x509.ParseCertificate(intermediateCertDer) - if err != nil { - return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) - } - - intermediates.AddCert(intermediateCert) - } - - verifyOpts := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: roots, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - } - - // TODO: this call returns certificate chains which we ignore for now, but - // we should check them for revocations if we have the ability later. - if _, err = leafCert.Verify(verifyOpts); err != nil { - return nil, fmt.Errorf("unable to verify certificate chain: %s", err) - } - - // Get the public key from the leaf certificate.
- leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) - if !ok { - return nil, errors.New("unable to get leaf cert public key value") - } - - leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) - if err != nil { - return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) - } - - return -} - -func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { - pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) - if err != nil { - return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) - } - - // Check to see if the key includes a certificate chain. - x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) - if !ok { - // The JWK should be one of the trusted root keys. - if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { - return nil, errors.New("untrusted JWK with no certificate chain") - } - - // The JWK is one of the trusted keys. - return - } - - // Ensure each item in the chain is of the correct type. - x5c := make([]string, len(x5cVal)) - for i, val := range x5cVal { - certString, ok := val.(string) - if !ok || len(certString) == 0 { - return nil, errors.New("malformed certificate chain") - } - x5c[i] = certString - } - - // Ensure that the x509 certificate chain can - // be verified up to one of our trusted roots. - leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) - if err != nil { - return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) - } - - // Verify that the public key in the leaf cert *is* the signing key. - if pubKey.KeyID() != leafKey.KeyID() { - return nil, errors.New("leaf certificate public key ID does not match JWK key ID") - } - - return -} - -// accessSet returns a set of actions available for the resource -// actions listed in the `access` section of this token. -func (t *Token) accessSet() accessSet { - if t.Claims == nil { - return nil - } - - accessSet := make(accessSet, len(t.Claims.Access)) - - for _, resourceActions := range t.Claims.Access { - resource := auth.Resource{ - Type: resourceActions.Type, - Name: resourceActions.Name, - } - - set, exists := accessSet[resource] - if !exists { - set = newActionSet() - accessSet[resource] = set - } - - for _, action := range resourceActions.Actions { - set.add(action) - } - } - - return accessSet -} - -func (t *Token) compactRaw() string { - return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) -} - -blob -mark :125 -data 9594 -package token - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - "testing" - "time" - - "github.com/docker/distribution/registry/auth" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { - keys := make([]libtrust.PrivateKey, 0, numKeys) - - for i := 0; i < numKeys; i++ { - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err - } - keys = append(keys, key) - } - - return keys, nil -} - -func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { - if depth == 0 { - // Don't need to build a chain. 
- return rootKey, nil - } - - var ( - x5c = make([]string, depth) - parentKey = rootKey - key libtrust.PrivateKey - cert *x509.Certificate - err error - ) - - for depth > 0 { - if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { - return nil, err - } - - if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { - return nil, err - } - - depth-- - x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) - parentKey = key - } - - key.AddExtendedField("x5c", x5c) - - return key, nil -} - -func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { - certs := make([]*x509.Certificate, 0, len(rootKeys)) - - for _, key := range rootKeys { - cert, err := libtrust.GenerateCACert(key, key) - if err != nil { - return nil, err - } - certs = append(certs, cert) - } - - return certs, nil -} - -func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { - trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) - - for _, key := range rootKeys { - trustedKeys[key.KeyID()] = key.PublicKey() - } - - return trustedKeys -} - -func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { - signingKey, err := makeSigningKeyWithChain(rootKey, depth) - if err != nil { - return nil, fmt.Errorf("unable to make signing key with chain: %s", err) - } - - rawJWK, err := signingKey.PublicKey().MarshalJSON() - if err != nil { - return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) - } - - joseHeader := &Header{ - Type: "JWT", - SigningAlg: "ES256", - RawJWK: json.RawMessage(rawJWK), - } - - now := time.Now() - - randomBytes := make([]byte, 15) - if _, err = rand.Read(randomBytes); err != nil { - return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err) - } - - claimSet := &ClaimSet{ - Issuer: issuer, - Subject: "foo", - Audience: audience, - Expiration: now.Add(5 * time.Minute).Unix(), - NotBefore: now.Unix(), - IssuedAt: now.Unix(), - JWTID: base64.URLEncoding.EncodeToString(randomBytes), - Access: access, - } - - var joseHeaderBytes, claimSetBytes []byte - - if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { - return nil, fmt.Errorf("unable to marshal jose header: %s", err) - } - if claimSetBytes, err = json.Marshal(claimSet); err != nil { - return nil, fmt.Errorf("unable to marshal claim set: %s", err) - } - - encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) - encodedClaimSet := joseBase64UrlEncode(claimSetBytes) - encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) - - var signatureBytes []byte - if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { - return nil, fmt.Errorf("unable to sign jwt payload: %s", err) - } - - signature := joseBase64UrlEncode(signatureBytes) - tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) - - return NewToken(tokenString) -} - -// This test makes 4 tokens with a varying number of intermediate -// certificates ranging from no intermediate chain to a length of 3 -// intermediates.
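-// Each token is assembled the same way makeTestToken builds it, i.e. the JOSE -// compact serialization (sketch): -// -// joseBase64UrlEncode(header) + "." + joseBase64UrlEncode(claims) + "." + joseBase64UrlEncode(signature)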
-func TestTokenVerify(t *testing.T) { - var ( - numTokens = 4 - issuer = "test-issuer" - audience = "test-audience" - access = []*ResourceActions{ - { - Type: "repository", - Name: "foo/bar", - Actions: []string{"pull", "push"}, - }, - } - ) - - rootKeys, err := makeRootKeys(numTokens) - if err != nil { - t.Fatal(err) - } - - rootCerts, err := makeRootCerts(rootKeys) - if err != nil { - t.Fatal(err) - } - - rootPool := x509.NewCertPool() - for _, rootCert := range rootCerts { - rootPool.AddCert(rootCert) - } - - trustedKeys := makeTrustedKeyMap(rootKeys) - - tokens := make([]*Token, 0, numTokens) - - for i := 0; i < numTokens; i++ { - token, err := makeTestToken(issuer, audience, access, rootKeys[i], i) - if err != nil { - t.Fatal(err) - } - tokens = append(tokens, token) - } - - verifyOps := VerifyOptions{ - TrustedIssuers: []string{issuer}, - AcceptedAudiences: []string{audience}, - Roots: rootPool, - TrustedKeys: trustedKeys, - } - - for _, token := range tokens { - if err := token.Verify(verifyOps); err != nil { - t.Fatal(err) - } - } -} - -func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { - rootCerts, err := makeRootCerts(rootKeys) - if err != nil { - return "", err - } - - tempFile, err := ioutil.TempFile("", "rootCertBundle") - if err != nil { - return "", err - } - defer tempFile.Close() - - for _, cert := range rootCerts { - if err = pem.Encode(tempFile, &pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }); err != nil { - os.Remove(tempFile.Name()) - return "", err - } - } - - return tempFile.Name(), nil -} - -// TestAccessController tests complete integration of the token auth package. -// It starts by mocking the options for a token auth accessController which -// it creates. It then tries a few mock requests: -// - don't supply a token; should error with challenge -// - supply an invalid token; should error with challenge -// - supply a token with insufficient access; should error with challenge -// - supply a valid token; should not error -func TestAccessController(t *testing.T) { - // Make 2 keys; only the first is to be a trusted root key. - rootKeys, err := makeRootKeys(2) - if err != nil { - t.Fatal(err) - } - - rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) - if err != nil { - t.Fatal(err) - } - defer os.Remove(rootCertBundleFilename) - - realm := "https://auth.example.com/token/" - issuer := "test-issuer.example.com" - service := "test-service.example.com" - - options := map[string]interface{}{ - "realm": realm, - "issuer": issuer, - "service": service, - "rootcertbundle": rootCertBundleFilename, - } - - accessController, err := newAccessController(options) - if err != nil { - t.Fatal(err) - } - - // 1. Make a mock http.Request with no token. - req, err := http.NewRequest("GET", "http://example.com/foo", nil) - if err != nil { - t.Fatal(err) - } - - testAccess := auth.Access{ - Resource: auth.Resource{ - Type: "foo", - Name: "bar", - }, - Action: "baz", - } - - ctx := context.WithValue(nil, "http.request", req) - authCtx, err := accessController.Authorized(ctx, testAccess) - challenge, ok := err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrTokenRequired.Error() { - t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 2. Supply an invalid token.
- token, err := makeTestToken( - issuer, service, - []*ResourceActions{{ - Type: testAccess.Type, - Name: testAccess.Name, - Actions: []string{testAccess.Action}, - }}, - rootKeys[1], 1, // Everything is valid except the key which signed it. - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - challenge, ok = err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrInvalidToken.Error() { - t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInvalidToken) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 3. Supply a token with insufficient access. - token, err = makeTestToken( - issuer, service, - []*ResourceActions{}, // No access specified. - rootKeys[0], 1, - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - challenge, ok = err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrInsufficientScope.Error() { - t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 4. Supply the token we need, or deserve, or whatever. - token, err = makeTestToken( - issuer, service, - []*ResourceActions{{ - Type: testAccess.Type, - Name: testAccess.Name, - Actions: []string{testAccess.Action}, - }}, - rootKeys[0], 1, - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - if err != nil { - t.Fatalf("accessController returned unexpected error: %s", err) - } - - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) - if !ok { - t.Fatal("token accessController did not set auth.user context") - } - - if userInfo.Name != "foo" { - t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) - } -} - -blob -mark :126 -data 1512 -package token - -import ( - "encoding/base64" - "errors" - "strings" -) - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -// actionSet is a special type of stringSet.
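-// Its contains method treats "*" as a wildcard matching any action (sketch): -// -// s := newActionSet("*") -// s.contains("push") // true, since "*" is present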
-type actionSet struct { - stringSet -} - -func newActionSet(actions ...string) actionSet { - return actionSet{newStringSet(actions...)} -} - -// Contains calls StringSet.Contains() for -// either "*" or the given action string. -func (s actionSet) contains(action string) bool { - return s.stringSet.contains("*") || s.stringSet.contains(action) -} - -// contains returns true if q is found in ss. -func contains(ss []string, q string) bool { - for _, s := range ss { - if s == q { - return true - } - } - - return false -} - -blob -mark :127 -data 15864 -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "regexp" - "strconv" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/v2" -) - -// Client implements the client interface to the registry HTTP API -type Client interface { - // GetImageManifest returns an image manifest for the image at the given - // name, tag pair. - GetImageManifest(name, tag string) (*manifest.SignedManifest, error) - - // PutImageManifest uploads an image manifest for the image at the given - // name, tag pair. - PutImageManifest(name, tag string, imageManifest *manifest.SignedManifest) error - - // DeleteImage removes the image at the given name, tag pair. - DeleteImage(name, tag string) error - - // ListImageTags returns a list of all image tags with the given repository - // name. - ListImageTags(name string) ([]string, error) - - // BlobLength returns the length of the blob stored at the given name, - // digest pair. - // Returns a length value of -1 on error or if the blob does not exist. - BlobLength(name string, dgst digest.Digest) (int, error) - - // GetBlob returns the blob stored at the given name, digest pair in the - // form of an io.ReadCloser with the length of this blob. - // A nonzero byteOffset can be provided to receive a partial blob beginning - // at the given offset. - GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) - - // InitiateBlobUpload starts a blob upload in the given repository namespace - // and returns a unique location url to use for other blob upload methods. - InitiateBlobUpload(name string) (string, error) - - // GetBlobUploadStatus returns the byte offset and length of the blob at the - // given upload location. - GetBlobUploadStatus(location string) (int, int, error) - - // UploadBlob uploads a full blob to the registry. - UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error - - // UploadBlobChunk uploads a blob chunk with a given length and startByte to - // the registry. - // FinishChunkedBlobUpload must be called to finalize this upload. - UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error - - // FinishChunkedBlobUpload completes a chunked blob upload at a given - // location. - FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error - - // CancelBlobUpload deletes all content at the unfinished blob upload - // location and invalidates any future calls to this blob upload. - CancelBlobUpload(location string) error -} - -var ( - patternRangeHeader = regexp.MustCompile("bytes=0-(\\d+)/(\\d+)") -) - -// New returns a new Client which operates against a registry with the -// given base endpoint. -// This endpoint should not include /v2/ or any part of the URL after this.
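-// -// A minimal sketch (the registry URL is a placeholder; "hello/world" mirrors -// the repository name used in the tests): -// -// client, err := New("https://registry.example.com") -// if err != nil { -// // handle the error -// } -// tags, err := client.ListImageTags("hello/world")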
-func New(endpoint string) (Client, error) { - ub, err := v2.NewURLBuilderFromString(endpoint) - if err != nil { - return nil, err - } - - return &clientImpl{ - endpoint: endpoint, - ub: ub, - }, nil -} - -// clientImpl is the default implementation of the Client interface -type clientImpl struct { - endpoint string - ub *v2.URLBuilder -} - -// TODO(bbland): use consistent route generation between server and client - -func (r *clientImpl) GetImageManifest(name, tag string) (*manifest.SignedManifest, error) { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return nil, err - } - - response, err := http.Get(manifestURL) - if err != nil { - return nil, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - break - case response.StatusCode == http.StatusNotFound: - return nil, &ImageManifestNotFoundError{Name: name, Tag: tag} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, err - } - return nil, &errs - default: - return nil, &UnexpectedHTTPStatusError{Status: response.Status} - } - - decoder := json.NewDecoder(response.Body) - - manifest := new(manifest.SignedManifest) - err = decoder.Decode(manifest) - if err != nil { - return nil, err - } - return manifest, nil -} - -func (r *clientImpl) PutImageManifest(name, tag string, manifest *manifest.SignedManifest) error { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(manifest.Raw)) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted: - return nil - case response.StatusCode >= 400 && response.StatusCode < 500: - var errors v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errors) - if err != nil { - return err - } - - return &errors - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) DeleteImage(name, tag string) error { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return err - } - - deleteRequest, err := http.NewRequest("DELETE", manifestURL, nil) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(deleteRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - break - case response.StatusCode == http.StatusNotFound: - return &ImageManifestNotFoundError{Name: name, Tag: tag} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } - - return nil -} - -func (r *clientImpl) ListImageTags(name string) ([]string, error) { - tagsURL, err := r.ub.BuildTagsURL(name) - if err != nil { - return nil, err - } - - response, err := http.Get(tagsURL) - if err != nil { - return nil, err - } - defer 
response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - break - case response.StatusCode == http.StatusNotFound: - return nil, &RepositoryNotFoundError{Name: name} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, err - } - return nil, &errs - default: - return nil, &UnexpectedHTTPStatusError{Status: response.Status} - } - - tags := struct { - Tags []string `json:"tags"` - }{} - - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&tags) - if err != nil { - return nil, err - } - - return tags.Tags, nil -} - -func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { - blobURL, err := r.ub.BuildBlobURL(name, dgst) - if err != nil { - return -1, err - } - - response, err := http.Head(blobURL) - if err != nil { - return -1, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - lengthHeader := response.Header.Get("Content-Length") - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return -1, err - } - return int(length), nil - case response.StatusCode == http.StatusNotFound: - return -1, nil - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return -1, err - } - return -1, &errs - default: - return -1, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) { - blobURL, err := r.ub.BuildBlobURL(name, dgst) - if err != nil { - return nil, 0, err - } - - getRequest, err := http.NewRequest("GET", blobURL, nil) - if err != nil { - return nil, 0, err - } - - getRequest.Header.Add("Range", fmt.Sprintf("%d-", byteOffset)) - response, err := http.DefaultClient.Do(getRequest) - if err != nil { - return nil, 0, err - } - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - lengthHeader := response.Header.Get("Content-Length") - length, err := strconv.ParseInt(lengthHeader, 10, 0) - if err != nil { - return nil, 0, err - } - return response.Body, int(length), nil - case response.StatusCode == http.StatusNotFound: - response.Body.Close() - return nil, 0, &BlobNotFoundError{Name: name, Digest: dgst} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, 0, err - } - return nil, 0, &errs - default: - response.Body.Close() - return nil, 0, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) InitiateBlobUpload(name string) (string, error) { - uploadURL, err := r.ub.BuildBlobUploadURL(name) - if err != nil { - return "", err - } - - postRequest, err := http.NewRequest("POST", uploadURL, nil) - if err != nil { - return "", err - } - - response, err := http.DefaultClient.Do(postRequest) - if err != nil { - return "", err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusAccepted: - return response.Header.Get("Location"), nil - // case response.StatusCode == 
http.StatusNotFound: - // return - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return "", err - } - return "", &errs - default: - return "", &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) { - response, err := http.Get(location) - if err != nil { - return 0, 0, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - return parseRangeHeader(response.Header.Get("Range")) - case response.StatusCode == http.StatusNotFound: - return 0, 0, &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return 0, 0, err - } - return 0, 0, &errs - default: - return 0, 0, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error { - defer blob.Close() - - putRequest, err := http.NewRequest("PUT", location, blob) - if err != nil { - return err - } - - values := putRequest.URL.Query() - values.Set("digest", dgst.String()) - putRequest.URL.RawQuery = values.Encode() - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", fmt.Sprint(length)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusCreated: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error { - defer blobChunk.Close() - - putRequest, err := http.NewRequest("PUT", location, blobChunk) - if err != nil { - return err - } - - endByte := startByte + length - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", fmt.Sprint(length)) - putRequest.Header.Set("Content-Range", - fmt.Sprintf("%d-%d/%d", startByte, endByte, endByte)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusAccepted: - return nil - case response.StatusCode == http.StatusRequestedRangeNotSatisfiable: - lastValidRange, blobSize, err := parseRangeHeader(response.Header.Get("Range")) - if err != nil { - return err - } - return &BlobUploadInvalidRangeError{ - Location: location, - LastValidRange: lastValidRange, - BlobSize: blobSize, - } - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := 
json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error { - putRequest, err := http.NewRequest("PUT", location, nil) - if err != nil { - return err - } - - values := putRequest.URL.Query() - values.Set("digest", dgst.String()) - putRequest.URL.RawQuery = values.Encode() - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", "0") - putRequest.Header.Set("Content-Range", - fmt.Sprintf("%d-%d/%d", length, length, length)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusCreated: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) CancelBlobUpload(location string) error { - deleteRequest, err := http.NewRequest("DELETE", location, nil) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(deleteRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -// parseRangeHeader parses out the offset and length from a returned Range -// header -func parseRangeHeader(byteRangeHeader string) (int, int, error) { - submatches := patternRangeHeader.FindStringSubmatch(byteRangeHeader) - if submatches == nil || len(submatches) < 3 { - return 0, 0, fmt.Errorf("Malformed Range header") - } - - offset, err := strconv.Atoi(submatches[1]) - if err != nil { - return 0, 0, err - } - length, err := strconv.Atoi(submatches[2]) - if err != nil { - return 0, 0, err - } - return offset, length, nil -} - -blob -mark :128 -data 10763 -package client - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "sync" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/testutil" -) - -type testBlob struct { - digest digest.Digest - contents []byte -} - -func TestRangeHeaderParser(t *testing.T) { - const ( - malformedRangeHeader = "bytes=0-A/C" - emptyRangeHeader = "" - rFirst = 100 - rSecond = 200 - ) - - var ( - wellformedRangeHeader = fmt.Sprintf("bytes=0-%d/%d", rFirst, rSecond) - ) - - if _, _, err := parseRangeHeader(malformedRangeHeader); err == nil { - t.Fatalf("malformedRangeHeader: error expected, got nil") - } - - if _, _, err := parseRangeHeader(emptyRangeHeader); err == nil { - 
t.Fatalf("emptyRangeHeader: error expected, got nil") - } - - first, second, err := parseRangeHeader(wellformedRangeHeader) - if err != nil { - t.Fatalf("wellformedRangeHeader: unexpected error %v", err) - } - - if first != rFirst || second != rSecond { - t.Fatalf("Range header was parsed incorrectly: %d/%d", first, second) - } - -} - -func TestPush(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: "tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - uploadLocations := make([]string, len(testBlobs)) - blobs := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, blob := range testBlobs { - // TODO(bbland): this is returning the same location for all uploads, - // because we can't know which blob will get which location. - // It's sort of okay because we're using unique digests, but this needs - // to change at some point. - uploadLocations[i] = fmt.Sprintf("/v2/%s/blobs/test-uuid", name) - blobs[i] = manifest.FSLayer{BlobSum: blob.digest} - history[i] = manifest.History{V1Compatibility: blob.digest.String()} - } - - m := &manifest.SignedManifest{ - Manifest: manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - }, - } - var err error - m.Raw, err = json.Marshal(m) - - blobRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) - for i, blob := range testBlobs { - blobRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + name + "/blobs/uploads/", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Location": {uploadLocations[i]}, - }), - }, - } - blobRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: uploadLocations[i], - QueryParams: map[string][]string{ - "digest": {blob.digest.String()}, - }, - Body: blob.contents, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - }, - } - } - - handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + name + "/manifests/" + tag, - Body: m.Raw, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - }, - })) - var server *httptest.Server - - // HACK(stevvooe): Super hack to follow: the request response map approach - // above does not let us correctly format the location header to the - // server url. This handler intercepts and re-writes the location header - // to the server url.
- - hack := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w = &headerInterceptingResponseWriter{ResponseWriter: w, serverURL: server.URL} - handler.ServeHTTP(w, r) - }) - - server = httptest.NewServer(hack) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - writer, err := l.Writer() - if err != nil { - t.Fatal(err) - } - - writer.SetSize(len(blob.contents)) - writer.Write(blob.contents) - writer.Close() - } - - objectStore.WriteManifest(name, tag, m) - - err = Push(client, objectStore, name, tag) - if err != nil { - t.Fatal(err) - } -} - -func TestPull(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: "tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - blobs := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, blob := range testBlobs { - blobs[i] = manifest.FSLayer{BlobSum: blob.digest} - history[i] = manifest.History{V1Compatibility: blob.digest.String()} - } - - m := &manifest.SignedManifest{ - Manifest: manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - }, - } - manifestBytes, err := json.Marshal(m) - - blobRequestResponseMappings := make([]testutil.RequestResponseMapping, len(testBlobs)) - for i, blob := range testBlobs { - blobRequestResponseMappings[i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents, - }, - } - } - - handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/manifests/" + tag, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: manifestBytes, - }, - })) - server := httptest.NewServer(handler) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - err = Pull(client, objectStore, name, tag) - if err != nil { - t.Fatal(err) - } - - m, err = objectStore.Manifest(name, tag) - if err != nil { - t.Fatal(err) - } - - mBytes, err := json.Marshal(m) - if err != nil { - t.Fatal(err) - } - - if string(mBytes) != string(manifestBytes) { - t.Fatal("Incorrect manifest") - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - reader, err := l.Reader() - if err != nil { - t.Fatal(err) - } - defer reader.Close() - - blobBytes, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatal(err) - } - - if string(blobBytes) != string(blob.contents) { - t.Fatal("Incorrect blob") - } - } -} - -func TestPullResume(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: 
"tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - layers := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, layer := range testBlobs { - layers[i] = manifest.FSLayer{BlobSum: layer.digest} - history[i] = manifest.History{V1Compatibility: layer.digest.String()} - } - - m := &manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: layers, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - } - manifestBytes, err := json.Marshal(m) - - layerRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) - for i, blob := range testBlobs { - layerRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents[:len(blob.contents)/2], - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(blob.contents))}, - }), - }, - } - layerRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents[len(blob.contents)/2:], - }, - } - } - - for i := 0; i < 3; i++ { - layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/manifests/" + tag, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: manifestBytes, - }, - }) - } - - handler := testutil.NewHandler(layerRequestResponseMappings) - server := httptest.NewServer(handler) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - for attempts := 0; attempts < 3; attempts++ { - err = Pull(client, objectStore, name, tag) - if err == nil { - break - } - } - - if err != nil { - t.Fatal(err) - } - - sm, err := objectStore.Manifest(name, tag) - if err != nil { - t.Fatal(err) - } - - mBytes, err := json.Marshal(sm) - if err != nil { - t.Fatal(err) - } - - if string(mBytes) != string(manifestBytes) { - t.Fatal("Incorrect manifest") - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - reader, err := l.Reader() - if err != nil { - t.Fatal(err) - } - defer reader.Close() - - layerBytes, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatal(err) - } - - if string(layerBytes) != string(blob.contents) { - t.Fatal("Incorrect blob") - } - } -} - -// headerInterceptingResponseWriter is a hacky workaround to re-write the -// location header to have the server url. 
-type headerInterceptingResponseWriter struct { - http.ResponseWriter - serverURL string -} - -func (hirw *headerInterceptingResponseWriter) WriteHeader(status int) { - location := hirw.Header().Get("Location") - if location != "" { - hirw.Header().Set("Location", hirw.serverURL+location) - } - - hirw.ResponseWriter.WriteHeader(status) -} - -blob -mark :129 -data 2282 -package client - -import ( - "fmt" - - "github.com/docker/distribution/digest" -) - -// RepositoryNotFoundError is returned when making an operation against a -// repository that does not exist in the registry. -type RepositoryNotFoundError struct { - Name string -} - -func (e *RepositoryNotFoundError) Error() string { - return fmt.Sprintf("No repository found with Name: %s", e.Name) -} - -// ImageManifestNotFoundError is returned when making an operation against a -// given image manifest that does not exist in the registry. -type ImageManifestNotFoundError struct { - Name string - Tag string -} - -func (e *ImageManifestNotFoundError) Error() string { - return fmt.Sprintf("No manifest found with Name: %s, Tag: %s", - e.Name, e.Tag) -} - -// BlobNotFoundError is returned when making an operation against a given image -// layer that does not exist in the registry. -type BlobNotFoundError struct { - Name string - Digest digest.Digest -} - -func (e *BlobNotFoundError) Error() string { - return fmt.Sprintf("No blob found with Name: %s, Digest: %s", - e.Name, e.Digest) -} - -// BlobUploadNotFoundError is returned when making a blob upload operation against an -// invalid blob upload location url. -// This may be the result of using a cancelled, completed, or stale upload -// location. -type BlobUploadNotFoundError struct { - Location string -} - -func (e *BlobUploadNotFoundError) Error() string { - return fmt.Sprintf("No blob upload found at Location: %s", e.Location) -} - -// BlobUploadInvalidRangeError is returned when attempting to upload an image -// blob chunk that is out of order. -// This provides the known BlobSize and LastValidRange which can be used to -// resume the upload. -type BlobUploadInvalidRangeError struct { - Location string - LastValidRange int - BlobSize int -} - -func (e *BlobUploadInvalidRangeError) Error() string { - return fmt.Sprintf( - "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Blob Size: %d", - e.Location, e.LastValidRange, e.BlobSize) -} - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) -} - -blob -mark :130 -data 5983 -package client - -import ( - "bytes" - "fmt" - "io" - "sync" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -var ( - // ErrLayerAlreadyExists is returned when attempting to create a layer with - // a tarsum that is already in use. - ErrLayerAlreadyExists = fmt.Errorf("Layer already exists") - - // ErrLayerLocked is returned when attempting to write to a layer which is - // currently being written to. - ErrLayerLocked = fmt.Errorf("Layer locked") -) - -// ObjectStore is an interface which is designed to approximate the docker -// engine storage. This interface is subject to change to conform to the -// future requirements of the engine. 
-type ObjectStore interface { - // Manifest retrieves the image manifest stored at the given repository name - // and tag - Manifest(name, tag string) (*manifest.SignedManifest, error) - - // WriteManifest stores an image manifest at the given repository name and - // tag - WriteManifest(name, tag string, manifest *manifest.SignedManifest) error - - // Layer returns a handle to a layer for reading and writing - Layer(dgst digest.Digest) (Layer, error) -} - -// Layer is a generic image layer interface. -// A Layer may not be written to if it is already complete. -type Layer interface { - // Reader returns a LayerReader or an error if the layer has not been - // written to or is currently being written to. - Reader() (LayerReader, error) - - // Writer returns a LayerWriter or an error if the layer has been fully - // written to or is currently being written to. - Writer() (LayerWriter, error) - - // Wait blocks until the Layer can be read from. - Wait() error -} - -// LayerReader is a read-only handle to a Layer, which exposes the CurrentSize -// and full Size in addition to implementing the io.ReadCloser interface. -type LayerReader interface { - io.ReadCloser - - // CurrentSize returns the number of bytes written to the underlying Layer - CurrentSize() int - - // Size returns the full size of the underlying Layer - Size() int -} - -// LayerWriter is a write-only handle to a Layer, which exposes the CurrentSize -// and full Size in addition to implementing the io.WriteCloser interface. -// SetSize must be called on this LayerWriter before it can be written to. -type LayerWriter interface { - io.WriteCloser - - // CurrentSize returns the number of bytes written to the underlying Layer - CurrentSize() int - - // Size returns the full size of the underlying Layer - Size() int - - // SetSize sets the full size of the underlying Layer. 
- // This must be called before any calls to Write - SetSize(int) error -} - -// memoryObjectStore is an in-memory implementation of the ObjectStore interface -type memoryObjectStore struct { - mutex *sync.Mutex - manifestStorage map[string]*manifest.SignedManifest - layerStorage map[digest.Digest]Layer -} - -func (objStore *memoryObjectStore) Manifest(name, tag string) (*manifest.SignedManifest, error) { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - manifest, ok := objStore.manifestStorage[name+":"+tag] - if !ok { - return nil, fmt.Errorf("No manifest found with Name: %q, Tag: %q", name, tag) - } - return manifest, nil -} - -func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *manifest.SignedManifest) error { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - objStore.manifestStorage[name+":"+tag] = manifest - return nil -} - -func (objStore *memoryObjectStore) Layer(dgst digest.Digest) (Layer, error) { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - layer, ok := objStore.layerStorage[dgst] - if !ok { - layer = &memoryLayer{cond: sync.NewCond(new(sync.Mutex))} - objStore.layerStorage[dgst] = layer - } - - return layer, nil -} - -type memoryLayer struct { - cond *sync.Cond - contents []byte - expectedSize int - writing bool -} - -func (ml *memoryLayer) Reader() (LayerReader, error) { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents == nil { - return nil, fmt.Errorf("Layer has not been written to yet") - } - if ml.writing { - return nil, ErrLayerLocked - } - - return &memoryLayerReader{ml: ml, reader: bytes.NewReader(ml.contents)}, nil -} - -func (ml *memoryLayer) Writer() (LayerWriter, error) { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents != nil { - if ml.writing { - return nil, ErrLayerLocked - } - if ml.expectedSize == len(ml.contents) { - return nil, ErrLayerAlreadyExists - } - } else { - ml.contents = make([]byte, 0) - } - - ml.writing = true - return &memoryLayerWriter{ml: ml, buffer: bytes.NewBuffer(ml.contents)}, nil -} - -func (ml *memoryLayer) Wait() error { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents == nil { - return fmt.Errorf("No writer to wait on") - } - - for ml.writing { - ml.cond.Wait() - } - - return nil -} - -type memoryLayerReader struct { - ml *memoryLayer - reader *bytes.Reader -} - -func (mlr *memoryLayerReader) Read(p []byte) (int, error) { - return mlr.reader.Read(p) -} - -func (mlr *memoryLayerReader) Close() error { - return nil -} - -func (mlr *memoryLayerReader) CurrentSize() int { - return len(mlr.ml.contents) -} - -func (mlr *memoryLayerReader) Size() int { - return mlr.ml.expectedSize -} - -type memoryLayerWriter struct { - ml *memoryLayer - buffer *bytes.Buffer -} - -func (mlw *memoryLayerWriter) Write(p []byte) (int, error) { - if mlw.ml.expectedSize == 0 { - return 0, fmt.Errorf("Must set size before writing to layer") - } - wrote, err := mlw.buffer.Write(p) - mlw.ml.contents = mlw.buffer.Bytes() - return wrote, err -} - -func (mlw *memoryLayerWriter) Close() error { - mlw.ml.cond.L.Lock() - defer mlw.ml.cond.L.Unlock() - - return mlw.close() -} - -func (mlw *memoryLayerWriter) close() error { - mlw.ml.writing = false - mlw.ml.cond.Broadcast() - return nil -} - -func (mlw *memoryLayerWriter) CurrentSize() int { - return len(mlw.ml.contents) -} - -func (mlw *memoryLayerWriter) Size() int { - return mlw.ml.expectedSize -} - -func (mlw *memoryLayerWriter) SetSize(size int) error { - if !mlw.ml.writing { - return fmt.Errorf("Layer is closed for 
writing") - } - mlw.ml.expectedSize = size - return nil -} - -blob -mark :131 -data 4131 -package client - -import ( - "fmt" - "io" - - log "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/manifest" -) - -// simultaneousLayerPullWindow is the size of the parallel layer pull window. -// A layer may not be pulled until the layer preceeding it by the length of the -// pull window has been successfully pulled. -const simultaneousLayerPullWindow = 4 - -// Pull implements a client pull workflow for the image defined by the given -// name and tag pair, using the given ObjectStore for local manifest and layer -// storage -func Pull(c Client, objectStore ObjectStore, name, tag string) error { - manifest, err := c.GetImageManifest(name, tag) - if err != nil { - return err - } - log.WithField("manifest", manifest).Info("Pulled manifest") - - if len(manifest.FSLayers) != len(manifest.History) { - return fmt.Errorf("Length of history not equal to number of layers") - } - if len(manifest.FSLayers) == 0 { - return fmt.Errorf("Image has no layers") - } - - errChans := make([]chan error, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - errChans[i] = make(chan error) - } - - // To avoid leak of goroutines we must notify - // pullLayer goroutines about a cancelation, - // otherwise they will lock forever. - cancelCh := make(chan struct{}) - - // Iterate over each layer in the manifest, simultaneously pulling no more - // than simultaneousLayerPullWindow layers at a time. If an error is - // received from a layer pull, we abort the push. - for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPullWindow; i++ { - dependentLayer := i - simultaneousLayerPullWindow - if dependentLayer >= 0 { - err := <-errChans[dependentLayer] - if err != nil { - log.WithField("error", err).Warn("Pull aborted") - close(cancelCh) - return err - } - } - - if i < len(manifest.FSLayers) { - go func(i int) { - select { - case errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]): - case <-cancelCh: // no chance to recv until cancelCh's closed - } - }(i) - } - } - - err = objectStore.WriteManifest(name, tag, manifest) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "manifest": manifest, - }).Warn("Unable to write image manifest") - return err - } - - return nil -} - -func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer manifest.FSLayer) error { - log.WithField("layer", fsLayer).Info("Pulling layer") - - layer, err := objectStore.Layer(fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to write local layer") - return err - } - - layerWriter, err := layer.Writer() - if err == ErrLayerAlreadyExists { - log.WithField("layer", fsLayer).Info("Layer already exists") - return nil - } - if err == ErrLayerLocked { - log.WithField("layer", fsLayer).Info("Layer download in progress, waiting") - layer.Wait() - return nil - } - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to write local layer") - return err - } - defer layerWriter.Close() - - if layerWriter.CurrentSize() > 0 { - log.WithFields(log.Fields{ - "layer": fsLayer, - "currentSize": layerWriter.CurrentSize(), - "size": layerWriter.Size(), - }).Info("Layer partially downloaded, resuming") - } - - layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, layerWriter.CurrentSize()) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to 
-blob -mark :132 -data 3514 -package client - -import ( - "fmt" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/manifest" -) - -// simultaneousLayerPushWindow is the size of the parallel layer push window. -// A layer may not be pushed until the layer preceding it by the length of the -// push window has been successfully pushed. -const simultaneousLayerPushWindow = 4 - -type pushFunction func(fsLayer manifest.FSLayer) error - -// Push implements a client push workflow for the image defined by the given -// name and tag pair, using the given ObjectStore for local manifest and layer -// storage -func Push(c Client, objectStore ObjectStore, name, tag string) error { - manifest, err := objectStore.Manifest(name, tag) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "name": name, - "tag": tag, - }).Info("No image found") - return err - } - - errChans := make([]chan error, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - errChans[i] = make(chan error) - } - - cancelCh := make(chan struct{}) - - // Iterate over each layer in the manifest, simultaneously pushing no more - // than simultaneousLayerPushWindow layers at a time. If an error is - // received from a layer push, we abort the push.
- for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPushWindow; i++ { - dependentLayer := i - simultaneousLayerPushWindow - if dependentLayer >= 0 { - err := <-errChans[dependentLayer] - if err != nil { - log.WithField("error", err).Warn("Push aborted") - close(cancelCh) - return err - } - } - - if i < len(manifest.FSLayers) { - go func(i int) { - select { - case errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i]): - case <-cancelCh: // recv broadcast notification about cancellation - } - }(i) - } - } - - err = c.PutImageManifest(name, tag, manifest) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "manifest": manifest, - }).Warn("Unable to upload manifest") - return err - } - - return nil -} - -func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer manifest.FSLayer) error { - log.WithField("layer", fsLayer).Info("Pushing layer") - - layer, err := objectStore.Layer(fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err - } - - layerReader, err := layer.Reader() - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err - } - defer layerReader.Close() - - if layerReader.CurrentSize() != layerReader.Size() { - log.WithFields(log.Fields{ - "layer": fsLayer, - "currentSize": layerReader.CurrentSize(), - "size": layerReader.Size(), - }).Warn("Local layer incomplete") - return fmt.Errorf("Local layer incomplete") - } - - length, err := c.BlobLength(name, fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to check existence of remote layer") - return err - } - if length >= 0 { - log.WithField("layer", fsLayer).Info("Layer already exists") - return nil - } - - location, err := c.InitiateBlobUpload(name) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err - } - - err = c.UploadBlob(location, layerReader, int(layerReader.CurrentSize()), fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err - } - - return nil -} - -blob -mark :133 -data 115 -// Package registry is a placeholder package for registry interface -// definitions and utilities. -package registry -
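Because pullLayer resumes from LayerWriter.CurrentSize, a caller can recover from transient failures simply by calling Pull again, as TestPullResume above demonstrates. A minimal sketch of such a retry helper, assuming the hypothetical name retryPull (it is not part of the vendored client package):

// retryPull is an illustrative helper only: Pull picks up partially
// downloaded layers from the ObjectStore, so each retry resumes rather
// than restarts the transfer.
func retryPull(c Client, store ObjectStore, name, tag string, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = Pull(c, store, name, tag); err == nil {
			return nil
		}
	}
	return err
}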
-blob -mark :134 -data 20920 -package handlers - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/http/httputil" - "net/url" - "os" - "path" - "reflect" - "strings" - "testing" - - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/v2" - _ "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" - "github.com/gorilla/handlers" - "golang.org/x/net/context" -) - -// TestCheckAPI hits the base endpoint (/v2/) and ensures we return the -// specified 200 OK response. -func TestCheckAPI(t *testing.T) { - env := newTestEnv(t) - - baseURL, err := env.builder.BuildBaseURL() - if err != nil { - t.Fatalf("unexpected error building base url: %v", err) - } - - resp, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing api base check", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Type": []string{"application/json; charset=utf-8"}, - "Content-Length": []string{"2"}, - }) - - p, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unexpected error reading response body: %v", err) - } - - if string(p) != "{}" { - t.Fatalf("unexpected response body: %v", string(p)) - } -} - -func TestURLPrefix(t *testing.T) { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - }, - } - config.HTTP.Prefix = "/test/" - - env := newTestEnvWithConfig(t, &config) - - baseURL, err := env.builder.BuildBaseURL() - if err != nil { - t.Fatalf("unexpected error building base url: %v", err) - } - - parsed, _ := url.Parse(baseURL) - if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { - t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) - } - - resp, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing api base check", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Type": []string{"application/json; charset=utf-8"}, - "Content-Length": []string{"2"}, - }) - -} - -// TestLayerAPI conducts a full test of the layer api. -func TestLayerAPI(t *testing.T) { - // TODO(stevvooe): This test code is complete junk but it should cover the - // complete flow. This must be broken down and checked against the - // specification *before* we submit the final to docker core.
- env := newTestEnv(t) - - imageName := "foo/bar" - // "build" our layer file - layerFile, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random layer file: %v", err) - } - - layerDigest := digest.Digest(tarSumStr) - - // ----------------------------------- - // Test fetch for non-existent content - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) - if err != nil { - t.Fatalf("error building url: %v", err) - } - - resp, err := http.Get(layerURL) - if err != nil { - t.Fatalf("unexpected error fetching non-existent layer: %v", err) - } - - checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) - - // ------------------------------------------ - // Test head request for non-existent content - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on non-existent layer: %v", err) - } - - checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) - - // ------------------------------------------ - // Start an upload, check the status then cancel - uploadURLBase, uploadUUID := startPushLayer(t, env.builder, imageName) - - // A status check should work - resp, err = http.Get(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error getting upload status: %v", err) - } - checkResponse(t, "status of deleted upload", resp, http.StatusNoContent) - checkHeaders(t, resp, http.Header{ - "Location": []string{"*"}, - "Range": []string{"0-0"}, - "Docker-Upload-UUID": []string{uploadUUID}, - }) - - req, err := http.NewRequest("DELETE", uploadURLBase, nil) - if err != nil { - t.Fatalf("unexpected error creating delete request: %v", err) - } - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error sending delete request: %v", err) - } - - checkResponse(t, "deleting upload", resp, http.StatusNoContent) - - // A status check should result in 404 - resp, err = http.Get(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error getting upload status: %v", err) - } - checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) - - // ----------------------------------------- - // Do layer push with an empty body and different digest - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) - if err != nil { - t.Fatalf("unexpected error doing bad layer push: %v", err) - } - - checkResponse(t, "bad layer push", resp, http.StatusBadRequest) - checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) - - // ----------------------------------------- - // Do layer push with an empty body and correct digest - zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{})) - if err != nil { - t.Fatalf("unexpected error digesting empty buffer: %v", err) - } - - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) - - // ----------------------------------------- - // Do layer push with an empty body and correct digest - - // This is a valid but empty tarfile! 
- emptyTar := bytes.Repeat([]byte("\x00"), 1024) - emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar)) - if err != nil { - t.Fatalf("unexpected error digesting empty tar: %v", err) - } - - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) - - // ------------------------------------------ - // Now, actually do successful upload. - layerLength, _ := layerFile.Seek(0, os.SEEK_END) - layerFile.Seek(0, os.SEEK_SET) - - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) - - // ------------------------ - // Use a head request to see if the layer exists. - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on existing layer: %v", err) - } - - checkResponse(t, "checking head on existing layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{layerDigest.String()}, - }) - - // ---------------- - // Fetch the layer! - resp, err = http.Get(layerURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "fetching layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{layerDigest.String()}, - }) - - // Verify the body - verifier, err := digest.NewDigestVerifier(layerDigest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - io.Copy(verifier, resp.Body) - - if !verifier.Verified() { - t.Fatalf("response body did not pass verification") - } - - // Missing tests: - // - Upload the same tarsum file under a different repository and - // ensure the content remains uncorrupted.
-} - -func TestManifestAPI(t *testing.T) { - env := newTestEnv(t) - - imageName := "foo/bar" - tag := "thetag" - - manifestURL, err := env.builder.BuildManifestURL(imageName, tag) - if err != nil { - t.Fatalf("unexpected error getting manifest url: %v", err) - } - - // ----------------------------- - // Attempt to fetch the manifest - resp, err := http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error getting manifest: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) - checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) - - tagsURL, err := env.builder.BuildTagsURL(imageName) - if err != nil { - t.Fatalf("unexpected error building tags url: %v", err) - } - - resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting unknown tags: %v", err) - } - defer resp.Body.Close() - - // Check that we get an unknown repository error when asking for tags - checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) - checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) - - // -------------------------------- - // Attempt to push unsigned manifest with missing layers - unsignedManifest := &manifest.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: imageName, - Tag: tag, - FSLayers: []manifest.FSLayer{ - { - BlobSum: "asdf", - }, - { - BlobSum: "qwer", - }, - }, - } - - resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) - defer resp.Body.Close() - checkResponse(t, "posting unsigned manifest", resp, http.StatusBadRequest) - _, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, - v2.ErrorCodeManifestUnverified, v2.ErrorCodeBlobUnknown, v2.ErrorCodeDigestInvalid) - - expectedCounts := map[v2.ErrorCode]int{ - v2.ErrorCodeManifestUnverified: 1, - v2.ErrorCodeBlobUnknown: 2, - v2.ErrorCodeDigestInvalid: 2, - } - - if !reflect.DeepEqual(counts, expectedCounts) { - t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) - } - - // TODO(stevvooe): Add a test case where we take a mostly valid registry, - // tamper with the content and ensure that we get an unverified manifest - // error. - - // Push 2 random layers - expectedLayers := make(map[digest.Digest]io.ReadSeeker) - - for i := range unsignedManifest.FSLayers { - rs, dgstStr, err := testutil.CreateRandomTarFile() - - if err != nil { - t.Fatalf("error creating random layer %d: %v", i, err) - } - dgst := digest.Digest(dgstStr) - - expectedLayers[dgst] = rs - unsignedManifest.FSLayers[i].BlobSum = dgst - - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) - } - - // ------------------- - // Push the signed manifest with all layers pushed.
- signedManifest, err := manifest.Sign(unsignedManifest, env.pk) - if err != nil { - t.Fatalf("unexpected error signing manifest: %v", err) - } - - payload, err := signedManifest.Payload() - checkErr(t, err, "getting manifest payload") - - dgst, err := digest.FromBytes(payload) - checkErr(t, err, "digesting manifest") - - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) - checkErr(t, err, "building manifest url") - - resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // -------------------- - // Push by digest -- should get same result - resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // ------------------ - // Fetch by tag name - resp, err = http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - }) - - var fetchedManifest manifest.SignedManifest - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&fetchedManifest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) { - t.Fatalf("manifests do not match") - } - - // --------------- - // Fetch by digest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - }) - - var fetchedManifestByDigest manifest.SignedManifest - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&fetchedManifestByDigest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if !bytes.Equal(fetchedManifestByDigest.Raw, signedManifest.Raw) { - t.Fatalf("manifests do not match") - } - - // Ensure that the tag is listed. 
- resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting unknown tags: %v", err) - } - defer resp.Body.Close() - - // The tags list should now include the pushed tag - checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) - dec = json.NewDecoder(resp.Body) - - var tagsResponse tagsAPIResponse - - if err := dec.Decode(&tagsResponse); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) - } - - if tagsResponse.Name != imageName { - t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) - } - - if len(tagsResponse.Tags) != 1 { - t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) - } - - if tagsResponse.Tags[0] != tag { - t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) - } -} - -type testEnv struct { - pk libtrust.PrivateKey - ctx context.Context - config configuration.Configuration - app *App - server *httptest.Server - builder *v2.URLBuilder -} - -func newTestEnv(t *testing.T) *testEnv { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - }, - } - - return newTestEnvWithConfig(t, &config) -} - -func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { - ctx := context.Background() - - app := NewApp(ctx, *config) - server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := v2.NewURLBuilderFromString(server.URL + config.HTTP.Prefix) - - if err != nil { - t.Fatalf("error creating url builder: %v", err) - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) - } - - return &testEnv{ - pk: pk, - ctx: ctx, - config: *config, - app: app, - server: server, - builder: builder, - } -} - -func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { - var body []byte - if sm, ok := v.(*manifest.SignedManifest); ok { - body = sm.Raw - } else { - var err error - body, err = json.MarshalIndent(v, "", " ") - if err != nil { - t.Fatalf("unexpected error marshaling %v: %v", v, err) - } - } - - req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) - if err != nil { - t.Fatalf("error creating request for %s: %v", msg, err) - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("error doing put request while %s: %v", msg, err) - } - - return resp -} - -func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location string, uuid string) { - layerUploadURL, err := ub.BuildBlobUploadURL(name) - if err != nil { - t.Fatalf("unexpected error building layer upload url: %v", err) - } - - resp, err := http.Post(layerUploadURL, "", nil) - if err != nil { - t.Fatalf("unexpected error starting layer push: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, fmt.Sprintf("starting layer push %v", name), resp, http.StatusAccepted) - - u, err := url.Parse(resp.Header.Get("Location")) - if err != nil { - t.Fatalf("error parsing location header: %v", err) - } - - uuid = path.Base(u.Path) - checkHeaders(t, resp, http.Header{ - "Location": []string{"*"}, - "Content-Length": []string{"0"}, - "Docker-Upload-UUID": []string{uuid}, - }) - - return resp.Header.Get("Location"), uuid -} - -// doPushLayer pushes the layer content, returning the response. If you're -// only expecting a successful response, use pushLayer.
-func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { - u, err := url.Parse(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error parsing pushLayer url: %v", err) - } - - u.RawQuery = url.Values{ - "_state": u.Query()["_state"], - - "digest": []string{dgst.String()}, - }.Encode() - - uploadURL := u.String() - - // Just do a monolithic upload - req, err := http.NewRequest("PUT", uploadURL, body) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - - return http.DefaultClient.Do(req) -} - -// pushLayer pushes the layer content returning the url on success. -func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { - digester := digest.NewCanonicalDigester() - - resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, &digester)) - if err != nil { - t.Fatalf("unexpected error doing push layer request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - - if err != nil { - t.Fatalf("error generating sha256 digest of body") - } - - sha256Dgst := digester.Digest() - - expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst) - if err != nil { - t.Fatalf("error building expected layer url: %v", err) - } - - checkHeaders(t, resp, http.Header{ - "Location": []string{expectedLayerURL}, - "Content-Length": []string{"0"}, - "Docker-Content-Digest": []string{sha256Dgst.String()}, - }) - - return resp.Header.Get("Location") -} - -func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { - if resp.StatusCode != expectedStatus { - t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) - maybeDumpResponse(t, resp) - - t.FailNow() - } -} - -// checkBodyHasErrorCodes ensures the body is an error body and has the -// expected error codes, returning the error structure, the json slice and a -// count of the errors by code. -func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...v2.ErrorCode) (v2.Errors, []byte, map[v2.ErrorCode]int) { - p, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unexpected error reading body %s: %v", msg, err) - } - - var errs v2.Errors - if err := json.Unmarshal(p, &errs); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) - } - - if len(errs.Errors) == 0 { - t.Fatalf("expected errors in response") - } - - // TODO(stevvooe): Shoot. The error setup is not working out. The content- - // type headers are being set after writing the status code. 
- // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { - // t.Fatalf("unexpected content type: %v != 'application/json'", - // resp.Header.Get("Content-Type")) - // } - - expected := map[v2.ErrorCode]struct{}{} - counts := map[v2.ErrorCode]int{} - - // Initialize map with zeros for expected - for _, code := range errorCodes { - expected[code] = struct{}{} - counts[code] = 0 - } - - for _, err := range errs.Errors { - if _, ok := expected[err.Code]; !ok { - t.Fatalf("unexpected error code %v encountered during %s: %s ", err.Code, msg, string(p)) - } - counts[err.Code]++ - } - - // Ensure that counts of expected errors were all non-zero - for code := range expected { - if counts[code] == 0 { - t.Fatalf("expected error code %v not encountered during %s: %s", code, msg, string(p)) - } - } - - return errs, p, counts -} - -func maybeDumpResponse(t *testing.T, resp *http.Response) { - if d, err := httputil.DumpResponse(resp, true); err != nil { - t.Logf("error dumping response: %v", err) - } else { - t.Logf("response:\n%s", string(d)) - } -} - -// checkHeaders checks that the response has at least the headers. If not, the -// test will fail. If a passed in header value is "*", any non-zero value will -// suffice as a match. -func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { - for k, vs := range headers { - if resp.Header.Get(k) == "" { - t.Fatalf("response missing header %q", k) - } - - for _, v := range vs { - if v == "*" { - // Just ensure there is some value. - if len(resp.Header[k]) > 0 { - continue - } - } - - for _, hv := range resp.Header[k] { - if hv != v { - t.Fatalf("%v header value not matched in response: %q != %q", k, hv, v) - } - } - } - } -} - -func checkErr(t *testing.T, err error, msg string) { - if err != nil { - t.Fatalf("unexpected error %s: %v", msg, err) - } -} - -blob -mark :135 -data 19258 -package handlers - -import ( - "expvar" - "fmt" - "math/rand" - "net" - "net/http" - "os" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/notifications" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - registrymiddleware "github.com/docker/distribution/registry/middleware/registry" - repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/factory" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" - "github.com/garyburd/redigo/redis" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// App is a global registry application object. Shared resources can be placed -// on this object that will be accessible from all requests. Any writable -// fields should be protected. -type App struct { - context.Context - - Config configuration.Configuration - - router *mux.Router // main application router, configured with dispatchers - driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. - registry distribution.Namespace // registry is the primary registry backend for the app instance.
- accessController auth.AccessController // main access controller for application - - // events contains notification related configuration. - events struct { - sink notifications.Sink - source notifications.SourceRecord - } - - redis *redis.Pool -} - -// NewApp takes a configuration and returns a configured app, ready to serve -// requests. The app only implements ServeHTTP and can be wrapped in other -// handlers accordingly. -func NewApp(ctx context.Context, configuration configuration.Configuration) *App { - app := &App{ - Config: configuration, - Context: ctx, - router: v2.RouterWithPrefix(configuration.HTTP.Prefix), - } - - app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) - - // Register the handler dispatchers. - app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { - return http.HandlerFunc(apiBase) - }) - app.register(v2.RouteNameManifest, imageManifestDispatcher) - app.register(v2.RouteNameTags, tagsDispatcher) - app.register(v2.RouteNameBlob, layerDispatcher) - app.register(v2.RouteNameBlobUpload, layerUploadDispatcher) - app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher) - - var err error - app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) - - if err != nil { - // TODO(stevvooe): Move the creation of a service into a protected - // method, where this is created lazily. Its status can be queried via - // a health check. - panic(err) - } - - startUploadPurger(app.driver, ctxu.GetLogger(app)) - - app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) - if err != nil { - panic(err) - } - - app.configureEvents(&configuration) - app.configureRedis(&configuration) - - // configure storage caches - if cc, ok := configuration.Storage["cache"]; ok { - switch cc["layerinfo"] { - case "redis": - if app.redis == nil { - panic("redis configuration required to use the redis layerinfo cache") - } - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis)) - ctxu.GetLogger(app).Infof("using redis layerinfo cache") - case "inmemory": - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewInMemoryLayerInfoCache()) - ctxu.GetLogger(app).Infof("using inmemory layerinfo cache") - default: - if cc["layerinfo"] != "" { - ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) - } - } - } - - if app.registry == nil { - // configure the registry if no cache section is available. - app.registry = storage.NewRegistryWithDriver(app.driver, nil) - } - - app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) - if err != nil { - panic(err) - } - - authType := configuration.Auth.Type() - - if authType != "" { - accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) - if err != nil { - panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) - } - app.accessController = accessController - } - - return app -} - -// register a handler with the application, by route name. The handler will be -// passed through the application filters and context will be constructed at -// request time. -func (app *App) register(routeName string, dispatch dispatchFunc) { - - // TODO(stevvooe): This odd dispatcher/route registration is a by-product of - // some limitations in the gorilla/mux router.
We are using it to keep - // routing consistent between the client and server, but we may want to - // replace it with manual routing and structure-based dispatch for better - // control over the request execution. - - app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) -} - -// configureEvents prepares the event sink for action. -func (app *App) configureEvents(configuration *configuration.Configuration) { - // Configure all of the endpoint sinks. - var sinks []notifications.Sink - for _, endpoint := range configuration.Notifications.Endpoints { - if endpoint.Disabled { - ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) - continue - } - - ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) - endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ - Timeout: endpoint.Timeout, - Threshold: endpoint.Threshold, - Backoff: endpoint.Backoff, - Headers: endpoint.Headers, - }) - - sinks = append(sinks, endpoint) - } - - // NOTE(stevvooe): Moving to a new queueing implementation is as easy as - // replacing broadcaster with a rabbitmq implementation. It's recommended - // that the registry instances also act as the workers to keep deployment - // simple. - app.events.sink = notifications.NewBroadcaster(sinks...) - - // Populate registry event source - hostname, err := os.Hostname() - if err != nil { - hostname = configuration.HTTP.Addr - } else { - // try to pick the port off the config - _, port, err := net.SplitHostPort(configuration.HTTP.Addr) - if err == nil { - hostname = net.JoinHostPort(hostname, port) - } - } - - app.events.source = notifications.SourceRecord{ - Addr: hostname, - InstanceID: ctxu.GetStringValue(app, "instance.id"), - } -} - -func (app *App) configureRedis(configuration *configuration.Configuration) { - if configuration.Redis.Addr == "" { - ctxu.GetLogger(app).Infof("redis not configured") - return - } - - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - // TODO(stevvooe): Yet another use case for contextual timing. 
- ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) - - done := func(err error) { - logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", - ctxu.Since(ctx, "redis.connect.startedat")) - if err != nil { - logger.Errorf("redis: error connecting: %v", err) - } else { - logger.Infof("redis: connect %v", configuration.Redis.Addr) - } - } - - conn, err := redis.DialTimeout("tcp", - configuration.Redis.Addr, - configuration.Redis.DialTimeout, - configuration.Redis.ReadTimeout, - configuration.Redis.WriteTimeout) - if err != nil { - ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", - configuration.Redis.Addr, err) - done(err) - return nil, err - } - - // authorize the connection - if configuration.Redis.Password != "" { - if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - // select the database to use - if configuration.Redis.DB != 0 { - if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - done(nil) - return conn, nil - }, - MaxIdle: configuration.Redis.Pool.MaxIdle, - MaxActive: configuration.Redis.Pool.MaxActive, - IdleTimeout: configuration.Redis.Pool.IdleTimeout, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - // TODO(stevvooe): We can probably do something more interesting - // here with the health package. - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not available, proceed without cache. - } - - app.redis = pool - - // setup expvar - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { - return map[string]interface{}{ - "Config": configuration.Redis, - "Active": app.redis.ActiveCount(), - } - })) -} - -func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() // ensure that request body is always closed. - - // Instantiate an http context here so we can track the error codes - // returned by the request router. - ctx := defaultContextManager.context(app, w, r) - defer func() { - ctxu.GetResponseLogger(ctx).Infof("response completed") - }() - defer defaultContextManager.release(ctx) - - // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. - var err error - w, err = ctxu.GetResponseWriter(ctx) - if err != nil { - ctxu.GetLogger(ctx).Warnf("response writer not found in context") - } - - // Set a header with the Docker Distribution API Version for all responses. - w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") - app.router.ServeHTTP(w, r) -} - -// dispatchFunc takes a context and request and returns a constructed handler -// for the route. The dispatcher will use this to dynamically create request -// specific handlers for each endpoint without creating a new router for each -// request. -type dispatchFunc func(ctx *Context, r *http.Request) http.Handler - -// TODO(stevvooe): dispatchers should probably have some validation error -// chain with proper error reporting. -
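To make the dispatch mechanism concrete, here is a hedged sketch of a registration in the style of those in NewApp above. app.register, v2.RouteNameBase, and the dispatchFunc signature are as defined in this file; the inline handler body is a hypothetical example, not vendored code.

// Illustrative only: a dispatchFunc receives the request-scoped Context
// built by the dispatcher and returns the handler for this one request.
app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ctx carries the resolved repository, auth state and URL builder.
		w.WriteHeader(http.StatusOK)
	})
})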
-// dispatcher returns a handler that constructs a request specific context and -// handler, using the dispatch factory function. -func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - context := app.context(w, r) - - if err := app.authorized(w, r, context); err != nil { - ctxu.GetLogger(context).Errorf("error authorizing context: %v", err) - return - } - - // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) - - if app.nameRequired(r) { - repository, err := app.registry.Repository(context, getName(context)) - - if err != nil { - ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) - - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - context.Errors.Push(v2.ErrorCodeNameUnknown, err) - case distribution.ErrRepositoryNameInvalid: - context.Errors.Push(v2.ErrorCodeNameInvalid, err) - } - - w.WriteHeader(http.StatusBadRequest) - serveJSON(w, context.Errors) - return - } - - // assign and decorate the authorized repository with an event bridge. - context.Repository = notifications.Listen( - repository, - app.eventBridge(context, r)) - - context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) - if err != nil { - ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) - serveJSON(w, context.Errors) - return - } - } - - dispatch(context, r).ServeHTTP(w, r) - - // Automated error response handling here. Handlers may return their - // own errors if they need different behavior (such as range errors - // for layer upload). - if context.Errors.Len() > 0 { - if context.Value("http.response.status") == 0 { - // TODO(stevvooe): Getting this value from the context is a - // bit of a hack. We can further address with some of our - // future refactoring. - w.WriteHeader(http.StatusBadRequest) - } - serveJSON(w, context.Errors) - } - }) -} - -// context constructs the context object for the application. This should only -// be called once per request. -func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := defaultContextManager.context(app, w, r) - ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, - "vars.name", - "vars.reference", - "vars.digest", - "vars.uuid")) - - context := &Context{ - App: app, - Context: ctx, - urlBuilder: v2.NewURLBuilderFromRequest(r), - } - - return context -} - -// authorized checks if the request can proceed with access to the requested -// repository. If it succeeds, the context may access the requested -// repository. An error will be returned if access is not available. -func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { - ctxu.GetLogger(context).Debug("authorizing request") - repo := getName(context) - - if app.accessController == nil { - return nil // access controller is not enabled. - } - - var accessRecords []auth.Access - - if repo != "" { - accessRecords = appendAccessRecords(accessRecords, r.Method, repo) - } else { - // Only allow the name not to be set on the base route. - if app.nameRequired(r) { - // For this to be properly secured, repo must always be set for a - // resource that may make a modification. The only condition under - // which name is not set and we still allow access is when the - // base route is accessed.
This section prevents us from making - // that mistake elsewhere in the code, allowing any operation to - // proceed. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusForbidden) - - var errs v2.Errors - errs.Push(v2.ErrorCodeUnauthorized) - serveJSON(w, errs) - return fmt.Errorf("forbidden: no repository name") - } - } - - ctx, err := app.accessController.Authorized(context.Context, accessRecords...) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - w.Header().Set("Content-Type", "application/json; charset=utf-8") - err.ServeHTTP(w, r) - - var errs v2.Errors - errs.Push(v2.ErrorCodeUnauthorized, accessRecords) - serveJSON(w, errs) - default: - // This condition is a potential security problem either in - // the configuration or whatever is backing the access - // controller. Just return a bad request with no information - // to avoid exposure. The request should not proceed. - ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) - w.WriteHeader(http.StatusBadRequest) - } - - return err - } - - // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context - // should be replaced by another, rather than replacing the context on a - // mutable object. - context.Context = ctx - return nil -} - -// eventBridge returns a bridge for the current request, configured with the -// correct actor and source. -func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { - actor := notifications.ActorRecord{ - Name: getUserName(ctx, r), - } - request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) - - return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) -} - -// nameRequired returns true if the route requires a name. -func (app *App) nameRequired(r *http.Request) bool { - route := mux.CurrentRoute(r) - return route == nil || route.GetName() != v2.RouteNameBase -} - -// apiBase implements a simple yes-man for doing overall checks against the -// api. This can support auth roundtrips to support docker login. -func apiBase(w http.ResponseWriter, r *http.Request) { - const emptyJSON = "{}" - // Provide a simple /v2/ 200 OK response with empty json response. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) - - fmt.Fprint(w, emptyJSON) -} - -// appendAccessRecords checks the method and adds the appropriate Access records to the records list. -func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { - resource := auth.Resource{ - Type: "repository", - Name: repo, - } - - switch method { - case "GET", "HEAD": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }) - case "POST", "PUT", "PATCH": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }, - auth.Access{ - Resource: resource, - Action: "push", - }) - case "DELETE": - // DELETE access requires full admin rights, which is represented - // as "*". This may not be ideal. 
- records = append(records, - auth.Access{ - Resource: resource, - Action: "*", - }) - } - return records -} - -// applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { - for _, mw := range middlewares { - rmw, err := registrymiddleware.Get(mw.Name, mw.Options, registry) - if err != nil { - return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) - } - registry = rmw - } - return registry, nil - -} - -// applyRepoMiddleware wraps a repository with the configured middlewares -func applyRepoMiddleware(repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { - for _, mw := range middlewares { - rmw, err := repositorymiddleware.Get(mw.Name, mw.Options, repository) - if err != nil { - return nil, err - } - repository = rmw - } - return repository, nil -} - -// applyStorageMiddleware wraps a storage driver with the configured middlewares -func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { - for _, mw := range middlewares { - smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) - if err != nil { - return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) - } - driver = smw - } - return driver, nil -} - -// startUploadPurger schedules a goroutine which will periodically -// check upload directories for old files and delete them -func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger) { - rand.Seed(time.Now().Unix()) - jitter := time.Duration(rand.Int()%60) * time.Minute - - // Start with reasonable defaults - // TODO:(richardscothern) make configurable - purgeAge := time.Duration(7 * 24 * time.Hour) - timeBetweenPurges := time.Duration(1 * 24 * time.Hour) - - go func() { - log.Infof("Starting upload purge in %s", jitter) - time.Sleep(jitter) - - for { - storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAge), true) - log.Infof("Starting upload purge in %s", timeBetweenPurges) - time.Sleep(timeBetweenPurges) - } - }() - -} - -blob -mark :136 -data 7587 -package handlers - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - _ "github.com/docker/distribution/registry/auth/silly" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "golang.org/x/net/context" -) - -// TestAppDispatcher builds an application with a test dispatcher and ensures -// that requests are properly dispatched and the handlers are constructed. -// This only tests the dispatch mechanism. The underlying dispatchers must be -// tested individually. 
-func TestAppDispatcher(t *testing.T) { - driver := inmemory.New() - app := &App{ - Config: configuration.Configuration{}, - Context: context.Background(), - router: v2.Router(), - driver: driver, - registry: storage.NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()), - } - server := httptest.NewServer(app) - router := v2.Router() - - serverURL, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("error parsing server url: %v", err) - } - - varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { - return func(ctx *Context, r *http.Request) http.Handler { - // Always checks the same name context - if ctx.Repository.Name() != getName(ctx) { - t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar") - } - - // Check that we have all that is expected - for expectedK, expectedV := range expectedVars { - if ctx.Value(expectedK) != expectedV { - t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV) - } - } - - // Check that we only have variables that are expected - for k, v := range ctx.Value("vars").(map[string]string) { - _, ok := expectedVars[k] - - if !ok { // name is checked on context - // We have an unexpected key, fail - t.Fatalf("unexpected key %q in vars with value %q", k, v) - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - } - } - - // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string - unflatten := func(vars []string) map[string]string { - m := make(map[string]string) - for i := 0; i < len(vars)-1; i = i + 2 { - m[vars[i]] = vars[i+1] - } - - return m - } - - for _, testcase := range []struct { - endpoint string - vars []string - }{ - { - endpoint: v2.RouteNameManifest, - vars: []string{ - "name", "foo/bar", - "reference", "sometag", - }, - }, - { - endpoint: v2.RouteNameTags, - vars: []string{ - "name", "foo/bar", - }, - }, - { - endpoint: v2.RouteNameBlob, - vars: []string{ - "name", "foo/bar", - "digest", "tarsum.v1+bogus:abcdef0123456789", - }, - }, - { - endpoint: v2.RouteNameBlobUpload, - vars: []string{ - "name", "foo/bar", - }, - }, - { - endpoint: v2.RouteNameBlobUploadChunk, - vars: []string{ - "name", "foo/bar", - "uuid", "theuuid", - }, - }, - } { - app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) - route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) - u, err := route.URL(testcase.vars...) - - if err != nil { - t.Fatal(err) - } - - resp, err := http.Get(u.String()) - - if err != nil { - t.Fatal(err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) - } - } -} - -// TestNewApp covers the creation of an application via NewApp with a -// configuration. -func TestNewApp(t *testing.T) { - ctx := context.Background() - config := configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": nil, - }, - Auth: configuration.Auth{ - // For now, we simply test that new auth results in a viable - // application. - "silly": { - "realm": "realm-test", - "service": "service-test", - }, - }, - } - - // Mostly, with this test, given a sane configuration, we are simply - // ensuring that NewApp doesn't panic. We might want to tweak this - // behavior. 
- app := NewApp(ctx, config) - - server := httptest.NewServer(app) - builder, err := v2.NewURLBuilderFromString(server.URL) - if err != nil { - t.Fatalf("error creating urlbuilder: %v", err) - } - - baseURL, err := builder.BuildBaseURL() - if err != nil { - t.Fatalf("error creating baseURL: %v", err) - } - - // TODO(stevvooe): The rest of this test might belong in the API tests. - - // Just hit the app and make sure we get a 401 Unauthorized error. - req, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer req.Body.Close() - - if req.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected status code during request: %v", err) - } - - if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { - t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") - } - - expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" - if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { - t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) - } - - var errs v2.Errors - dec := json.NewDecoder(req.Body) - if err := dec.Decode(&errs); err != nil { - t.Fatalf("error decoding error response: %v", err) - } - - if errs.Errors[0].Code != v2.ErrorCodeUnauthorized { - t.Fatalf("unexpected error code: %v != %v", errs.Errors[0].Code, v2.ErrorCodeUnauthorized) - } -} - -// Test the access record accumulator -func TestAppendAccessRecords(t *testing.T) { - repo := "testRepo" - - expectedResource := auth.Resource{ - Type: "repository", - Name: repo, - } - - expectedPullRecord := auth.Access{ - Resource: expectedResource, - Action: "pull", - } - expectedPushRecord := auth.Access{ - Resource: expectedResource, - Action: "push", - } - expectedAllRecord := auth.Access{ - Resource: expectedResource, - Action: "*", - } - - records := []auth.Access{} - result := appendAccessRecords(records, "GET", repo) - expectedResult := []auth.Access{expectedPullRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "HEAD", repo) - expectedResult = []auth.Access{expectedPullRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "POST", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "PUT", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "PATCH", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "DELETE", repo) - expectedResult = []auth.Access{expectedAllRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - -} - -blob -mark :137 -data 154 -// +build go1.4 - 
-package handlers
-
-import (
-	"net/http"
-)
-
-func basicAuth(r *http.Request) (username, password string, ok bool) {
-	return r.BasicAuth()
-}
-
-blob
-mark :138
-data 1034
-// +build !go1.4
-
-package handlers
-
-import (
-	"encoding/base64"
-	"net/http"
-	"strings"
-)
-
-// NOTE(stevvooe): This is the basic auth support from go1.4, included here to
-// ensure we can compile on go1.3 and earlier.
-
-// BasicAuth returns the username and password provided in the request's
-// Authorization header, if the request uses HTTP Basic Authentication.
-// See RFC 2617, Section 2.
-func basicAuth(r *http.Request) (username, password string, ok bool) {
-	auth := r.Header.Get("Authorization")
-	if auth == "" {
-		return
-	}
-	return parseBasicAuth(auth)
-}
-
-// parseBasicAuth parses an HTTP Basic Authentication string.
-// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
-func parseBasicAuth(auth string) (username, password string, ok bool) {
-	if !strings.HasPrefix(auth, "Basic ") {
-		return
-	}
-	c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
-	if err != nil {
-		return
-	}
-	cs := string(c)
-	s := strings.IndexByte(cs, ':')
-	if s < 0 {
-		return
-	}
-	return cs[:s], cs[s+1:], true
-}
-
-blob
-mark :139
-data 4320
-package handlers
-
-import (
-	"fmt"
-	"net/http"
-	"sync"
-
-	"github.com/docker/distribution"
-	ctxu "github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/registry/api/v2"
-	"golang.org/x/net/context"
-)
-
-// Context should contain the request-specific context for use across
-// handlers. Resources that don't need to be shared across handlers should not
-// be on this object.
-type Context struct {
-	// App points to the application structure that created this context.
-	*App
-	context.Context
-
-	// Repository is the repository for the current request. All requests
-	// should be scoped to a single repository. This field may be nil.
-	Repository distribution.Repository
-
-	// Errors is a collection of errors encountered during the request to be
-	// returned to the client API. If errors are added to the collection, the
-	// handler *must not* start the response via http.ResponseWriter.
-	Errors v2.Errors
-
-	urlBuilder *v2.URLBuilder
-
-	// TODO(stevvooe): The goal is to completely factor this context and
-	// dispatching out of the web application. Ideally, we should lean on
-	// context.Context for injection of these resources.
-}
-
-// Value overrides context.Context.Value to ensure that calls are routed to
-// the correct context.
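-//
-// Illustrative sketch (not part of the original source): handler helpers read
-// route variables through this lookup chain, e.g.
-//
-//	name := ctxu.GetStringValue(ctx, "vars.name") // e.g. "foo/bar"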
-func (ctx *Context) Value(key interface{}) interface{} {
-	return ctx.Context.Value(key)
-}
-
-func getName(ctx context.Context) (name string) {
-	return ctxu.GetStringValue(ctx, "vars.name")
-}
-
-func getReference(ctx context.Context) (reference string) {
-	return ctxu.GetStringValue(ctx, "vars.reference")
-}
-
-var errDigestNotAvailable = fmt.Errorf("digest not available in context")
-
-func getDigest(ctx context.Context) (dgst digest.Digest, err error) {
-	dgstStr := ctxu.GetStringValue(ctx, "vars.digest")
-
-	if dgstStr == "" {
-		ctxu.GetLogger(ctx).Errorf("digest not available")
-		return "", errDigestNotAvailable
-	}
-
-	d, err := digest.ParseDigest(dgstStr)
-	if err != nil {
-		ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err)
-		return "", err
-	}
-
-	return d, nil
-}
-
-func getUploadUUID(ctx context.Context) (uuid string) {
-	return ctxu.GetStringValue(ctx, "vars.uuid")
-}
-
-// getUserName attempts to resolve a username from the context and request. If
-// a username cannot be resolved, the empty string is returned.
-func getUserName(ctx context.Context, r *http.Request) string {
-	username := ctxu.GetStringValue(ctx, "auth.user.name")
-
-	// Fall back to the request user with basic auth.
-	if username == "" {
-		uname, _, ok := basicAuth(r)
-		if ok {
-			username = uname
-		}
-	}
-
-	return username
-}
-
-// contextManager allows us to associate net/context.Context instances with a
-// request, based on the memory identity of http.Request. This prepares http-
-// level context, which is not application specific. If this is called,
-// (*contextManager).release must be called on the context when the request is
-// completed.
-//
-// Providing this circumvents a lot of necessity for dispatchers with the
-// benefit of instantiating the request context much earlier.
-//
-// TODO(stevvooe): Consider making this facility a part of the context package.
-type contextManager struct {
-	contexts map[*http.Request]context.Context
-	mu       sync.Mutex
-}
-
-// defaultContextManager is just a global instance to register request contexts.
-var defaultContextManager = newContextManager()
-
-func newContextManager() *contextManager {
-	return &contextManager{
-		contexts: make(map[*http.Request]context.Context),
-	}
-}
-
-// context either returns a new context or looks it up in the manager.
-func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context {
-	cm.mu.Lock()
-	defer cm.mu.Unlock()
-
-	ctx, ok := cm.contexts[r]
-	if ok {
-		return ctx
-	}
-
-	if parent == nil {
-		parent = ctxu.Background()
-	}
-
-	ctx = ctxu.WithRequest(parent, r)
-	ctx, w = ctxu.WithResponseWriter(ctx, w)
-	ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx))
-	cm.contexts[r] = ctx
-
-	return ctx
-}
-
-// release frees any resources associated with the request.
-func (cm *contextManager) release(ctx context.Context) {
-	cm.mu.Lock()
-	defer cm.mu.Unlock()
-
-	r, err := ctxu.GetRequest(ctx)
-	if err != nil {
-		ctxu.GetLogger(ctx).Errorf("no request found in context during release")
-		return
-	}
-	delete(cm.contexts, r)
-}
-
-blob
-mark :140
-data 807
-package handlers
-
-import (
-	"encoding/json"
-	"io"
-	"net/http"
-)
-
-// serveJSON marshals v and sets the content-type header to
-// 'application/json'. If a different status code is required, call
-// ResponseWriter.WriteHeader before this function.
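-//
-// A usage sketch (illustrative, not part of the original source), mirroring
-// the pattern used in app.go:
-//
-//	w.WriteHeader(http.StatusNotFound)
-//	serveJSON(w, errs) // errs is a v2.Errors collection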
-func serveJSON(w http.ResponseWriter, v interface{}) error {
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	enc := json.NewEncoder(w)
-
-	if err := enc.Encode(v); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// closeResources closes all the provided resources after running the target
-// handler.
-func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		for _, closer := range closers {
-			defer closer.Close()
-		}
-		handler.ServeHTTP(w, r)
-	})
-}
-
-blob
-mark :141
-data 1782
-package handlers
-
-import (
-	"crypto/hmac"
-	"crypto/sha256"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"time"
-)
-
-// layerUploadState captures the serializable state of the layer upload.
-type layerUploadState struct {
-	// Name is the primary repository under which the layer will be linked.
-	Name string
-
-	// UUID identifies the upload.
-	UUID string
-
-	// Offset contains the current progress of the upload.
-	Offset int64
-
-	// StartedAt is the original start time of the upload.
-	StartedAt time.Time
-}
-
-type hmacKey string
-
-// unpackUploadState unpacks and validates the layer upload state from the
-// token, using the hmacKey secret.
-func (secret hmacKey) unpackUploadState(token string) (layerUploadState, error) {
-	var state layerUploadState
-
-	tokenBytes, err := base64.URLEncoding.DecodeString(token)
-	if err != nil {
-		return state, err
-	}
-	mac := hmac.New(sha256.New, []byte(secret))
-
-	if len(tokenBytes) < mac.Size() {
-		return state, fmt.Errorf("invalid token")
-	}
-
-	macBytes := tokenBytes[:mac.Size()]
-	messageBytes := tokenBytes[mac.Size():]
-
-	mac.Write(messageBytes)
-	if !hmac.Equal(mac.Sum(nil), macBytes) {
-		return state, fmt.Errorf("invalid token")
-	}
-
-	if err := json.Unmarshal(messageBytes, &state); err != nil {
-		return state, err
-	}
-
-	return state, nil
-}
-
-// packUploadState packs the upload state, signed with an HMAC digest using
-// the hmacKey secret, encoding to URL-safe base64. The resulting token can be
-// used to share data with minimized risk of external tampering.
-func (secret hmacKey) packUploadState(lus layerUploadState) (string, error) {
-	mac := hmac.New(sha256.New, []byte(secret))
-	p, err := json.Marshal(lus)
-	if err != nil {
-		return "", err
-	}
-
-	mac.Write(p)
-
-	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
-}
-
-blob
-mark :142
-data 2880
-package handlers
-
-import "testing"
-
-var layerUploadStates = []layerUploadState{
-	{
-		Name:   "hello",
-		UUID:   "abcd-1234-qwer-0987",
-		Offset: 0,
-	},
-	{
-		Name:   "hello-world",
-		UUID:   "abcd-1234-qwer-0987",
-		Offset: 0,
-	},
-	{
-		Name:   "h3ll0_w0rld",
-		UUID:   "abcd-1234-qwer-0987",
-		Offset: 1337,
-	},
-	{
-		Name:   "ABCDEFG",
-		UUID:   "ABCD-1234-QWER-0987",
-		Offset: 1234567890,
-	},
-	{
-		Name:   "this-is-A-sort-of-Long-name-for-Testing",
-		UUID:   "dead-1234-beef-0987",
-		Offset: 8675309,
-	},
-}
-
-var secrets = []string{
-	"supersecret",
-	"12345",
-	"a",
-	"SuperSecret",
-	"Sup3r... S3cr3t!",
-	"This is a reasonably long secret key that is used for the purpose of testing.",
-	"\u2603+\u2744", // snowman+snowflake
-}
-
-// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and
-// validates that the tokens can be used to reconstruct the proper upload state.
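-//
-// Token layout, per packUploadState above:
-// base64url(HMAC-SHA256(secret, json) || json). An illustrative round trip
-// (not part of the original source):
-//
-//	token, _ := hmacKey("supersecret").packUploadState(layerUploadState{Name: "hello"})
-//	state, _ := hmacKey("supersecret").unpackUploadState(token) // state.Name == "hello"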
-func TestLayerUploadTokens(t *testing.T) { - secret := hmacKey("supersecret") - - for _, testcase := range layerUploadStates { - token, err := secret.packUploadState(testcase) - if err != nil { - t.Fatal(err) - } - - lus, err := secret.unpackUploadState(token) - if err != nil { - t.Fatal(err) - } - - assertLayerUploadStateEquals(t, testcase, lus) - } -} - -// TestHMACValidate ensures that any HMAC token providers are compatible if and -// only if they share the same secret. -func TestHMACValidation(t *testing.T) { - for _, secret := range secrets { - secret1 := hmacKey(secret) - secret2 := hmacKey(secret) - badSecret := hmacKey("DifferentSecret") - - for _, testcase := range layerUploadStates { - token, err := secret1.packUploadState(testcase) - if err != nil { - t.Fatal(err) - } - - lus, err := secret2.unpackUploadState(token) - if err != nil { - t.Fatal(err) - } - - assertLayerUploadStateEquals(t, testcase, lus) - - _, err = badSecret.unpackUploadState(token) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token) - } - - badToken, err := badSecret.packUploadState(lus) - if err != nil { - t.Fatal(err) - } - - _, err = secret1.unpackUploadState(badToken) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) - } - - _, err = secret2.unpackUploadState(badToken) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) - } - } - } -} - -func assertLayerUploadStateEquals(t *testing.T, expected layerUploadState, received layerUploadState) { - if expected.Name != received.Name { - t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) - } - if expected.UUID != received.UUID { - t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID) - } - if expected.Offset != received.Offset { - t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset) - } -} - -blob -mark :143 -data 6677 -package handlers - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" - "golang.org/x/net/context" -) - -// imageManifestDispatcher takes the request context and builds the -// appropriate handler for handling image manifest requests. -func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { - imageManifestHandler := &imageManifestHandler{ - Context: ctx, - } - reference := getReference(ctx) - dgst, err := digest.ParseDigest(reference) - if err != nil { - // We just have a tag - imageManifestHandler.Tag = reference - } else { - imageManifestHandler.Digest = dgst - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), - "PUT": http.HandlerFunc(imageManifestHandler.PutImageManifest), - "DELETE": http.HandlerFunc(imageManifestHandler.DeleteImageManifest), - } -} - -// imageManifestHandler handles http operations on image manifests. -type imageManifestHandler struct { - *Context - - // One of tag or digest gets set, depending on what is present in context. - Tag string - Digest digest.Digest -} - -// GetImageManifest fetches the image manifest from the storage backend, if it exists. 
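-//
-// Illustrative request forms (assumed from the v2 routing above, not part of
-// the original source): the reference var may hold either a tag or a digest,
-// e.g.
-//
-//	GET /v2/<name>/manifests/<tag>
-//	GET /v2/<name>/manifests/<algorithm>:<hex>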
-func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) {
-	ctxu.GetLogger(imh).Debug("GetImageManifest")
-	manifests := imh.Repository.Manifests()
-
-	var (
-		sm  *manifest.SignedManifest
-		err error
-	)
-
-	if imh.Tag != "" {
-		sm, err = manifests.GetByTag(imh.Context, imh.Tag)
-	} else {
-		sm, err = manifests.Get(imh.Context, imh.Digest)
-	}
-
-	if err != nil {
-		imh.Errors.Push(v2.ErrorCodeManifestUnknown, err)
-		w.WriteHeader(http.StatusNotFound)
-		return
-	}
-
-	// Get the digest, if we don't already have it.
-	if imh.Digest == "" {
-		dgst, err := digestManifest(imh, sm)
-		if err != nil {
-			imh.Errors.Push(v2.ErrorCodeDigestInvalid, err)
-			w.WriteHeader(http.StatusBadRequest)
-			return
-		}
-
-		imh.Digest = dgst
-	}
-
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw)))
-	w.Header().Set("Docker-Content-Digest", imh.Digest.String())
-	w.Write(sm.Raw)
-}
-
-// PutImageManifest validates and stores an image manifest in the registry.
-func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
-	ctxu.GetLogger(imh).Debug("PutImageManifest")
-	manifests := imh.Repository.Manifests()
-	dec := json.NewDecoder(r.Body)
-
-	var manifest manifest.SignedManifest
-	if err := dec.Decode(&manifest); err != nil {
-		imh.Errors.Push(v2.ErrorCodeManifestInvalid, err)
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	dgst, err := digestManifest(imh, &manifest)
-	if err != nil {
-		imh.Errors.Push(v2.ErrorCodeDigestInvalid, err)
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	// Validate that the manifest tag or digest matches the payload.
-	if imh.Tag != "" {
-		if manifest.Tag != imh.Tag {
-			ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag)
-			imh.Errors.Push(v2.ErrorCodeTagInvalid)
-			w.WriteHeader(http.StatusBadRequest)
-			return
-		}
-
-		imh.Digest = dgst
-	} else if imh.Digest != "" {
-		if dgst != imh.Digest {
-			ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", dgst, imh.Digest)
-			imh.Errors.Push(v2.ErrorCodeDigestInvalid)
-			w.WriteHeader(http.StatusBadRequest)
-			return
-		}
-	} else {
-		imh.Errors.Push(v2.ErrorCodeTagInvalid, "no tag or digest specified")
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	if err := manifests.Put(imh.Context, &manifest); err != nil {
-		// TODO(stevvooe): These error handling switches really need to be
-		// handled by an app global mapper.
-		switch err := err.(type) {
-		case distribution.ErrManifestVerification:
-			for _, verificationError := range err {
-				switch verificationError := verificationError.(type) {
-				case distribution.ErrUnknownLayer:
-					imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer)
-				case distribution.ErrManifestUnverified:
-					imh.Errors.Push(v2.ErrorCodeManifestUnverified)
-				default:
-					if verificationError == digest.ErrDigestInvalidFormat {
-						// TODO(stevvooe): We really need to move all
-						// errors to types. It's much more straightforward.
-						imh.Errors.Push(v2.ErrorCodeDigestInvalid)
-					} else {
-						imh.Errors.PushErr(verificationError)
-					}
-				}
-			}
-		default:
-			imh.Errors.PushErr(err)
-		}
-
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	// Construct a canonical url for the uploaded manifest.
-	location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String())
-	if err != nil {
-		// NOTE(stevvooe): Given the behavior above, this is absurdly unlikely to
-		// happen.
We'll log the error here but proceed as if it worked. Worst - // case, we set an empty location header. - ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) - } - - w.Header().Set("Location", location) - w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.WriteHeader(http.StatusAccepted) -} - -// DeleteImageManifest removes the image with the given tag from the registry. -func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("DeleteImageManifest") - - // TODO(stevvooe): Unfortunately, at this point, manifest deletes are - // unsupported. There are issues with schema version 1 that make removing - // tag index entries a serious problem in eventually consistent storage. - // Once we work out schema version 2, the full deletion system will be - // worked out and we can add support back. - imh.Errors.Push(v2.ErrorCodeUnsupported) - w.WriteHeader(http.StatusBadRequest) -} - -// digestManifest takes a digest of the given manifest. This belongs somewhere -// better but we'll wait for a refactoring cycle to find that real somewhere. -func digestManifest(ctx context.Context, sm *manifest.SignedManifest) (digest.Digest, error) { - p, err := sm.Payload() - if err != nil { - if !strings.Contains(err.Error(), "missing signature key") { - ctxu.GetLogger(ctx).Errorf("error getting manifest payload: %v", err) - return "", err - } - - // NOTE(stevvooe): There are no signatures but we still have a - // payload. The request will fail later but this is not the - // responsibility of this part of the code. - p = sm.Raw - } - - dgst, err := digest.FromBytes(p) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error digesting manifest: %v", err) - return "", err - } - - return dgst, err -} - -blob -mark :144 -data 1837 -package handlers - -import ( - "net/http" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// layerDispatcher uses the request context to build a layerHandler. -func layerDispatcher(ctx *Context, r *http.Request) http.Handler { - dgst, err := getDigest(ctx) - if err != nil { - - if err == errDigestNotAvailable { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) - }) - } - - layerHandler := &layerHandler{ - Context: ctx, - Digest: dgst, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(layerHandler.GetLayer), - "HEAD": http.HandlerFunc(layerHandler.GetLayer), - } -} - -// layerHandler serves http layer requests. -type layerHandler struct { - *Context - - Digest digest.Digest -} - -// GetLayer fetches the binary data from backend storage returns it in the -// response. 
-func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(lh).Debug("GetImageLayer") - layers := lh.Repository.Layers() - layer, err := layers.Fetch(lh.Digest) - - if err != nil { - switch err := err.(type) { - case distribution.ErrUnknownLayer: - w.WriteHeader(http.StatusNotFound) - lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer) - default: - lh.Errors.Push(v2.ErrorCodeUnknown, err) - } - return - } - - handler, err := layer.Handler(r) - if err != nil { - ctxu.GetLogger(lh).Debugf("unexpected error getting layer HTTP handler: %s", err) - lh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - handler.ServeHTTP(w, r) -} - -blob -mark :145 -data 9590 -package handlers - -import ( - "fmt" - "io" - "net/http" - "net/url" - "os" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// layerUploadDispatcher constructs and returns the layer upload handler for -// the given request context. -func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { - luh := &layerUploadHandler{ - Context: ctx, - UUID: getUploadUUID(ctx), - } - - handler := http.Handler(handlers.MethodHandler{ - "POST": http.HandlerFunc(luh.StartLayerUpload), - "GET": http.HandlerFunc(luh.GetUploadStatus), - "HEAD": http.HandlerFunc(luh.GetUploadStatus), - // TODO(stevvooe): Must implement patch support. - // "PATCH": http.HandlerFunc(luh.PutLayerChunk), - "PUT": http.HandlerFunc(luh.PutLayerUploadComplete), - "DELETE": http.HandlerFunc(luh.CancelLayerUpload), - }) - - if luh.UUID != "" { - state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) - if err != nil { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - }) - } - luh.State = state - - if state.Name != ctx.Repository.Name() { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, luh.Repository.Name()) - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - }) - } - - if state.UUID != luh.UUID { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, luh.UUID) - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - }) - } - - layers := ctx.Repository.Layers() - upload, err := layers.Resume(luh.UUID) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) - if err == distribution.ErrLayerUploadUnknown { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) - }) - } - luh.Upload = upload - - if state.Offset > 0 { - // Seek the layer upload to the correct spot if it's non-zero. - // These error conditions should be rare and demonstrate really - // problems. We basically cancel the upload and tell the client to - // start over. 
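-			// (Illustrative note, not in the original: os.SEEK_SET seeks
-			// relative to the start of the upload, so a successful Seek must
-			// return exactly State.Offset; any other result means the signed
-			// state and the backend upload no longer agree.)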
- if nn, err := upload.Seek(luh.State.Offset, os.SEEK_SET); err != nil { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("error seeking layer upload: %v", err) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - upload.Cancel() - }) - } else if nn != luh.State.Offset { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("seek to wrong offest: %d != %d", nn, luh.State.Offset) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - upload.Cancel() - }) - } - } - - handler = closeResources(handler, luh.Upload) - } - - return handler -} - -// layerUploadHandler handles the http layer upload process. -type layerUploadHandler struct { - *Context - - // UUID identifies the upload instance for the current request. - UUID string - - Upload distribution.LayerUpload - - State layerUploadState -} - -// StartLayerUpload begins the layer upload process and allocates a server- -// side upload session. -func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) { - layers := luh.Repository.Layers() - upload, err := layers.Upload() - if err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - luh.Upload = upload - defer luh.Upload.Close() - - if err := luh.layerUploadResponse(w, r); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - w.Header().Set("Docker-Upload-UUID", luh.Upload.UUID()) - w.WriteHeader(http.StatusAccepted) -} - -// GetUploadStatus returns the status of a given upload, identified by uuid. -func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { - if luh.Upload == nil { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) - return - } - - if err := luh.layerUploadResponse(w, r); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - w.Header().Set("Docker-Upload-UUID", luh.UUID) - w.WriteHeader(http.StatusNoContent) -} - -// PutLayerUploadComplete takes the final request of a layer upload. The final -// chunk may include all the layer data, the final chunk of layer data or no -// layer data. Any data provided is received and verified. If successful, the -// layer is linked into the blob store and 201 Created is returned with the -// canonical url of the layer. -func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *http.Request) { - if luh.Upload == nil { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) - return - } - - dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! - - if dgstStr == "" { - // no digest? return error, but allow retry. - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest missing") - return - } - - dgst, err := digest.ParseDigest(dgstStr) - if err != nil { - // no digest? return error, but allow retry. - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest parsing failed") - return - } - - // TODO(stevvooe): Check the incoming range header here, per the - // specification. 
LayerUpload should be seeked (sought?) to that position. - - // TODO(stevvooe): Consider checking the error on this copy. - // Theoretically, problems should be detected during verification but we - // may miss a root cause. - - // Read in the final chunk, if any. - io.Copy(luh.Upload, r.Body) - - layer, err := luh.Upload.Finish(dgst) - if err != nil { - switch err := err.(type) { - case distribution.ErrLayerInvalidDigest: - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeDigestInvalid, err) - default: - ctxu.GetLogger(luh).Errorf("unknown error completing upload: %#v", err) - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) - } - - // Clean up the backend layer data if there was an error. - if err := luh.Upload.Cancel(); err != nil { - // If the cleanup fails, all we can do is observe and report. - ctxu.GetLogger(luh).Errorf("error canceling upload after error: %v", err) - } - - return - } - - // Build our canonical layer url - layerURL, err := luh.urlBuilder.BuildBlobURL(luh.Repository.Name(), layer.Digest()) - if err != nil { - luh.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - w.Header().Set("Location", layerURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Docker-Content-Digest", layer.Digest().String()) - w.WriteHeader(http.StatusCreated) -} - -// CancelLayerUpload cancels an in-progress upload of a layer. -func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) { - if luh.Upload == nil { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) - return - } - - w.Header().Set("Docker-Upload-UUID", luh.UUID) - if err := luh.Upload.Cancel(); err != nil { - ctxu.GetLogger(luh).Errorf("error encountered canceling upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.PushErr(err) - } - - w.WriteHeader(http.StatusNoContent) -} - -// layerUploadResponse provides a standard request for uploading layers and -// chunk responses. This sets the correct headers but the response status is -// left to the caller. -func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error { - - offset, err := luh.Upload.Seek(0, os.SEEK_CUR) - if err != nil { - ctxu.GetLogger(luh).Errorf("unable get current offset of layer upload: %v", err) - return err - } - - // TODO(stevvooe): Need a better way to manage the upload state automatically. 
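-	// (Illustrative note, not in the original: the fields set below are
-	// serialized into the hmac-signed "_state" query parameter of the upload
-	// URL and verified again by layerUploadDispatcher on the next request.)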
- luh.State.Name = luh.Repository.Name() - luh.State.UUID = luh.Upload.UUID() - luh.State.Offset = offset - luh.State.StartedAt = luh.Upload.StartedAt() - - token, err := hmacKey(luh.Config.HTTP.Secret).packUploadState(luh.State) - if err != nil { - ctxu.GetLogger(luh).Infof("error building upload state token: %s", err) - return err - } - - uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL( - luh.Repository.Name(), luh.Upload.UUID(), - url.Values{ - "_state": []string{token}, - }) - if err != nil { - ctxu.GetLogger(luh).Infof("error building upload url: %s", err) - return err - } - - w.Header().Set("Docker-Upload-UUID", luh.UUID) - w.Header().Set("Location", uploadURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Range", fmt.Sprintf("0-%d", luh.State.Offset)) - - return nil -} - -blob -mark :146 -data 1376 -package handlers - -import ( - "encoding/json" - "net/http" - - "github.com/docker/distribution" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// tagsDispatcher constructs the tags handler api endpoint. -func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { - tagsHandler := &tagsHandler{ - Context: ctx, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(tagsHandler.GetTags), - } -} - -// tagsHandler handles requests for lists of tags under a repository name. -type tagsHandler struct { - *Context -} - -type tagsAPIResponse struct { - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// GetTags returns a json list of tags for a specific image name. -func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - manifests := th.Repository.Manifests() - - tags, err := manifests.Tags(th.Context) - if err != nil { - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - w.WriteHeader(404) - th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Repository.Name()}) - default: - th.Errors.PushErr(err) - } - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - enc := json.NewEncoder(w) - if err := enc.Encode(tagsAPIResponse{ - Name: th.Repository.Name(), - Tags: tags, - }); err != nil { - th.Errors.PushErr(err) - return - } -} - -blob -mark :147 -data 1171 -package middleware - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// InitFunc is the type of a RegistryMiddleware factory function and is -// used to register the constructor for different RegistryMiddleware backends. -type InitFunc func(registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) - -var middlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a RegistryMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if middlewares == nil { - middlewares = make(map[string]InitFunc) - } - if _, exists := middlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - middlewares[name] = initFunc - - return nil -} - -// Get constructs a RegistryMiddleware with the given options using the named backend. 
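-//
-// An illustrative wiring sketch (names hypothetical, not part of the original
-// source): a middleware package registers itself in an init func,
-//
-//	func init() { Register("mymiddleware", initFunc) }
-//
-// and the configuration layer later constructs it via
-// Get("mymiddleware", options, registry).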
-func Get(name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { - if middlewares != nil { - if initFunc, exists := middlewares[name]; exists { - return initFunc(registry, options) - } - } - - return nil, fmt.Errorf("no registry middleware registered with name: %s", name) -} - -blob -mark :148 -data 1191 -package middleware - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// InitFunc is the type of a RepositoryMiddleware factory function and is -// used to register the constructor for different RepositoryMiddleware backends. -type InitFunc func(repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) - -var middlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a RepositoryMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if middlewares == nil { - middlewares = make(map[string]InitFunc) - } - if _, exists := middlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - middlewares[name] = initFunc - - return nil -} - -// Get constructs a RepositoryMiddleware with the given options using the named backend. -func Get(name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { - if middlewares != nil { - if initFunc, exists := middlewares[name]; exists { - return initFunc(repository, options) - } - } - - return nil, fmt.Errorf("no repository middleware registered with name: %s", name) -} - -blob -mark :149 -data 4143 -package storage - -import ( - "fmt" - - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" -) - -// TODO(stevvooe): Currently, the blobStore implementation used by the -// manifest store. The layer store should be refactored to better leverage the -// blobStore, reducing duplicated code. - -// blobStore implements a generalized blob store over a driver, supporting the -// read side and link management. This object is intentionally a leaky -// abstraction, providing utility methods that support creating and traversing -// backend links. -type blobStore struct { - driver storagedriver.StorageDriver - pm *pathMapper - ctx context.Context -} - -// exists reports whether or not the path exists. If the driver returns error -// other than storagedriver.PathNotFound, an error may be returned. -func (bs *blobStore) exists(dgst digest.Digest) (bool, error) { - path, err := bs.path(dgst) - - if err != nil { - return false, err - } - - ok, err := exists(bs.driver, path) - if err != nil { - return false, err - } - - return ok, nil -} - -// get retrieves the blob by digest, returning it a byte slice. This should -// only be used for small objects. -func (bs *blobStore) get(dgst digest.Digest) ([]byte, error) { - bp, err := bs.path(dgst) - if err != nil { - return nil, err - } - - return bs.driver.GetContent(bp) -} - -// link links the path to the provided digest by writing the digest into the -// target file. -func (bs *blobStore) link(path string, dgst digest.Digest) error { - if exists, err := bs.exists(dgst); err != nil { - return err - } else if !exists { - return fmt.Errorf("cannot link non-existent blob") - } - - // The contents of the "link" file are the exact string contents of the - // digest, which is specified in that package. 
- return bs.driver.PutContent(path, []byte(dgst)) -} - -// linked reads the link at path and returns the content. -func (bs *blobStore) linked(path string) ([]byte, error) { - linked, err := bs.readlink(path) - if err != nil { - return nil, err - } - - return bs.get(linked) -} - -// readlink returns the linked digest at path. -func (bs *blobStore) readlink(path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(path) - if err != nil { - return "", err - } - - linked, err := digest.ParseDigest(string(content)) - if err != nil { - return "", err - } - - if exists, err := bs.exists(linked); err != nil { - return "", err - } else if !exists { - return "", fmt.Errorf("link %q invalid: blob %s does not exist", path, linked) - } - - return linked, nil -} - -// resolve reads the digest link at path and returns the blob store link. -func (bs *blobStore) resolve(path string) (string, error) { - dgst, err := bs.readlink(path) - if err != nil { - return "", err - } - - return bs.path(dgst) -} - -// put stores the content p in the blob store, calculating the digest. If the -// content is already present, only the digest will be returned. This should -// only be used for small objects, such as manifests. -func (bs *blobStore) put(p []byte) (digest.Digest, error) { - dgst, err := digest.FromBytes(p) - if err != nil { - ctxu.GetLogger(bs.ctx).Errorf("error digesting content: %v, %s", err, string(p)) - return "", err - } - - bp, err := bs.path(dgst) - if err != nil { - return "", err - } - - // If the content already exists, just return the digest. - if exists, err := bs.exists(dgst); err != nil { - return "", err - } else if exists { - return dgst, nil - } - - return dgst, bs.driver.PutContent(bp, p) -} - -// path returns the canonical path for the blob identified by digest. The blob -// may or may not exist. -func (bs *blobStore) path(dgst digest.Digest) (string, error) { - bp, err := bs.pm.path(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return "", err - } - - return bp, nil -} - -// exists provides a utility method to test whether or not -func exists(driver storagedriver.StorageDriver, path string) (bool, error) { - if _, err := driver.Stat(path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return false, nil - default: - return false, err - } - } - - return true, nil -} - -blob -mark :150 -data 3280 -// Package cache provides facilities to speed up access to the storage -// backend. Typically cache implementations deal with internal implementation -// details at the backend level, rather than generalized caches for -// distribution related interfaces. In other words, unless the cache is -// specific to the storage package, it belongs in another package. -package cache - -import ( - "fmt" - - "github.com/docker/distribution/digest" - "golang.org/x/net/context" -) - -// ErrNotFound is returned when a meta item is not found. -var ErrNotFound = fmt.Errorf("not found") - -// LayerMeta describes the backend location and length of layer data. -type LayerMeta struct { - Path string - Length int64 -} - -// LayerInfoCache is a driver-aware cache of layer metadata. Basically, it -// provides a fast cache for checks against repository metadata, avoiding -// round trips to backend storage. Note that this is different from a pure -// layer cache, which would also provide access to backing data, as well. Such -// a cache should be implemented as a middleware, rather than integrated with -// the storage backend. 
-//
-// Note that most implementations rely on the caller to do strict checks on
-// repo and dgst arguments, since these are mostly used behind existing
-// implementations.
-type LayerInfoCache interface {
-	// Contains returns true if the repository with name contains the layer.
-	Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error)
-
-	// Add includes the layer in the given repository cache.
-	Add(ctx context.Context, repo string, dgst digest.Digest) error
-
-	// Meta provides the location of the layer on the backend and its size. Membership of a
-	// repository should be tested before using the result, if required.
-	Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error)
-
-	// SetMeta sets the meta data for the given layer.
-	SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error
-}
-
-// base implements common checks between cache implementations. Note that
-// these are not full checks of input, since that should be done by the
-// caller.
-type base struct {
-	LayerInfoCache
-}
-
-func (b *base) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) {
-	if repo == "" {
-		return false, fmt.Errorf("cache: cannot check for empty repository name")
-	}
-
-	if dgst == "" {
-		return false, fmt.Errorf("cache: cannot check for empty digest")
-	}
-
-	return b.LayerInfoCache.Contains(ctx, repo, dgst)
-}
-
-func (b *base) Add(ctx context.Context, repo string, dgst digest.Digest) error {
-	if repo == "" {
-		return fmt.Errorf("cache: cannot add empty repository name")
-	}
-
-	if dgst == "" {
-		return fmt.Errorf("cache: cannot add empty digest")
-	}
-
-	return b.LayerInfoCache.Add(ctx, repo, dgst)
-}
-
-func (b *base) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) {
-	if dgst == "" {
-		return LayerMeta{}, fmt.Errorf("cache: cannot get meta for empty digest")
-	}
-
-	return b.LayerInfoCache.Meta(ctx, dgst)
-}
-
-func (b *base) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error {
-	if dgst == "" {
-		return fmt.Errorf("cache: cannot set meta for empty digest")
-	}
-
-	if meta.Path == "" {
-		return fmt.Errorf("cache: cannot set empty path for meta")
-	}
-
-	return b.LayerInfoCache.SetMeta(ctx, dgst, meta)
-}
-
-blob
-mark :151
-data 2309
-package cache
-
-import (
-	"testing"
-
-	"golang.org/x/net/context"
-)
-
-// checkLayerInfoCache takes a cache implementation through a common set of
-// operations. If adding new tests, please add them here so new
-// implementations get the benefit.
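-//
-// Illustrative usage (mirroring the in-memory test below; names hypothetical,
-// not part of the original source): a new implementation only needs
-//
-//	func TestMyLayerInfoCache(t *testing.T) {
-//		checkLayerInfoCache(t, NewMyLayerInfoCache())
-//	}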
-func checkLayerInfoCache(t *testing.T, lic LayerInfoCache) {
-	ctx := context.Background()
-
-	exists, err := lic.Contains(ctx, "", "fake:abc")
-	if err == nil {
-		t.Fatalf("expected error checking for cache item with empty repo")
-	}
-
-	exists, err = lic.Contains(ctx, "foo/bar", "")
-	if err == nil {
-		t.Fatalf("expected error checking for cache item with empty digest")
-	}
-
-	exists, err = lic.Contains(ctx, "foo/bar", "fake:abc")
-	if err != nil {
-		t.Fatalf("unexpected error checking for cache item: %v", err)
-	}
-
-	if exists {
-		t.Fatalf("item should not exist")
-	}
-
-	if err := lic.Add(ctx, "", "fake:abc"); err == nil {
-		t.Fatalf("expected error adding cache item with empty name")
-	}
-
-	if err := lic.Add(ctx, "foo/bar", ""); err == nil {
-		t.Fatalf("expected error adding cache item with empty digest")
-	}
-
-	if err := lic.Add(ctx, "foo/bar", "fake:abc"); err != nil {
-		t.Fatalf("unexpected error adding item: %v", err)
-	}
-
-	exists, err = lic.Contains(ctx, "foo/bar", "fake:abc")
-	if err != nil {
-		t.Fatalf("unexpected error checking for cache item: %v", err)
-	}
-
-	if !exists {
-		t.Fatalf("item should exist")
-	}
-
-	_, err = lic.Meta(ctx, "")
-	if err == nil || err == ErrNotFound {
-		t.Fatalf("expected error getting meta for cache item with empty digest")
-	}
-
-	_, err = lic.Meta(ctx, "fake:abc")
-	if err != ErrNotFound {
-		t.Fatalf("expected ErrNotFound getting meta for unknown cache item")
-	}
-
-	if err = lic.SetMeta(ctx, "", LayerMeta{}); err == nil {
-		t.Fatalf("expected error setting meta for cache item with empty digest")
-	}
-
-	if err = lic.SetMeta(ctx, "foo/bar", LayerMeta{}); err == nil {
-		t.Fatalf("expected error setting meta for cache item with empty meta")
-	}
-
-	expected := LayerMeta{Path: "/foo/bar", Length: 20}
-	if err := lic.SetMeta(ctx, "foo/bar", expected); err != nil {
-		t.Fatalf("unexpected error setting meta: %v", err)
-	}
-
-	meta, err := lic.Meta(ctx, "foo/bar")
-	if err != nil {
-		t.Fatalf("unexpected error getting meta: %v", err)
-	}
-
-	if meta != expected {
-		t.Fatalf("retrieved meta data did not match: %v != %v", meta, expected)
-	}
-}
-
-blob
-mark :152
-data 1787
-package cache
-
-import (
-	"github.com/docker/distribution/digest"
-	"golang.org/x/net/context"
-)
-
-// inmemoryLayerInfoCache is a map-based implementation of LayerInfoCache.
-type inmemoryLayerInfoCache struct {
-	membership map[string]map[digest.Digest]struct{}
-	meta       map[digest.Digest]LayerMeta
-}
-
-// NewInMemoryLayerInfoCache provides an implementation of LayerInfoCache that
-// stores results in memory.
-func NewInMemoryLayerInfoCache() LayerInfoCache {
-	return &base{&inmemoryLayerInfoCache{
-		membership: make(map[string]map[digest.Digest]struct{}),
-		meta:       make(map[digest.Digest]LayerMeta),
-	}}
-}
-
-func (ilic *inmemoryLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) {
-	members, ok := ilic.membership[repo]
-	if !ok {
-		return false, nil
-	}
-
-	_, ok = members[dgst]
-	return ok, nil
-}
-
-// Add adds the layer to the in-memory repository blob set.
-func (ilic *inmemoryLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error {
-	members, ok := ilic.membership[repo]
-	if !ok {
-		members = make(map[digest.Digest]struct{})
-		ilic.membership[repo] = members
-	}
-
-	members[dgst] = struct{}{}
-
-	return nil
-}
-
-// Meta retrieves the layer meta data from the in-memory map, returning
-// ErrNotFound if not found.
-func (ilic *inmemoryLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - meta, ok := ilic.meta[dgst] - if !ok { - return LayerMeta{}, ErrNotFound - } - - return meta, nil -} - -// SetMeta sets the meta data for the given digest using a redis hash. A hash -// is used here since we may store unrelated fields about a layer in the -// future. -func (ilic *inmemoryLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - ilic.meta[dgst] = meta - return nil -} - -blob -mark :153 -data 227 -package cache - -import "testing" - -// TestInMemoryLayerInfoCache checks the in memory implementation is working -// correctly. -func TestInMemoryLayerInfoCache(t *testing.T) { - checkLayerInfoCache(t, NewInMemoryLayerInfoCache()) -} - -blob -mark :154 -data 3593 -package cache - -import ( - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/garyburd/redigo/redis" - "golang.org/x/net/context" -) - -// redisLayerInfoCache provides an implementation of storage.LayerInfoCache -// based on redis. Layer info is stored in two parts. The first provide fast -// access to repository membership through a redis set for each repo. The -// second is a redis hash keyed by the digest of the layer, providing path and -// length information. Note that there is no implied relationship between -// these two caches. The layer may exist in one, both or none and the code -// must be written this way. -type redisLayerInfoCache struct { - pool *redis.Pool - - // TODO(stevvooe): We use a pool because we don't have great control over - // the cache lifecycle to manage connections. A new connection if fetched - // for each operation. Once we have better lifecycle management of the - // request objects, we can change this to a connection. -} - -// NewRedisLayerInfoCache returns a new redis-based LayerInfoCache using the -// provided redis connection pool. -func NewRedisLayerInfoCache(pool *redis.Pool) LayerInfoCache { - return &base{&redisLayerInfoCache{ - pool: pool, - }} -} - -// Contains does a membership check on the repository blob set in redis. This -// is used as an access check before looking up global path information. If -// false is returned, the caller should still check the backend to if it -// exists elsewhere. -func (rlic *redisLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { - conn := rlic.pool.Get() - defer conn.Close() - - ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Contains(%q, %q)", repo, dgst) - return redis.Bool(conn.Do("SISMEMBER", rlic.repositoryBlobSetKey(repo), dgst)) -} - -// Add adds the layer to the redis repository blob set. -func (rlic *redisLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { - conn := rlic.pool.Get() - defer conn.Close() - - ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Add(%q, %q)", repo, dgst) - _, err := conn.Do("SADD", rlic.repositoryBlobSetKey(repo), dgst) - return err -} - -// Meta retrieves the layer meta data from the redis hash, returning -// ErrUnknownLayer if not found. 
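-//
-// Key layout sketch (derived from the helpers below, not part of the original
-// source): membership lives in the redis set "repository::<repo>::blobs" and
-// metadata in the redis hash "blobs::<algorithm>:<hex>".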
-func (rlic *redisLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) {
-	conn := rlic.pool.Get()
-	defer conn.Close()
-
-	reply, err := redis.Values(conn.Do("HMGET", rlic.blobMetaHashKey(dgst), "path", "length"))
-	if err != nil {
-		return LayerMeta{}, err
-	}
-
-	if len(reply) < 2 || reply[0] == nil || reply[1] == nil {
-		return LayerMeta{}, ErrNotFound
-	}
-
-	var meta LayerMeta
-	if _, err := redis.Scan(reply, &meta.Path, &meta.Length); err != nil {
-		return LayerMeta{}, err
-	}
-
-	return meta, nil
-}
-
-// SetMeta sets the meta data for the given digest using a redis hash. A hash
-// is used here since we may store unrelated fields about a layer in the
-// future.
-func (rlic *redisLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error {
-	conn := rlic.pool.Get()
-	defer conn.Close()
-
-	_, err := conn.Do("HMSET", rlic.blobMetaHashKey(dgst), "path", meta.Path, "length", meta.Length)
-	return err
-}
-
-// repositoryBlobSetKey returns the key for the blob set in the cache.
-func (rlic *redisLayerInfoCache) repositoryBlobSetKey(repo string) string {
-	return "repository::" + repo + "::blobs"
-}
-
-// blobMetaHashKey returns the cache key for immutable blob meta data.
-func (rlic *redisLayerInfoCache) blobMetaHashKey(dgst digest.Digest) string {
-	return "blobs::" + dgst.String()
-}
-
-blob
-mark :155
-data 1190
-package cache
-
-import (
-	"flag"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/garyburd/redigo/redis"
-)
-
-var redisAddr string
-
-func init() {
-	flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis")
-}
-
-// TestRedisLayerInfoCache exercises a live redis instance using the cache
-// implementation.
-func TestRedisLayerInfoCache(t *testing.T) {
-	if redisAddr == "" {
-		// fall back to an environment variable
-		redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR")
-	}
-
-	if redisAddr == "" {
-		// skip if still not set
-		t.Skip("please set -test.registry.storage.cache.redis.addr to test layer info cache against redis")
-	}
-
-	pool := &redis.Pool{
-		Dial: func() (redis.Conn, error) {
-			return redis.Dial("tcp", redisAddr)
-		},
-		MaxIdle:   1,
-		MaxActive: 2,
-		TestOnBorrow: func(c redis.Conn, t time.Time) error {
-			_, err := c.Do("PING")
-			return err
-		},
-		Wait: false, // if a connection is not available, proceed without cache.
-	}
-
-	// Clear the database
-	if _, err := pool.Get().Do("FLUSHDB"); err != nil {
-		t.Fatalf("unexpected error flushing redis db: %v", err)
-	}
-
-	checkLayerInfoCache(t, NewRedisLayerInfoCache(pool))
-}
-
-blob
-mark :156
-data 160
-// Package storage contains storage services for use in the registry
-// application. It should be considered an internal package, as of Go 1.4.
-package storage
-
-blob
-mark :157
-data 9626
-// Package azure provides a storagedriver.StorageDriver implementation to
-// store blobs in Microsoft Azure Blob Storage Service.
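-//
-// An illustrative registry configuration for this driver (parameter names
-// taken from FromParameters below; values hypothetical):
-//
-//	storage:
-//	  azure:
-//	    accountname: myaccount
-//	    accountkey: BASE64KEY
-//	    container: registry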
-package azure - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - "time" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" -) - -const driverName = "azure" - -const ( - paramAccountName = "accountname" - paramAccountKey = "accountkey" - paramContainer = "container" - paramRealm = "realm" -) - -type driver struct { - client azure.BlobStorageClient - container string -} - -type baseEmbed struct{ base.Base } - -// Driver is a storagedriver.StorageDriver implementation backed by -// Microsoft Azure Blob Storage Service. -type Driver struct{ baseEmbed } - -func init() { - factory.Register(driverName, &azureDriverFactory{}) -} - -type azureDriverFactory struct{} - -func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -// FromParameters constructs a new Driver with a given parameters map. -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - accountName, ok := parameters[paramAccountName] - if !ok || fmt.Sprint(accountName) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountName) - } - - accountKey, ok := parameters[paramAccountKey] - if !ok || fmt.Sprint(accountKey) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) - } - - container, ok := parameters[paramContainer] - if !ok || fmt.Sprint(container) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramContainer) - } - - realm, ok := parameters[paramRealm] - if !ok || fmt.Sprint(realm) == "" { - realm = azure.DefaultBaseUrl - } - - return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) -} - -// New constructs a new Driver with the given Azure Storage Account credentials -func New(accountName, accountKey, container, realm string) (*Driver, error) { - api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultApiVersion, true) - if err != nil { - return nil, err - } - - blobClient := api.GetBlobService() - - // Create registry container - if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { - return nil, err - } - - d := &driver{ - client: *blobClient, - container: container} - return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil -} - -// Implement the storagedriver.StorageDriver interface. - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { - blob, err := d.client.GetBlob(d.container, path) - if err != nil { - if is404(err) { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - - return ioutil.ReadAll(blob) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(path string, contents []byte) error { - return d.client.PutBlockBlob(d.container, path, ioutil.NopCloser(bytes.NewReader(contents))) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
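The azure driver above is configured purely through a string-keyed parameter map, validated in FromParameters. A minimal sketch of how such a map might be handed to the registered factory follows; the account name, key, and container are placeholders, and factory.Create will fail at runtime unless it can reach a real storage account (New attempts to create the container).

```go
package main

import (
	"fmt"

	// Importing the azure package registers the "azure" driver factory in init().
	_ "github.com/docker/distribution/registry/storage/driver/azure"
	"github.com/docker/distribution/registry/storage/driver/factory"
)

func main() {
	// Placeholder credentials; each key matches a param* constant in the driver.
	params := map[string]interface{}{
		"accountname": "myaccount",
		"accountkey":  "bXlrZXk=",
		"container":   "registry",
		// "realm" is optional and defaults to azure.DefaultBaseUrl.
	}

	d, err := factory.Create("azure", params)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Printf("driver: %T\n", d)
}
```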
-func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { - if ok, err := d.client.BlobExists(d.container, path); err != nil { - return nil, err - } else if !ok { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - info, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - - size := int64(info.ContentLength) - if offset >= size { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - bytesRange := fmt.Sprintf("%v-", offset) - resp, err := d.client.GetBlobRange(d.container, path, bytesRange) - if err != nil { - return nil, err - } - return resp, nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (int64, error) { - if blobExists, err := d.client.BlobExists(d.container, path); err != nil { - return 0, err - } else if !blobExists { - err := d.client.CreateBlockBlob(d.container, path) - if err != nil { - return 0, err - } - } - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - zw := newZeroFillWriter(&bw) - return zw.Write(d.container, path, offset, reader) -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { - // Check if the path is a blob - if ok, err := d.client.BlobExists(d.container, path); err != nil { - return nil, err - } else if ok { - blob, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - - mtim, err := time.Parse(http.TimeFormat, blob.LastModified) - if err != nil { - return nil, err - } - - return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: int64(blob.ContentLength), - ModTime: mtim, - IsDir: false, - }}, nil - } - - // Check if path is a virtual container - virtContainerPath := path - if !strings.HasSuffix(virtContainerPath, "/") { - virtContainerPath += "/" - } - blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ - Prefix: virtContainerPath, - MaxResults: 1, - }) - if err != nil { - return nil, err - } - if len(blobs.Blobs) > 0 { - // path is a virtual container - return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - IsDir: true, - }}, nil - } - - // path is not a blob or virtual container - return nil, storagedriver.PathNotFoundError{Path: path} -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(path string) ([]string, error) { - if path == "/" { - path = "" - } - - blobs, err := d.listBlobs(d.container, path) - if err != nil { - return blobs, err - } - - list := directDescendants(blobs, path) - return list, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. 
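WriteStream above is a three-layer composition: the zero-fill writer handles offsets past the end of the blob, the random blob writer handles offsets inside it by splitting blocks, and blockStorage is the thin client abstraction underneath. A sketch of the layering as an in-package test, using the StorageSimulator defined later in this file set instead of a live Azure client (illustrative only; the container and blob names are arbitrary):

```go
package azure

import (
	"bytes"
	"testing"
)

// Writing 3 bytes at offset 5 of an empty blob: the zero-fill writer pads
// bytes 0-4 with zeros, then delegates to the random blob writer.
func TestWriterLayeringSketch(t *testing.T) {
	s := NewStorageSimulator() // in-memory stand-in for the Azure client
	bw := newRandomBlobWriter(&s, 4)
	zw := newZeroFillWriter(&bw)

	if err := s.CreateBlockBlob("c", "b"); err != nil {
		t.Fatal(err)
	}
	if _, err := zw.Write("c", "b", 5, bytes.NewReader([]byte("abc"))); err != nil {
		t.Fatal(err)
	}
	r, err := s.GetBlob("c", "b")
	if err != nil {
		t.Fatal(err)
	}
	assertBlobContents(t, r, append(make([]byte, 5), []byte("abc")...))
}
```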
-func (d *driver) Move(sourcePath string, destPath string) error { - sourceBlobURL := d.client.GetBlobUrl(d.container, sourcePath) - err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) - if err != nil { - if is404(err) { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - return err - } - - return d.client.DeleteBlob(d.container, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(path string) error { - ok, err := d.client.DeleteBlobIfExists(d.container, path) - if err != nil { - return err - } - if ok { - return nil // was a blob and deleted, return - } - - // Not a blob, see if path is a virtual container with blobs - blobs, err := d.listBlobs(d.container, path) - if err != nil { - return err - } - - for _, b := range blobs { - if err = d.client.DeleteBlob(d.container, b); err != nil { - return err - } - } - - if len(blobs) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - return nil -} - -// URLFor returns a publicly accessible URL for the blob stored at the given -// path for the specified duration by making use of Azure Storage Shared Access Signatures (SAS). -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. -func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { - expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration - expires, ok := options["expiry"] - if ok { - t, ok := expires.(time.Time) - if ok { - expiresTime = t - } - } - return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") -} - -// directDescendants finds the direct descendants (blobs or virtual containers) -// from a list of blob paths and returns their full paths. Elements in the -// blobs list must be prefixed with a "/". -// -// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} are -// {"/foo", "/bar"} and direct descendants of "bar" are {"/bar/1", "/bar/2"} -func directDescendants(blobs []string, prefix string) []string { - if !strings.HasPrefix(prefix, "/") { // add leading '/' - prefix = "/" + prefix - } - if !strings.HasSuffix(prefix, "/") { // containerify the path - prefix += "/" - } - - out := make(map[string]bool) - for _, b := range blobs { - if strings.HasPrefix(b, prefix) { - rel := b[len(prefix):] - c := strings.Count(rel, "/") - if c == 0 { - out[b] = true - } else { - out[prefix+rel[:strings.Index(rel, "/")]] = true - } - } - } - - var keys []string - for k := range out { - keys = append(keys, k) - } - return keys -} - -func (d *driver) listBlobs(container, virtPath string) ([]string, error) { - if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path - virtPath += "/" - } - - out := []string{} - marker := "" - for { - resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ - Marker: marker, - Prefix: virtPath, - }) - - if err != nil { - return out, err - } - - for _, b := range resp.Blobs { - out = append(out, b.Name) - } - - if len(resp.Blobs) == 0 || resp.NextMarker == "" { - break - } - marker = resp.NextMarker - } - return out, nil -} - -func is404(err error) bool { - e, ok := err.(azure.StorageServiceError) - return ok && e.StatusCode == http.StatusNotFound -} - -blob -mark :158 -data 1627 -package azure - -import ( - "fmt" - "os" - "strings" - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - .
"gopkg.in/check.v1" -) - -const ( - envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" - envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" - envContainer = "AZURE_STORAGE_CONTAINER" - envRealm = "AZURE_STORAGE_REALM" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } - -func init() { - var ( - accountName string - accountKey string - container string - realm string - ) - - config := []struct { - env string - value *string - }{ - {envAccountName, &accountName}, - {envAccountKey, &accountKey}, - {envContainer, &container}, - {envRealm, &realm}, - } - - missing := []string{} - for _, v := range config { - *v.value = os.Getenv(v.env) - if *v.value == "" { - missing = append(missing, v.env) - } - } - - azureDriverConstructor := func() (storagedriver.StorageDriver, error) { - return New(accountName, accountKey, container, realm) - } - - // Skip Azure storage driver tests if environment variable parameters are not provided - skipCheck := func() string { - if len(missing) > 0 { - return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) - } - return "" - } - - testsuites.RegisterInProcessSuite(azureDriverConstructor, skipCheck) - // testsuites.RegisterIPCSuite(driverName, map[string]string{ - // paramAccountName: accountName, - // paramAccountKey: accountKey, - // paramContainer: container, - // paramRealm: realm, - // }, skipCheck) -} - -blob -mark :159 -data 609 -package azure - -import ( - "fmt" - "io" - - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" -) - -// azureBlockStorage is adaptor between azure.BlobStorageClient and -// blockStorage interface. -type azureBlockStorage struct { - azure.BlobStorageClient -} - -func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1)) -} - -func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage { - a := azureBlockStorage{} - a.BlobStorageClient = b - return a -} - -blob -mark :160 -data 3666 -package azure - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" -) - -type StorageSimulator struct { - blobs map[string]*BlockBlob -} - -type BlockBlob struct { - blocks map[string]*DataBlock - blockList []string -} - -type DataBlock struct { - data []byte - committed bool -} - -func (s *StorageSimulator) path(container, blob string) string { - return fmt.Sprintf("%s/%s", container, blob) -} - -func (s *StorageSimulator) BlobExists(container, blob string) (bool, error) { - _, ok := s.blobs[s.path(container, blob)] - return ok, nil -} - -func (s *StorageSimulator) GetBlob(container, blob string) (io.ReadCloser, error) { - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return nil, fmt.Errorf("blob not found") - } - - var readers []io.Reader - for _, bID := range bb.blockList { - readers = append(readers, bytes.NewReader(bb.blocks[bID].data)) - } - return ioutil.NopCloser(io.MultiReader(readers...)), nil -} - -func (s *StorageSimulator) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - r, err := s.GetBlob(container, blob) - if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - return ioutil.NopCloser(bytes.NewReader(b[start : start+length])), nil -} - -func (s *StorageSimulator) CreateBlockBlob(container, blob string) error { - path := 
s.path(container, blob) - bb := &BlockBlob{ - blocks: make(map[string]*DataBlock), - blockList: []string{}, - } - s.blobs[path] = bb - return nil -} - -func (s *StorageSimulator) PutBlock(container, blob, blockID string, chunk []byte) error { - path := s.path(container, blob) - bb, ok := s.blobs[path] - if !ok { - return fmt.Errorf("blob not found") - } - data := make([]byte, len(chunk)) - copy(data, chunk) - bb.blocks[blockID] = &DataBlock{data: data, committed: false} // add block to blob - return nil -} - -func (s *StorageSimulator) GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) { - resp := azure.BlockListResponse{} - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return resp, fmt.Errorf("blob not found") - } - - // Iterate committed blocks (in order) - if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { - for _, blockID := range bb.blockList { - b := bb.blocks[blockID] - block := azure.BlockResponse{ - Name: blockID, - Size: int64(len(b.data)), - } - resp.CommittedBlocks = append(resp.CommittedBlocks, block) - } - - } - - // Iterate uncommitted blocks (in no order) - if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeUncommitted { - for blockID, b := range bb.blocks { - block := azure.BlockResponse{ - Name: blockID, - Size: int64(len(b.data)), - } - if !b.committed { - resp.UncommittedBlocks = append(resp.UncommittedBlocks, block) - } - } - } - return resp, nil -} - -func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.Block) error { - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return fmt.Errorf("blob not found") - } - - var blockIDs []string - for _, v := range blocks { - bl, ok := bb.blocks[v.Id] - if !ok { // check if block ID exists - return fmt.Errorf("block id '%s' not found", v.Id) - } - bl.committed = true - blockIDs = append(blockIDs, v.Id) - } - - // Mark all other blocks uncommitted - for k, b := range bb.blocks { - inList := false - for _, v := range blockIDs { - if k == v { - inList = true - break - } - } - if !inList { - b.committed = false - } - } - - bb.blockList = blockIDs - return nil -} - -func NewStorageSimulator() StorageSimulator { - return StorageSimulator{ - blobs: make(map[string]*BlockBlob), - } -} - -blob -mark :161 -data 1284 -package azure - -import ( - "encoding/base64" - "fmt" - "math/rand" - "sync" - "time" - - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" -) - -type blockIDGenerator struct { - pool map[string]bool - r *rand.Rand - m sync.Mutex -} - -// Generate returns an unused random block id and adds the generated ID -// to the list of used IDs so that the same block name is not used again. -func (b *blockIDGenerator) Generate() string { - b.m.Lock() - defer b.m.Unlock() - - var id string - for { - id = toBlockID(int(b.r.Int())) - if !b.exists(id) { - break - } - } - b.pool[id] = true - return id -} - -func (b *blockIDGenerator) exists(id string) bool { - _, used := b.pool[id] - return used -} - -func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { - b.m.Lock() - defer b.m.Unlock() - - for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) { - b.pool[bl.Name] = true - } -} - -func newBlockIDGenerator() *blockIDGenerator { - return &blockIDGenerator{ - pool: make(map[string]bool), - r: rand.New(rand.NewSource(time.Now().UnixNano()))} -} - -// toBlockID converts a given integer to a base64-encoded block ID of a fixed length.
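A standalone illustration of the padding scheme used by the helper that follows: Azure block blobs require all block IDs within a blob to have the same length, and the fixed-width "%029d" format guarantees that before base64 encoding. This is a minimal sketch (the standalone copy of the function exists only for demonstration); the equal printed lengths are the point.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// Copy of the padding scheme: zero-pad the integer to a fixed 29-character
// width, then base64-encode, so every generated ID has the same length.
func toBlockID(i int) string {
	s := fmt.Sprintf("%029d", i)
	return base64.StdEncoding.EncodeToString([]byte(s))
}

func main() {
	// Both print 40: 29 bytes always encode to 40 base64 characters.
	fmt.Println(len(toBlockID(0)), len(toBlockID(1<<60)))
}
```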
-func toBlockID(i int) string { - s := fmt.Sprintf("%029d", i) // add zero padding for same length-blobs - return base64.StdEncoding.EncodeToString([]byte(s)) -} - -blob -mark :162 -data 1889 -package azure - -import ( - "math" - "testing" - - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" -) - -func Test_blockIdGenerator(t *testing.T) { - r := newBlockIDGenerator() - - for i := 1; i <= 10; i++ { - if expected := i - 1; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - if id := r.Generate(); id == "" { - t.Fatal("returned empty id") - } - if expected := i; len(r.pool) != expected { - t.Fatalf("rand pool has wrong number of items: %d, expected:%d", len(r.pool), expected) - } - } -} - -func Test_blockIdGenerator_Feed(t *testing.T) { - r := newBlockIDGenerator() - if expected := 0; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed empty list - blocks := azure.BlockListResponse{} - r.Feed(blocks) - if expected := 0; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed blocks - blocks = azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"1", 1}, - {"2", 2}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"3", 3}, - }} - r.Feed(blocks) - if expected := 3; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed same block IDs with committed/uncommitted place changed - blocks = azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"3", 3}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"1", 1}, - }} - r.Feed(blocks) - if expected := 3; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } -} - -func Test_toBlockId(t *testing.T) { - min := 0 - max := math.MaxInt64 - - if len(toBlockID(min)) != len(toBlockID(max)) { - t.Fatalf("different-sized blockIDs are returned") - } -} - -blob -mark :163 -data 6914 -package azure - -import ( - "fmt" - "io" - "io/ioutil" - - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" -) - -// blockStorage is the interface required from a block storage service -// client implementation -type blockStorage interface { - CreateBlockBlob(container, blob string) error - GetBlob(container, blob string) (io.ReadCloser, error) - GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) - PutBlock(container, blob, blockID string, chunk []byte) error - GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) - PutBlockList(container, blob string, blocks []azure.Block) error -} - -// randomBlobWriter enables random access semantics on Azure block blobs -// by enabling writing arbitrary length of chunks to arbitrary write offsets -// within the blob. Normally, Azure Blob Storage does not support random -// access semantics on block blobs; however, this writer can download, split and -// reupload the overlapping blocks and discards those being overwritten entirely. -type randomBlobWriter struct { - bs blockStorage - blockSize int -} - -func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter { - return randomBlobWriter{bs: bs, blockSize: blockSize} -} - -// WriteBlobAt writes the given chunk to the specified position of an existing blob. 
-// The offset must be equal to the size of the blob or smaller. -func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) { - rand := newBlockIDGenerator() - - blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) - if err != nil { - return 0, err - } - rand.Feed(blocks) // load existing block IDs - - // Check the write offset against the existing blob size - size := getBlobSize(blocks) - if offset < 0 || offset > size { - return 0, fmt.Errorf("wrong offset for Write: %v", offset) - } - - // Upload the new chunk as blocks - blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand) - if err != nil { - return 0, err - } - - // For non-append operations, existing blocks may need to be split - if offset != size { - // Split the block on the left end (if any) - leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand) - if err != nil { - return 0, err - } - blockList = append(leftBlocks, blockList...) - - // Split the block on the right end (if any) - rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand) - if err != nil { - return 0, err - } - blockList = append(blockList, rightBlocks...) - } else { - // Use existing block list - var existingBlocks []azure.Block - for _, v := range blocks.CommittedBlocks { - existingBlocks = append(existingBlocks, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) - } - blockList = append(existingBlocks, blockList...) - } - // Put block list - return nn, r.bs.PutBlockList(container, blob, blockList) -} - -func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) { - blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) - if err != nil { - return 0, err - } - return getBlobSize(blocks), nil -} - -// writeChunkToBlocks writes the given chunk to one or more blocks within the -// specified blob and returns their block representations. Those blocks are -// not committed yet. -func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) { - var newBlocks []azure.Block - var nn int64 - - // Read chunks of at most size N except the last chunk to - // maximize block size and minimize block count. - buf := make([]byte, r.blockSize) - for { - n, err := io.ReadFull(chunk, buf) - if err == io.EOF { - break - } - nn += int64(n) - data := buf[:n] - blockID := rand.Generate() - if err := r.bs.PutBlock(container, blob, blockID, data); err != nil { - return newBlocks, nn, err - } - newBlocks = append(newBlocks, azure.Block{Id: blockID, Status: azure.BlockStatusUncommitted}) - } - return newBlocks, nn, nil -} - -// blocksLeftSide returns the blocks that are going to be at the left side of -// the writeOffset: [0, writeOffset) by identifying blocks that will remain -// the same and splitting blocks and reuploading them as needed.
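A worked example of the block surgery WriteBlobAt performs, written as an in-package test against the StorageSimulator (illustrative; container and blob names are arbitrary): with 5-byte blocks holding "AAAAABBBBBCCC", overwriting two bytes at offset 6 keeps the first block as-is, re-uploads a 1-byte left fragment ("B"), inserts the new chunk, re-uploads a 2-byte right fragment ("BB"), and keeps the untouched tail block.

```go
package azure

import (
	"strings"
	"testing"
)

func TestWriteBlobAtSplitSketch(t *testing.T) {
	s := NewStorageSimulator()
	rw := newRandomBlobWriter(&s, 5) // 5-byte blocks

	if err := s.CreateBlockBlob("c", "b"); err != nil {
		t.Fatal(err)
	}
	// Blocks after this write: "AAAAA", "BBBBB", "CCC".
	if _, err := rw.WriteBlobAt("c", "b", 0, strings.NewReader("AAAAABBBBBCCC")); err != nil {
		t.Fatal(err)
	}
	// Overwrite bytes 6-7; resulting blocks: "AAAAA", "B", "xx", "BB", "CCC".
	if _, err := rw.WriteBlobAt("c", "b", 6, strings.NewReader("xx")); err != nil {
		t.Fatal(err)
	}
	r, err := s.GetBlob("c", "b")
	if err != nil {
		t.Fatal(err)
	}
	assertBlobContents(t, r, []byte("AAAAABxxBBCCC"))
}
```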
-func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) { - var left []azure.Block - bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) - if err != nil { - return left, err - } - - o := writeOffset - elapsed := int64(0) - for _, v := range bx.CommittedBlocks { - blkSize := int64(v.Size) - if o >= blkSize { // use existing block - left = append(left, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) - o -= blkSize - elapsed += blkSize - } else if o > 0 { // current block needs to be split - start := elapsed - size := o - part, err := r.bs.GetSectionReader(container, blob, start, size) - if err != nil { - return left, err - } - newBlockID := rand.Generate() - - data, err := ioutil.ReadAll(part) - if err != nil { - return left, err - } - if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { - return left, err - } - left = append(left, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted}) - break - } - } - return left, nil -} - -// blocksRightSide returns the blocks that are going to be at the right side of -// the written chunk: [writeOffset+size, +inf) by identifying blocks that will remain -// the same and splitting blocks and reuploading them as needed. -func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) { - var right []azure.Block - - bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) - if err != nil { - return nil, err - } - - re := writeOffset + chunkSize - 1 // right end of written chunk - var elapsed int64 - for _, v := range bx.CommittedBlocks { - var ( - bs = elapsed // left end of current block - be = elapsed + int64(v.Size) - 1 // right end of current block - ) - - if bs > re { // take the block as is - right = append(right, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) - } else if be > re { // current block needs to be split - part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) - if err != nil { - return right, err - } - newBlockID := rand.Generate() - - data, err := ioutil.ReadAll(part) - if err != nil { - return right, err - } - if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { - return right, err - } - right = append(right, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted}) - } - elapsed += int64(v.Size) - } - return right, nil -} - -func getBlobSize(blocks azure.BlockListResponse) int64 { - var n int64 - for _, v := range blocks.CommittedBlocks { - n += int64(v.Size) - } - return n -} - -blob -mark :164 -data 11679 -package azure - -import ( - "bytes" - "io" - "io/ioutil" - "math/rand" - "reflect" - "strings" - "testing" - - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" -) - -func TestRandomWriter_writeChunkToBlocks(t *testing.T) { - s := NewStorageSimulator() - rw := newRandomBlobWriter(&s, 3) - rand := newBlockIDGenerator() - c := []byte("AAABBBCCCD") - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, nn, err := rw.writeChunkToBlocks("a", "b", bytes.NewReader(c), rand) - if err != nil { - t.Fatal(err) - } - if expected := int64(len(c)); nn != expected { - t.Fatalf("wrong nn:%v, expected:%v", nn, expected) - } - if expected := 4; len(bw) != expected { - t.Fatal("unexpected written block count") - } - - bx, err := s.GetBlockList("a", "b", azure.BlockListTypeAll) - if err != nil { - t.Fatal(err) - } - if
expected := 0; len(bx.CommittedBlocks) != expected { - t.Fatal("unexpected committed block count") - } - if expected := 4; len(bx.UncommittedBlocks) != expected { - t.Fatalf("unexpected uncommitted block count: %d -- %#v", len(bx.UncommittedBlocks), bx) - } - - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - assertBlobContents(t, r, c) -} - -func TestRandomWriter_blocksLeftSide(t *testing.T) { - blob := "AAAAABBBBBCCC" - cases := []struct { - offset int64 - expectedBlob string - expectedPattern []azure.BlockStatus - }{ - {0, "", []azure.BlockStatus{}}, // write to beginning, discard all - {13, blob, []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to end, no change - {1, "A", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // write at 1 - {5, "AAAAA", []azure.BlockStatus{azure.BlockStatusCommitted}}, // write just after first block - {6, "AAAAAB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block - {9, "AAAAABBBB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block near its end - } - - for _, c := range cases { - s := NewStorageSimulator() - rw := newRandomBlobWriter(&s, 5) - rand := newBlockIDGenerator() - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand) - if err != nil { - t.Fatal(err) - } - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - bx, err := rw.blocksLeftSide("a", "b", c.offset, rand) - if err != nil { - t.Fatal(err) - } - - bs := []azure.BlockStatus{} - for _, v := range bx { - bs = append(bs, v.Status) - } - - if !reflect.DeepEqual(bs, c.expectedPattern) { - t.Logf("Committed blocks %v", bw) - t.Fatalf("For offset %v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.expectedPattern, bs, bx) - } - if err := rw.bs.PutBlockList("a", "b", bx); err != nil { - t.Fatal(err) - } - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - cout, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - outBlob := string(cout) - if outBlob != c.expectedBlob { - t.Fatalf("wrong blob contents: %v, expected: %v", outBlob, c.expectedBlob) - } - } -} - -func TestRandomWriter_blocksRightSide(t *testing.T) { - blob := "AAAAABBBBBCCC" - cases := []struct { - offset int64 - size int64 - expectedBlob string - expectedPattern []azure.BlockStatus - }{ - {0, 100, "", []azure.BlockStatus{}}, // overwrite the entire blob - {0, 3, "AABBBBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // split first block - {4, 1, "BBBBBCCC", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to last char of first block - {1, 6, "BBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted}}, // overwrite splits first and second block, last block remains - {3, 8, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite a block in middle block, split end block - {10, 1, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite first byte of rightmost block - {11, 2, "", []azure.BlockStatus{}}, // overwrite the rightmost index - {13, 20, "", []azure.BlockStatus{}}, // append to the end - } - - for _, c := range cases { - s :=
NewStorageSimulator() - rw := newRandomBlobWriter(&s, 5) - rand := newBlockIDGenerator() - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand) - if err != nil { - t.Fatal(err) - } - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - bx, err := rw.blocksRightSide("a", "b", c.offset, c.size, rand) - if err != nil { - t.Fatal(err) - } - - bs := []azure.BlockStatus{} - for _, v := range bx { - bs = append(bs, v.Status) - } - - if !reflect.DeepEqual(bs, c.expectedPattern) { - t.Logf("Committed blocks %v", bw) - t.Fatalf("For offset %v-size:%v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.size, c.expectedPattern, bs, bx) - } - if err := rw.bs.PutBlockList("a", "b", bx); err != nil { - t.Fatal(err) - } - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - cout, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - outBlob := string(cout) - if outBlob != c.expectedBlob { - t.Fatalf("For offset %v-size:%v: wrong blob contents: %v, expected: %v", c.offset, c.size, outBlob, c.expectedBlob) - } - } -} - -func TestRandomWriter_Write_NewBlob(t *testing.T) { - var ( - s = NewStorageSimulator() - rw = newRandomBlobWriter(&s, 1024*3) // 3 KB blocks - blob = randomContents(1024 * 7) // 7 KB blob - ) - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - if _, err := rw.WriteBlobAt("a", "b", 10, bytes.NewReader(blob)); err == nil { - t.Fatal("expected error, got nil") - } - if _, err := rw.WriteBlobAt("a", "b", 100000, bytes.NewReader(blob)); err == nil { - t.Fatal("expected error, got nil") - } - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(blob)); err != nil { - t.Fatal(err) - } else if expected := int64(len(blob)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if len(bx.CommittedBlocks) != 3 { - t.Fatalf("got wrong number of committed blocks: %v", len(bx.CommittedBlocks)) - } - - // Replace first 512 bytes - leftChunk := randomContents(512) - blob = append(leftChunk, blob[512:]...) - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(leftChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(leftChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 4; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) - } - - // Replace last 512 bytes with 1024 bytes - rightChunk := randomContents(1024) - offset := int64(len(blob) - 512) - blob = append(blob[:offset], rightChunk...)
- if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(rightChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(rightChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 5; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) - } - - // Replace 2K-4K (overlaps 2 blocks from L/R) - newChunk := randomContents(1024 * 2) - offset = 1024 * 2 - blob = append(append(blob[:offset], newChunk...), blob[offset+int64(len(newChunk)):]...) - if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(newChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(newChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 6; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) - } - - // Replace the entire blob - newBlob := randomContents(1024 * 30) - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(newBlob)); err != nil { - t.Fatal(err) - } else if expected := int64(len(newBlob)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, newBlob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 10; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) - } else if expected, size := int64(1024*30), getBlobSize(bx); size != expected { - t.Fatalf("committed block size does not indicate blob size") - } -} - -func Test_getBlobSize(t *testing.T) { - // with some committed blocks - if expected, size := int64(151), getBlobSize(azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"A", 100}, - {"B", 50}, - {"C", 1}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"D", 200}, - }}); expected != size { - t.Fatalf("wrong blob size: %v, expected: %v", size, expected) - } - - // with no committed blocks - if expected, size := int64(0), getBlobSize(azure.BlockListResponse{ - UncommittedBlocks: []azure.BlockResponse{ - {"A", 100}, - {"B", 50}, - {"C", 1}, - {"D", 200}, - }}); expected != size { - t.Fatalf("wrong blob size: %v, expected: %v", size, expected) - } -} - -func assertBlobContents(t *testing.T, r io.Reader, expected []byte) { - out, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(out, expected) { - t.Fatalf("wrong blob contents. 
size: %v, expected: %v", len(out), len(expected)) - } -} - -func randomContents(length int64) []byte { - b := make([]byte, length) - for i := range b { - b[i] = byte(rand.Intn(2 << 8)) - } - return b -} - -blob -mark :165 -data 1370 -package azure - -import ( - "bytes" - "io" -) - -type blockBlobWriter interface { - GetSize(container, blob string) (int64, error) - WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) -} - -// zeroFillWriter enables writing to an offset outside a block blob's size -// by offering the chunk to the underlying writer as contiguous data, with -// the gap in between filled with NUL (zero) bytes. -type zeroFillWriter struct { - blockBlobWriter -} - -func newZeroFillWriter(b blockBlobWriter) zeroFillWriter { - w := zeroFillWriter{} - w.blockBlobWriter = b - return w -} - -// Write writes the given chunk to the specified existing blob even if the -// offset is beyond the blob's size. The gap is filled with zeros. The -// returned byte count does not include the zeros written. -func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) { - size, err := z.blockBlobWriter.GetSize(container, blob) - if err != nil { - return 0, err - } - - var reader io.Reader - var zeroPadding int64 - if offset <= size { - reader = chunk - } else { - zeroPadding = offset - size - offset = size // adjust offset to be the append index - zeros := bytes.NewReader(make([]byte, zeroPadding)) - reader = io.MultiReader(zeros, chunk) - } - - nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader) - nn -= zeroPadding - return nn, err -} - -blob -mark :166 -data 3727 -package azure - -import ( - "bytes" - "testing" -) - -func Test_zeroFillWrite_AppendNoGap(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*1) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024*3 + 512) - if nn, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(firstChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - secondChunk := randomContents(256) - if nn, err := zw.Write("a", "b", int64(len(firstChunk)), bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(firstChunk, secondChunk...)) - } - -} - -func Test_zeroFillWrite_StartWithGap(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - chunk := randomContents(1024 * 5) - padding := int64(1024*2 + 256) - if nn, err := zw.Write("a", "b", padding, bytes.NewReader(chunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(chunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(make([]byte, padding), chunk...)) - } -} - -func Test_zeroFillWrite_AppendWithGap(t *testing.T)
{ - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024*3 + 512) - if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - secondChunk := randomContents(256) - padding := int64(1024 * 4) - if nn, err := zw.Write("a", "b", int64(len(firstChunk))+padding, bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(firstChunk, append(make([]byte, padding), secondChunk...)...)) - } -} - -func Test_zeroFillWrite_LiesWithinSize(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024 * 3) - if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - // in this case, zerofill won't be used - secondChunk := randomContents(256) - if nn, err := zw.Write("a", "b", 0, bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(secondChunk, firstChunk[len(secondChunk):]...)) - } -} - -blob -mark :167 -data 5233 -// Package base provides a base implementation of the storage driver that can -// be used to implement common checks. The goal is to increase the amount of -// code sharing. -// -// The canonical way to use this package is to embed Base in the exported -// driver struct such that calls are proxied through this implementation. -// First, declare the internal driver, as follows: -// -// type driver struct { ... internal ...} -// -// The resulting type should implement StorageDriver such that it can be the -// target of a Base struct. The exported type can then be declared as follows: -// -// type Driver struct { -// Base -// } -// -// Because Driver embeds Base, it effectively implements StorageDriver through -// Base. If the exported Driver needs to intercept a call before it reaches -// Base, it should implement that method itself; driver still implements the -// actual logic. -// -// To further shield the embed from other packages, it is recommended to -// employ a private embed struct: -// -// type baseEmbed struct { -// base.Base -// } -// -// Then, declare driver to embed baseEmbed, rather than Base directly: -// -// type Driver struct { -// baseEmbed -// } -// -// The type now implements StorageDriver, proxying through Base, without -// exporting an unnecessary field.
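A minimal end-to-end sketch of the embedding pattern described above, as a hypothetical no-op driver. The package name and stub behavior are invented, and the method set assumes the StorageDriver interface of this vendored version; a real driver would talk to its backend in the stubs.

```go
package noop

import (
	"io"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/base"
)

// driver is the internal, unexported implementation.
type driver struct{}

func (d *driver) GetContent(path string) ([]byte, error) {
	return nil, storagedriver.PathNotFoundError{Path: path}
}
func (d *driver) PutContent(path string, content []byte) error { return nil }
func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
	return nil, storagedriver.PathNotFoundError{Path: path}
}
func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (int64, error) {
	return 0, nil
}
func (d *driver) Stat(path string) (storagedriver.FileInfo, error) {
	return nil, storagedriver.PathNotFoundError{Path: path}
}
func (d *driver) List(path string) ([]string, error)      { return nil, nil }
func (d *driver) Move(sourcePath, destPath string) error  { return nil }
func (d *driver) Delete(path string) error                { return nil }
func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) {
	return "", storagedriver.ErrUnsupportedMethod
}

// baseEmbed hides the base.Base embed from other packages.
type baseEmbed struct{ base.Base }

// Driver is the exported type; calls are path-checked and traced by base.Base
// before reaching driver.
type Driver struct{ baseEmbed }

// New wires the internal driver into Base.
func New() *Driver {
	return &Driver{baseEmbed{base.Base{StorageDriver: &driver{}}}}
}
```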
-package base - -import ( - "io" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// Base provides a wrapper around a storagedriver implementation that provides -// common path and bounds checking. -type Base struct { - storagedriver.StorageDriver -} - -// GetContent wraps GetContent of underlying storage driver. -func (base *Base) GetContent(path string) ([]byte, error) { - _, done := context.WithTrace(context.Background()) - defer done("Base.GetContent") - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.GetContent(path) -} - -// PutContent wraps PutContent of underlying storage driver. -func (base *Base) PutContent(path string, content []byte) error { - _, done := context.WithTrace(context.Background()) - defer done("Base.PutContent") - - if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.PutContent(path, content) -} - -// ReadStream wraps ReadStream of underlying storage driver. -func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { - _, done := context.WithTrace(context.Background()) - defer done("Base.ReadStream") - - if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.ReadStream(path, offset) -} - -// WriteStream wraps WriteStream of underlying storage driver. -func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { - _, done := context.WithTrace(context.Background()) - defer done("Base.WriteStream") - - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - if !storagedriver.PathRegexp.MatchString(path) { - return 0, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.WriteStream(path, offset, reader) -} - -// Stat wraps Stat of underlying storage driver. -func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { - _, done := context.WithTrace(context.Background()) - defer done("Base.Stat") - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.Stat(path) -} - -// List wraps List of underlying storage driver. -func (base *Base) List(path string) ([]string, error) { - _, done := context.WithTrace(context.Background()) - defer done("Base.List") - - if !storagedriver.PathRegexp.MatchString(path) && path != "/" { - return nil, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.List(path) -} - -// Move wraps Move of underlying storage driver. -func (base *Base) Move(sourcePath string, destPath string) error { - _, done := context.WithTrace(context.Background()) - defer done("Base.Move") - - if !storagedriver.PathRegexp.MatchString(sourcePath) { - return storagedriver.InvalidPathError{Path: sourcePath} - } else if !storagedriver.PathRegexp.MatchString(destPath) { - return storagedriver.InvalidPathError{Path: destPath} - } - - return base.StorageDriver.Move(sourcePath, destPath) -} - -// Delete wraps Delete of underlying storage driver. 
-func (base *Base) Delete(path string) error { - _, done := context.WithTrace(context.Background()) - defer done("Base.Delete") - - if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.Delete(path) -} - -// URLFor wraps URLFor of underlying storage driver. -func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) { - _, done := context.WithTrace(context.Background()) - defer done("Base.URLFor") - - if !storagedriver.PathRegexp.MatchString(path) { - return "", storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.URLFor(path, options) -} - -blob -mark :168 -data 2716 -package factory - -import ( - "fmt" - - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// driverFactories stores an internal mapping between storage driver names and their respective -// factories -var driverFactories = make(map[string]StorageDriverFactory) - -// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces -// Storage drivers should call Register() with a factory to make the driver available by name -type StorageDriverFactory interface { - // Create returns a new storagedriver.StorageDriver with the given parameters - // Parameters will vary by driver and may be ignored - // Each parameter key must only consist of lowercase letters and numbers - Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) -} - -// Register makes a storage driver available by the provided name. -// If Register is called twice with the same name or if the driver factory is nil, it panics. -func Register(name string, factory StorageDriverFactory) { - if factory == nil { - panic("Must not provide nil StorageDriverFactory") - } - _, registered := driverFactories[name] - if registered { - panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) - } - - driverFactories[name] = factory -} - -// Create constructs a new storagedriver.StorageDriver with the given name and parameters. -// To run in-process, the StorageDriverFactory must first be registered with the given name -// If no in-process drivers are found with the given name, this attempts to create an IPC driver -// If no in-process or external drivers are found, an InvalidStorageDriverError is returned -func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - driverFactory, ok := driverFactories[name] - if !ok { - return nil, InvalidStorageDriverError{name} - - // NOTE(stevvooe): We are disabling storagedriver ipc for now, as the - // server and client need to be updated for the changed API calls and - // there were some problems with libchan hanging. We'll phase this - // functionality back in over the next few weeks.
- - // No registered StorageDriverFactory found, try ipc - // driverClient, err := ipc.NewDriverClient(name, parameters) - // if err != nil { - // return nil, InvalidStorageDriverError{name} - // } - // err = driverClient.Start() - // if err != nil { - // return nil, err - // } - // return driverClient, nil - } - return driverFactory.Create(parameters) -} - -// InvalidStorageDriverError records an attempt to construct an unregistered storage driver -type InvalidStorageDriverError struct { - Name string -} - -func (err InvalidStorageDriverError) Error() string { - return fmt.Sprintf("StorageDriver not registered: %s", err.Name) -} - -blob -mark :169 -data 2652 -package driver - -import "time" - -// FileInfo returns information about a given path. Inspired by os.FileInfo, -// it elides the base name method in favor of a full path. -type FileInfo interface { - // Path provides the full path of the target of this file info. - Path() string - - // Size returns current length in bytes of the file. The return value can - // be used to write to the end of the file at path. The value is - // meaningless if IsDir returns true. - Size() int64 - - // ModTime returns the modification time for the file. For backends that - // don't have a modification time, the creation time should be returned. - ModTime() time.Time - - // IsDir returns true if the path is a directory. - IsDir() bool -} - -// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal -// should only be used by storagedriver implementations. They should be moved -// to a "driver" package, similar to database/sql. - -// FileInfoFields provides the exported fields for implementing FileInfo -// interface in storagedriver implementations. It should be used with -// FileInfoInternal. -type FileInfoFields struct { - // Path provides the full path of the target of this file info. - Path string - - // Size is current length in bytes of the file. The value of this field - // can be used to write to the end of the file at path. The value is - // meaningless if IsDir is set to true. - Size int64 - - // ModTime returns the modification time for the file. For backends that - // don't have a modification time, the creation time should be returned. - ModTime time.Time - - // IsDir returns true if the path is a directory. - IsDir bool -} - -// FileInfoInternal implements the FileInfo interface. This should only be -// used by storagedriver implementations that don't have a specialized -// FileInfo type. -type FileInfoInternal struct { - FileInfoFields -} - -var _ FileInfo = FileInfoInternal{} -var _ FileInfo = &FileInfoInternal{} - -// Path provides the full path of the target of this file info. -func (fi FileInfoInternal) Path() string { - return fi.FileInfoFields.Path -} - -// Size returns current length in bytes of the file. The return value can -// be used to write to the end of the file at path. The value is -// meaningless if IsDir returns true. -func (fi FileInfoInternal) Size() int64 { - return fi.FileInfoFields.Size -} - -// ModTime returns the modification time for the file. For backends that -// don't have a modification time, the creation time should be returned. -func (fi FileInfoInternal) ModTime() time.Time { - return fi.FileInfoFields.ModTime -} - -// IsDir returns true if the path is a directory.
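Two short usage sketches for the APIs above. First, the Register/Create contract of the factory package: a driver registers a factory under a name (normally in init()), and callers construct drivers by name. This sketch reuses the inmemory driver under an invented name, "inmemory-copy", to avoid the duplicate-registration panic, since the inmemory package already registers itself.

```go
package main

import (
	"fmt"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/factory"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

// inmemoryFactory turns a parameter map into a concrete driver; the inmemory
// driver takes no parameters, so the map is ignored.
type inmemoryFactory struct{}

func (f *inmemoryFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	return inmemory.New(), nil
}

func main() {
	factory.Register("inmemory-copy", &inmemoryFactory{})

	d, err := factory.Create("inmemory-copy", nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("created driver: %T\n", d)

	// Unknown names yield InvalidStorageDriverError.
	if _, err := factory.Create("no-such-driver", nil); err != nil {
		fmt.Println(err)
	}
}
```

Second, the FileInfoFields/FileInfoInternal pairing: a driver without a specialized FileInfo type can populate plain fields and return them through FileInfoInternal. The path and size below are hypothetical.

```go
package main

import (
	"fmt"
	"time"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

func main() {
	var fi storagedriver.FileInfo = storagedriver.FileInfoInternal{
		FileInfoFields: storagedriver.FileInfoFields{
			Path:    "/docker/registry/v2/blobs/example", // hypothetical path
			Size:    1024,
			ModTime: time.Now(),
			IsDir:   false,
		},
	}
	fmt.Println(fi.Path(), fi.Size(), fi.IsDir())
}
```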
-func (fi FileInfoInternal) IsDir() bool { - return fi.FileInfoFields.IsDir -} - -blob -mark :170 -data 7228 -package filesystem - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "time" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "filesystem" -const defaultRootDirectory = "/tmp/registry/storage" - -func init() { - factory.Register(driverName, &filesystemDriverFactory{}) -} - -// filesystemDriverFactory implements the factory.StorageDriverFactory interface -type filesystemDriverFactory struct{} - -func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters), nil -} - -type driver struct { - rootDirectory string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by a local -// filesystem. All provided paths will be subpaths of the RootDirectory. -type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Optional Parameters: -// - rootdirectory -func FromParameters(parameters map[string]interface{}) *Driver { - var rootDirectory = defaultRootDirectory - if parameters != nil { - rootDir, ok := parameters["rootdirectory"] - if ok { - rootDirectory = fmt.Sprint(rootDir) - } - } - return New(rootDirectory) -} - -// New constructs a new Driver with a given rootDirectory -func New(rootDirectory string) *Driver { - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: &driver{ - rootDirectory: rootDirectory, - }, - }, - }, - } -} - -// Implement the storagedriver.StorageDriver interface - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { - rc, err := d.ReadStream(path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(subPath string, contents []byte) error { - if _, err := d.WriteStream(subPath, 0, bytes.NewReader(contents)); err != nil { - return err - } - - return os.Truncate(d.fullPath(subPath), int64(len(contents))) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { - file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return nil, err - } - - seekPos, err := file.Seek(int64(offset), os.SEEK_SET) - if err != nil { - file.Close() - return nil, err - } else if seekPos < int64(offset) { - file.Close() - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - return file, nil -} - -// WriteStream stores the contents of the provided io.Reader at a location -// designated by the given path. -func (d *driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn int64, err error) { - // TODO(stevvooe): This needs to be a requirement. 
- // if !path.IsAbs(subPath) { - // return fmt.Errorf("absolute path required: %q", subPath) - // } - - fullPath := d.fullPath(subPath) - parentDir := path.Dir(fullPath) - if err := os.MkdirAll(parentDir, 0755); err != nil { - return 0, err - } - - fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - // TODO(stevvooe): A few missing conditions in storage driver: - // 1. What if the path is already a directory? - // 2. Should number 1 be exposed explicitly in storagedriver? - // 3. Can this path not exist, even if we create above? - return 0, err - } - defer fp.Close() - - nn, err = fp.Seek(offset, os.SEEK_SET) - if err != nil { - return 0, err - } - - if nn != offset { - return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) - } - - return io.Copy(fp, reader) -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(subPath string) (storagedriver.FileInfo, error) { - fullPath := d.fullPath(subPath) - - fi, err := os.Stat(fullPath) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: subPath} - } - - return nil, err - } - - return fileInfo{ - path: subPath, - FileInfo: fi, - }, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(subPath string) ([]string, error) { - if subPath[len(subPath)-1] != '/' { - subPath += "/" - } - fullPath := d.fullPath(subPath) - - dir, err := os.Open(fullPath) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: subPath} - } - return nil, err - } - - defer dir.Close() - - fileNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - - keys := make([]string, 0, len(fileNames)) - for _, fileName := range fileNames { - keys = append(keys, path.Join(subPath, fileName)) - } - - return keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(sourcePath string, destPath string) error { - source := d.fullPath(sourcePath) - dest := d.fullPath(destPath) - - if _, err := os.Stat(source); os.IsNotExist(err) { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - - if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { - return err - } - - err := os.Rename(source, dest) - return err -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(subPath string) error { - fullPath := d.fullPath(subPath) - - _, err := os.Stat(fullPath) - if err != nil && !os.IsNotExist(err) { - return err - } else if err != nil { - return storagedriver.PathNotFoundError{Path: subPath} - } - - err = os.RemoveAll(fullPath) - return err -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an ErrUnsupportedMethod in certain StorageDriver implementations. -func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod -} - -// fullPath returns the absolute path of a key within the Driver's storage. -func (d *driver) fullPath(subPath string) string { - return path.Join(d.rootDirectory, subPath) -} - -type fileInfo struct { - os.FileInfo - path string -} - -var _ storagedriver.FileInfo = fileInfo{} - -// Path provides the full path of the target of this file info.
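A quick usage sketch for the filesystem driver above: "rootdirectory" is its only (optional) parameter, and reads and writes flow through base.Base's path validation before hitting the local disk. The root path below is an example.

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage/driver/filesystem"
)

func main() {
	// FromParameters falls back to the default root when the map is nil or
	// the key is absent; here an explicit example path is supplied.
	d := filesystem.FromParameters(map[string]interface{}{
		"rootdirectory": "/tmp/registry-example",
	})

	if err := d.PutContent("/greeting", []byte("hello")); err != nil {
		panic(err)
	}
	b, err := d.GetContent("/greeting")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "hello"
}
```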
-func (fi fileInfo) Path() string { - return fi.path -} - -// Size returns current length in bytes of the file. The return value can -// be used to write to the end of the file at path. The value is -// meaningless if IsDir returns true. -func (fi fileInfo) Size() int64 { - if fi.IsDir() { - return 0 - } - - return fi.FileInfo.Size() -} - -// ModTime returns the modification time for the file. For backends that -// don't have a modification time, the creation time should be returned. -func (fi fileInfo) ModTime() time.Time { - return fi.FileInfo.ModTime() -} - -// IsDir returns true if the path is a directory. -func (fi fileInfo) IsDir() bool { - return fi.FileInfo.IsDir() -} - -blob -mark :171 -data 752 -package filesystem - -import ( - "io/ioutil" - "os" - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - . "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } - -func init() { - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) { - return New(root), nil - }, testsuites.NeverSkip) - - // BUG(stevvooe): IPC is broken so we're disabling for now. Will revisit later. - // testsuites.RegisterIPCSuite(driverName, map[string]string{"rootdirectory": root}, testsuites.NeverSkip) -} - -blob -mark :172 -data 6221 -package inmemory - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "sync" - "time" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "inmemory" - -func init() { - factory.Register(driverName, &inMemoryDriverFactory{}) -} - -// inMemoryDriverFactory implements the factory.StorageDriverFactory interface. -type inMemoryDriverFactory struct{} - -func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return New(), nil -} - -type driver struct { - root *dir - mutex sync.RWMutex -} - -// baseEmbed allows us to hide the Base embed. -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by a local map. -// Intended solely for example and testing purposes. -type Driver struct { - baseEmbed // embedded, hidden base driver. -} - -var _ storagedriver.StorageDriver = &Driver{} - -// New constructs a new Driver. -func New() *Driver { - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: &driver{ - root: &dir{ - common: common{ - p: "/", - mod: time.Now(), - }, - }, - }, - }, - }, - } -} - -// Implement the storagedriver.StorageDriver interface. - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - rc, err := d.ReadStream(path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - return ioutil.ReadAll(rc) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(p string, contents []byte) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - f, err := d.root.mkfile(p) - if err != nil { - // TODO(stevvooe): Again, we need to clarify when this is not a - // directory in StorageDriver API.
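// (mkfile fails with errIsDir when an existing directory already occupies p,
// so the generic "not a file" message below loses that distinction; surfacing
// it cleanly would need a dedicated error type in the storagedriver package.)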
- return fmt.Errorf("not a file") - } - - f.truncate() - f.WriteAt(contents, 0) - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - path = normalize(path) - found := d.root.find(path) - - if found.path() != path { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - if found.isdir() { - return nil, fmt.Errorf("%q is a directory", path) - } - - return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { - d.mutex.Lock() - defer d.mutex.Unlock() - - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - normalized := normalize(path) - - f, err := d.root.mkfile(normalized) - if err != nil { - return 0, fmt.Errorf("not a file") - } - - // Unlock while we are reading from the source, in case we are reading - // from the same mfs instance. This can be fixed by a more granular - // locking model. - d.mutex.Unlock() - d.mutex.RLock() // Take the readlock to block other writers. - var buf bytes.Buffer - - nn, err = buf.ReadFrom(reader) - if err != nil { - // TODO(stevvooe): This condition is odd and we may need to clarify: - // we've read nn bytes from reader but have written nothing to the - // backend. What is the correct return value? Really, the caller needs - // to know that the reader has been advanced and reattempting the - // operation is incorrect. - d.mutex.RUnlock() - d.mutex.Lock() - return nn, err - } - - d.mutex.RUnlock() - d.mutex.Lock() - f.WriteAt(buf.Bytes(), offset) - return nn, err -} - -// Stat returns info about the provided path. -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - normalized := normalize(path) - found := d.root.find(path) - - if found.path() != normalized { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - fi := storagedriver.FileInfoFields{ - Path: path, - IsDir: found.isdir(), - ModTime: found.modtime(), - } - - if !fi.IsDir { - fi.Size = int64(len(found.(*file).data)) - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(path string) ([]string, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - normalized := normalize(path) - - found := d.root.find(normalized) - - if !found.isdir() { - return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... - } - - entries, err := found.(*dir).list(normalized) - - if err != nil { - switch err { - case errNotExists: - return nil, storagedriver.PathNotFoundError{Path: path} - case errIsNotDir: - return nil, fmt.Errorf("not a directory") - default: - return nil, err - } - } - - return entries, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. 
-func (d *driver) Move(sourcePath string, destPath string) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) - - err := d.root.move(normalizedSrc, normalizedDst) - switch err { - case errNotExists: - return storagedriver.PathNotFoundError{Path: destPath} - default: - return err - } -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(path string) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalized := normalize(path) - - err := d.root.delete(normalized) - switch err { - case errNotExists: - return storagedriver.PathNotFoundError{Path: path} - default: - return err - } -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod -} - -blob -mark :173 -data 676 -package inmemory - -import ( - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -func init() { - inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { - return New(), nil - } - testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip) - - // BUG(stevvooe): Disable flaky IPC tests for now when we can troubleshoot - // the problems with libchan. - // testsuites.RegisterIPCSuite(driverName, nil, testsuites.NeverSkip) -} - -blob -mark :174 -data 6120 -package inmemory - -import ( - "fmt" - "io" - "path" - "sort" - "strings" - "time" -) - -var ( - errExists = fmt.Errorf("exists") - errNotExists = fmt.Errorf("notexists") - errIsNotDir = fmt.Errorf("notdir") - errIsDir = fmt.Errorf("isdir") -) - -type node interface { - name() string - path() string - isdir() bool - modtime() time.Time -} - -// dir is the central type for the memory-based storagedriver. All operations -// are dispatched from a root dir. -type dir struct { - common - - // TODO(stevvooe): Use sorted slice + search. - children map[string]node -} - -var _ node = &dir{} - -func (d *dir) isdir() bool { - return true -} - -// add places the node n into dir d. -func (d *dir) add(n node) { - if d.children == nil { - d.children = make(map[string]node) - } - - d.children[n.name()] = n - d.mod = time.Now() -} - -// find searches for the node, given path q in dir. If the node is found, it -// will be returned and its (node).path() will match q. If the node is not -// found, the closest existing parent is returned. -func (d *dir) find(q string) node { - q = strings.Trim(q, "/") - i := strings.Index(q, "/") - - if q == "" { - return d - } - - if i == 0 { - panic("shouldn't happen, no root paths") - } - - var component string - if i < 0 { - // No more path components - component = q - } else { - component = q[:i] - } - - child, ok := d.children[component] - if !ok { - // Node was not found. Return p and the current node. - return d - } - - if child.isdir() { - // traverse down!
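// Drop the matched component plus its separator from q and continue the
// lookup in the child directory; the recursion bottoms out at the final
// component or at the closest existing parent.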
- q = q[i+1:] - return child.(*dir).find(q) - } - - return child -} - -func (d *dir) list(p string) ([]string, error) { - n := d.find(p) - - if n.path() != p { - return nil, errNotExists - } - - if !n.isdir() { - return nil, errIsNotDir - } - - var children []string - for _, child := range n.(*dir).children { - children = append(children, child.path()) - } - - sort.Strings(children) - return children, nil -} - -// mkfile or return the existing one. returns an error if it exists and is a -// directory. Essentially, this is open or create. -func (d *dir) mkfile(p string) (*file, error) { - n := d.find(p) - if n.path() == p { - if n.isdir() { - return nil, errIsDir - } - - return n.(*file), nil - } - - dirpath, filename := path.Split(p) - // Make any non-existent directories - n, err := d.mkdirs(dirpath) - if err != nil { - return nil, err - } - - dd := n.(*dir) - n = &file{ - common: common{ - p: path.Join(dd.path(), filename), - mod: time.Now(), - }, - } - - dd.add(n) - return n.(*file), nil -} - -// mkdirs creates any missing directory entries in p and returns the result. -func (d *dir) mkdirs(p string) (*dir, error) { - p = normalize(p) - - n := d.find(p) - - if !n.isdir() { - // Found something there - return nil, errIsNotDir - } - - if n.path() == p { - return n.(*dir), nil - } - - dd := n.(*dir) - - relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") - - if relative == "" { - return dd, nil - } - - components := strings.Split(relative, "/") - for _, component := range components { - d, err := dd.mkdir(component) - - if err != nil { - // This should actually never happen, since there are no children. - return nil, err - } - dd = d - } - - return dd, nil -} - -// mkdir creates a child directory under d with the given name. -func (d *dir) mkdir(name string) (*dir, error) { - if name == "" { - return nil, fmt.Errorf("invalid dirname") - } - - _, ok := d.children[name] - if ok { - return nil, errExists - } - - child := &dir{ - common: common{ - p: path.Join(d.path(), name), - mod: time.Now(), - }, - } - d.add(child) - d.mod = time.Now() - - return child, nil -} - -func (d *dir) move(src, dst string) error { - dstDirname, _ := path.Split(dst) - - dp, err := d.mkdirs(dstDirname) - if err != nil { - return err - } - - srcDirname, srcFilename := path.Split(src) - sp := d.find(srcDirname) - - if normalize(srcDirname) != normalize(sp.path()) { - return errNotExists - } - - spd, ok := sp.(*dir) - if !ok { - return errIsNotDir // paranoid. - } - - s, ok := spd.children[srcFilename] - if !ok { - return errNotExists - } - - delete(spd.children, srcFilename) - - switch n := s.(type) { - case *dir: - n.p = dst - case *file: - n.p = dst - } - - dp.add(s) - - return nil -} - -func (d *dir) delete(p string) error { - dirname, filename := path.Split(p) - parent := d.find(dirname) - - if normalize(dirname) != normalize(parent.path()) { - return errNotExists - } - - if _, ok := parent.(*dir).children[filename]; !ok { - return errNotExists - } - - delete(parent.(*dir).children, filename) - return nil -} - -// dump outputs a primitive directory structure to stdout. -func (d *dir) dump(indent string) { - fmt.Println(indent, d.name()+"/") - - for _, child := range d.children { - if child.isdir() { - child.(*dir).dump(indent + "\t") - } else { - fmt.Println(indent, child.name()) - } - - } -} - -func (d *dir) String() string { - return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) -} - -// file stores actual data in the fs tree. 
It acts like an open, seekable file -// where operations are conducted through ReadAt and WriteAt. Use it with -// SectionReader for the best effect. -type file struct { - common - data []byte -} - -var _ node = &file{} - -func (f *file) isdir() bool { - return false -} - -func (f *file) truncate() { - f.data = f.data[:0] -} - -func (f *file) sectionReader(offset int64) io.Reader { - return io.NewSectionReader(f, offset, int64(len(f.data))-offset) -} - -func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { - return copy(p, f.data[offset:]), nil -} - -func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { - off := int(offset) - if cap(f.data) < off+len(p) { - data := make([]byte, len(f.data), off+len(p)) - copy(data, f.data) - f.data = data - } - - f.mod = time.Now() - f.data = f.data[:off+len(p)] - - return copy(f.data[off:off+len(p)], p), nil -} - -func (f *file) String() string { - return fmt.Sprintf("&file{path: %q}", f.p) -} - -// common provides shared fields and methods for node implementations. -type common struct { - p string - mod time.Time -} - -func (c *common) name() string { - _, name := path.Split(c.p) - return name -} - -func (c *common) path() string { - return c.p -} - -func (c *common) modtime() time.Time { - return c.mod -} - -func normalize(p string) string { - return "/" + strings.Trim(p, "/") -} - -blob -mark :175 -data 11640 -// +build ignore - -package ipc - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "syscall" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" - "github.com/docker/libchan/spdy" -) - -// StorageDriverExecutablePrefix is the prefix which the IPC storage driver -// loader expects driver executables to begin with. For example, the s3 driver -// should be named "registry-storagedriver-s3". 
-const StorageDriverExecutablePrefix = "registry-storagedriver-" - -// StorageDriverClient is a storagedriver.StorageDriver implementation using a -// managed child process communicating over IPC using libchan with a unix domain -// socket -type StorageDriverClient struct { - subprocess *exec.Cmd - exitChan chan error - exitErr error - stopChan chan struct{} - socket *os.File - transport *spdy.Transport - sender libchan.Sender - version storagedriver.Version -} - -// NewDriverClient constructs a new out-of-process storage driver using the -// driver name and configuration parameters -// A user must call Start on this driver client before remote method calls can -// be made -// -// Looks for drivers in the following locations in order: -// - Storage drivers directory (to be determined, yet not implemented) -// - $GOPATH/bin -// - $PATH -func NewDriverClient(name string, parameters map[string]string) (*StorageDriverClient, error) { - paramsBytes, err := json.Marshal(parameters) - if err != nil { - return nil, err - } - - driverExecName := StorageDriverExecutablePrefix + name - driverPath, err := exec.LookPath(driverExecName) - if err != nil { - return nil, err - } - - command := exec.Command(driverPath, string(paramsBytes)) - - return &StorageDriverClient{ - subprocess: command, - }, nil -} - -// Start starts the designated child process storage driver and binds a socket -// to this process for IPC method calls -func (driver *StorageDriverClient) Start() error { - driver.exitErr = nil - driver.exitChan = make(chan error) - driver.stopChan = make(chan struct{}) - - fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0) - if err != nil { - return err - } - - childSocket := os.NewFile(uintptr(fileDescriptors[0]), "childSocket") - driver.socket = os.NewFile(uintptr(fileDescriptors[1]), "parentSocket") - - driver.subprocess.Stdout = os.Stdout - driver.subprocess.Stderr = os.Stderr - driver.subprocess.ExtraFiles = []*os.File{childSocket} - - if err = driver.subprocess.Start(); err != nil { - driver.Stop() - return err - } - - go driver.handleSubprocessExit() - - if err = childSocket.Close(); err != nil { - driver.Stop() - return err - } - - connection, err := net.FileConn(driver.socket) - if err != nil { - driver.Stop() - return err - } - driver.transport, err = spdy.NewClientTransport(connection) - if err != nil { - driver.Stop() - return err - } - driver.sender, err = driver.transport.NewSendChannel() - if err != nil { - driver.Stop() - return err - } - - // Check the driver's version to determine compatibility - receiver, remoteSender := libchan.Pipe() - err = driver.sender.Send(&Request{Type: "Version", ResponseChannel: remoteSender}) - if err != nil { - driver.Stop() - return err - } - - var response VersionResponse - err = receiver.Receive(&response) - if err != nil { - driver.Stop() - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - driver.version = response.Version - - if driver.version.Major() != storagedriver.CurrentVersion.Major() || driver.version.Minor() > storagedriver.CurrentVersion.Minor() { - return IncompatibleVersionError{driver.version} - } - - return nil -} - -// Stop stops the child process storage driver -// storagedriver.StorageDriver methods called after Stop will fail -func (driver *StorageDriverClient) Stop() error { - var closeSenderErr, closeTransportErr, closeSocketErr, killErr error - - if driver.sender != nil { - closeSenderErr = driver.sender.Close() - } - if driver.transport != nil { - 
closeTransportErr = driver.transport.Close() - } - if driver.socket != nil { - closeSocketErr = driver.socket.Close() - } - if driver.subprocess != nil { - killErr = driver.subprocess.Process.Kill() - } - if driver.stopChan != nil { - close(driver.stopChan) - } - - if closeSenderErr != nil { - return closeSenderErr - } else if closeTransportErr != nil { - return closeTransportErr - } else if closeSocketErr != nil { - return closeSocketErr - } - - return killErr -} - -// Implement the storagedriver.StorageDriver interface over IPC - -// GetContent retrieves the content stored at "path" as a []byte. -func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "GetContent", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ReadStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - defer response.Reader.Close() - contents, err := ioutil.ReadAll(response.Reader) - if err != nil { - return nil, err - } - return contents, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (driver *StorageDriverClient) PutContent(path string, contents []byte) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - - params := map[string]interface{}{"Path": path, "Reader": ioutil.NopCloser(bytes.NewReader(contents))} - err := driver.sender.Send(&Request{Type: "PutContent", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(WriteStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (driver *StorageDriverClient) ReadStream(path string, offset int64) (io.ReadCloser, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path, "Offset": offset} - err := driver.sender.Send(&Request{Type: "ReadStream", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ReadStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - return response.Reader, nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. 
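// A hypothetical caller sketch (names assumed, not from this file):
//
//	data := []byte("blob contents")
//	err := client.WriteStream("/some/path", 0, int64(len(data)),
//		ioutil.NopCloser(bytes.NewReader(data)))
//
// Unlike the in-process drivers, this IPC variant takes an explicit size and
// an io.ReadCloser, and does not return the number of bytes written.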
-func (driver *StorageDriverClient) WriteStream(path string, offset, size int64, reader io.ReadCloser) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": reader} - err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(WriteStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// CurrentSize retrieves the current size in bytes of the object at the given -// path. -func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) { - if err := driver.exited(); err != nil { - return 0, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "CurrentSize", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return 0, err - } - - response := new(CurrentSizeResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return 0, err - } - - if response.Error != nil { - return 0, response.Error.Unwrap() - } - - return response.Position, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (driver *StorageDriverClient) List(path string) ([]string, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "List", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ListResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - return response.Keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (driver *StorageDriverClient) Move(sourcePath string, destPath string) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"SourcePath": sourcePath, "DestPath": destPath} - err := driver.sender.Send(&Request{Type: "Move", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(MoveResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths.
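// Like the other client methods, this is a thin request/response exchange:
// send a Request over libchan, then let receiveResponse guard the read
// against the subprocess exiting mid-call.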
-func (driver *StorageDriverClient) Delete(path string) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "Delete", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(DeleteResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// handleSubprocessExit populates the exit channel until we have explicitly -// stopped the storage driver subprocess -// Requests can select on driver.exitChan and response receiving and not hang if -// the process exits -func (driver *StorageDriverClient) handleSubprocessExit() { - exitErr := driver.subprocess.Wait() - if exitErr == nil { - exitErr = fmt.Errorf("Storage driver subprocess already exited cleanly") - } else { - exitErr = fmt.Errorf("Storage driver subprocess exited with error: %s", exitErr) - } - - driver.exitErr = exitErr - - for { - select { - case driver.exitChan <- exitErr: - case <-driver.stopChan: - close(driver.exitChan) - return - } - } -} - -// receiveResponse populates the response value with the next result from the -// given receiver, or returns an error if receiving failed or the driver has -// stopped -func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, response interface{}) error { - receiveChan := make(chan error, 1) - go func(receiver libchan.Receiver, receiveChan chan<- error) { - receiveChan <- receiver.Receive(response) - }(receiver, receiveChan) - - var err error - var ok bool - select { - case err = <-receiveChan: - case err, ok = <-driver.exitChan: - if !ok { - err = driver.exitErr - } - } - - return err -} - -// exited returns an exit error if the driver has exited or nil otherwise -func (driver *StorageDriverClient) exited() error { - select { - case err, ok := <-driver.exitChan: - if !ok { - return driver.exitErr - } - return err - default: - return nil - } -} - -blob -mark :176 -data 4252 -// +build ignore - -package ipc - -import ( - "fmt" - "io" - "reflect" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" -) - -// StorageDriver is the interface which IPC storage drivers must implement. As external storage -// drivers may be defined to use a different version of the storagedriver.StorageDriver interface, -// we use an additional version check to determine compatibility. -type StorageDriver interface { - // Version returns the storagedriver.StorageDriver interface version which this storage driver - // implements, which is used to determine driver compatibility - Version() (storagedriver.Version, error) -} - -// IncompatibleVersionError is returned when a storage driver is using an incompatible version of -// the storagedriver.StorageDriver api -type IncompatibleVersionError struct { - version storagedriver.Version -} - -func (e IncompatibleVersionError) Error() string { - return fmt.Sprintf("Incompatible storage driver version: %s", e.version) -} - -// Request defines a remote method call request -// A return value struct is to be sent over the ResponseChannel -type Request struct { - Type string `codec:",omitempty"` - Parameters map[string]interface{} `codec:",omitempty"` - ResponseChannel libchan.Sender `codec:",omitempty"` -} - -// ResponseError is a serializable error type.
-// The Type and Parameters may be used to reconstruct the same error on the -// client side, falling back to using the Type and Message if this cannot be -// done. -type ResponseError struct { - Type string `codec:",omitempty"` - Message string `codec:",omitempty"` - Parameters map[string]interface{} `codec:",omitempty"` -} - -// WrapError wraps an error in a serializable struct containing the error's type -// and message. -func WrapError(err error) *ResponseError { - if err == nil { - return nil - } - v := reflect.ValueOf(err) - re := ResponseError{ - Type: v.Type().String(), - Message: err.Error(), - } - - if v.Kind() == reflect.Struct { - re.Parameters = make(map[string]interface{}) - for i := 0; i < v.NumField(); i++ { - field := v.Type().Field(i) - re.Parameters[field.Name] = v.Field(i).Interface() - } - } - return &re -} - -// Unwrap returns the underlying error if it can be reconstructed, or the -// original ResponseError otherwise. -func (err *ResponseError) Unwrap() error { - var errVal reflect.Value - var zeroVal reflect.Value - - switch err.Type { - case "storagedriver.PathNotFoundError": - errVal = reflect.ValueOf(&storagedriver.PathNotFoundError{}) - case "storagedriver.InvalidOffsetError": - errVal = reflect.ValueOf(&storagedriver.InvalidOffsetError{}) - } - if errVal == zeroVal { - return err - } - - for k, v := range err.Parameters { - fieldVal := errVal.Elem().FieldByName(k) - if fieldVal == zeroVal { - return err - } - fieldVal.Set(reflect.ValueOf(v)) - } - - if unwrapped, ok := errVal.Elem().Interface().(error); ok { - return unwrapped - } - - return err - -} - -func (err *ResponseError) Error() string { - return fmt.Sprintf("%s: %s", err.Type, err.Message) -} - -// IPC method call response object definitions - -// VersionResponse is a response for a Version request -type VersionResponse struct { - Version storagedriver.Version `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// ReadStreamResponse is a response for a ReadStream request -type ReadStreamResponse struct { - Reader io.ReadCloser `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// WriteStreamResponse is a response for a WriteStream request -type WriteStreamResponse struct { - Error *ResponseError `codec:",omitempty"` -} - -// CurrentSizeResponse is a response for a CurrentSize request -type CurrentSizeResponse struct { - Position uint64 `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// ListResponse is a response for a List request -type ListResponse struct { - Keys []string `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// MoveResponse is a response for a Move request -type MoveResponse struct { - Error *ResponseError `codec:",omitempty"` -} - -// DeleteResponse is a response for a Delete request -type DeleteResponse struct { - Error *ResponseError `codec:",omitempty"` -} - -blob -mark :177 -data 5191 -// +build ignore - -package ipc - -import ( - "bytes" - "io" - "io/ioutil" - "net" - "os" - "reflect" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" - "github.com/docker/libchan/spdy" -) - -// StorageDriverServer runs a new IPC server handling requests for the given -// storagedriver.StorageDriver -// This explicitly uses file descriptor 3 for IPC communication, as storage drivers are spawned in -// client.go -// -// To create a new out-of-process driver, create a main package which calls StorageDriverServer with -// a storagedriver.StorageDriver -func 
StorageDriverServer(driver storagedriver.StorageDriver) error { - childSocket := os.NewFile(3, "childSocket") - defer childSocket.Close() - conn, err := net.FileConn(childSocket) - if err != nil { - panic(err) - } - defer conn.Close() - if transport, err := spdy.NewServerTransport(conn); err != nil { - panic(err) - } else { - for { - receiver, err := transport.WaitReceiveChannel() - if err == io.EOF { - return nil - } else if err != nil { - panic(err) - } - go receive(driver, receiver) - } - } -} - -// receive receives new storagedriver.StorageDriver method requests and creates a new goroutine to -// handle each request -// Requests are expected to be of type ipc.Request as the parameters are unknown until the request -// type is deserialized -func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) { - for { - var request Request - err := receiver.Receive(&request) - if err == io.EOF { - return - } else if err != nil { - panic(err) - } - go handleRequest(driver, request) - } -} - -// handleRequest handles storagedriver.StorageDriver method requests as defined in client.go -// Responds to requests using the Request.ResponseChannel -func handleRequest(driver storagedriver.StorageDriver, request Request) { - switch request.Type { - case "Version": - err := request.ResponseChannel.Send(&VersionResponse{Version: storagedriver.CurrentVersion}) - if err != nil { - panic(err) - } - case "GetContent": - path, _ := request.Parameters["Path"].(string) - content, err := driver.GetContent(path) - var response ReadStreamResponse - if err != nil { - response = ReadStreamResponse{Error: WrapError(err)} - } else { - response = ReadStreamResponse{Reader: ioutil.NopCloser(bytes.NewReader(content))} - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "PutContent": - path, _ := request.Parameters["Path"].(string) - reader, _ := request.Parameters["Reader"].(io.ReadCloser) - contents, err := ioutil.ReadAll(reader) - defer reader.Close() - if err == nil { - err = driver.PutContent(path, contents) - } - response := WriteStreamResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "ReadStream": - path, _ := request.Parameters["Path"].(string) - // Depending on serialization method, Offset may be converted to any int/uint type - offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() - reader, err := driver.ReadStream(path, offset) - var response ReadStreamResponse - if err != nil { - response = ReadStreamResponse{Error: WrapError(err)} - } else { - response = ReadStreamResponse{Reader: reader} - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "WriteStream": - path, _ := request.Parameters["Path"].(string) - // Depending on serialization method, Offset may be converted to any int/uint type - offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() - // Depending on serialization method, Size may be converted to any int/uint type - size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int() - reader, _ := request.Parameters["Reader"].(io.ReadCloser) - err := driver.WriteStream(path, offset, size, reader) - response := WriteStreamResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "CurrentSize": - path, _ := request.Parameters["Path"].(string) -
position, err := driver.CurrentSize(path) - response := CurrentSizeResponse{ - Position: position, - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "List": - path, _ := request.Parameters["Path"].(string) - keys, err := driver.List(path) - response := ListResponse{ - Keys: keys, - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "Move": - sourcePath, _ := request.Parameters["SourcePath"].(string) - destPath, _ := request.Parameters["DestPath"].(string) - err := driver.Move(sourcePath, destPath) - response := MoveResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "Delete": - path, _ := request.Parameters["Path"].(string) - err := driver.Delete(path) - response := DeleteResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - default: - panic(request) - } -} - -blob -mark :178 -data 3651 -// Package middleware - cloudfront wrapper for storage libs -// N.B. currently only works with S3, not arbitrary sites -// -package middleware - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "time" - - "github.com/AdRoll/goamz/cloudfront" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" -) - -// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that -// constructs temporary signed CloudFront URLs from the storagedriver layer URL, -// then issues HTTP Temporary Redirects to this CloudFront content URL. -type cloudFrontStorageMiddleware struct { - storagedriver.StorageDriver - cloudfront *cloudfront.CloudFront - duration time.Duration -} - -var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} - -// newCloudFrontLayerHandler constructs and returns a new CloudFront -// LayerHandler implementation.
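// (The constructor keeps its historical layer-handler name, but it is wired
// up as the "cloudfront" storage middleware; see the init function at the
// bottom of this file.)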
-// Required options: baseurl, privatekey, keypairid -func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { - base, ok := options["baseurl"] - if !ok { - return nil, fmt.Errorf("No baseurl provided") - } - baseURL, ok := base.(string) - if !ok { - return nil, fmt.Errorf("baseurl must be a string") - } - pk, ok := options["privatekey"] - if !ok { - return nil, fmt.Errorf("No privatekey provided") - } - pkPath, ok := pk.(string) - if !ok { - return nil, fmt.Errorf("privatekey must be a string") - } - kpid, ok := options["keypairid"] - if !ok { - return nil, fmt.Errorf("No keypairid provided") - } - keypairID, ok := kpid.(string) - if !ok { - return nil, fmt.Errorf("keypairid must be a string") - } - - pkBytes, err := ioutil.ReadFile(pkPath) - if err != nil { - return nil, fmt.Errorf("Failed to read privatekey file: %s", err) - } - - block, _ := pem.Decode([]byte(pkBytes)) - if block == nil { - return nil, fmt.Errorf("Failed to decode private key as an rsa private key") - } - privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, err - } - - cf := cloudfront.New(baseURL, privateKey, keypairID) - - duration := 20 * time.Minute - d, ok := options["duration"] - if ok { - switch d := d.(type) { - case time.Duration: - duration = d - case string: - dur, err := time.ParseDuration(d) - if err != nil { - return nil, fmt.Errorf("Invalid duration: %s", err) - } - duration = dur - } - } - - return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil -} - -// S3BucketKeyer is any type that is capable of returning the S3 bucket key -// which should be cached by AWS CloudFront. -type S3BucketKeyer interface { - S3BucketKey(path string) string -} - -// Resolve returns an http.Handler which can serve the contents of the given -// Layer, or an error if not supported by the storagedriver. -func (lh *cloudFrontStorageMiddleware) URLFor(path string, options map[string]interface{}) (string, error) { - // TODO(endophage): currently only supports S3 - keyer, ok := lh.StorageDriver.(S3BucketKeyer) - if !ok { - context.GetLogger(context.Background()).Warn("the CloudFront middleware does not support this backend storage driver") - return lh.StorageDriver.URLFor(path, options) - } - - cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) - if err != nil { - return "", err - } - return cfURL, nil -} - -// init registers the cloudfront layerHandler backend. -func init() { - storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) -} - -blob -mark :179 -data 1295 -package storagemiddleware - -import ( - "fmt" - - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// InitFunc is the type of a StorageMiddleware factory function and is -// used to register the constructor for different StorageMiddleware backends. -type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) - -var storageMiddlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a StorageMiddleware backend with the given name. 
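// A backend typically registers itself from its package init, as the
// cloudfront middleware above does:
//
//	func init() {
//		storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware))
//	}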
-func Register(name string, initFunc InitFunc) error { - if storageMiddlewares == nil { - storageMiddlewares = make(map[string]InitFunc) - } - if _, exists := storageMiddlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - storageMiddlewares[name] = initFunc - - return nil -} - -// Get constructs a StorageMiddleware with the given options using the named backend. -func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { - if storageMiddlewares != nil { - if initFunc, exists := storageMiddlewares[name]; exists { - return initFunc(storageDriver, options) - } - } - - return nil, fmt.Errorf("no storage middleware registered with name: %s", name) -} - -blob -mark :180 -data 19249 -// Package s3 provides a storagedriver.StorageDriver implementation to -// store blobs in Amazon S3 cloud storage. -// -// This package leverages the AdRoll/goamz client library for interfacing with -// s3. -// -// Because s3 is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// Keep in mind that s3 guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. -package s3 - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "strconv" - "strings" - "time" - - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "s3" - -// minChunkSize defines the minimum multipart upload chunk size -// S3 API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize - -// listMax is the largest amount of objects you can request from S3 in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKey string - SecretKey string - Bucket string - Region aws.Region - Encrypt bool - Secure bool - V4Auth bool - ChunkSize int64 - RootDirectory string -} - -func init() { - factory.Register(driverName, &s3DriverFactory{}) -} - -// s3DriverFactory implements the factory.StorageDriverFactory interface -type s3DriverFactory struct{} - -func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - S3 *s3.S3 - Bucket *s3.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 -// Objects are stored at absolute keys in the provided bucket. 
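// A hedged construction sketch (values hypothetical; accesskey and secretkey
// may be left out when EC2 IAM instance credentials are available):
//
//	d, err := FromParameters(map[string]interface{}{
//		"region":    "us-east-1",
//		"bucket":    "my-registry-bucket",
//		"accesskey": "AKIA...",
//		"secretkey": "secret",
//	})
//
// Note that although the FromParameters comment below lists encrypt as
// required, the code defaults it to false when the key is absent.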
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - accesskey -// - secretkey -// - region -// - bucket -// - encrypt -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - // Providing no values for these is valid in case the user is authenticating - // with an IAM on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) - accessKey, ok := parameters["accesskey"] - if !ok { - accessKey = "" - } - secretKey, ok := parameters["secretkey"] - if !ok { - secretKey = "" - } - - regionName, ok := parameters["region"] - if !ok || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") - } - region := aws.GetRegion(fmt.Sprint(regionName)) - if region.Name == "" { - return nil, fmt.Errorf("Invalid region provided: %v", region) - } - - bucket, ok := parameters["bucket"] - if !ok || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - encryptBool := false - encrypt, ok := parameters["encrypt"] - if ok { - encryptBool, ok = encrypt.(bool) - if !ok { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - } - - secureBool := true - secure, ok := parameters["secure"] - if ok { - secureBool, ok = secure.(bool) - if !ok { - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - } - - v4AuthBool := false - v4Auth, ok := parameters["v4auth"] - if ok { - v4AuthBool, ok = v4Auth.(bool) - if !ok { - return nil, fmt.Errorf("The v4auth parameter should be a boolean") - } - } - - chunkSize := int64(defaultChunkSize) - chunkSizeParam, ok := parameters["chunksize"] - if ok { - chunkSize, ok = chunkSizeParam.(int64) - if !ok || chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize parameter should be a number that is larger than 5*1024*1024") - } - } - - rootDirectory, ok := parameters["rootdirectory"] - if !ok { - rootDirectory = "" - } - - params := DriverParameters{ - fmt.Sprint(accessKey), - fmt.Sprint(secretKey), - fmt.Sprint(bucket), - region, - encryptBool, - secureBool, - v4AuthBool, - chunkSize, - fmt.Sprint(rootDirectory), - } - - return New(params) -} - -// New constructs a new Driver with the given AWS credentials, region, encryption flag, and -// bucketName -func New(params DriverParameters) (*Driver, error) { - auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) - if err != nil { - return nil, err - } - - if !params.Secure { - params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) - } - - s3obj := s3.New(auth, params.Region) - bucket := s3obj.Bucket(params.Bucket) - - if params.V4Auth { - s3obj.Signature = aws.V4Signature - } else { - if params.Region.Name == "eu-central-1" { - return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication") - } - } - - // Validate that the given credentials have at least read permissions in the - // given bucket scope. - if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { - return nil, err - } - - // TODO Currently multipart uploads have no timestamps, so this would be unwise - // if you initiated a new s3driver while another one is running on the same bucket. - // multis, _, err := bucket.ListMulti("", "") - // if err != nil { - // return nil, err - // } - - // for _, multi := range multis { - // err := multi.Abort() - // //TODO appropriate to do this error checking? 
- // if err != nil { - // return nil, err - // } - // } - - d := &driver{ - S3: s3obj, - Bucket: bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - RootDirectory: params.RootDirectory, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { - content, err := d.Bucket.Get(d.s3Path(path)) - if err != nil { - return nil, parseError(path, err) - } - return content, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(path string, contents []byte) error { - return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { - headers := make(http.Header) - headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") - - resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) - if err != nil { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return nil, parseError(path, err) - } - return resp.Body, nil -} - -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (totalRead int64, err error) { - partNumber := 1 - bytesRead := 0 - var putErrChan chan error - parts := []s3.Part{} - var part s3.Part - - multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) - if err != nil { - return 0, err - } - - buf := make([]byte, d.ChunkSize) - zeroBuf := make([]byte, d.ChunkSize) - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. 
- defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } - } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(path, 0) - if err != nil { - return err - } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - - } - return nil - } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - // parts and partNumber are safe, because this function is the only one modifying them and we - // force it to be executed serially. - if bytesRead > 0 { - part, putErr := multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if putErr != nil { - putErrChan <- putErr - } - - parts = append(parts, part) - partNumber++ - } - putErrChan <- nil - }(bytesRead, from, buf) - - buf = make([]byte, d.ChunkSize) - return nil - } - - if offset > 0 { - resp, err := d.Bucket.Head(d.s3Path(path), nil) - if err != nil { - if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(zeroBuf).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(zeroBuf)) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = 
fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - - } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err - } - - if int64(bytesRead) < d.ChunkSize { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(listResponse.Contents) == 1 { - if listResponse.Contents[0].Key != d.s3Path(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = listResponse.Contents[0].Size - - timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) - if err != nil { - return nil, err - } - fi.ModTime = timestamp - } - } else if len(listResponse.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(path string) ([]string, error) { - if path != "/" && path[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
-	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
-	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
-	prefix := ""
-	if d.s3Path("") == "" {
-		prefix = "/"
-	}
-
-	listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax)
-	if err != nil {
-		return nil, err
-	}
-
-	files := []string{}
-	directories := []string{}
-
-	for {
-		for _, key := range listResponse.Contents {
-			files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1))
-		}
-
-		for _, commonPrefix := range listResponse.CommonPrefixes {
-			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
-		}
-
-		if listResponse.IsTruncated {
-			listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax)
-			if err != nil {
-				return nil, err
-			}
-		} else {
-			break
-		}
-	}
-
-	return append(files, directories...), nil
-}
-
-// Move moves an object stored at sourcePath to destPath, removing the original
-// object.
-func (d *driver) Move(sourcePath string, destPath string) error {
-	/* This is terrible, but aws doesn't have an actual move. */
-	_, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(),
-		s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath))
-	if err != nil {
-		return parseError(sourcePath, err)
-	}
-
-	return d.Delete(sourcePath)
-}
-
-// Delete recursively deletes all objects stored at "path" and its subpaths.
-func (d *driver) Delete(path string) error {
-	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax)
-	if err != nil || len(listResponse.Contents) == 0 {
-		return storagedriver.PathNotFoundError{Path: path}
-	}
-
-	s3Objects := make([]s3.Object, listMax)
-
-	for len(listResponse.Contents) > 0 {
-		for index, key := range listResponse.Contents {
-			s3Objects[index].Key = key.Key
-		}
-
-		err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
-		if err != nil {
-			return err
-		}
-
-		listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// URLFor returns a URL which may be used to retrieve the content stored at the given path.
-// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
-func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) {
-	methodString := "GET"
-	method, ok := options["method"]
-	if ok {
-		methodString, ok = method.(string)
-		if !ok || (methodString != "GET" && methodString != "HEAD") {
-			return "", storagedriver.ErrUnsupportedMethod
-		}
-	}
-
-	expiresTime := time.Now().Add(20 * time.Minute)
-	expires, ok := options["expiry"]
-	if ok {
-		et, ok := expires.(time.Time)
-		if ok {
-			expiresTime = et
-		}
-	}
-
-	return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil
-}
-
-func (d *driver) s3Path(path string) string {
-	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
-}
-
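The URLFor implementation above recognizes exactly two option keys: "method" (GET or HEAD; anything else yields ErrUnsupportedMethod) and "expiry" (a time.Time that overrides the default twenty-minute window). A minimal sketch of how a caller might drive this, written against the generic StorageDriver interface rather than the S3 type; the path is a hypothetical blob key chosen for illustration:

    package example

    import (
        "time"

        storagedriver "github.com/docker/distribution/registry/storage/driver"
    )

    // signedHeadURL asks the driver for a pre-signed HEAD URL that expires in
    // five minutes instead of the default twenty. Drivers without URL support
    // return storagedriver.ErrUnsupportedMethod.
    func signedHeadURL(d storagedriver.StorageDriver, path string) (string, error) {
        return d.URLFor(path, map[string]interface{}{
            "method": "HEAD",
            "expiry": time.Now().Add(5 * time.Minute),
        })
    }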
-// S3BucketKey returns the s3 bucket key for the given storage driver path.
-func (d *Driver) S3BucketKey(path string) string {
-	return d.StorageDriver.(*driver).s3Path(path)
-}
-
-func parseError(path string, err error) error {
-	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
-		return storagedriver.PathNotFoundError{Path: path}
-	}
-
-	return err
-}
-
-func hasCode(err error, code string) bool {
-	s3err, ok := err.(*aws.Error)
-	return ok && s3err.Code == code
-}
-
-func (d *driver) getOptions() s3.Options {
-	return s3.Options{SSE: d.Encrypt}
-}
-
-func getPermissions() s3.ACL {
-	return s3.Private
-}
-
-func (d *driver) getContentType() string {
-	return "application/octet-stream"
-}
-
-blob
-mark :181
-data 3677
-package s3
-
-import (
-	"io/ioutil"
-	"os"
-	"strconv"
-	"testing"
-
-	"github.com/AdRoll/goamz/aws"
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/distribution/registry/storage/driver/testsuites"
-
-	"gopkg.in/check.v1"
-)
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-type S3DriverConstructor func(rootDirectory string) (*Driver, error)
-
-func init() {
-	accessKey := os.Getenv("AWS_ACCESS_KEY")
-	secretKey := os.Getenv("AWS_SECRET_KEY")
-	bucket := os.Getenv("S3_BUCKET")
-	encrypt := os.Getenv("S3_ENCRYPT")
-	secure := os.Getenv("S3_SECURE")
-	v4auth := os.Getenv("S3_USE_V4_AUTH")
-	region := os.Getenv("AWS_REGION")
-	root, err := ioutil.TempDir("", "driver-")
-	if err != nil {
-		panic(err)
-	}
-	defer os.Remove(root)
-
-	s3DriverConstructor := func(rootDirectory string) (*Driver, error) {
-		encryptBool := false
-		if encrypt != "" {
-			encryptBool, err = strconv.ParseBool(encrypt)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		secureBool := true
-		if secure != "" {
-			secureBool, err = strconv.ParseBool(secure)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		v4AuthBool := false
-		if v4auth != "" {
-			v4AuthBool, err = strconv.ParseBool(v4auth)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		parameters := DriverParameters{
-			accessKey,
-			secretKey,
-			bucket,
-			aws.GetRegion(region),
-			encryptBool,
-			secureBool,
-			v4AuthBool,
-			minChunkSize,
-			rootDirectory,
-		}
-
-		return New(parameters)
-	}
-
-	// Skip S3 storage driver tests if environment variable parameters are not provided
-	skipCheck := func() string {
-		if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" {
-			return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests"
-		}
-		return ""
-	}
-
-	driverConstructor := func() (storagedriver.StorageDriver, error) {
-		return s3DriverConstructor(root)
-	}
-
-	testsuites.RegisterInProcessSuite(driverConstructor, skipCheck)
-
-	// s3Constructor := func() (*Driver, error) {
-	// 	return s3DriverConstructor(aws.GetRegion(region))
-	// }
-
-	RegisterS3DriverSuite(s3DriverConstructor, skipCheck)
-
-	// testsuites.RegisterIPCSuite(driverName, map[string]string{
-	// 	"accesskey": accessKey,
-	// 	"secretkey": secretKey,
-	// 	"region":    region.Name,
-	// 	"bucket":    bucket,
-	// 	"encrypt":   encrypt,
-	// }, skipCheck)
-	// }
-}
-
-func RegisterS3DriverSuite(s3DriverConstructor S3DriverConstructor, skipCheck testsuites.SkipCheck) {
-	check.Suite(&S3DriverSuite{
-		Constructor: s3DriverConstructor,
-		SkipCheck:   skipCheck,
-	})
-}
-
-type S3DriverSuite struct {
-	Constructor S3DriverConstructor
-	testsuites.SkipCheck
-}
-
-func (suite *S3DriverSuite) SetUpSuite(c *check.C) {
-	if reason := suite.SkipCheck(); reason != "" {
-		c.Skip(reason)
-	}
-}
-
-func (suite *S3DriverSuite) TestEmptyRootList(c *check.C) {
-	validRoot, err := ioutil.TempDir("", "driver-")
-	c.Assert(err, check.IsNil)
-	defer os.Remove(validRoot)
-
-	rootedDriver, err := suite.Constructor(validRoot)
-	c.Assert(err, check.IsNil)
-	emptyRootDriver, err := suite.Constructor("")
-	c.Assert(err, check.IsNil)
-	slashRootDriver, err := suite.Constructor("/")
-	c.Assert(err, check.IsNil)
-
-	filename := "/test"
-	contents := []byte("contents")
-	err = rootedDriver.PutContent(filename, contents)
-	c.Assert(err, check.IsNil)
-	defer rootedDriver.Delete(filename)
-
-	keys, err := emptyRootDriver.List("/")
-	c.Assert(err, check.IsNil)
-	for _, path := range keys {
-		c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true)
-	}
-
-	keys, err = slashRootDriver.List("/")
-	c.Assert(err, check.IsNil)
-	for _, path := range keys {
-		c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true)
-	}
-}
-
-blob
-mark :182
-data 4176
-package driver
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-// Version is a string representing the storage driver version, of the form
-// Major.Minor.
-// The registry must accept storage drivers with equal major version and greater
-// minor version, but may not be compatible with older storage driver versions.
-type Version string
-
-// Major returns the major (primary) component of a version.
-func (version Version) Major() uint {
-	majorPart := strings.Split(string(version), ".")[0]
-	major, _ := strconv.ParseUint(majorPart, 10, 0)
-	return uint(major)
-}
-
-// Minor returns the minor (secondary) component of a version.
-func (version Version) Minor() uint {
-	minorPart := strings.Split(string(version), ".")[1]
-	minor, _ := strconv.ParseUint(minorPart, 10, 0)
-	return uint(minor)
-}
-
-// CurrentVersion is the current storage driver Version.
-const CurrentVersion Version = "0.1"
-
-// StorageDriver defines methods that a Storage Driver must implement for a
-// filesystem-like key/value object storage.
-type StorageDriver interface {
-	// GetContent retrieves the content stored at "path" as a []byte.
-	// This should primarily be used for small objects.
-	GetContent(path string) ([]byte, error)
-
-	// PutContent stores the []byte content at a location designated by "path".
-	// This should primarily be used for small objects.
-	PutContent(path string, content []byte) error
-
-	// ReadStream retrieves an io.ReadCloser for the content stored at "path"
-	// with a given byte offset.
-	// May be used to resume reading a stream by providing a nonzero offset.
-	ReadStream(path string, offset int64) (io.ReadCloser, error)
-
-	// WriteStream stores the contents of the provided io.ReadCloser at a
-	// location designated by the given path.
-	// May be used to resume writing a stream by providing a nonzero offset.
-	// The offset must be no larger than the CurrentSize for this path.
-	WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error)
-
-	// Stat retrieves the FileInfo for the given path, including the current
-	// size in bytes and the creation time.
-	Stat(path string) (FileInfo, error)
-
-	// List returns a list of the objects that are direct descendants of the
-	// given path.
-	List(path string) ([]string, error)
-
-	// Move moves an object stored at sourcePath to destPath, removing the
-	// original object.
-	// Note: This may be no more efficient than a copy followed by a delete for
-	// many implementations.
-	Move(sourcePath string, destPath string) error
-
-	// Delete recursively deletes all objects stored at "path" and its subpaths.
-	Delete(path string) error
-
-	// URLFor returns a URL which may be used to retrieve the content stored at
-	// the given path, possibly using the given options.
-	// May return an ErrUnsupportedMethod in certain StorageDriver
-	// implementations.
-	URLFor(path string, options map[string]interface{}) (string, error)
-}
-
-// PathRegexp is the regular expression which each file path must match. A
-// file path is absolute, beginning with a slash and containing a positive
-// number of path components separated by slashes, where each component is
-// restricted to alphanumeric characters or a period, underscore, or
-// hyphen.
-var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)
-
-// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method.
-var ErrUnsupportedMethod = errors.New("unsupported method")
-
-// PathNotFoundError is returned when operating on a nonexistent path.
-type PathNotFoundError struct {
-	Path string
-}
-
-func (err PathNotFoundError) Error() string {
-	return fmt.Sprintf("Path not found: %s", err.Path)
-}
-
-// InvalidPathError is returned when the provided path is malformed.
-type InvalidPathError struct {
-	Path string
-}
-
-func (err InvalidPathError) Error() string {
-	return fmt.Sprintf("Invalid path: %s", err.Path)
-}
-
-// InvalidOffsetError is returned when attempting to read or write from an
-// invalid offset.
-type InvalidOffsetError struct {
-	Path   string
-	Offset int64
-}
-
-func (err InvalidOffsetError) Error() string {
-	return fmt.Sprintf("Invalid offset: %d for path: %s", err.Offset, err.Path)
-}
-
-blob
-mark :183
-data 37707
-package testsuites
-
-import (
-	"bytes"
-	"crypto/sha1"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"net/http"
-	"os"
-	"path"
-	"sort"
-	"sync"
-	"testing"
-	"time"
-
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"gopkg.in/check.v1"
-)
-
-// Test hooks up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-// RegisterInProcessSuite registers an in-process storage driver test suite with
-// the go test runner.
-func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) {
-	check.Suite(&DriverSuite{
-		Constructor: driverConstructor,
-		SkipCheck:   skipCheck,
-	})
-}
-
-// RegisterIPCSuite registers a storage driver test suite which runs the named
-// driver as a child process with the given parameters.
-func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) {
-	panic("ipc testing is disabled for now")
-
-	// NOTE(stevvooe): IPC testing is disabled for now. Uncomment the code
-	// block below and remove the panic when we phase it back in.
-
-	// suite := &DriverSuite{
-	// 	Constructor: func() (storagedriver.StorageDriver, error) {
-	// 		d, err := ipc.NewDriverClient(driverName, ipcParams)
-	// 		if err != nil {
-	// 			return nil, err
-	// 		}
-	// 		err = d.Start()
-	// 		if err != nil {
-	// 			return nil, err
-	// 		}
-	// 		return d, nil
-	// 	},
-	// 	SkipCheck: skipCheck,
-	// }
-	// suite.Teardown = func() error {
-	// 	if suite.StorageDriver == nil {
-	// 		return nil
-	// 	}
-
-	// 	driverClient := suite.StorageDriver.(*ipc.StorageDriverClient)
-	// 	return driverClient.Stop()
-	// }
-	// check.Suite(suite)
-}
-
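PathRegexp above is the path contract that TestValidPaths and TestInvalidPaths below exercise. A quick standalone check of the pattern against a few of those same paths; the expression is copied verbatim so the snippet compiles on its own:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Copied from the storagedriver package above.
    var pathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)

    func main() {
        for _, p := range []string{"/a", "/abc/1.2.3.4.5-6_zyx/123.z/4", "abc", "//bcd", "/abc_123/"} {
            fmt.Printf("%q -> %v\n", p, pathRegexp.MatchString(p))
        }
        // The first two match; the relative path, the empty component, and
        // the trailing slash do not.
    }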
-// SkipCheck is a function used to determine if a test suite should be skipped.
-// If a SkipCheck returns a non-empty skip reason, the suite is skipped with
-// the given reason.
-type SkipCheck func() (reason string)
-
-// NeverSkip is a default SkipCheck which never skips the suite.
-var NeverSkip SkipCheck = func() string { return "" }
-
-// DriverConstructor is a function which returns a new
-// storagedriver.StorageDriver.
-type DriverConstructor func() (storagedriver.StorageDriver, error)
-
-// DriverTeardown is a function which cleans up a suite's
-// storagedriver.StorageDriver.
-type DriverTeardown func() error
-
-// DriverSuite is a gocheck test suite designed to test a
-// storagedriver.StorageDriver.
-// The intended way to create a DriverSuite is with RegisterInProcessSuite or
-// RegisterIPCSuite.
-type DriverSuite struct {
-	Constructor DriverConstructor
-	Teardown    DriverTeardown
-	SkipCheck
-	storagedriver.StorageDriver
-}
-
-// SetUpSuite sets up the gocheck test suite.
-func (suite *DriverSuite) SetUpSuite(c *check.C) {
-	if reason := suite.SkipCheck(); reason != "" {
-		c.Skip(reason)
-	}
-	d, err := suite.Constructor()
-	c.Assert(err, check.IsNil)
-	suite.StorageDriver = d
-}
-
-// TearDownSuite tears down the gocheck test suite.
-func (suite *DriverSuite) TearDownSuite(c *check.C) {
-	if suite.Teardown != nil {
-		err := suite.Teardown()
-		c.Assert(err, check.IsNil)
-	}
-}
-
-// TearDownTest tears down the gocheck test.
-// This causes the suite to abort if any files are left around in the storage
-// driver.
-func (suite *DriverSuite) TearDownTest(c *check.C) {
-	files, _ := suite.StorageDriver.List("/")
-	if len(files) > 0 {
-		c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files)
-	}
-}
-
-// TestValidPaths checks that various valid file paths are accepted by the
-// storage driver.
-func (suite *DriverSuite) TestValidPaths(c *check.C) {
-	contents := randomContents(64)
-	validFiles := []string{
-		"/a",
-		"/2",
-		"/aa",
-		"/a.a",
-		"/0-9/abcdefg",
-		"/abcdefg/z.75",
-		"/abc/1.2.3.4.5-6_zyx/123.z/4",
-		"/docker/docker-registry",
-		"/123.abc",
-		"/abc./abc",
-		"/.abc",
-		"/a--b",
-		"/a-.b",
-		"/_.abc",
-		"/Docker/docker-registry",
-		"/Abc/Cba"}
-
-	for _, filename := range validFiles {
-		err := suite.StorageDriver.PutContent(filename, contents)
-		defer suite.StorageDriver.Delete(firstPart(filename))
-		c.Assert(err, check.IsNil)
-
-		received, err := suite.StorageDriver.GetContent(filename)
-		c.Assert(err, check.IsNil)
-		c.Assert(received, check.DeepEquals, contents)
-	}
-}
-
-// TestInvalidPaths checks that various invalid file paths are rejected by the
-// storage driver.
-func (suite *DriverSuite) TestInvalidPaths(c *check.C) {
-	contents := randomContents(64)
-	invalidFiles := []string{
-		"",
-		"/",
-		"abc",
-		"123.abc",
-		"//bcd",
-		"/abc_123/"}
-
-	for _, filename := range invalidFiles {
-		err := suite.StorageDriver.PutContent(filename, contents)
-		defer suite.StorageDriver.Delete(firstPart(filename))
-		c.Assert(err, check.NotNil)
-		c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{})
-
-		_, err = suite.StorageDriver.GetContent(filename)
-		c.Assert(err, check.NotNil)
-		c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{})
-	}
-}
-
-// TestWriteRead1 tests a simple write-read workflow.
-func (suite *DriverSuite) TestWriteRead1(c *check.C) {
-	filename := randomPath(32)
-	contents := []byte("a")
-	suite.writeReadCompare(c, filename, contents)
-}
-
-// TestWriteRead2 tests a simple write-read workflow with unicode data.
-func (suite *DriverSuite) TestWriteRead2(c *check.C) {
-	filename := randomPath(32)
-	contents := []byte("\xc3\x9f")
-	suite.writeReadCompare(c, filename, contents)
-}
-
-// TestWriteRead3 tests a simple write-read workflow with a small string.
-func (suite *DriverSuite) TestWriteRead3(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(32)
-	suite.writeReadCompare(c, filename, contents)
-}
-
-// TestWriteRead4 tests a simple write-read workflow with 1MB of data.
-func (suite *DriverSuite) TestWriteRead4(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(1024 * 1024)
-	suite.writeReadCompare(c, filename, contents)
-}
-
-// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage
-// driver safely.
-func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) {
-	filename := randomPath(32)
-	contents := []byte{0x80, 0x80, 0x80, 0x80}
-	suite.writeReadCompare(c, filename, contents)
-}
-
-// TestTruncate tests that putting smaller contents than an original file does
-// remove the excess contents.
-func (suite *DriverSuite) TestTruncate(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(1024 * 1024)
-	suite.writeReadCompare(c, filename, contents)
-
-	contents = randomContents(1024)
-	suite.writeReadCompare(c, filename, contents)
-}
-
-// TestReadNonexistent tests reading content from an empty path.
-func (suite *DriverSuite) TestReadNonexistent(c *check.C) {
-	filename := randomPath(32)
-	_, err := suite.StorageDriver.GetContent(filename)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-}
-
-// TestWriteReadStreams1 tests a simple write-read streaming workflow.
-func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) {
-	filename := randomPath(32)
-	contents := []byte("a")
-	suite.writeReadCompareStreams(c, filename, contents)
-}
-
-// TestWriteReadStreams2 tests a simple write-read streaming workflow with
-// unicode data.
-func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) {
-	filename := randomPath(32)
-	contents := []byte("\xc3\x9f")
-	suite.writeReadCompareStreams(c, filename, contents)
-}
-
-// TestWriteReadStreams3 tests a simple write-read streaming workflow with a
-// small amount of data.
-func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(32)
-	suite.writeReadCompareStreams(c, filename, contents)
-}
-
-// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB
-// of data.
-func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(1024 * 1024)
-	suite.writeReadCompareStreams(c, filename, contents)
-}
-
-// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the
-// storage driver safely.
-func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) {
-	filename := randomPath(32)
-	contents := []byte{0x80, 0x80, 0x80, 0x80}
-	suite.writeReadCompareStreams(c, filename, contents)
-}
-
-// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage
-// driver safely.
-func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
-	if testing.Short() {
-		c.Skip("Skipping test in short mode")
-	}
-
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	checksum := sha1.New()
-	var fileSize int64 = 5 * 1024 * 1024 * 1024
-
-	contents := newRandReader(fileSize)
-	written, err := suite.StorageDriver.WriteStream(filename, 0, io.TeeReader(contents, checksum))
-	c.Assert(err, check.IsNil)
-	c.Assert(written, check.Equals, fileSize)
-
-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
-	c.Assert(err, check.IsNil)
-
-	writtenChecksum := sha1.New()
-	io.Copy(writtenChecksum, reader)
-
-	c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil))
-}
-
-// TestReadStreamWithOffset tests that the appropriate data is streamed when
-// reading with a given offset.
-func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	chunkSize := int64(32)
-
-	contentsChunk1 := randomContents(chunkSize)
-	contentsChunk2 := randomContents(chunkSize)
-	contentsChunk3 := randomContents(chunkSize)
-
-	err := suite.StorageDriver.PutContent(filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
-	c.Assert(err, check.IsNil)
-
-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	readContents, err := ioutil.ReadAll(reader)
-	c.Assert(err, check.IsNil)
-
-	c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
-
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	readContents, err = ioutil.ReadAll(reader)
-	c.Assert(err, check.IsNil)
-
-	c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...))
-
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*2)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	readContents, err = ioutil.ReadAll(reader)
-	c.Assert(err, check.IsNil)
-	c.Assert(readContents, check.DeepEquals, contentsChunk3)
-
-	// Ensure we get an invalid offset error for negative offsets.
-	reader, err = suite.StorageDriver.ReadStream(filename, -1)
-	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
-	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
-	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
-	c.Assert(reader, check.IsNil)
-
-	// Read past the end of the content and make sure we get a reader that
-	// returns 0 bytes and io.EOF
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	buf := make([]byte, chunkSize)
-	n, err := reader.Read(buf)
-	c.Assert(err, check.Equals, io.EOF)
-	c.Assert(n, check.Equals, 0)
-
-	// Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3-1)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	n, err = reader.Read(buf)
-	c.Assert(n, check.Equals, 1)
-
-	// We don't care whether the io.EOF comes on this read or the first
-	// zero read, but the only error acceptable here is io.EOF.
-	if err != nil {
-		c.Assert(err, check.Equals, io.EOF)
-	}
-
-	// Any more reads should result in zero bytes and io.EOF
-	n, err = reader.Read(buf)
-	c.Assert(n, check.Equals, 0)
-	c.Assert(err, check.Equals, io.EOF)
-}
-
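The offset semantics pinned down above boil down to three cases: negative offsets are rejected with InvalidOffsetError, offsets inside the file resume mid-stream, and offsets at or past the end yield a reader that immediately returns io.EOF. A compact sketch of the first two against the in-process inmemory driver that ships in this tree (assuming it enforces the same contract, which this suite checks):

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        d := inmemory.New()
        if err := d.PutContent("/demo", []byte("0123456789")); err != nil {
            panic(err)
        }

        // Resume from byte 4: expect "456789".
        rc, err := d.ReadStream("/demo", 4)
        if err != nil {
            panic(err)
        }
        defer rc.Close()
        tail, _ := ioutil.ReadAll(rc)
        fmt.Printf("%s\n", tail)

        // Negative offsets are rejected outright with an InvalidOffsetError.
        if _, err := d.ReadStream("/demo", -1); err != nil {
            fmt.Println(err) // Invalid offset: -1 for path: /demo
        }
    }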
-// TestContinueStreamAppendLarge tests that a stream write can be appended to without
-// corrupting the data with a large chunk size.
-func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) {
-	suite.testContinueStreamAppend(c, int64(10*1024*1024))
-}
-
-// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only
-// with a tiny chunk size in order to test corner cases for some cloud storage drivers.
-func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) {
-	suite.testContinueStreamAppend(c, int64(32))
-}
-
-func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) {
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	contentsChunk1 := randomContents(chunkSize)
-	contentsChunk2 := randomContents(chunkSize)
-	contentsChunk3 := randomContents(chunkSize)
-	contentsChunk4 := randomContents(chunkSize)
-	zeroChunk := make([]byte, int64(chunkSize))
-
-	fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)
-
-	nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contentsChunk1))
-	c.Assert(err, check.IsNil)
-	c.Assert(nn, check.Equals, int64(len(contentsChunk1)))
-
-	fi, err := suite.StorageDriver.Stat(filename)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1)))
-
-	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(contentsChunk2))
-	c.Assert(err, check.IsNil)
-	c.Assert(nn, check.Equals, int64(len(contentsChunk2)))
-
-	fi, err = suite.StorageDriver.Stat(filename)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	c.Assert(fi.Size(), check.Equals, 2*chunkSize)
-
-	// Test re-writing the last chunk
-	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2))
-	c.Assert(err, check.IsNil)
-	c.Assert(nn, check.Equals, int64(len(contentsChunk2)))
-
-	fi, err = suite.StorageDriver.Stat(filename)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	c.Assert(fi.Size(), check.Equals, 2*chunkSize)
-
-	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():]))
-	c.Assert(err, check.IsNil)
-	c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():])))
-
-	received, err := suite.StorageDriver.GetContent(filename)
-	c.Assert(err, check.IsNil)
-	c.Assert(received, check.DeepEquals, fullContents)
-
-	// Writing past size of file extends file (no offset error). We would like
-	// to write chunk 4 one chunk length past chunk 3. It should be successful
-	// and the resulting file will be 5 chunks long, with a chunk of all
-	// zeros.
-
-	fullContents = append(fullContents, zeroChunk...)
-	fullContents = append(fullContents, contentsChunk4...)
-
-	nn, err = suite.StorageDriver.WriteStream(filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4))
-	c.Assert(err, check.IsNil)
-	c.Assert(nn, check.Equals, chunkSize)
-
-	fi, err = suite.StorageDriver.Stat(filename)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	c.Assert(fi.Size(), check.Equals, int64(len(fullContents)))
-
-	received, err = suite.StorageDriver.GetContent(filename)
-	c.Assert(err, check.IsNil)
-	c.Assert(len(received), check.Equals, len(fullContents))
-	c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk)
-	c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4)
-	c.Assert(received, check.DeepEquals, fullContents)
-
-	// Ensure that negative offsets return correct error.
-	nn, err = suite.StorageDriver.WriteStream(filename, -1, bytes.NewReader(zeroChunk))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
-	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
-	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
-}
-
-// TestReadNonexistentStream tests that reading a stream for a nonexistent path
-// fails.
-func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) {
-	filename := randomPath(32)
-
-	_, err := suite.StorageDriver.ReadStream(filename, 0)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-
-	_, err = suite.StorageDriver.ReadStream(filename, 64)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-}
-
-// TestList checks the returned list of keys after populating a directory tree.
-func (suite *DriverSuite) TestList(c *check.C) {
-	rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8)))
-	defer suite.StorageDriver.Delete(rootDirectory)
-
-	parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8)))
-	childFiles := make([]string, 50)
-	for i := 0; i < len(childFiles); i++ {
-		childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8)))
-		childFiles[i] = childFile
-		err := suite.StorageDriver.PutContent(childFile, randomContents(32))
-		c.Assert(err, check.IsNil)
-	}
-	sort.Strings(childFiles)
-
-	keys, err := suite.StorageDriver.List("/")
-	c.Assert(err, check.IsNil)
-	c.Assert(keys, check.DeepEquals, []string{rootDirectory})
-
-	keys, err = suite.StorageDriver.List(rootDirectory)
-	c.Assert(err, check.IsNil)
-	c.Assert(keys, check.DeepEquals, []string{parentDirectory})
-
-	keys, err = suite.StorageDriver.List(parentDirectory)
-	c.Assert(err, check.IsNil)
-
-	sort.Strings(keys)
-	c.Assert(keys, check.DeepEquals, childFiles)
-
-	// A few checks to add here (check out #819 for more discussion on this):
-	// 1. Ensure that all paths are absolute.
-	// 2. Ensure that listings only include direct children.
-	// 3. Ensure that we only respond to directory listings that end with a slash (maybe?).
-}
-
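testContinueStreamAppend above encodes the append contract for WriteStream: writes may resume at the current size, re-writing a tail is allowed, and a write past the end implicitly zero-fills the gap instead of failing. Sketched against the inmemory driver, assuming the same zero-fill behavior the suite enforces:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        d := inmemory.New()

        // Write 4 bytes, then 4 more starting at offset 8: bytes 4-7 become zeros.
        if _, err := d.WriteStream("/demo", 0, bytes.NewReader([]byte("AAAA"))); err != nil {
            panic(err)
        }
        if _, err := d.WriteStream("/demo", 8, bytes.NewReader([]byte("BBBB"))); err != nil {
            panic(err)
        }

        fi, err := d.Stat("/demo")
        if err != nil {
            panic(err)
        }
        fmt.Println(fi.Size()) // 12

        content, _ := d.GetContent("/demo")
        fmt.Printf("%q\n", content) // "AAAA\x00\x00\x00\x00BBBB"
    }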
-// TestMove checks that a moved object no longer exists at the source path and
-// does exist at the destination.
-func (suite *DriverSuite) TestMove(c *check.C) {
-	contents := randomContents(32)
-	sourcePath := randomPath(32)
-	destPath := randomPath(32)
-
-	defer suite.StorageDriver.Delete(firstPart(sourcePath))
-	defer suite.StorageDriver.Delete(firstPart(destPath))
-
-	err := suite.StorageDriver.PutContent(sourcePath, contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.Move(sourcePath, destPath)
-	c.Assert(err, check.IsNil)
-
-	received, err := suite.StorageDriver.GetContent(destPath)
-	c.Assert(err, check.IsNil)
-	c.Assert(received, check.DeepEquals, contents)
-
-	_, err = suite.StorageDriver.GetContent(sourcePath)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-}
-
-// TestMoveOverwrite checks that a moved object no longer exists at the source
-// path and overwrites the contents at the destination.
-func (suite *DriverSuite) TestMoveOverwrite(c *check.C) {
-	sourcePath := randomPath(32)
-	destPath := randomPath(32)
-	sourceContents := randomContents(32)
-	destContents := randomContents(64)
-
-	defer suite.StorageDriver.Delete(firstPart(sourcePath))
-	defer suite.StorageDriver.Delete(firstPart(destPath))
-
-	err := suite.StorageDriver.PutContent(sourcePath, sourceContents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.PutContent(destPath, destContents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.Move(sourcePath, destPath)
-	c.Assert(err, check.IsNil)
-
-	received, err := suite.StorageDriver.GetContent(destPath)
-	c.Assert(err, check.IsNil)
-	c.Assert(received, check.DeepEquals, sourceContents)
-
-	_, err = suite.StorageDriver.GetContent(sourcePath)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-}
-
-// TestMoveNonexistent checks that moving a nonexistent key fails and does not
-// delete the data at the destination path.
-func (suite *DriverSuite) TestMoveNonexistent(c *check.C) {
-	contents := randomContents(32)
-	sourcePath := randomPath(32)
-	destPath := randomPath(32)
-
-	defer suite.StorageDriver.Delete(firstPart(destPath))
-
-	err := suite.StorageDriver.PutContent(destPath, contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.Move(sourcePath, destPath)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-
-	received, err := suite.StorageDriver.GetContent(destPath)
-	c.Assert(err, check.IsNil)
-	c.Assert(received, check.DeepEquals, contents)
-}
-
-// TestMoveInvalid provides various checks for invalid moves.
-func (suite *DriverSuite) TestMoveInvalid(c *check.C) {
-	contents := randomContents(32)
-
-	// Create a regular file.
-	err := suite.StorageDriver.PutContent("/notadir", contents)
-	c.Assert(err, check.IsNil)
-	defer suite.StorageDriver.Delete("/notadir")
-
-	// Now try to move a non-existent file under it.
-	err = suite.StorageDriver.Move("/notadir/foo", "/notadir/bar")
-	c.Assert(err, check.NotNil) // non-nil error
-}
-
-// TestDelete checks that the delete operation removes data from the storage
-// driver.
-func (suite *DriverSuite) TestDelete(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(32)
-
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	err := suite.StorageDriver.PutContent(filename, contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.Delete(filename)
-	c.Assert(err, check.IsNil)
-
-	_, err = suite.StorageDriver.GetContent(filename)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-}
-
-// TestURLFor checks that the URLFor method functions properly, but only if it
-// is implemented.
-func (suite *DriverSuite) TestURLFor(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(32)
-
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	err := suite.StorageDriver.PutContent(filename, contents)
-	c.Assert(err, check.IsNil)
-
-	url, err := suite.StorageDriver.URLFor(filename, nil)
-	if err == storagedriver.ErrUnsupportedMethod {
-		return
-	}
-	c.Assert(err, check.IsNil)
-
-	response, err := http.Get(url)
-	c.Assert(err, check.IsNil)
-	defer response.Body.Close()
-
-	read, err := ioutil.ReadAll(response.Body)
-	c.Assert(err, check.IsNil)
-	c.Assert(read, check.DeepEquals, contents)
-
-	url, err = suite.StorageDriver.URLFor(filename, map[string]interface{}{"method": "HEAD"})
-	if err == storagedriver.ErrUnsupportedMethod {
-		return
-	}
-	c.Assert(err, check.IsNil)
-
-	response, err = http.Head(url)
-	c.Assert(err, check.IsNil)
-	c.Assert(response.StatusCode, check.Equals, 200)
-	c.Assert(response.ContentLength, check.Equals, int64(32))
-}
-
-// TestDeleteNonexistent checks that removing a nonexistent key fails.
-func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) {
-	filename := randomPath(32)
-	err := suite.StorageDriver.Delete(filename)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-}
-
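Delete is recursive by contract: removing a path removes every object below it, and deleting a nonexistent key must fail with PathNotFoundError, as TestDeleteFolder below verifies in detail. A small illustration of those semantics, again using the inmemory driver:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        d := inmemory.New()
        for _, p := range []string{"/dir/a", "/dir/b", "/dir/sub/c"} {
            if err := d.PutContent(p, []byte("x")); err != nil {
                panic(err)
            }
        }

        // Removing the parent removes every descendant in one call.
        if err := d.Delete("/dir"); err != nil {
            panic(err)
        }

        if _, err := d.GetContent("/dir/sub/c"); err != nil {
            fmt.Println(err) // Path not found: /dir/sub/c
        }
    }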
-// TestDeleteFolder checks that deleting a folder removes all child elements.
-func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
-	dirname := randomPath(32)
-	filename1 := randomPath(32)
-	filename2 := randomPath(32)
-	filename3 := randomPath(32)
-	contents := randomContents(32)
-
-	defer suite.StorageDriver.Delete(firstPart(dirname))
-
-	err := suite.StorageDriver.PutContent(path.Join(dirname, filename1), contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.PutContent(path.Join(dirname, filename2), contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.PutContent(path.Join(dirname, filename3), contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.Delete(path.Join(dirname, filename1))
-	c.Assert(err, check.IsNil)
-
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2))
-	c.Assert(err, check.IsNil)
-
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3))
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.Delete(dirname)
-	c.Assert(err, check.IsNil)
-
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-}
-
-// TestStatCall verifies the implementation of the storagedriver's Stat call.
-func (suite *DriverSuite) TestStatCall(c *check.C) {
-	content := randomContents(4096)
-	dirPath := randomPath(32)
-	fileName := randomFilename(32)
-	filePath := path.Join(dirPath, fileName)
-
-	defer suite.StorageDriver.Delete(firstPart(dirPath))
-
-	// Call on non-existent file/dir, check error.
-	fi, err := suite.StorageDriver.Stat(dirPath)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-	c.Assert(fi, check.IsNil)
-
-	fi, err = suite.StorageDriver.Stat(filePath)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-	c.Assert(fi, check.IsNil)
-
-	err = suite.StorageDriver.PutContent(filePath, content)
-	c.Assert(err, check.IsNil)
-
-	// Call on regular file, check results
-	fi, err = suite.StorageDriver.Stat(filePath)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	c.Assert(fi.Path(), check.Equals, filePath)
-	c.Assert(fi.Size(), check.Equals, int64(len(content)))
-	c.Assert(fi.IsDir(), check.Equals, false)
-	createdTime := fi.ModTime()
-
-	// Sleep and modify the file
-	time.Sleep(time.Second * 10)
-	content = randomContents(4096)
-	err = suite.StorageDriver.PutContent(filePath, content)
-	c.Assert(err, check.IsNil)
-	fi, err = suite.StorageDriver.Stat(filePath)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency)
-
-	// Check if the modification time is after the creation time.
-	// In case of cloud storage services, storage frontend nodes might have
-	// time drift between them, however that should be solved with sleeping
-	// before update.
-	modTime := fi.ModTime()
-	if !modTime.After(createdTime) {
-		c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime)
-	}
-
-	// Call on directory (do not check ModTime as dirs don't need to support it)
-	fi, err = suite.StorageDriver.Stat(dirPath)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	c.Assert(fi.Path(), check.Equals, dirPath)
-	c.Assert(fi.Size(), check.Equals, int64(0))
-	c.Assert(fi.IsDir(), check.Equals, true)
-}
-
-// TestPutContentMultipleTimes checks that the storage driver can overwrite
-// content in subsequent puts. Validates that PutContent does not have to work
-// with an offset like WriteStream does and overwrites the file entirely
-// rather than writing the data to the [0,len(data)) of the file.
-func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(4096)
-
-	defer suite.StorageDriver.Delete(firstPart(filename))
-	err := suite.StorageDriver.PutContent(filename, contents)
-	c.Assert(err, check.IsNil)
-
-	contents = randomContents(2048) // upload a different, smaller file
-	err = suite.StorageDriver.PutContent(filename, contents)
-	c.Assert(err, check.IsNil)
-
-	readContents, err := suite.StorageDriver.GetContent(filename)
-	c.Assert(err, check.IsNil)
-	c.Assert(readContents, check.DeepEquals, contents)
-}
-
-// TestConcurrentStreamReads checks that multiple clients can safely read from
-// the same file simultaneously with various offsets.
-func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) {
-	var filesize int64 = 128 * 1024 * 1024
-
-	if testing.Short() {
-		filesize = 10 * 1024 * 1024
-		c.Log("Reducing file size to 10MB for short mode")
-	}
-
-	filename := randomPath(32)
-	contents := randomContents(filesize)
-
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	err := suite.StorageDriver.PutContent(filename, contents)
-	c.Assert(err, check.IsNil)
-
-	var wg sync.WaitGroup
-
-	readContents := func() {
-		defer wg.Done()
-		offset := rand.Int63n(int64(len(contents)))
-		reader, err := suite.StorageDriver.ReadStream(filename, offset)
-		c.Assert(err, check.IsNil)
-
-		readContents, err := ioutil.ReadAll(reader)
-		c.Assert(err, check.IsNil)
-		c.Assert(readContents, check.DeepEquals, contents[offset:])
-	}
-
-	wg.Add(10)
-	for i := 0; i < 10; i++ {
-		go readContents()
-	}
-	wg.Wait()
-}
-
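TestStatCall above pins down what Stat must report: for a file, Path, Size, and ModTime with IsDir false; for a directory, IsDir true with size zero (directory ModTime is deliberately not inspected, since drivers need not support it); and for a missing path, PathNotFoundError. The same contract in sketch form, against the inmemory driver:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        d := inmemory.New()
        if err := d.PutContent("/dir/file", make([]byte, 4096)); err != nil {
            panic(err)
        }

        fi, _ := d.Stat("/dir/file")
        fmt.Println(fi.Size(), fi.IsDir()) // 4096 false

        di, _ := d.Stat("/dir")
        fmt.Println(di.Size(), di.IsDir()) // 0 true

        _, err := d.Stat("/missing")
        fmt.Println(err) // Path not found: /missing
    }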
-// TestConcurrentFileStreams checks that multiple *os.File objects can be passed
-// in to WriteStream concurrently without hanging.
-func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) {
-	// if _, isIPC := suite.StorageDriver.(*ipc.StorageDriverClient); isIPC {
-	// 	c.Skip("Need to fix out-of-process concurrency")
-	// }
-
-	numStreams := 32
-
-	if testing.Short() {
-		numStreams = 8
-		c.Log("Reducing number of streams to 8 for short mode")
-	}
-
-	var wg sync.WaitGroup
-
-	testStream := func(size int64) {
-		defer wg.Done()
-		suite.testFileStreams(c, size)
-	}
-
-	wg.Add(numStreams)
-	for i := numStreams; i > 0; i-- {
-		go testStream(int64(numStreams) * 1024 * 1024)
-	}
-
-	wg.Wait()
-}
-
-// TestEventualConsistency checks that if stat says that a file is a certain size, then
-// you can freely read from the file (this is the only guarantee that the driver needs to provide)
-func (suite *DriverSuite) TestEventualConsistency(c *check.C) {
-	if testing.Short() {
-		c.Skip("Skipping test in short mode")
-	}
-
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	var offset int64
-	var misswrites int
-	var chunkSize int64 = 32
-
-	for i := 0; i < 1024; i++ {
-		contents := randomContents(chunkSize)
-		read, err := suite.StorageDriver.WriteStream(filename, offset, bytes.NewReader(contents))
-		c.Assert(err, check.IsNil)
-
-		fi, err := suite.StorageDriver.Stat(filename)
-		c.Assert(err, check.IsNil)
-
-		// We are most concerned with being able to read data as soon as Stat declares
-		// it is uploaded. This is the strongest guarantee that some drivers (that guarantee
-		// at best eventual consistency) absolutely need to provide.
-		if fi.Size() == offset+chunkSize {
-			reader, err := suite.StorageDriver.ReadStream(filename, offset)
-			c.Assert(err, check.IsNil)
-
-			readContents, err := ioutil.ReadAll(reader)
-			c.Assert(err, check.IsNil)
-
-			c.Assert(readContents, check.DeepEquals, contents)
-
-			reader.Close()
-			offset += read
-		} else {
-			misswrites++
-		}
-	}
-
-	if misswrites > 0 {
-		c.Logf("There were %d occurrences of a write not being instantly available.", misswrites)
-	}
-
-	c.Assert(misswrites, check.Not(check.Equals), 1024)
-}
-
-// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files
-func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) {
-	suite.benchmarkPutGetFiles(c, 0)
-}
-
-// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files
-func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) {
-	suite.benchmarkPutGetFiles(c, 1024)
-}
-
-// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files
-func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) {
-	suite.benchmarkPutGetFiles(c, 1024*1024)
-}
-
-// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files
-func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) {
-	suite.benchmarkPutGetFiles(c, 1024*1024*1024)
-}
-
-func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) {
-	c.SetBytes(size)
-	parentDir := randomPath(8)
-	defer func() {
-		c.StopTimer()
-		suite.StorageDriver.Delete(firstPart(parentDir))
-	}()
-
-	for i := 0; i < c.N; i++ {
-		filename := path.Join(parentDir, randomPath(32))
-		err := suite.StorageDriver.PutContent(filename, randomContents(size))
-		c.Assert(err, check.IsNil)
-
-		_, err = suite.StorageDriver.GetContent(filename)
-		c.Assert(err, check.IsNil)
-	}
-}
-
-// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files
-func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) {
-	suite.benchmarkStreamFiles(c, 0)
-}
-
-// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files
-func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) {
-	suite.benchmarkStreamFiles(c, 1024)
-}
-
-// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files
-func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) {
-	suite.benchmarkStreamFiles(c, 1024*1024)
-}
-
-// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files
-func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) {
-	suite.benchmarkStreamFiles(c, 1024*1024*1024)
-}
-
-func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) {
-	c.SetBytes(size)
-	parentDir := randomPath(8)
-	defer func() {
-		c.StopTimer()
-		suite.StorageDriver.Delete(firstPart(parentDir))
-	}()
-
-	for i := 0; i < c.N; i++ {
-		filename := path.Join(parentDir, randomPath(32))
-		written, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(randomContents(size)))
-		c.Assert(err, check.IsNil)
-		c.Assert(written, check.Equals, size)
-
-		rc, err := suite.StorageDriver.ReadStream(filename, 0)
-		c.Assert(err, check.IsNil)
-		rc.Close()
-	}
-}
-
-// BenchmarkList5Files benchmarks List for 5 small files
-func (suite *DriverSuite) BenchmarkList5Files(c *check.C) {
-	suite.benchmarkListFiles(c, 5)
-}
-
-// BenchmarkList50Files benchmarks List for 50 small files
-func (suite *DriverSuite) BenchmarkList50Files(c *check.C) {
-	suite.benchmarkListFiles(c, 50)
-}
-
-func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) {
-	parentDir := randomPath(8)
-	defer func() {
-		c.StopTimer()
-		suite.StorageDriver.Delete(firstPart(parentDir))
-	}()
-
-	for i := int64(0); i < numFiles; i++ {
-		err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil)
-		c.Assert(err, check.IsNil)
-	}
-
-	c.ResetTimer()
-	for i := 0; i < c.N; i++ {
-		files, err := suite.StorageDriver.List(parentDir)
-		c.Assert(err, check.IsNil)
-		c.Assert(int64(len(files)), check.Equals, numFiles)
-	}
-}
-
-// BenchmarkDelete5Files benchmarks Delete for 5 small files
-func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) {
-	suite.benchmarkDeleteFiles(c, 5)
-}
-
-// BenchmarkDelete50Files benchmarks Delete for 50 small files
-func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) {
-	suite.benchmarkDeleteFiles(c, 50)
-}
-
-func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) {
-	for i := 0; i < c.N; i++ {
-		parentDir := randomPath(8)
-		defer suite.StorageDriver.Delete(firstPart(parentDir))
-
-		c.StopTimer()
-		for j := int64(0); j < numFiles; j++ {
-			err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil)
-			c.Assert(err, check.IsNil)
-		}
-		c.StartTimer()
-
-		// This is the operation we're benchmarking
-		err := suite.StorageDriver.Delete(firstPart(parentDir))
-		c.Assert(err, check.IsNil)
-	}
-}
-
-func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
-	tf, err := ioutil.TempFile("", "tf")
-	c.Assert(err, check.IsNil)
-	defer os.Remove(tf.Name())
-	defer tf.Close()
-
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	contents := randomContents(size)
-
-	_, err = tf.Write(contents)
-	c.Assert(err, check.IsNil)
-
-	tf.Sync()
-	tf.Seek(0, os.SEEK_SET)
-
-	nn, err := suite.StorageDriver.WriteStream(filename, 0, tf)
-	c.Assert(err, check.IsNil)
-	c.Assert(nn, check.Equals, size)
-
-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	readContents, err := ioutil.ReadAll(reader)
-	c.Assert(err, check.IsNil)
-
-	c.Assert(readContents, check.DeepEquals, contents)
-}
-
-func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) {
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	err := suite.StorageDriver.PutContent(filename, contents)
-	c.Assert(err, check.IsNil)
-
-	readContents, err := suite.StorageDriver.GetContent(filename)
-	c.Assert(err, check.IsNil)
-
-	c.Assert(readContents, check.DeepEquals, contents)
-}
-
-func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) {
-	defer suite.StorageDriver.Delete(firstPart(filename))
-
-	nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contents))
-	c.Assert(err, check.IsNil)
-	c.Assert(nn, check.Equals, int64(len(contents)))
-
-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	readContents, err := ioutil.ReadAll(reader)
-	c.Assert(err, check.IsNil)
-
-	c.Assert(readContents, check.DeepEquals, contents)
-}
-
-var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
-var separatorChars = []byte("._-")
-
-func randomPath(length int64) string {
-	path := "/"
-	for int64(len(path)) < length {
-		chunkLength := rand.Int63n(length-int64(len(path))) + 1
-		chunk := randomFilename(chunkLength)
-		path += chunk
-		remaining := length - int64(len(path))
-		if remaining == 1 {
-			path += randomFilename(1)
-		} else if remaining > 1 {
-			path += "/"
-		}
-	}
-	return path
-}
-
-func randomFilename(length int64) string {
-	b := make([]byte, length)
-	wasSeparator := true
-	for i := range b {
-		if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 {
-			b[i] = separatorChars[rand.Intn(len(separatorChars))]
-			wasSeparator = true
-		} else {
-			b[i] = filenameChars[rand.Intn(len(filenameChars))]
-			wasSeparator = false
-		}
-	}
-	return string(b)
-}
-
-func randomContents(length int64) []byte {
-	b := make([]byte, length)
-	for i := range b {
-		b[i] = byte(rand.Intn(2 << 8))
-	}
-	return b
-}
-
-type randReader struct {
-	r int64
-	m sync.Mutex
-}
-
-func (rr *randReader) Read(p []byte) (n int, err error) {
-	rr.m.Lock()
-	defer rr.m.Unlock()
-	for i := 0; i < len(p) && rr.r > 0; i++ {
-		p[i] = byte(rand.Intn(255))
-		n++
-		rr.r--
-	}
-	if rr.r == 0 {
-		err = io.EOF
-	}
-	return
-}
-
-func newRandReader(n int64) *randReader {
-	return &randReader{r: n}
-}
-
-func firstPart(filePath string) string {
-	if filePath == "" {
-		return "/"
-	}
-	for {
-		if filePath[len(filePath)-1] == '/' {
-			filePath = filePath[:len(filePath)-1]
-		}
-
-		dir, file := path.Split(filePath)
-		if dir == "" && file == "" {
-			return "/"
-		}
-		if dir == "/" || dir == "" {
-			return "/" + file
-		}
-		if file == "" {
-			return dir
-		}
-		filePath = dir
-	}
-}
-
-blob
-mark :184
-data 5145
-package storage
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"time"
-
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-)
-
-// TODO(stevvooe): Set an optimal buffer size here. We'll have to
-// understand the latency characteristics of the underlying network to
-// set this correctly, so we may want to leave it to the driver. For
-// out of process drivers, we'll have to optimize this buffer size for
-// local communication.
-const fileReaderBufferSize = 4 << 20
-
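fileReaderBufferSize above (4MB) exists so that small, frequent reads hit a bufio layer rather than the remote backend on every call. The essence of what fileReader assembles, reduced to a hypothetical helper; the real type also tracks offset, size, and a terminal error, as the struct below shows:

    package example

    import (
        "bufio"
        "io"

        storagedriver "github.com/docker/distribution/registry/storage/driver"
    )

    // bufferedStream opens a driver stream at offset and wraps it so small
    // reads are served from a 4MB buffer. Illustrative only; fileReader below
    // additionally reuses and resets the buffer across reopened streams.
    func bufferedStream(d storagedriver.StorageDriver, path string, offset int64) (io.Reader, io.Closer, error) {
        rc, err := d.ReadStream(path, offset)
        if err != nil {
            return nil, nil, err
        }
        return bufio.NewReaderSize(rc, 4<<20), rc, nil
    }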
-// remoteFileReader provides a read seeker interface to files stored in
-// storagedriver. Used to implement part of layer interface and will be used
-// to implement read side of LayerUpload.
-type fileReader struct {
-	driver storagedriver.StorageDriver
-
-	// identifying fields
-	path    string
-	size    int64     // size is the total size, must be set.
-	modtime time.Time // TODO(stevvooe): This is not needed anymore.
-
-	// mutable fields
-	rc     io.ReadCloser // remote read closer
-	brd    *bufio.Reader // internal buffered io
-	offset int64         // offset is the current read offset
-	err    error         // terminal error, if set, reader is closed
-}
-
-// newFileReader initializes a file reader for the remote file. The read takes
-// on the offset and size at the time the reader is created. If the underlying
-// file changes, one must create a new fileReader.
-func newFileReader(driver storagedriver.StorageDriver, path string) (*fileReader, error) {
-	rd := &fileReader{
-		driver: driver,
-		path:   path,
-	}
-
-	// Grab the size of the layer file, ensuring existence.
-	if fi, err := driver.Stat(path); err != nil {
-		switch err := err.(type) {
-		case storagedriver.PathNotFoundError:
-			// NOTE(stevvooe): We really don't care if the file is not
-			// actually present for the reader. If the caller needs to know
-			// whether or not the file exists, they should issue a stat call
-			// on the path. There is still no guarantee, since the file may be
-			// gone by the time the reader is created. The only correct
-			// behavior is to return a reader that immediately returns EOF.
-		default:
-			// Any other error we want propagated up the stack.
-			return nil, err
-		}
-	} else {
-		if fi.IsDir() {
-			return nil, fmt.Errorf("cannot read a directory")
-		}
-
-		// Fill in file information
-		rd.size = fi.Size()
-		rd.modtime = fi.ModTime()
-	}
-
-	return rd, nil
-}
-
-func (fr *fileReader) Read(p []byte) (n int, err error) {
-	if fr.err != nil {
-		return 0, fr.err
-	}
-
-	rd, err := fr.reader()
-	if err != nil {
-		return 0, err
-	}
-
-	n, err = rd.Read(p)
-	fr.offset += int64(n)
-
-	// Simulate io.EOF error if we reach filesize.
-	if err == nil && fr.offset >= fr.size {
-		err = io.EOF
-	}
-
-	return n, err
-}
-
-func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
-	if fr.err != nil {
-		return 0, fr.err
-	}
-
-	var err error
-	newOffset := fr.offset
-
-	switch whence {
-	case os.SEEK_CUR:
-		newOffset += int64(offset)
-	case os.SEEK_END:
-		newOffset = fr.size + int64(offset)
-	case os.SEEK_SET:
-		newOffset = int64(offset)
-	}
-
-	if newOffset < 0 {
-		err = fmt.Errorf("cannot seek to negative position")
-	} else {
-		if fr.offset != newOffset {
-			fr.reset()
-		}
-
-		// No problems, set the offset.
-		fr.offset = newOffset
-	}
-
-	return fr.offset, err
-}
-
-func (fr *fileReader) Close() error {
-	return fr.closeWithErr(fmt.Errorf("fileReader: closed"))
-}
-
-// reader prepares the reader at the current offset, ensuring it's buffered
-// and ready to go.
-func (fr *fileReader) reader() (io.Reader, error) {
-	if fr.err != nil {
-		return nil, fr.err
-	}
-
-	if fr.rc != nil {
-		return fr.brd, nil
-	}
-
-	// If we don't have a reader, open one up.
-	rc, err := fr.driver.ReadStream(fr.path, fr.offset)
-	if err != nil {
-		switch err := err.(type) {
-		case storagedriver.PathNotFoundError:
-			// NOTE(stevvooe): If the path is not found, we simply return a
-			// reader that returns io.EOF. However, we do not set fr.rc,
-			// allowing future attempts at getting a reader to possibly
-			// succeed if the file turns up later.
-			return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
-		default:
-			return nil, err
-		}
-	}
-
-	fr.rc = rc
-
-	if fr.brd == nil {
-		// TODO(stevvooe): Set an optimal buffer size here. We'll have to
-		// understand the latency characteristics of the underlying network to
-		// set this correctly, so we may want to leave it to the driver. For
-		// out of process drivers, we'll have to optimize this buffer size for
-		// local communication.
-		fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
-	} else {
-		fr.brd.Reset(fr.rc)
-	}
-
-	return fr.brd, nil
-}
-
-// reset resets the reader, forcing the read method to open up a new
-// connection and rebuild the buffered reader. This should be called when the
-// offset and the reader will become out of sync, such as during a seek
-// operation.
-func (fr *fileReader) reset() {
-	if fr.err != nil {
-		return
-	}
-	if fr.rc != nil {
-		fr.rc.Close()
-		fr.rc = nil
-	}
-}
-
-func (fr *fileReader) closeWithErr(err error) error {
-	if fr.err != nil {
-		return fr.err
-	}
-
-	fr.err = err
-
-	// close and release reader chain
-	if fr.rc != nil {
-		fr.rc.Close()
-	}
-
-	fr.rc = nil
-	fr.brd = nil
-
-	return fr.err
-}
-
-blob
-mark :185
-data 5169
-package storage
-
-import (
-	"bytes"
-	"crypto/rand"
-	"io"
-	mrand "math/rand"
-	"os"
-	"testing"
-
-	"github.com/docker/distribution/digest"
-
-	"github.com/docker/distribution/registry/storage/driver/inmemory"
-)
-
-func TestSimpleRead(t *testing.T) {
-	content := make([]byte, 1<<20)
-	n, err := rand.Read(content)
-	if err != nil {
-		t.Fatalf("unexpected error building random data: %v", err)
-	}
-
-	if n != len(content) {
-		t.Fatalf("random read didn't fill buffer")
-	}
-
-	dgst, err := digest.FromReader(bytes.NewReader(content))
-	if err != nil {
-		t.Fatalf("unexpected error digesting random content: %v", err)
-	}
-
-	driver := inmemory.New()
-	path := "/random"
-
-	if err := driver.PutContent(path, content); err != nil {
-		t.Fatalf("error putting patterned content: %v", err)
-	}
-
-	fr, err := newFileReader(driver, path)
-	if err != nil {
-		t.Fatalf("error allocating file reader: %v", err)
-	}
-
-	verifier, err := digest.NewDigestVerifier(dgst)
-	if err != nil {
-		t.Fatalf("error getting digest verifier: %s", err)
-	}
-
-	io.Copy(verifier, fr)
-
-	if !verifier.Verified() {
-		t.Fatalf("unable to verify read data")
-	}
-}
-
-func TestFileReaderSeek(t *testing.T) {
-	driver := inmemory.New()
-	pattern := "01234567890ab" // prime length block
-	repetitions := 1024
-	path := "/patterned"
-	content := bytes.Repeat([]byte(pattern), repetitions)
-
-	if err := driver.PutContent(path, content); err != nil {
-		t.Fatalf("error putting patterned content: %v", err)
-	}
-
-	fr, err := newFileReader(driver, path)
-
-	if err != nil {
-		t.Fatalf("unexpected error creating file reader: %v", err)
-	}
-
-	// Seek all over the place, in blocks of pattern size and make sure we get
-	// the right data.
- for _, repitition := range mrand.Perm(repititions - 1) { - targetOffset := int64(len(pattern) * repitition) - // Seek to a multiple of pattern size and read pattern size bytes - offset, err := fr.Seek(targetOffset, os.SEEK_SET) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if offset != targetOffset { - t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) - } - - p := make([]byte, len(pattern)) - - n, err := fr.Read(p) - if err != nil { - t.Fatalf("error reading pattern: %v", err) - } - - if n != len(pattern) { - t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) - } - - if string(p) != pattern { - t.Fatalf("incorrect read content: %q != %q", p, pattern) - } - - // Check offset - current, err := fr.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("error checking current offset: %v", err) - } - - if current != targetOffset+int64(len(pattern)) { - t.Fatalf("unexpected offset after read: %v", err) - } - } - - start, err := fr.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error seeking to start: %v", err) - } - - if start != 0 { - t.Fatalf("expected to seek to start: %v != 0", start) - } - - end, err := fr.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("error checking current offset: %v", err) - } - - if end != int64(len(content)) { - t.Fatalf("expected to seek to end: %v != %v", end, len(content)) - } - - // 4. Seek before start, ensure error. - - // seek before start - before, err := fr.Seek(-1, os.SEEK_SET) - if err == nil { - t.Fatalf("error expected, returned offset=%v", before) - } - - // 5. Seek after end, - after, err := fr.Seek(1, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error expected, returned offset=%v", after) - } - - p := make([]byte, 16) - n, err := fr.Read(p) - - if n != 0 { - t.Fatalf("bytes reads %d != %d", n, 0) - } - - if err != io.EOF { - t.Fatalf("expected io.EOF, got %v", err) - } -} - -// TestFileReaderNonExistentFile ensures the reader behaves as expected with a -// missing or zero-length remote file. While the file may not exist, the -// reader should not error out on creation and should return 0-bytes from the -// read method, with an io.EOF error. -func TestFileReaderNonExistentFile(t *testing.T) { - driver := inmemory.New() - fr, err := newFileReader(driver, "/doesnotexist") - if err != nil { - t.Fatalf("unexpected error initializing reader: %v", err) - } - - var buf [1024]byte - - n, err := fr.Read(buf[:]) - if n != 0 { - t.Fatalf("non-zero byte read reported: %d != 0", n) - } - - if err != io.EOF { - t.Fatalf("read on missing file should return io.EOF, got %v", err) - } -} - -// TestLayerReadErrors covers the various error return type for different -// conditions that can arise when reading a layer. -func TestFileReaderErrors(t *testing.T) { - // TODO(stevvooe): We need to cover error return types, driven by the - // errors returned via the HTTP API. For now, here is a incomplete list: - // - // 1. Layer Not Found: returned when layer is not found or access is - // denied. - // 2. Layer Unavailable: returned when link references are unresolved, - // but layer is known to the registry. - // 3. Layer Invalid: This may more split into more errors, but should be - // returned when name or tarsum does not reference a valid error. We - // may also need something to communication layer verification errors - // for the inline tarsum check. - // 4. Timeout: timeouts to backend. 
Need to better understand these - // failure cases and how the storage driver propagates these errors - // up the stack. -} - -blob -mark :186 -data 4917 -package storage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -const ( - fileWriterBufferSize = 5 << 20 -) - -// fileWriter implements a remote file writer backed by a storage driver. -type fileWriter struct { - driver storagedriver.StorageDriver - - // identifying fields - path string - - // mutable fields - size int64 // size of the file, aka the current end - offset int64 // offset is the current write offset - err error // terminal error, if set, reader is closed -} - -type bufferedFileWriter struct { - fileWriter - bw *bufio.Writer -} - -// fileWriterInterface makes the desired io compliant interface that the -// filewriter should implement. -type fileWriterInterface interface { - io.WriteSeeker - io.WriterAt - io.ReaderFrom - io.Closer -} - -var _ fileWriterInterface = &fileWriter{} - -// newFileWriter returns a prepared fileWriter for the driver and path. This -// could be considered similar to an "open" call on a regular filesystem. -func newFileWriter(driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { - fw := fileWriter{ - driver: driver, - path: path, - } - - if fi, err := driver.Stat(path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - return nil, err - } - } else { - if fi.IsDir() { - return nil, fmt.Errorf("cannot write to a directory") - } - - fw.size = fi.Size() - } - - buffered := bufferedFileWriter{ - fileWriter: fw, - } - buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize) - - return &buffered, nil -} - -// wraps the fileWriter.Write method to buffer small writes -func (bfw *bufferedFileWriter) Write(p []byte) (int, error) { - return bfw.bw.Write(p) -} - -// wraps fileWriter.Close to ensure the buffer is flushed -// before we close the writer. -func (bfw *bufferedFileWriter) Close() (err error) { - if err = bfw.Flush(); err != nil { - return err - } - err = bfw.fileWriter.Close() - return err -} - -// wraps fileWriter.Seek to ensure offset is handled -// correctly in respect to pending data in the buffer -func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) { - if err := bfw.Flush(); err != nil { - return 0, err - } - return bfw.fileWriter.Seek(offset, whence) -} - -// wraps bufio.Writer.Flush to allow intermediate flushes -// of the bufferedFileWriter -func (bfw *bufferedFileWriter) Flush() error { - return bfw.bw.Flush() -} - -// Write writes the buffer p at the current write offset. -func (fw *fileWriter) Write(p []byte) (n int, err error) { - nn, err := fw.readFromAt(bytes.NewReader(p), -1) - return int(nn), err -} - -// WriteAt writes p at the specified offset. The underlying offset does not -// change. -func (fw *fileWriter) WriteAt(p []byte, offset int64) (n int, err error) { - nn, err := fw.readFromAt(bytes.NewReader(p), offset) - return int(nn), err -} - -// ReadFrom reads reader r until io.EOF writing the contents at the current -// offset. -func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { - return fw.readFromAt(r, -1) -} - -// Seek moves the write position do the requested offest based on the whence -// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET. 
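An aside on the flush-before-seek rule encoded by bufferedFileWriter above: any reposition must first drain the buffer, or the sink's offset lies about what has actually been written. A standalone sketch of the same rule with a plain bufio.Writer over a temp file (the file prefix and buffer size are illustrative, not part of this tree):

package main

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	f, err := ioutil.TempFile("", "bufseek")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	bw := bufio.NewWriterSize(f, 1<<20)
	bw.WriteString("hello") // sits in bw's buffer; the file is still empty

	// Seeking f now would desynchronize the buffer and the file. Flush
	// first, which is exactly what bufferedFileWriter.Seek does.
	if err := bw.Flush(); err != nil {
		panic(err)
	}

	off, err := f.Seek(0, os.SEEK_CUR)
	if err != nil {
		panic(err)
	}
	fmt.Println(off) // 5: the position reflects the flushed write
}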
-func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) { - if fw.err != nil { - return 0, fw.err - } - - var err error - newOffset := fw.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fw.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - // No problems, set the offset. - fw.offset = newOffset - } - - return fw.offset, err -} - -// Close closes the fileWriter for writing. -// Calling it once is valid and correct and it will -// return a nil error. Calling it subsequent times will -// detect that fw.err has been set and will return the error. -func (fw *fileWriter) Close() error { - if fw.err != nil { - return fw.err - } - - fw.err = fmt.Errorf("filewriter@%v: closed", fw.path) - - return nil -} - -// readFromAt writes to fw from r at the specified offset. If offset is less -// than zero, the value of fw.offset is used and updated after the operation. -func (fw *fileWriter) readFromAt(r io.Reader, offset int64) (n int64, err error) { - if fw.err != nil { - return 0, fw.err - } - - var updateOffset bool - if offset < 0 { - offset = fw.offset - updateOffset = true - } - - nn, err := fw.driver.WriteStream(fw.path, offset, r) - - if updateOffset { - // We should forward the offset, whether or not there was an error. - // Basically, we keep the filewriter in sync with the reader's head. If an - // error is encountered, the whole thing should be retried but we proceed - // from an expected offset, even if the data didn't make it to the - // backend. - fw.offset += nn - - if fw.offset > fw.size { - fw.size = fw.offset - } - } - - return nn, err -} - -blob -mark :187 -data 6258 -package storage - -import ( - "bytes" - "crypto/rand" - "io" - "os" - "testing" - - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -// TestSimpleWrite takes the fileWriter through common write operations -// ensuring data integrity. 
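fileWriter distinguishes Write, which advances the tracked offset, from WriteAt, which leaves it untouched; that mirrors the os.File contract, and the test below checks the same behavior against the driver. The os.File analogue, as a minimal standalone check:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	f, err := ioutil.TempFile("", "writeat")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	f.Write([]byte("aaaa"))    // advances the file offset to 4
	f.WriteAt([]byte("bb"), 0) // patches bytes 0-1, offset stays at 4

	off, _ := f.Seek(0, os.SEEK_CUR)
	fmt.Println(off) // 4: WriteAt did not move the write position

	data, _ := ioutil.ReadFile(f.Name())
	fmt.Printf("%s\n", data) // "bbaa"
}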
-func TestSimpleWrite(t *testing.T) { - content := make([]byte, 1<<20) - n, err := rand.Read(content) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - - if n != len(content) { - t.Fatalf("random read did't fill buffer") - } - - dgst, err := digest.FromReader(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error digesting random content: %v", err) - } - - driver := inmemory.New() - path := "/random" - - fw, err := newFileWriter(driver, path) - if err != nil { - t.Fatalf("unexpected error creating fileWriter: %v", err) - } - defer fw.Close() - - n, err = fw.Write(content) - if err != nil { - t.Fatalf("unexpected error writing content: %v", err) - } - fw.Flush() - - if n != len(content) { - t.Fatalf("unexpected write length: %d != %d", n, len(content)) - } - - fr, err := newFileReader(driver, path) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } - - // Check the seek position is equal to the content length - end, err := fw.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if end != int64(len(content)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(content)) - } - - // Double the content, but use the WriteAt method - doubled := append(content, content...) - doubledgst, err := digest.FromReader(bytes.NewReader(doubled)) - if err != nil { - t.Fatalf("unexpected error digesting doubled content: %v", err) - } - - n, err = fw.WriteAt(content, end) - if err != nil { - t.Fatalf("unexpected error writing content at %d: %v", end, err) - } - - if n != len(content) { - t.Fatalf("writeat was short: %d != %d", n, len(content)) - } - - fr, err = newFileReader(driver, path) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err = digest.NewDigestVerifier(doubledgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } - - // Check that WriteAt didn't update the offset. - end, err = fw.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if end != int64(len(content)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(content)) - } - - // Now, we copy from one path to another, running the data through the - // fileReader to fileWriter, rather than the driver.Move command to ensure - // everything is working correctly. 
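The verification passes in this test all reduce to streaming the re-read bytes into a digest via io.Copy. The bare round-trip pattern, with crypto/sha256 standing in for the digest package used here:

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	content := make([]byte, 1<<20)
	if _, err := rand.Read(content); err != nil {
		panic(err)
	}

	// Digest the source up front, as digest.FromReader does for the test content.
	want := sha256.Sum256(content)

	// Re-read the "stored" bytes through a hash, mirroring io.Copy(verifier, fr).
	h := sha256.New()
	io.Copy(h, bytes.NewReader(content))

	fmt.Println(bytes.Equal(h.Sum(nil), want[:])) // true when the round trip is intact
}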
- fr, err = newFileReader(driver, path) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - fw, err = newFileWriter(driver, "/copied") - if err != nil { - t.Fatalf("unexpected error creating fileWriter: %v", err) - } - defer fw.Close() - - nn, err := io.Copy(fw, fr) - if err != nil { - t.Fatalf("unexpected error copying data: %v", err) - } - - if nn != int64(len(doubled)) { - t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) - } - - fr, err = newFileReader(driver, "/copied") - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err = digest.NewDigestVerifier(doubledgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } -} - -func TestBufferedFileWriter(t *testing.T) { - writer, err := newFileWriter(inmemory.New(), "/random") - - if err != nil { - t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) - } - - // write one byte and ensure the offset hasn't been incremented. - // offset will only get incremented when the buffer gets flushed - short := []byte{byte(1)} - - writer.Write(short) - - if writer.offset > 0 { - t.Fatalf("WriteStream called prematurely") - } - - // write enough data to cause the buffer to flush and confirm - // the offset has been incremented - long := make([]byte, fileWriterBufferSize) - _, err = rand.Read(long) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - for i := range long { - long[i] = byte(i) - } - writer.Write(long) - writer.Close() - if writer.offset != (fileWriterBufferSize + 1) { - t.Fatalf("WriteStream not called when buffer capacity reached") - } -} - -func BenchmarkFileWriter(b *testing.B) { - b.StopTimer() // not sure how long setup above will take - for i := 0; i < b.N; i++ { - // Start basic fileWriter initialization - fw := fileWriter{ - driver: inmemory.New(), - path: "/random", - } - - if fi, err := fw.driver.Stat(fw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) - } - } else { - if fi.IsDir() { - b.Fatalf("Cannot write to a directory") - } - - fw.size = fi.Size() - } - - randomBytes := make([]byte, 1<<20) - _, err := rand.Read(randomBytes) - if err != nil { - b.Fatalf("unexpected error building random data: %v", err) - } - // End basic file writer initialization - - b.StartTimer() - for j := 0; j < 100; j++ { - fw.Write(randomBytes) - } - b.StopTimer() - } -} - -func BenchmarkBufferedFileWriter(b *testing.B) { - b.StopTimer() // not sure how long setup above will take - for i := 0; i < b.N; i++ { - bfw, err := newFileWriter(inmemory.New(), "/random") - - if err != nil { - b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) - } - - randomBytes := make([]byte, 1<<20) - _, err = rand.Read(randomBytes) - if err != nil { - b.Fatalf("unexpected error building random data: %v", err) - } - - b.StartTimer() - for j := 0; j < 100; j++ { - bfw.Write(randomBytes) - } - b.StopTimer() - } -} - -blob -mark :188 -data 10066 -package storage - -import ( - "bytes" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" - storagedriver 
"github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "golang.org/x/net/context" -) - -// TestSimpleLayerUpload covers the layer upload process, exercising common -// error paths that might be seen during an upload. -func TestSimpleLayerUpload(t *testing.T) { - randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() - - if err != nil { - t.Fatalf("error creating random reader: %v", err) - } - - dgst := digest.Digest(tarSumStr) - - if err != nil { - t.Fatalf("error allocating upload store: %v", err) - } - - ctx := context.Background() - imageName := "foo/bar" - driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - ls := repository.Layers() - - h := sha256.New() - rd := io.TeeReader(randomDataReader, h) - - layerUpload, err := ls.Upload() - - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Cancel the upload then restart it - if err := layerUpload.Cancel(); err != nil { - t.Fatalf("unexpected error during upload cancellation: %v", err) - } - - // Do a resume, get unknown upload - layerUpload, err = ls.Resume(layerUpload.UUID()) - if err != distribution.ErrLayerUploadUnknown { - t.Fatalf("unexpected error resuming upload, should be unkown: %v", err) - } - - // Restart! - layerUpload, err = ls.Upload() - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Get the size of our random tarfile - randomDataSize, err := seekerSize(randomDataReader) - if err != nil { - t.Fatalf("error getting seeker size of random data: %v", err) - } - - nn, err := io.Copy(layerUpload, rd) - if err != nil { - t.Fatalf("unexpected error uploading layer data: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("layer data write incomplete") - } - - offset, err := layerUpload.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("unexpected error seeking layer upload: %v", err) - } - - if offset != nn { - t.Fatalf("layerUpload not updated with correct offset: %v != %v", offset, nn) - } - layerUpload.Close() - - // Do a resume, for good fun - layerUpload, err = ls.Resume(layerUpload.UUID()) - if err != nil { - t.Fatalf("unexpected error resuming upload: %v", err) - } - - sha256Digest := digest.NewDigest("sha256", h) - layer, err := layerUpload.Finish(dgst) - - if err != nil { - t.Fatalf("unexpected error finishing layer upload: %v", err) - } - - // After finishing an upload, it should no longer exist. - if _, err := ls.Resume(layerUpload.UUID()); err != distribution.ErrLayerUploadUnknown { - t.Fatalf("expected layer upload to be unknown, got %v", err) - } - - // Test for existence. - exists, err := ls.Exists(layer.Digest()) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v", err) - } - - if !exists { - t.Fatalf("layer should now exist") - } - - h.Reset() - nn, err = io.Copy(h, layer) - if err != nil { - t.Fatalf("error reading layer: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("incorrect read length") - } - - if digest.NewDigest("sha256", h) != sha256Digest { - t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) - } -} - -// TestSimpleLayerRead just creates a simple layer file and ensures that basic -// open, read, seek, read works. 
More specific edge cases should be covered in -// other tests. -func TestSimpleLayerRead(t *testing.T) { - ctx := context.Background() - imageName := "foo/bar" - driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - ls := repository.Layers() - - randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random data: %v", err) - } - - dgst := digest.Digest(tarSumStr) - - // Test for existence. - exists, err := ls.Exists(dgst) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v", err) - } - - if exists { - t.Fatalf("layer should not exist") - } - - // Try to get the layer and make sure we get a not found error - layer, err := ls.Fetch(dgst) - if err == nil { - t.Fatalf("error expected fetching unknown layer") - } - - switch err.(type) { - case distribution.ErrUnknownLayer: - err = nil - default: - t.Fatalf("unexpected error fetching non-existent layer: %v", err) - } - - randomLayerDigest, err := writeTestLayer(driver, defaultPathMapper, imageName, dgst, randomLayerReader) - if err != nil { - t.Fatalf("unexpected error writing test layer: %v", err) - } - - randomLayerSize, err := seekerSize(randomLayerReader) - if err != nil { - t.Fatalf("error getting seeker size for random layer: %v", err) - } - - layer, err = ls.Fetch(dgst) - if err != nil { - t.Fatal(err) - } - defer layer.Close() - - // Now check the sha digest and ensure its the same - h := sha256.New() - nn, err := io.Copy(h, layer) - if err != nil && err != io.EOF { - t.Fatalf("unexpected error copying to hash: %v", err) - } - - if nn != randomLayerSize { - t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize) - } - - sha256Digest := digest.NewDigest("sha256", h) - if sha256Digest != randomLayerDigest { - t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest) - } - - // Now seek back the layer, read the whole thing and check against randomLayerData - offset, err := layer.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error seeking layer: %v", err) - } - - if offset != 0 { - t.Fatalf("seek failed: expected 0 offset, got %d", offset) - } - - p, err := ioutil.ReadAll(layer) - if err != nil { - t.Fatalf("error reading all of layer: %v", err) - } - - if len(p) != int(randomLayerSize) { - t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize) - } - - // Reset the randomLayerReader and read back the buffer - _, err = randomLayerReader.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error resetting layer reader: %v", err) - } - - randomLayerData, err := ioutil.ReadAll(randomLayerReader) - if err != nil { - t.Fatalf("random layer read failed: %v", err) - } - - if !bytes.Equal(p, randomLayerData) { - t.Fatalf("layer data not equal") - } -} - -// TestLayerUploadZeroLength uploads zero-length -func TestLayerUploadZeroLength(t *testing.T) { - ctx := context.Background() - imageName := "foo/bar" - driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - ls := repository.Layers() - - upload, err := ls.Upload() - if err != nil { - t.Fatalf("unexpected error starting upload: %v", err) - } - - io.Copy(upload, 
bytes.NewReader([]byte{}))
-
-	dgst, err := digest.FromReader(bytes.NewReader([]byte{}))
-	if err != nil {
-		t.Fatalf("error getting zero digest: %v", err)
-	}
-
-	if dgst != digest.DigestSha256EmptyTar {
-		// sanity check on zero digest
-		t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestSha256EmptyTar)
-	}
-
-	layer, err := upload.Finish(dgst)
-	if err != nil {
-		t.Fatalf("unexpected error finishing upload: %v", err)
-	}
-
-	if layer.Digest() != dgst {
-		t.Fatalf("unexpected digest: %v != %v", layer.Digest(), dgst)
-	}
-}
-
-// writeRandomLayer creates a random layer under name and tarSum using driver
-// and pathMapper. An io.ReadSeeker with the data is returned, along with the
-// sha256 hex digest.
-func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum digest.Digest, sha256digest digest.Digest, err error) {
-	reader, tarSumStr, err := testutil.CreateRandomTarFile()
-	if err != nil {
-		return nil, "", "", err
-	}
-
-	tarSum = digest.Digest(tarSumStr)
-
-	// Now, actually create the layer.
-	randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader))
-
-	if _, err := reader.Seek(0, os.SEEK_SET); err != nil {
-		return nil, "", "", err
-	}
-
-	return reader, tarSum, randomLayerDigest, err
-}
-
-// seekerSize seeks to the end of seeker, checks the size and returns it to
-// the original state, returning the size. The state of the seeker should be
-// treated as unknown if an error is returned.
-func seekerSize(seeker io.ReadSeeker) (int64, error) {
-	current, err := seeker.Seek(0, os.SEEK_CUR)
-	if err != nil {
-		return 0, err
-	}
-
-	end, err := seeker.Seek(0, os.SEEK_END)
-	if err != nil {
-		return 0, err
-	}
-
-	resumed, err := seeker.Seek(current, os.SEEK_SET)
-	if err != nil {
-		return 0, err
-	}
-
-	if resumed != current {
-		return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
-	}
-
-	return end, nil
-}
-
-// writeTestLayer creates a simple test layer in the provided driver under
-// tarsum dgst, returning the sha256 digest location. This is implemented
-// piecemeal and should probably be replaced by the uploader when it's ready.
-func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
-	h := sha256.New()
-	rd := io.TeeReader(content, h)
-
-	p, err := ioutil.ReadAll(rd)
-	if err != nil {
-		return "", err
-	}
-
-	blobDigestSHA := digest.NewDigest("sha256", h)
-
-	blobPath, err := pathMapper.path(blobDataPathSpec{
-		digest: dgst,
-	})
-	if err != nil {
-		return "", err
-	}
-
-	if err := driver.PutContent(blobPath, p); err != nil {
-		return "", err
-	}
-
-	layerLinkPath, err := pathMapper.path(layerLinkPathSpec{
-		name:   name,
-		digest: dgst,
-	})
-	if err != nil {
-		return "", err
-	}
-
-	if err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil {
-		return "", err
-	}
-
-	return blobDigestSHA, nil
-}
-
-blob
-mark :189
-data 6830
-package storage
-
-import (
-	"expvar"
-	"sync/atomic"
-	"time"
-
-	"github.com/docker/distribution"
-	ctxu "github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/registry/storage/cache"
-	"github.com/docker/distribution/registry/storage/driver"
-	"golang.org/x/net/context"
-)
-
-// cachedLayerService implements the layer service with path-aware caching,
-// using a LayerInfoCache interface.
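The Exists path that follows is a cache-aside lookup: trust a cached positive, fall back to the upstream on a miss or cache error, and record only confirmed existence. A minimal sketch of that shape, with a plain map standing in for the LayerInfoCache (names and digests are illustrative):

package main

import "fmt"

// layerCache is a stand-in for cache.LayerInfoCache: it only answers
// membership questions and can record positive results.
type layerCache map[string]bool

// existsCacheAside mirrors cachedLayerService.Exists: trust a cache hit,
// otherwise ask upstream and cache only confirmed existence.
func existsCacheAside(c layerCache, dgst string, upstream func(string) bool) bool {
	if c[dgst] {
		return true // fast path: cached positive result
	}
	exists := upstream(dgst)
	if exists {
		c[dgst] = true // only positive results are safe to cache
	}
	return exists
}

func main() {
	c := layerCache{}
	upstream := func(dgst string) bool { return dgst == "sha256:abc" } // hypothetical backend
	fmt.Println(existsCacheAside(c, "sha256:abc", upstream))           // true, and now cached
	fmt.Println(c["sha256:abc"])                                       // true
	fmt.Println(existsCacheAside(c, "sha256:def", upstream))           // false, not cached
}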
-type cachedLayerService struct { - distribution.LayerService // upstream layer service - repository distribution.Repository - ctx context.Context - driver driver.StorageDriver - *blobStore // global blob store - cache cache.LayerInfoCache -} - -// Exists checks for existence of the digest in the cache, immediately -// returning if it exists for the repository. If not, the upstream is checked. -// When a positive result is found, it is written into the cache. -func (lc *cachedLayerService) Exists(dgst digest.Digest) (bool, error) { - ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Exists(%q)", dgst) - now := time.Now() - defer func() { - // TODO(stevvooe): Replace this with a decent context-based metrics solution - ctxu.GetLoggerWithField(lc.ctx, "blob.exists.duration", time.Since(now)). - Infof("(*cachedLayerService).Exists(%q)", dgst) - }() - - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Requests, 1) - available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - if available { - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Hits, 1) - return true, nil - } - -fallback: - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Misses, 1) - exists, err := lc.LayerService.Exists(dgst) - if err != nil { - return exists, err - } - - if exists { - // we can only cache this if the existence is positive. - if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error adding %v@%v to cache: %v", lc.repository.Name(), dgst, err) - } - } - - return exists, err -} - -// Fetch checks for the availability of the layer in the repository via the -// cache. If present, the metadata is resolved and the layer is returned. If -// any operation fails, the layer is read directly from the upstream. The -// results are cached, if possible. -func (lc *cachedLayerService) Fetch(dgst digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Fetch(%q)", dgst) - now := time.Now() - defer func() { - ctxu.GetLoggerWithField(lc.ctx, "blob.fetch.duration", time.Since(now)). - Infof("(*layerInfoCache).Fetch(%q)", dgst) - }() - - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Requests, 1) - available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - if available { - // fast path: get the layer info and return - meta, err := lc.cache.Meta(lc.ctx, dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error fetching %v@%v from cache: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Hits, 1) - return newLayerReader(lc.driver, dgst, meta.Path, meta.Length) - } - - // NOTE(stevvooe): Unfortunately, the cache here only makes checks for - // existing layers faster. We'd have to provide more careful - // synchronization with the backend to make the missing case as fast. - -fallback: - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Misses, 1) - layer, err := lc.LayerService.Fetch(dgst) - if err != nil { - return nil, err - } - - // add the layer to the repository - if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx). 
- Errorf("error caching repository relationship for %v@%v: %v", lc.repository.Name(), dgst, err) - } - - // lookup layer path and add it to the cache, if it succeds. Note that we - // still return the layer even if we have trouble caching it. - if path, err := lc.resolveLayerPath(layer); err != nil { - ctxu.GetLogger(lc.ctx). - Errorf("error resolving path while caching %v@%v: %v", lc.repository.Name(), dgst, err) - } else { - // add the layer to the cache once we've resolved the path. - if err := lc.cache.SetMeta(lc.ctx, dgst, cache.LayerMeta{Path: path, Length: layer.Length()}); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error adding meta for %v@%v to cache: %v", lc.repository.Name(), dgst, err) - } - } - - return layer, err -} - -// extractLayerInfo pulls the layerInfo from the layer, attempting to get the -// path information from either the concrete object or by resolving the -// primary blob store path. -func (lc *cachedLayerService) resolveLayerPath(layer distribution.Layer) (path string, err error) { - // try and resolve the type and driver, so we don't have to traverse links - switch v := layer.(type) { - case *layerReader: - // only set path if we have same driver instance. - if v.driver == lc.driver { - return v.path, nil - } - } - - ctxu.GetLogger(lc.ctx).Warnf("resolving layer path during cache lookup (%v@%v)", lc.repository.Name(), layer.Digest()) - // we have to do an expensive stat to resolve the layer location but no - // need to check the link, since we already have layer instance for this - // repository. - bp, err := lc.blobStore.path(layer.Digest()) - if err != nil { - return "", err - } - - return bp, nil -} - -// layerInfoCacheMetrics keeps track of cache metrics for layer info cache -// requests. Note this is kept globally and made available via expvar. For -// more detailed metrics, its recommend to instrument a particular cache -// implementation. -var layerInfoCacheMetrics struct { - // Exists tracks calls to the Exists caches. - Exists struct { - Requests uint64 - Hits uint64 - Misses uint64 - } - - // Fetch tracks calls to the fetch caches. - Fetch struct { - Requests uint64 - Hits uint64 - Misses uint64 - } -} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("layerinfo", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date. The - // numbers will always *eventually* be reported correctly. - return layerInfoCacheMetrics - })) -} - -blob -mark :190 -data 1992 -package storage - -import ( - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// layerReader implements Layer and provides facilities for reading and -// seeking. -type layerReader struct { - fileReader - - digest digest.Digest -} - -// newLayerReader returns a new layerReader with the digest, path and length, -// eliding round trips to the storage backend. 
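The init function in layercache.go above registers its counters lazily so multiple packages can share the "registry" expvar map without clobbering each other. The registration pattern in isolation (the "cachehits" key is illustrative):

package main

import (
	"expvar"
	"fmt"
	"sync/atomic"
)

var hits uint64 // incremented atomically, read lazily via expvar.Func

func main() {
	// Reuse the "registry" map if something registered it already,
	// mirroring the guarded lookup in the init above.
	registry := expvar.Get("registry")
	if registry == nil {
		registry = expvar.NewMap("registry")
	}

	registry.(*expvar.Map).Set("cachehits", expvar.Func(func() interface{} {
		return atomic.LoadUint64(&hits)
	}))

	atomic.AddUint64(&hits, 3)
	fmt.Println(expvar.Get("registry")) // {"cachehits": 3}
}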
-func newLayerReader(driver driver.StorageDriver, dgst digest.Digest, path string, length int64) (*layerReader, error) { - fr := &fileReader{ - driver: driver, - path: path, - size: length, - } - - return &layerReader{ - fileReader: *fr, - digest: dgst, - }, nil -} - -var _ distribution.Layer = &layerReader{} - -func (lr *layerReader) Digest() digest.Digest { - return lr.digest -} - -func (lr *layerReader) Length() int64 { - return lr.size -} - -func (lr *layerReader) CreatedAt() time.Time { - return lr.modtime -} - -// Close the layer. Should be called when the resource is no longer needed. -func (lr *layerReader) Close() error { - return lr.closeWithErr(distribution.ErrLayerClosed) -} - -func (lr *layerReader) Handler(r *http.Request) (h http.Handler, err error) { - var handlerFunc http.HandlerFunc - - redirectURL, err := lr.fileReader.driver.URLFor(lr.path, map[string]interface{}{"method": r.Method}) - - switch err { - case nil: - handlerFunc = func(w http.ResponseWriter, r *http.Request) { - // Redirect to storage URL. - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) - } - case driver.ErrUnsupportedMethod: - handlerFunc = func(w http.ResponseWriter, r *http.Request) { - // Fallback to serving the content directly. - http.ServeContent(w, r, lr.digest.String(), lr.CreatedAt(), lr) - } - default: - // Some unexpected error. - return nil, err - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Docker-Content-Digest", lr.digest.String()) - handlerFunc.ServeHTTP(w, r) - }), nil -} - -blob -mark :191 -data 4302 -package storage - -import ( - "time" - - "code.google.com/p/go-uuid/uuid" - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -type layerStore struct { - repository *repository -} - -func (ls *layerStore) Exists(digest digest.Digest) (bool, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists") - - // Because this implementation just follows blob links, an existence check - // is pretty cheap by starting and closing a fetch. - _, err := ls.Fetch(digest) - - if err != nil { - switch err.(type) { - case distribution.ErrUnknownLayer: - return false, nil - } - - return false, err - } - - return true, nil -} - -func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Fetch") - bp, err := ls.path(dgst) - if err != nil { - return nil, err - } - - fr, err := newFileReader(ls.repository.driver, bp) - if err != nil { - return nil, err - } - - return &layerReader{ - fileReader: *fr, - digest: dgst, - }, nil -} - -// Upload begins a layer upload, returning a handle. If the layer upload -// is already in progress or the layer has already been uploaded, this -// will return an error. -func (ls *layerStore) Upload() (distribution.LayerUpload, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Upload") - - // NOTE(stevvooe): Consider the issues with allowing concurrent upload of - // the same two layers. Should it be disallowed? For now, we allow both - // parties to proceed and the the first one uploads the layer. 
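layerReader.Handler above chooses between redirecting to a driver-minted URL and serving the bytes itself when the driver cannot produce one. A self-contained sketch of that decision, with stand-ins for StorageDriver.URLFor and driver.ErrUnsupportedMethod:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"time"
)

// errUnsupportedMethod stands in for driver.ErrUnsupportedMethod.
var errUnsupportedMethod = errors.New("unsupported method")

// urlFor plays the role of StorageDriver.URLFor: a backend either mints a
// direct URL for the blob or reports that it cannot.
func urlFor(path string) (string, error) { return "", errUnsupportedMethod }

func blobHandler(path, content string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		redirectURL, err := urlFor(path)
		switch err {
		case nil:
			// Offload the transfer to the backend, as with S3 signed URLs.
			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
		case errUnsupportedMethod:
			// Fall back to proxying the bytes through the registry itself.
			http.ServeContent(w, r, path, time.Time{}, strings.NewReader(content))
		default:
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
}

func main() {
	req, _ := http.NewRequest("GET", "/blob", nil)
	rec := httptest.NewRecorder()
	blobHandler("/blob", "layer bytes").ServeHTTP(rec, req)
	fmt.Println(rec.Code, rec.Body.String()) // 200 layer bytes
}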
- - uuid := uuid.New() - startedAt := time.Now().UTC() - - path, err := ls.repository.registry.pm.path(uploadDataPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - // Write a startedat file for this upload - if err := ls.repository.driver.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - return nil, err - } - - return ls.newLayerUpload(uuid, path, startedAt) -} - -// Resume continues an in progress layer upload, returning the current -// state of the upload. -func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume") - startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtBytes, err := ls.repository.driver.GetContent(startedAtPath) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return nil, distribution.ErrLayerUploadUnknown - default: - return nil, err - } - } - - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return nil, err - } - - path, err := ls.repository.pm.path(uploadDataPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - return ls.newLayerUpload(uuid, path, startedAt) -} - -// newLayerUpload allocates a new upload controller with the given state. -func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (distribution.LayerUpload, error) { - fw, err := newFileWriter(ls.repository.driver, path) - if err != nil { - return nil, err - } - - lw := &layerWriter{ - layerStore: ls, - uuid: uuid, - startedAt: startedAt, - bufferedFileWriter: *fw, - } - - lw.setupResumableDigester() - - return lw, nil -} - -func (ls *layerStore) path(dgst digest.Digest) (string, error) { - // We must traverse this path through the link to enforce ownership. - layerLinkPath, err := ls.repository.registry.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst}) - if err != nil { - return "", err - } - - blobPath, err := ls.repository.blobStore.resolve(layerLinkPath) - - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return "", distribution.ErrUnknownLayer{ - FSLayer: manifest.FSLayer{BlobSum: dgst}, - } - default: - return "", err - } - } - - return blobPath, nil -} - -blob -mark :192 -data 12971 -package storage - -import ( - "fmt" - "io" - "os" - "path" - "strconv" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -var _ distribution.LayerUpload = &layerWriter{} - -// layerWriter is used to control the various aspects of resumable -// layer upload. It implements the LayerUpload interface. 
-type layerWriter struct {
-	layerStore *layerStore
-
-	uuid              string
-	startedAt         time.Time
-	resumableDigester digest.ResumableDigester
-
-	// implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy
-	// the LayerUpload interface
-	bufferedFileWriter
-}
-
-// UUID returns the identifier for this upload.
-func (lw *layerWriter) UUID() string {
-	return lw.uuid
-}
-
-func (lw *layerWriter) StartedAt() time.Time {
-	return lw.startedAt
-}
-
-// Finish marks the upload as completed, returning a valid handle to the
-// uploaded layer. The final size and checksum are validated against the
-// contents of the uploaded layer. The checksum should be provided in the
-// format <algorithm>:<digest>.
-func (lw *layerWriter) Finish(digest digest.Digest) (distribution.Layer, error) {
-	ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish")
-
-	if err := lw.bufferedFileWriter.Close(); err != nil {
-		return nil, err
-	}
-
-	canonical, err := lw.validateLayer(digest)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := lw.moveLayer(canonical); err != nil {
-		// TODO(stevvooe): Cleanup?
-		return nil, err
-	}
-
-	// Link the layer blob into the repository.
-	if err := lw.linkLayer(canonical, digest); err != nil {
-		return nil, err
-	}
-
-	if err := lw.removeResources(); err != nil {
-		return nil, err
-	}
-
-	return lw.layerStore.Fetch(canonical)
-}
-
-// Cancel the layer upload process.
-func (lw *layerWriter) Cancel() error {
-	ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Cancel")
-	if err := lw.removeResources(); err != nil {
-		return err
-	}
-
-	lw.Close()
-	return nil
-}
-
-func (lw *layerWriter) Write(p []byte) (int, error) {
-	if lw.resumableDigester == nil {
-		return lw.bufferedFileWriter.Write(p)
-	}
-
-	// Ensure that the current write offset matches how many bytes have been
-	// written to the digester. If not, we need to update the digest state to
-	// match the current write position.
-	if err := lw.resumeHashAt(lw.offset); err != nil {
-		return 0, err
-	}
-
-	return io.MultiWriter(&lw.bufferedFileWriter, lw.resumableDigester).Write(p)
-}
-
-func (lw *layerWriter) ReadFrom(r io.Reader) (n int64, err error) {
-	if lw.resumableDigester == nil {
-		return lw.bufferedFileWriter.ReadFrom(r)
-	}
-
-	// Ensure that the current write offset matches how many bytes have been
-	// written to the digester. If not, we need to update the digest state to
-	// match the current write position.
-	if err := lw.resumeHashAt(lw.offset); err != nil {
-		return 0, err
-	}
-
-	return lw.bufferedFileWriter.ReadFrom(io.TeeReader(r, lw.resumableDigester))
-}
-
-func (lw *layerWriter) Close() error {
-	if lw.err != nil {
-		return lw.err
-	}
-
-	if lw.resumableDigester != nil {
-		if err := lw.storeHashState(); err != nil {
-			return err
-		}
-	}
-
-	return lw.bufferedFileWriter.Close()
-}
-
-type hashStateEntry struct {
-	offset int64
-	path   string
-}
-
-// getStoredHashStates returns a slice of hashStateEntries for this upload.
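Write and ReadFrom above keep the resumable digester in lockstep with storage by fanning bytes through io.MultiWriter and io.TeeReader respectively. The bare pattern, with sha256 in place of the resumable digester:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	var stored bytes.Buffer // stands in for the driver-backed writer
	h := sha256.New()

	// Write path: every byte goes to storage and the digest together,
	// as in io.MultiWriter(&lw.bufferedFileWriter, lw.resumableDigester).
	mw := io.MultiWriter(&stored, h)
	mw.Write([]byte("layer data, part one; "))

	// ReadFrom path: tee the incoming stream through the digest,
	// as in bufferedFileWriter.ReadFrom(io.TeeReader(r, digester)).
	io.Copy(&stored, io.TeeReader(strings.NewReader("part two"), h))

	fmt.Printf("%d bytes, sha256=%x\n", stored.Len(), h.Sum(nil))
}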
-func (lw *layerWriter) getStoredHashStates() ([]hashStateEntry, error) {
-	uploadHashStatePathPrefix, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{
-		name: lw.layerStore.repository.Name(),
-		uuid: lw.uuid,
-		alg:  lw.resumableDigester.Digest().Algorithm(),
-		list: true,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	paths, err := lw.driver.List(uploadHashStatePathPrefix)
-	if err != nil {
-		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
-			return nil, err
-		}
-		// Treat PathNotFoundError as no entries.
-		paths = nil
-	}
-
-	hashStateEntries := make([]hashStateEntry, 0, len(paths))
-
-	for _, p := range paths {
-		pathSuffix := path.Base(p)
-		// The suffix should be the offset.
-		offset, err := strconv.ParseInt(pathSuffix, 0, 64)
-		if err != nil {
-			logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
-		}
-
-		hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
-	}
-
-	return hashStateEntries, nil
-}
-
-// resumeHashAt attempts to restore the state of the internal hash function
-// by loading the most recent saved hash state less than or equal to the given
-// offset. Any unhashed bytes remaining less than the given offset are hashed
-// from the content uploaded so far.
-func (lw *layerWriter) resumeHashAt(offset int64) error {
-	if offset < 0 {
-		return fmt.Errorf("cannot resume hash at negative offset: %d", offset)
-	}
-
-	if offset == int64(lw.resumableDigester.Len()) {
-		// State of digester is already at the requested offset.
-		return nil
-	}
-
-	// List hash states from storage backend.
-	var hashStateMatch hashStateEntry
-	hashStates, err := lw.getStoredHashStates()
-	if err != nil {
-		return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
-	}
-
-	// Find the highest stored hashState with offset less than or equal to
-	// the requested offset.
-	for _, hashState := range hashStates {
-		if hashState.offset == offset {
-			hashStateMatch = hashState
-			break // Found an exact offset match.
-		} else if hashState.offset < offset && hashState.offset > hashStateMatch.offset {
-			// This offset is closer to the requested offset.
-			hashStateMatch = hashState
-		} else if hashState.offset > offset {
-			// Remove any stored hash state with offsets higher than this one
-			// as writes to this resumed hasher will make those invalid. This
-			// is probably okay to skip for now since we don't expect anyone to
-			// use the API in this way. For that reason, we don't treat an
-			// error here as a fatal error, but only log it.
-			if err := lw.driver.Delete(hashState.path); err != nil {
-				logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
-			}
-		}
-	}
-
-	if hashStateMatch.offset == 0 {
-		// No need to load any state, just reset the hasher.
-		lw.resumableDigester.Reset()
-	} else {
-		storedState, err := lw.driver.GetContent(hashStateMatch.path)
-		if err != nil {
-			return err
-		}
-
-		if err = lw.resumableDigester.Restore(storedState); err != nil {
-			return err
-		}
-	}
-
-	// Mind the gap.
-	if gapLen := offset - int64(lw.resumableDigester.Len()); gapLen > 0 {
-		// Need to read content from the upload to catch up to the desired
-		// offset.
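In isolation, the resumption choice made by the loop above is: keep the saved state with the greatest offset not beyond the target, and discard any state ahead of it. A standalone sketch of that selection (offsets and paths are illustrative); the gap-fill code below then hashes forward from the chosen point:

package main

import "fmt"

type hashState struct {
	offset int64
	path   string
}

// bestState returns the saved state with the greatest offset <= target,
// plus the states that are now stale (ahead of target) and must go.
func bestState(states []hashState, target int64) (match hashState, stale []hashState) {
	for _, s := range states {
		switch {
		case s.offset == target:
			return s, stale // exact hit, same as the break above
		case s.offset < target && s.offset > match.offset:
			match = s // closer to target, still behind it
		case s.offset > target:
			stale = append(stale, s) // invalidated by the resumed write
		}
	}
	return match, stale
}

func main() {
	states := []hashState{{1024, "s/1024"}, {4096, "s/4096"}, {8192, "s/8192"}}
	m, stale := bestState(states, 5000)
	fmt.Println(m.path, len(stale)) // s/4096 1
}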
- fr, err := newFileReader(lw.driver, lw.path) - if err != nil { - return err - } - - if _, err = fr.Seek(int64(lw.resumableDigester.Len()), os.SEEK_SET); err != nil { - return fmt.Errorf("unable to seek to layer reader offset %d: %s", lw.resumableDigester.Len(), err) - } - - if _, err := io.CopyN(lw.resumableDigester, fr, gapLen); err != nil { - return err - } - } - - return nil -} - -func (lw *layerWriter) storeHashState() error { - uploadHashStatePath, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{ - name: lw.layerStore.repository.Name(), - uuid: lw.uuid, - alg: lw.resumableDigester.Digest().Algorithm(), - offset: int64(lw.resumableDigester.Len()), - }) - if err != nil { - return err - } - - hashState, err := lw.resumableDigester.State() - if err != nil { - return err - } - - return lw.driver.PutContent(uploadHashStatePath, hashState) -} - -// validateLayer checks the layer data against the digest, returning an error -// if it does not match. The canonical digest is returned. -func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) { - var ( - verified, fullHash bool - canonical digest.Digest - ) - - if lw.resumableDigester != nil { - // Restore the hasher state to the end of the upload. - if err := lw.resumeHashAt(lw.size); err != nil { - return "", err - } - - canonical = lw.resumableDigester.Digest() - - if canonical.Algorithm() == dgst.Algorithm() { - // Common case: client and server prefer the same canonical digest - // algorithm - currently SHA256. - verified = dgst == canonical - } else { - // The client wants to use a different digest algorithm. They'll just - // have to be patient and wait for us to download and re-hash the - // uploaded content using that digest algorithm. - fullHash = true - } - } else { - // Not using resumable digests, so we need to hash the entire layer. - fullHash = true - } - - if fullHash { - digester := digest.NewCanonicalDigester() - - digestVerifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return "", err - } - - // Read the file from the backend driver and validate it. - fr, err := newFileReader(lw.bufferedFileWriter.driver, lw.path) - if err != nil { - return "", err - } - - tr := io.TeeReader(fr, digester) - - if _, err = io.Copy(digestVerifier, tr); err != nil { - return "", err - } - - canonical = digester.Digest() - verified = digestVerifier.Verified() - } - - if !verified { - return "", distribution.ErrLayerInvalidDigest{ - Digest: dgst, - Reason: fmt.Errorf("content does not match digest"), - } - } - - return canonical, nil -} - -// moveLayer moves the data into its final, hash-qualified destination, -// identified by dgst. The layer should be validated before commencing the -// move. -func (lw *layerWriter) moveLayer(dgst digest.Digest) error { - blobPath, err := lw.layerStore.repository.registry.pm.path(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return err - } - - // Check for existence - if _, err := lw.driver.Stat(blobPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // ensure that it doesn't exist. - default: - return err - } - } else { - // If the path exists, we can assume that the content has already - // been uploaded, since the blob storage is content-addressable. - // While it may be corrupted, detection of such corruption belongs - // elsewhere. - return nil - } - - // If no data was received, we may not actually have a file on disk. 
Check
-	// the size here and write a zero-length file to blobPath if this is the
-	// case. For the most part, this should only ever happen with zero-length
-	// tars.
-	if _, err := lw.driver.Stat(lw.path); err != nil {
-		switch err := err.(type) {
-		case storagedriver.PathNotFoundError:
-			// HACK(stevvooe): This is slightly dangerous: if we verify above,
-			// get a hash, then the underlying file is deleted, we risk moving
-			// a zero-length blob into a nonzero-length blob location. To
-			// prevent this horrid thing, we employ the hack of only allowing
-			// this to happen for the zero tarsum.
-			if dgst == digest.DigestSha256EmptyTar {
-				return lw.driver.PutContent(blobPath, []byte{})
-			}
-
-			// We let this fail during the move below.
-			logrus.
-				WithField("upload.uuid", lw.UUID()).
-				WithField("digest", dgst).Warnf("attempted to move zero-length content with non-zero digest")
-		default:
-			return err // unrelated error
-		}
-	}
-
-	return lw.driver.Move(lw.path, blobPath)
-}
-
-// linkLayer links a valid, written layer blob into the registry under the
-// named repository for the upload controller.
-func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Digest) error {
-	dgsts := append([]digest.Digest{canonical}, aliases...)
-
-	// Don't make duplicate links.
-	seenDigests := make(map[digest.Digest]struct{}, len(dgsts))
-
-	for _, dgst := range dgsts {
-		if _, seen := seenDigests[dgst]; seen {
-			continue
-		}
-		seenDigests[dgst] = struct{}{}
-
-		layerLinkPath, err := lw.layerStore.repository.registry.pm.path(layerLinkPathSpec{
-			name:   lw.layerStore.repository.Name(),
-			digest: dgst,
-		})
-
-		if err != nil {
-			return err
-		}
-
-		if err := lw.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// removeResources should clean up all resources associated with the upload
-// instance. An error will be returned if the clean up cannot proceed. If the
-// resources are already not present, no error will be returned.
-func (lw *layerWriter) removeResources() error {
-	dataPath, err := lw.layerStore.repository.registry.pm.path(uploadDataPathSpec{
-		name: lw.layerStore.repository.Name(),
-		uuid: lw.uuid,
-	})
-
-	if err != nil {
-		return err
-	}
-
-	// Resolve and delete the containing directory, which should include any
-	// upload related files.
-	dirPath := path.Dir(dataPath)
-
-	if err := lw.driver.Delete(dirPath); err != nil {
-		switch err := err.(type) {
-		case storagedriver.PathNotFoundError:
-			break // already gone!
-		default:
-			// This should be uncommon enough such that returning an error
-			// should be okay. At this point, the upload should be mostly
-			// complete, but perhaps the backend became inaccessible.
- logrus.Errorf("unable to delete layer upload resources %q: %v", dirPath, err) - return err - } - } - - return nil -} - -blob -mark :193 -data 98 -// +build noresumabledigest - -package storage - -func (lw *layerWriter) setupResumableDigester() { -} - -blob -mark :194 -data 210 -// +build !noresumabledigest - -package storage - -import "github.com/docker/distribution/digest" - -func (lw *layerWriter) setupResumableDigester() { - lw.resumableDigester = digest.NewCanonicalResumableDigester() -} - -blob -mark :195 -data 3891 -package storage - -import ( - "fmt" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -type manifestStore struct { - repository *repository - - revisionStore *revisionStore - tagStore *tagStore -} - -var _ distribution.ManifestService = &manifestStore{} - -func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Exists") - return ms.revisionStore.exists(dgst) -} - -func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Get") - return ms.revisionStore.get(dgst) -} - -func (ms *manifestStore) Put(ctx context.Context, manifest *manifest.SignedManifest) error { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Put") - - // TODO(stevvooe): Add check here to see if the revision is already - // present in the repository. If it is, we should merge the signatures, do - // a shallow verify (or a full one, doesn't matter) and return an error - // indicating what happened. - - // Verify the manifest. - if err := ms.verifyManifest(manifest); err != nil { - return err - } - - // Store the revision of the manifest - revision, err := ms.revisionStore.put(manifest) - if err != nil { - return err - } - - // Now, tag the manifest - return ms.tagStore.tag(manifest.Tag, revision) -} - -// Delete removes the revision of the specified manfiest. -func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Delete - unsupported") - return fmt.Errorf("deletion of manifests not supported") -} - -func (ms *manifestStore) Tags(ctx context.Context) ([]string, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Tags") - return ms.tagStore.tags() -} - -func (ms *manifestStore) ExistsByTag(ctx context.Context, tag string) (bool, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).ExistsByTag") - return ms.tagStore.exists(tag) -} - -func (ms *manifestStore) GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).GetByTag") - dgst, err := ms.tagStore.resolve(tag) - if err != nil { - return nil, err - } - - return ms.revisionStore.get(dgst) -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. 
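The same store-only-valid-content policy drives validateLayer earlier in this patch: recompute the digest from the bytes that actually landed and refuse the commit on mismatch. Stripped to its core, with the digest format simplified to a plain sha256 hex string:

package main

import (
	"crypto/sha256"
	"fmt"
)

// validate recomputes the content digest and compares it to the digest the
// client claimed, the same shape as layerWriter.validateLayer.
func validate(content []byte, claimed string) error {
	canonical := fmt.Sprintf("sha256:%x", sha256.Sum256(content))
	if canonical != claimed {
		return fmt.Errorf("content does not match digest: %s != %s", canonical, claimed)
	}
	return nil
}

func main() {
	content := []byte("layer bytes")
	good := fmt.Sprintf("sha256:%x", sha256.Sum256(content))
	fmt.Println(validate(content, good))          // <nil>
	fmt.Println(validate(content, "sha256:feed")) // content does not match digest: ...
}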
-func (ms *manifestStore) verifyManifest(mnfst *manifest.SignedManifest) error { - var errs distribution.ErrManifestVerification - if mnfst.Name != ms.repository.Name() { - // TODO(stevvooe): This needs to be an exported error - errs = append(errs, fmt.Errorf("repository name does not match manifest name")) - } - - if _, err := manifest.Verify(mnfst); err != nil { - switch err { - case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: - errs = append(errs, distribution.ErrManifestUnverified{}) - default: - if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, distribution.ErrManifestUnverified{}) - } else { - errs = append(errs, err) - } - } - } - - for _, fsLayer := range mnfst.FSLayers { - exists, err := ms.repository.Layers().Exists(fsLayer.BlobSum) - if err != nil { - errs = append(errs, err) - } - - if !exists { - errs = append(errs, distribution.ErrUnknownLayer{FSLayer: fsLayer}) - } - } - - if len(errs) != 0 { - // TODO(stevvooe): These need to be recoverable by a caller. - return errs - } - - return nil -} - -blob -mark :196 -data 7590 -package storage - -import ( - "bytes" - "io" - "reflect" - "testing" - - "github.com/docker/distribution/registry/storage/cache" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -type manifestStoreTestEnv struct { - ctx context.Context - driver driver.StorageDriver - registry distribution.Namespace - repository distribution.Repository - name string - tag string -} - -func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { - ctx := context.Background() - driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) - - repo, err := registry.Repository(ctx, name) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - - return &manifestStoreTestEnv{ - ctx: ctx, - driver: driver, - registry: registry, - repository: repo, - name: name, - tag: tag, - } -} - -func TestManifestStorage(t *testing.T) { - env := newManifestStoreTestEnv(t, "foo/bar", "thetag") - ms := env.repository.Manifests() - - exists, err := ms.ExistsByTag(env.ctx, env.tag) - if err != nil { - t.Fatalf("unexpected error checking manifest existence: %v", err) - } - - if exists { - t.Fatalf("manifest should not exist") - } - - if _, err := ms.GetByTag(env.ctx, env.tag); true { - switch err.(type) { - case distribution.ErrManifestUnknown: - break - default: - t.Fatalf("expected manifest unknown error: %#v", err) - } - } - - m := manifest.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: env.name, - Tag: env.tag, - } - - // Build up some test layers and add them to the manifest, saving the - // readseekers for upload later. 
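verifyManifest above collects every failure before reporting, rather than bailing on the first. A self-contained sketch of that error-accumulation shape (the checks and messages are illustrative):

package main

import (
	"fmt"
	"strings"
)

// verificationErrors collects every problem found rather than failing on the
// first, the way verifyManifest accumulates into ErrManifestVerification.
type verificationErrors []error

func (ve verificationErrors) Error() string {
	msgs := make([]string, 0, len(ve))
	for _, err := range ve {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func verify(name, want string, layersPresent bool) error {
	var errs verificationErrors
	if name != want {
		errs = append(errs, fmt.Errorf("repository name does not match manifest name"))
	}
	if !layersPresent {
		errs = append(errs, fmt.Errorf("unknown layer referenced"))
	}
	if len(errs) != 0 {
		return errs
	}
	return nil
}

func main() {
	fmt.Println(verify("foo/bar", "foo/baz", false))
	// repository name does not match manifest name; unknown layer referenced
}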
- testLayers := map[digest.Digest]io.ReadSeeker{}
- for i := 0; i < 2; i++ {
- rs, ds, err := testutil.CreateRandomTarFile()
- if err != nil {
- t.Fatalf("unexpected error generating test layer file")
- }
- dgst := digest.Digest(ds)
-
- testLayers[digest.Digest(dgst)] = rs
- m.FSLayers = append(m.FSLayers, manifest.FSLayer{
- BlobSum: dgst,
- })
- }
-
- pk, err := libtrust.GenerateECP256PrivateKey()
- if err != nil {
- t.Fatalf("unexpected error generating private key: %v", err)
- }
-
- sm, err := manifest.Sign(&m, pk)
- if err != nil {
- t.Fatalf("error signing manifest: %v", err)
- }
-
- err = ms.Put(env.ctx, sm)
- if err == nil {
- t.Fatalf("expected errors putting manifest")
- }
-
- // TODO(stevvooe): We expect errors describing all of the missing layers.
-
- // Now, upload the layers that were missing!
- for dgst, rs := range testLayers {
- upload, err := env.repository.Layers().Upload()
- if err != nil {
- t.Fatalf("unexpected error creating test upload: %v", err)
- }
-
- if _, err := io.Copy(upload, rs); err != nil {
- t.Fatalf("unexpected error copying to upload: %v", err)
- }
-
- if _, err := upload.Finish(dgst); err != nil {
- t.Fatalf("unexpected error finishing upload: %v", err)
- }
- }
-
- if err = ms.Put(env.ctx, sm); err != nil {
- t.Fatalf("unexpected error putting manifest: %v", err)
- }
-
- exists, err = ms.ExistsByTag(env.ctx, env.tag)
- if err != nil {
- t.Fatalf("unexpected error checking manifest existence: %v", err)
- }
-
- if !exists {
- t.Fatalf("manifest should exist")
- }
-
- fetchedManifest, err := ms.GetByTag(env.ctx, env.tag)
- if err != nil {
- t.Fatalf("unexpected error fetching manifest: %v", err)
- }
-
- if !reflect.DeepEqual(fetchedManifest, sm) {
- t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm)
- }
-
- fetchedJWS, err := libtrust.ParsePrettySignature(fetchedManifest.Raw, "signatures")
- if err != nil {
- t.Fatalf("unexpected error parsing jws: %v", err)
- }
-
- payload, err := fetchedJWS.Payload()
- if err != nil {
- t.Fatalf("unexpected error extracting payload: %v", err)
- }
-
- // Now that we have a payload, take a moment to check that the manifest is
- // returned by the payload digest.
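// (Sketch, not part of the vendored test:) the revision digest is simply the
// digest of the JWS payload, so the same manifest is addressable by tag or by
// content, assuming ms and payload as above:
//
//	dgst, _ := digest.FromBytes(payload) // canonical revision digest
//	byDigest, _ := ms.Get(ctx, dgst)     // equals ms.GetByTag(ctx, "thetag")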
- dgst, err := digest.FromBytes(payload)
- if err != nil {
- t.Fatalf("error getting manifest digest: %v", err)
- }
-
- exists, err = ms.Exists(env.ctx, dgst)
- if err != nil {
- t.Fatalf("error checking manifest existence by digest: %v", err)
- }
-
- if !exists {
- t.Fatalf("manifest %s should exist", dgst)
- }
-
- fetchedByDigest, err := ms.Get(env.ctx, dgst)
- if err != nil {
- t.Fatalf("unexpected error fetching manifest by digest: %v", err)
- }
-
- if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) {
- t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest)
- }
-
- sigs, err := fetchedJWS.Signatures()
- if err != nil {
- t.Fatalf("unable to extract signatures: %v", err)
- }
-
- if len(sigs) != 1 {
- t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1)
- }
-
- // Grab the tags and check that this tagged manifest is present
- tags, err := ms.Tags(env.ctx)
- if err != nil {
- t.Fatalf("unexpected error fetching tags: %v", err)
- }
-
- if len(tags) != 1 {
- t.Fatalf("unexpected tags returned: %v", tags)
- }
-
- if tags[0] != env.tag {
- t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{env.tag})
- }
-
- // Now, push the same manifest with a different key
- pk2, err := libtrust.GenerateECP256PrivateKey()
- if err != nil {
- t.Fatalf("unexpected error generating private key: %v", err)
- }
-
- sm2, err := manifest.Sign(&m, pk2)
- if err != nil {
- t.Fatalf("unexpected error signing manifest: %v", err)
- }
-
- jws2, err := libtrust.ParsePrettySignature(sm2.Raw, "signatures")
- if err != nil {
- t.Fatalf("error parsing signature: %v", err)
- }
-
- sigs2, err := jws2.Signatures()
- if err != nil {
- t.Fatalf("unable to extract signatures: %v", err)
- }
-
- if len(sigs2) != 1 {
- t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1)
- }
-
- if err = ms.Put(env.ctx, sm2); err != nil {
- t.Fatalf("unexpected error putting manifest: %v", err)
- }
-
- fetched, err := ms.GetByTag(env.ctx, env.tag)
- if err != nil {
- t.Fatalf("unexpected error fetching manifest: %v", err)
- }
-
- if _, err := manifest.Verify(fetched); err != nil {
- t.Fatalf("unexpected error verifying manifest: %v", err)
- }
-
- // Assemble our payload and two signatures to get what we expect!
- expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0])
- if err != nil {
- t.Fatalf("unexpected error merging jws: %v", err)
- }
-
- expectedSigs, err := expectedJWS.Signatures()
- if err != nil {
- t.Fatalf("unexpected error getting expected signatures: %v", err)
- }
-
- receivedJWS, err := libtrust.ParsePrettySignature(fetched.Raw, "signatures")
- if err != nil {
- t.Fatalf("unexpected error parsing jws: %v", err)
- }
-
- receivedPayload, err := receivedJWS.Payload()
- if err != nil {
- t.Fatalf("unexpected error extracting received payload: %v", err)
- }
-
- if !bytes.Equal(receivedPayload, payload) {
- t.Fatalf("payloads are not equal")
- }
-
- receivedSigs, err := receivedJWS.Signatures()
- if err != nil {
- t.Fatalf("error getting signatures: %v", err)
- }
-
- for i, sig := range receivedSigs {
- if !bytes.Equal(sig, expectedSigs[i]) {
- t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i]))
- }
- }
-
- // TODO(stevvooe): Currently, deletes are not supported due to some
- // complexity around managing tag indexes. We'll add this support back in
- // when the manifest format has settled. For now, we expect an error for
- // all deletes.
- if err := ms.Delete(env.ctx, dgst); err == nil {
- t.Fatalf("expected an error deleting manifest by digest")
- }
-}
-
-blob
-mark :197
-data 16056
-package storage
-
-import (
- "fmt"
- "path"
- "strings"
-
- "github.com/docker/distribution/digest"
-)
-
-const storagePathVersion = "v2"
-
-// pathMapper maps paths based on "object names" and their ids. The "object
-// names" mapped by pathMapper are internal to the storage system.
-//
-// The path layout in the storage backend is roughly as follows:
-//
-// <root>/v2
-// -> repositories/
-// -> <name>/
-// -> _manifests/
-// revisions
-// -> <manifest digest path>
-// -> link
-// -> signatures
-// <algorithm>/<digest>/link
-// tags/<tag>
-// -> current/link
-// -> index
-// -> <algorithm>/<hex digest>/link
-// -> _layers/
-// <layer links to blob store>
-// -> _uploads/<uuid>
-// data
-// startedat
-// hashstates/<algorithm>/<offset>
-// -> blob/<algorithm>
-// <split directory content addressable storage>
-//
-// The storage backend layout is broken up into a content-addressable blob
-// store and repositories. The content-addressable blob store holds most data
-// throughout the backend, keyed by algorithm and digests of the underlying
-// content. Access to the blob store is controlled through links from the
-// repository to blobstore.
-//
-// A repository is made up of layers, manifests and tags. The layers component
-// is just a directory of layers which are "linked" into a repository. A layer
-// can only be accessed through a qualified repository name if it is linked in
-// the repository. Uploads of layers are managed in the uploads directory,
-// which is keyed by upload uuid. When all data for an upload is received, the
-// data is moved into the blob store and the upload directory is deleted.
-// Abandoned uploads can be garbage collected by reading the startedat file
-// and removing uploads that have been active for longer than a certain time.
-//
-// The third component of the repository directory is the manifests store,
-// which is made up of a revision store and tag store. Manifests are stored in
-// the blob store and linked into the revision store. Signatures are separated
-// from the manifest payload data and linked into the blob store, as well.
-// While the registry can save all revisions of a manifest, no relationship is
-// implied as to the ordering of changes to a manifest. The tag store provides
-// support for name, tag lookups of manifests, using "current/link" under a
-// named tag directory. An index is maintained to support deletions of all
-// revisions of a given manifest tag.
-//
-// We cover the path formats implemented by this path mapper below.
-//
-// Manifests:
-//
-// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
-// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
-// manifestSignaturesPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/
-// manifestSignatureLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/<algorithm>/<hex digest>/link
-//
-// Tags:
-//
-// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/
-// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/
-// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
-// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
-// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
-// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
-//
-// Layers:
-//
-// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/tarsum/<tarsum version>/<tarsum hash alg>/<tarsum hash>/link
-//
-// Uploads:
-//
-// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<uuid>/data
-// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<uuid>/startedat
-// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<uuid>/hashstates/<algorithm>/<offset>
-//
-// Blob Store:
-//
-// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
-// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
-//
-// For more information on the semantic meaning of each path and their
-// contents, please see the path spec documentation.
-type pathMapper struct {
- root string
- version string // should be a constant?
-}
-
-var defaultPathMapper = &pathMapper{
- root: "/docker/registry/",
- version: storagePathVersion,
-}
-
-// path returns the path identified by spec.
-func (pm *pathMapper) path(spec pathSpec) (string, error) {
-
- // Switch on the path object type and return the appropriate path. At
- // first glance, one may wonder why we don't use an interface to
- // accomplish this. By keeping the formatting separate from the pathSpec,
- // we keep the path generation componentized. These specs could be
- // passed to a completely different mapper implementation and generate a
- // different set of paths.
- //
- // For example, imagine migrating from one backend to the other: one could
- // build a filesystem walker that converts a string path in one version,
- // to an intermediate path object, that can be consumed and mapped by the
- // other version.
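// (Sketch, not part of the vendored file:) with defaultPathMapper, which is
// rooted at "/docker/registry/" with version "v2", a hypothetical
// uploadDataPathSpec{name: "foo/bar", uuid: "abc"} resolves to:
//
//	/docker/registry/v2/repositories/foo/bar/_uploads/abc/data
//
// and a tarsum digest has its "+" and "." separators replaced with "/" by
// blobAlgorithmReplacer before its components are joined into the path.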
- - rootPrefix := []string{pm.root, pm.version} - repoPrefix := append(rootPrefix, "repositories") - - switch v := spec.(type) { - - case manifestRevisionPathSpec: - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil - case manifestRevisionLinkPathSpec: - root, err := pm.path(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestSignaturesPathSpec: - root, err := pm.path(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "signatures"), nil - case manifestSignatureLinkPathSpec: - root, err := pm.path(manifestSignaturesPathSpec{ - name: v.name, - revision: v.revision, - }) - if err != nil { - return "", err - } - - signatureComponents, err := digestPathComponents(v.signature, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil - case manifestTagsPathSpec: - return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil - case manifestTagPathSpec: - root, err := pm.path(manifestTagsPathSpec{ - name: v.name, - }) - if err != nil { - return "", err - } - - return path.Join(root, v.tag), nil - case manifestTagCurrentPathSpec: - root, err := pm.path(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - if err != nil { - return "", err - } - - return path.Join(root, "current", "link"), nil - case manifestTagIndexPathSpec: - root, err := pm.path(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - if err != nil { - return "", err - } - - return path.Join(root, "index"), nil - case manifestTagIndexEntryLinkPathSpec: - root, err := pm.path(manifestTagIndexEntryPathSpec{ - name: v.name, - tag: v.tag, - revision: v.revision, - }) - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestTagIndexEntryPathSpec: - root, err := pm.path(manifestTagIndexPathSpec{ - name: v.name, - tag: v.tag, - }) - if err != nil { - return "", err - } - - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(components...)), nil - case layerLinkPathSpec: - components, err := digestPathComponents(v.digest, false) - if err != nil { - return "", err - } - - layerLinkPathComponents := append(repoPrefix, v.name, "_layers") - - return path.Join(path.Join(append(layerLinkPathComponents, components...)...), "link"), nil - case blobDataPathSpec: - components, err := digestPathComponents(v.digest, true) - if err != nil { - return "", err - } - - components = append(components, "data") - blobPathPrefix := append(rootPrefix, "blobs") - return path.Join(append(blobPathPrefix, components...)...), nil - - case uploadDataPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "data")...), nil - case uploadStartedAtPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "startedat")...), nil - case uploadHashStatePathSpec: - offset := fmt.Sprintf("%d", v.offset) - if v.list { - offset = "" // Limit to the prefix for listing offsets. 
- }
- return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "hashstates", v.alg, offset)...), nil
- case repositoriesRootPathSpec:
- return path.Join(repoPrefix...), nil
- default:
- // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
- return "", fmt.Errorf("unknown path spec: %#v", v)
- }
-}
-
-// pathSpec is a type to mark structs as path specs. There is no
-// implementation because we'd like to keep the specs and the mappers
-// decoupled.
-type pathSpec interface {
- pathSpec()
-}
-
-// manifestRevisionPathSpec describes the components of the directory path for
-// a manifest revision.
-type manifestRevisionPathSpec struct {
- name string
- revision digest.Digest
-}
-
-func (manifestRevisionPathSpec) pathSpec() {}
-
-// manifestRevisionLinkPathSpec describes the path components required to look
-// up the data link for a revision of a manifest. If this file is not present,
-// the manifest blob is not available in the given repo. The contents of this
-// file should just be the digest.
-type manifestRevisionLinkPathSpec struct {
- name string
- revision digest.Digest
-}
-
-func (manifestRevisionLinkPathSpec) pathSpec() {}
-
-// manifestSignaturesPathSpec describes the path components for the directory
-// containing all the signatures for the target blob. Entries are named with
-// the underlying key id.
-type manifestSignaturesPathSpec struct {
- name string
- revision digest.Digest
-}
-
-func (manifestSignaturesPathSpec) pathSpec() {}
-
-// manifestSignatureLinkPathSpec describes the path components used to look up
-// a signature file by the hash of its blob.
-type manifestSignatureLinkPathSpec struct {
- name string
- revision digest.Digest
- signature digest.Digest
-}
-
-func (manifestSignatureLinkPathSpec) pathSpec() {}
-
-// manifestTagsPathSpec describes the path elements required to point to the
-// manifest tags directory.
-type manifestTagsPathSpec struct {
- name string
-}
-
-func (manifestTagsPathSpec) pathSpec() {}
-
-// manifestTagPathSpec describes the path elements required to point to the
-// manifest tag link files under a repository. These contain a blob id that
-// can be used to look up the data and signatures.
-type manifestTagPathSpec struct {
- name string
- tag string
-}
-
-func (manifestTagPathSpec) pathSpec() {}
-
-// manifestTagCurrentPathSpec describes the link to the current revision for a
-// given tag.
-type manifestTagCurrentPathSpec struct {
- name string
- tag string
-}
-
-func (manifestTagCurrentPathSpec) pathSpec() {}
-
-// manifestTagIndexPathSpec describes the link to the index of revisions
-// with the given tag.
-type manifestTagIndexPathSpec struct {
- name string
- tag string
-}
-
-func (manifestTagIndexPathSpec) pathSpec() {}
-
-// manifestTagIndexEntryPathSpec contains the entries of the index by revision.
-type manifestTagIndexEntryPathSpec struct {
- name string
- tag string
- revision digest.Digest
-}
-
-func (manifestTagIndexEntryPathSpec) pathSpec() {}
-
-// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a
-// manifest with the given tag within the index.
-type manifestTagIndexEntryLinkPathSpec struct {
- name string
- tag string
- revision digest.Digest
-}
-
-func (manifestTagIndexEntryLinkPathSpec) pathSpec() {}
-
-// layerLink specifies a path for a layer link, which is a file with a blob
-// id. The layer link will contain a content addressable blob id reference
-// into the blob store. The format of the contents is as follows:
-//
-// <algorithm>:<hex digest>
-//
-// The following example of the file contents is more illustrative:
-//
-// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
-//
-// This indicates that there is a blob with the id/digest, calculated via
-// sha256 that can be fetched from the blob store.
-type layerLinkPathSpec struct {
- name string
- digest digest.Digest
-}
-
-func (layerLinkPathSpec) pathSpec() {}
-
-// blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
-// should be "safe" before getting this far due to strict digest requirements
-// but we can add further path conversion here, if needed.
-var blobAlgorithmReplacer = strings.NewReplacer(
- "+", "/",
- ".", "/",
- ";", "/",
-)
-
-// // blobPathSpec contains the path for the registry global blob store.
-// type blobPathSpec struct {
-// digest digest.Digest
-// }
-
-// func (blobPathSpec) pathSpec() {}
-
-// blobDataPathSpec contains the path for the registry global blob store. For
-// now, this contains layer data, exclusively.
-type blobDataPathSpec struct {
- digest digest.Digest
-}
-
-func (blobDataPathSpec) pathSpec() {}
-
-// uploadDataPathSpec defines the path parameters of the data file for
-// uploads.
-type uploadDataPathSpec struct {
- name string
- uuid string
-}
-
-func (uploadDataPathSpec) pathSpec() {}
-
-// uploadStartedAtPathSpec defines the path parameters for the file that stores
-// the start time of an upload. If it is missing, the upload is considered
-// unknown. Admittedly, the presence of this file is an ugly hack to make sure
-// we have a way to cleanup old or stalled uploads that doesn't rely on driver
-// FileInfo behavior. If we come up with a more clever way to do this, we
-// should remove this file immediately and rely on the startedAt field from
-// the client to enforce time out policies.
-type uploadStartedAtPathSpec struct {
- name string
- uuid string
-}
-
-func (uploadStartedAtPathSpec) pathSpec() {}
-
-// uploadHashStatePathSpec defines the path parameters for the file that stores
-// the hash function state of an upload at a specific byte offset. If `list` is
-// set, then the path mapper will generate a list prefix for all hash state
-// offsets for the upload identified by the name, uuid, and alg.
-type uploadHashStatePathSpec struct {
- name string
- uuid string
- alg string
- offset int64
- list bool
-}
-
-func (uploadHashStatePathSpec) pathSpec() {}
-
-// repositoriesRootPathSpec returns the root of repositories
-type repositoriesRootPathSpec struct {
-}
-
-func (repositoriesRootPathSpec) pathSpec() {}
-
-// digestPathComponents provides a consistent path breakdown for a given
-// digest. For a generic digest, it will be as follows:
-//
-// <algorithm>/<hex digest>
-//
-// Most importantly, for tarsum, the layout looks like this:
-//
-// tarsum/<tarsum version>/<tarsum hash alg>/<full digest>
-//
-// If multilevel is true, the first two bytes of the digest will separate
-// groups of digest folder. It will be as follows:
-//
-// <algorithm>/<first two bytes of digest>/<full digest>
-//
-func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
- if err := dgst.Validate(); err != nil {
- return nil, err
- }
-
- algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
- hex := dgst.Hex()
- prefix := []string{algorithm}
-
- var suffix []string
-
- if multilevel {
- suffix = append(suffix, hex[:2])
- }
-
- suffix = append(suffix, hex)
-
- if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
- // We have a tarsum!
- version := tsi.Version - if version == "" { - version = "v0" - } - - prefix = []string{ - "tarsum", - version, - tsi.Algorithm, - } - } - - return append(prefix, suffix...), nil -} - -blob -mark :198 -data 3980 -package storage - -import ( - "testing" - - "github.com/docker/distribution/digest" -) - -func TestPathMapper(t *testing.T) { - pm := &pathMapper{ - root: "/pathmapper-test", - } - - for _, testcase := range []struct { - spec pathSpec - expected string - err error - }{ - { - spec: manifestRevisionPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", - }, - { - spec: manifestRevisionLinkPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", - }, - { - spec: manifestSignatureLinkPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789", - signature: "sha256:abcdef0123456789", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", - }, - { - spec: manifestSignaturesPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", - }, - { - spec: manifestTagsPathSpec{ - name: "foo/bar", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags", - }, - { - spec: manifestTagPathSpec{ - name: "foo/bar", - tag: "thetag", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag", - }, - { - spec: manifestTagCurrentPathSpec{ - name: "foo/bar", - tag: "thetag", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/current/link", - }, - { - spec: manifestTagIndexPathSpec{ - name: "foo/bar", - tag: "thetag", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index", - }, - { - spec: manifestTagIndexEntryPathSpec{ - name: "foo/bar", - tag: "thetag", - revision: "sha256:abcdef0123456789", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789", - }, - { - spec: manifestTagIndexEntryLinkPathSpec{ - name: "foo/bar", - tag: "thetag", - revision: "sha256:abcdef0123456789", - }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", - }, - { - spec: layerLinkPathSpec{ - name: "foo/bar", - digest: "tarsum.v1+test:abcdef", - }, - expected: "/pathmapper-test/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", - }, - { - spec: blobDataPathSpec{ - digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"), - }, - expected: "/pathmapper-test/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", - }, - { - spec: blobDataPathSpec{ - digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"), - }, - expected: "/pathmapper-test/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", - }, - - { - spec: uploadDataPathSpec{ - name: "foo/bar", - uuid: "asdf-asdf-asdf-adsf", - }, - expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", - }, - { - spec: uploadStartedAtPathSpec{ - name: "foo/bar", - uuid: "asdf-asdf-asdf-adsf", - }, - expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", - }, - } { - p, err := pm.path(testcase.spec) - if err != nil { - 
t.Fatalf("unexpected error generating path (%T): %v", testcase.spec, err)
- }
-
- if p != testcase.expected {
- t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected)
- }
- }
-
- // Add a few test cases to ensure we cover some errors
-
- // Specify a path that requires a revision and get a digest validation error.
- badpath, err := pm.path(manifestSignaturesPathSpec{
- name: "foo/bar",
- })
- if err == nil {
- t.Fatalf("expected an error when mapping an invalid revision: %s", badpath)
- }
-
-}
-
-blob
-mark :199
-data 3891
-package storage
-
-import (
- "path"
- "strings"
- "time"
-
- "code.google.com/p/go-uuid/uuid"
- log "github.com/Sirupsen/logrus"
- storageDriver "github.com/docker/distribution/registry/storage/driver"
-)
-
-// uploadData stores the location of temporary files created during a layer
-// upload along with the date the upload was started.
-type uploadData struct {
- containingDir string
- startedAt time.Time
-}
-
-func newUploadData() uploadData {
- return uploadData{
- containingDir: "",
- // default to far in future to protect against missing startedat
- startedAt: time.Now().Add(time.Duration(10000 * time.Hour)),
- }
-}
-
-// PurgeUploads deletes files from the upload directory
-// created before olderThan. The list of files deleted and errors
-// encountered are returned.
-func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) {
- log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete)
- uploadData, errors := getOutstandingUploads(driver)
- var deleted []string
- for _, uploadData := range uploadData {
- if uploadData.startedAt.Before(olderThan) {
- var err error
- log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.",
- uploadData.containingDir, uploadData.startedAt, olderThan)
- if actuallyDelete {
- err = driver.Delete(uploadData.containingDir)
- }
- if err == nil {
- deleted = append(deleted, uploadData.containingDir)
- } else {
- errors = append(errors, err)
- }
- }
- }
-
- log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors))
- return deleted, errors
-}
-
-// getOutstandingUploads walks the upload directory, collecting files
-// which could be eligible for deletion. The only reliable way to
-// classify the age of a file is with the date stored in the startedAt
-// file, so gather files by UUID with a date from startedAt.
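// (Sketch, not part of the vendored file:) for a single outstanding upload at
// .../repositories/foo/bar/_uploads/<uuid>/, the walk below is expected to
// yield one entry keyed by that UUID:
//
//	uploads[uuid] = uploadData{
//		containingDir: ".../repositories/foo/bar/_uploads/" + uuid,
//		startedAt:     t, // parsed (RFC 3339) from the startedat file
//	}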
-func getOutstandingUploads(driver storageDriver.StorageDriver) (map[string]uploadData, []error) {
- var errors []error
- uploads := make(map[string]uploadData)
-
- inUploadDir := false
- root, err := defaultPathMapper.path(repositoriesRootPathSpec{})
- if err != nil {
- return uploads, append(errors, err)
- }
- err = Walk(driver, root, func(fileInfo storageDriver.FileInfo) error {
- filePath := fileInfo.Path()
- _, file := path.Split(filePath)
- if file[0] == '_' {
- // Reserved directory
- inUploadDir = (file == "_uploads")
-
- if fileInfo.IsDir() && !inUploadDir {
- return ErrSkipDir
- }
-
- }
-
- uuid, isContainingDir := uUIDFromPath(filePath)
- if uuid == "" {
- // Cannot reliably delete
- return nil
- }
- ud, ok := uploads[uuid]
- if !ok {
- ud = newUploadData()
- }
- if isContainingDir {
- ud.containingDir = filePath
- }
- if file == "startedat" {
- if t, err := readStartedAtFile(driver, filePath); err == nil {
- ud.startedAt = t
- } else {
- errors = pushError(errors, filePath, err)
- }
-
- }
-
- uploads[uuid] = ud
- return nil
- })
-
- if err != nil {
- errors = pushError(errors, root, err)
- }
- return uploads, errors
-}
-
-// uUIDFromPath extracts the upload UUID from a given path
-// If the UUID is the last path component, this is the containing
-// directory for all upload files
-func uUIDFromPath(path string) (string, bool) {
- components := strings.Split(path, "/")
- for i := len(components) - 1; i >= 0; i-- {
- if uuid := uuid.Parse(components[i]); uuid != nil {
- return uuid.String(), i == len(components)-1
- }
- }
- return "", false
-}
-
-// readStartedAtFile reads the date from an upload's startedAtFile
-func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) {
- startedAtBytes, err := driver.GetContent(path)
- if err != nil {
- return time.Now(), err
- }
- startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
- if err != nil {
- return time.Now(), err
- }
- return startedAt, nil
-}
-
-blob
-mark :200
-data 4562
-package storage
-
-import (
- "path"
- "strings"
- "testing"
- "time"
-
- "code.google.com/p/go-uuid/uuid"
- "github.com/docker/distribution/registry/storage/driver"
- "github.com/docker/distribution/registry/storage/driver/inmemory"
-)
-
-var pm = defaultPathMapper
-
-func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) driver.StorageDriver {
- d := inmemory.New()
- for i := 0; i < numUploads; i++ {
- addUploads(t, d, uuid.New(), repoName, startedAt)
- }
- return d
-}
-
-func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) {
- dataPath, err := pm.path(uploadDataPathSpec{name: repo, uuid: uploadID})
- if err != nil {
- t.Fatalf("Unable to resolve path")
- }
- if err := d.PutContent(dataPath, []byte("")); err != nil {
- t.Fatalf("Unable to write data file")
- }
-
- startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, uuid: uploadID})
- if err != nil {
- t.Fatalf("Unable to resolve path")
- }
-
- if err := d.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
- t.Fatalf("Unable to write startedAt file")
- }
-
-}
-
-func TestPurgeGather(t *testing.T) {
- uploadCount := 5
- fs := testUploadFS(t, uploadCount, "test-repo", time.Now())
- uploadData, errs := getOutstandingUploads(fs)
- if len(errs) != 0 {
- t.Errorf("Unexpected errors: %q", errs)
- }
- if len(uploadData) != uploadCount {
- t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData))
- }
-}
-
-func TestPurgeNone(t
*testing.T) { - fs := testUploadFS(t, 10, "test-repo", time.Now()) - oneHourAgo := time.Now().Add(-1 * time.Hour) - deleted, errs := PurgeUploads(fs, oneHourAgo, true) - if len(errs) != 0 { - t.Error("Unexpected errors", errs) - } - if len(deleted) != 0 { - t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo) - } -} - -func TestPurgeAll(t *testing.T) { - uploadCount := 10 - oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) - - // Ensure > 1 repos are purged - addUploads(t, fs, uuid.New(), "test-repo2", oneHourAgo) - uploadCount++ - - deleted, errs := PurgeUploads(fs, time.Now(), true) - if len(errs) != 0 { - t.Error("Unexpected errors:", errs) - } - fileCount := uploadCount - if len(deleted) != fileCount { - t.Errorf("Unexpectedly deleted file count %d != %d", - len(deleted), fileCount) - } -} - -func TestPurgeSome(t *testing.T) { - oldUploadCount := 5 - oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) - - newUploadCount := 4 - - for i := 0; i < newUploadCount; i++ { - addUploads(t, fs, uuid.New(), "test-repo", time.Now().Add(1*time.Hour)) - } - - deleted, errs := PurgeUploads(fs, time.Now(), true) - if len(errs) != 0 { - t.Error("Unexpected errors:", errs) - } - if len(deleted) != oldUploadCount { - t.Errorf("Unexpectedly deleted file count %d != %d", - len(deleted), oldUploadCount) - } -} - -func TestPurgeOnlyUploads(t *testing.T) { - oldUploadCount := 5 - oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) - - // Create a directory tree outside _uploads and ensure - // these files aren't deleted. - dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", uuid: uuid.New()}) - if err != nil { - t.Fatalf(err.Error()) - } - nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) - if strings.Index(nonUploadPath, "_upload") != -1 { - t.Fatalf("Non-upload path not created correctly") - } - - nonUploadFile := path.Join(nonUploadPath, "file") - if err = fs.PutContent(nonUploadFile, []byte("")); err != nil { - t.Fatalf("Unable to write data file") - } - - deleted, errs := PurgeUploads(fs, time.Now(), true) - if len(errs) != 0 { - t.Error("Unexpected errors", errs) - } - for _, file := range deleted { - if strings.Index(file, "_upload") == -1 { - t.Errorf("Non-upload file deleted") - } - } -} - -func TestPurgeMissingStartedAt(t *testing.T) { - oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, 1, "test-repo", oneHourAgo) - err := Walk(fs, "/", func(fileInfo driver.FileInfo) error { - filePath := fileInfo.Path() - _, file := path.Split(filePath) - - if file == "startedat" { - if err := fs.Delete(filePath); err != nil { - t.Fatalf("Unable to delete startedat file: %s", filePath) - } - } - return nil - }) - if err != nil { - t.Fatalf("Unexpected error during Walk: %s ", err.Error()) - } - deleted, errs := PurgeUploads(fs, time.Now(), true) - if len(errs) > 0 { - t.Errorf("Unexpected errors") - } - if len(deleted) > 0 { - t.Errorf("Files unexpectedly deleted: %s", deleted) - } -} - -blob -mark :201 -data 3600 -package storage - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" -) - -// registry is the top-level implementation of Registry for use in the 
storage
-// package. All instances should descend from this object.
-type registry struct {
- driver storagedriver.StorageDriver
- pm *pathMapper
- blobStore *blobStore
- layerInfoCache cache.LayerInfoCache
-}
-
-// NewRegistryWithDriver creates a new registry instance from the provided
-// driver. The resulting registry may be shared by multiple goroutines but is
-// cheap to allocate.
-func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Namespace {
- bs := &blobStore{
- driver: driver,
- pm: defaultPathMapper,
- }
-
- return &registry{
- driver: driver,
- blobStore: bs,
-
- // TODO(sday): This should be configurable.
- pm: defaultPathMapper,
- layerInfoCache: layerInfoCache,
- }
-}
-
-// Scope returns the namespace scope for a registry. The registry
-// will only serve repositories contained within this scope.
-func (reg *registry) Scope() distribution.Scope {
- return distribution.GlobalScope
-}
-
-// Repository returns an instance of the repository tied to the registry.
-// Instances should not be shared between goroutines but are cheap to
-// allocate. In general, they should be request scoped.
-func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) {
- if err := v2.ValidateRespositoryName(name); err != nil {
- return nil, distribution.ErrRepositoryNameInvalid{
- Name: name,
- Reason: err,
- }
- }
-
- return &repository{
- ctx: ctx,
- registry: reg,
- name: name,
- }, nil
-}
-
-// repository provides name-scoped access to various services.
-type repository struct {
- *registry
- ctx context.Context
- name string
-}
-
-// Name returns the name of the repository.
-func (repo *repository) Name() string {
- return repo.name
-}
-
-// Manifests returns an instance of ManifestService. Instantiation is cheap and
-// may be context sensitive in the future. The instance should be used similar
-// to a request local.
-func (repo *repository) Manifests() distribution.ManifestService {
- return &manifestStore{
- repository: repo,
- revisionStore: &revisionStore{
- repository: repo,
- },
- tagStore: &tagStore{
- repository: repo,
- },
- }
-}
-
-// Layers returns an instance of the LayerService. Instantiation is cheap and
-// may be context sensitive in the future. The instance should be used similar
-// to a request local.
-func (repo *repository) Layers() distribution.LayerService {
- ls := &layerStore{
- repository: repo,
- }
-
- if repo.registry.layerInfoCache != nil {
- // TODO(stevvooe): This is not the best place to setup a cache. We would
- // really like to decouple the cache from the backend but also have the
- // manifest service use the layer service cache. For now, we can simply
- // integrate the cache directly. The main issue is that we have layer
- // access and layer data coupled in a single object. Work is already under
- // way to decouple this.
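// (Sketch, not part of the vendored file:) the cache is opted into at
// construction time, as the tests in this bundle do:
//
//	registry := NewRegistryWithDriver(inmemory.New(), cache.NewInMemoryLayerInfoCache())
//
// Passing a nil cache.LayerInfoCache causes Layers() to skip the
// cachedLayerService wrapping below and return the plain layerStore.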
- - return &cachedLayerService{ - LayerService: ls, - repository: repo, - ctx: repo.ctx, - driver: repo.driver, - blobStore: repo.blobStore, - cache: repo.registry.layerInfoCache, - } - } - - return ls -} - -func (repo *repository) Signatures() distribution.SignatureService { - return &signatureStore{ - repository: repo, - } -} - -blob -mark :202 -data 3342 -package storage - -import ( - "encoding/json" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/libtrust" -) - -// revisionStore supports storing and managing manifest revisions. -type revisionStore struct { - *repository -} - -// exists returns true if the revision is available in the named repository. -func (rs *revisionStore) exists(revision digest.Digest) (bool, error) { - revpath, err := rs.pm.path(manifestRevisionPathSpec{ - name: rs.Name(), - revision: revision, - }) - - if err != nil { - return false, err - } - - exists, err := exists(rs.driver, revpath) - if err != nil { - return false, err - } - - return exists, nil -} - -// get retrieves the manifest, keyed by revision digest. -func (rs *revisionStore) get(revision digest.Digest) (*manifest.SignedManifest, error) { - // Ensure that this revision is available in this repository. - if exists, err := rs.exists(revision); err != nil { - return nil, err - } else if !exists { - return nil, distribution.ErrUnknownManifestRevision{ - Name: rs.Name(), - Revision: revision, - } - } - - content, err := rs.blobStore.get(revision) - if err != nil { - return nil, err - } - - // Fetch the signatures for the manifest - signatures, err := rs.Signatures().Get(revision) - if err != nil { - return nil, err - } - - jsig, err := libtrust.NewJSONSignature(content, signatures...) - if err != nil { - return nil, err - } - - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm manifest.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - - return &sm, nil -} - -// put stores the manifest in the repository, if not already present. Any -// updated signatures will be stored, as well. -func (rs *revisionStore) put(sm *manifest.SignedManifest) (digest.Digest, error) { - // Resolve the payload in the manifest. - payload, err := sm.Payload() - if err != nil { - return "", err - } - - // Digest and store the manifest payload in the blob store. - revision, err := rs.blobStore.put(payload) - if err != nil { - logrus.Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := rs.link(revision); err != nil { - return "", err - } - - // Grab each json signature and store them. - signatures, err := sm.Signatures() - if err != nil { - return "", err - } - - if err := rs.Signatures().Put(revision, signatures...); err != nil { - return "", err - } - - return revision, nil -} - -// link links the revision into the repository. -func (rs *revisionStore) link(revision digest.Digest) error { - revisionPath, err := rs.pm.path(manifestRevisionLinkPathSpec{ - name: rs.Name(), - revision: revision, - }) - - if err != nil { - return err - } - - if exists, err := exists(rs.driver, revisionPath); err != nil { - return err - } else if exists { - // Revision has already been linked! 
- return nil
- }
-
- return rs.blobStore.link(revisionPath, revision)
-}
-
-// delete removes the specified manifest revision from storage.
-func (rs *revisionStore) delete(revision digest.Digest) error {
- revisionPath, err := rs.pm.path(manifestRevisionPathSpec{
- name: rs.Name(),
- revision: revision,
- })
-
- if err != nil {
- return err
- }
-
- return rs.driver.Delete(revisionPath)
-}
-
-blob
-mark :203
-data 2391
-package storage
-
-import (
- "path"
- "sync"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/context"
- "github.com/docker/distribution/digest"
-)
-
-type signatureStore struct {
- *repository
-}
-
-var _ distribution.SignatureService = &signatureStore{}
-
-func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) {
- signaturesPath, err := s.pm.path(manifestSignaturesPathSpec{
- name: s.Name(),
- revision: dgst,
- })
-
- if err != nil {
- return nil, err
- }
-
- // Need to append signature digest algorithm to path to get all items.
- // Perhaps, this should be in the pathMapper but it feels awkward. This
- // can be eliminated by implementing listAll on drivers.
- signaturesPath = path.Join(signaturesPath, "sha256")
-
- signaturePaths, err := s.driver.List(signaturesPath)
- if err != nil {
- return nil, err
- }
-
- var wg sync.WaitGroup
- type result struct {
- index int
- signature []byte
- err error
- }
- ch := make(chan result)
-
- for i, sigPath := range signaturePaths {
- // Append the link portion
- sigPath = path.Join(sigPath, "link")
-
- wg.Add(1)
- go func(idx int, sigPath string) {
- defer wg.Done()
- context.GetLogger(s.ctx).
- Debugf("fetching signature from %q", sigPath)
-
- r := result{index: idx}
- if p, err := s.blobStore.linked(sigPath); err != nil {
- context.GetLogger(s.ctx).
- Errorf("error fetching signature from %q: %v", sigPath, err)
- r.err = err
- } else {
- r.signature = p
- }
-
- ch <- r
- }(i, sigPath)
- }
- done := make(chan struct{})
- go func() {
- wg.Wait()
- close(done)
- }()
-
- // aggregate the results
- signatures := make([][]byte, len(signaturePaths))
-loop:
- for {
- select {
- case result := <-ch:
- signatures[result.index] = result.signature
- if result.err != nil && err == nil {
- // only set the first one.
- err = result.err
- }
- case <-done:
- break loop
- }
- }
-
- return signatures, err
-}
-
-func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error {
- for _, signature := range signatures {
- signatureDigest, err := s.blobStore.put(signature)
- if err != nil {
- return err
- }
-
- signaturePath, err := s.pm.path(manifestSignatureLinkPathSpec{
- name: s.Name(),
- revision: dgst,
- signature: signatureDigest,
- })
-
- if err != nil {
- return err
- }
-
- if err := s.blobStore.link(signaturePath, signatureDigest); err != nil {
- return err
- }
- }
- return nil
-}
-
-blob
-mark :204
-data 3512
-package storage
-
-import (
- "path"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/digest"
- storagedriver "github.com/docker/distribution/registry/storage/driver"
-)
-
-// tagStore provides methods to manage manifest tags in a backend storage driver.
-type tagStore struct {
- *repository
-}
-
-// tags lists the manifest tags for the specified repository.
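// (Sketch, not part of the vendored file:) tags are recovered purely from the
// directory entries under <repo>/_manifests/tags, so after tagging "latest"
// and "stable" one would expect:
//
//	tags, _ := ts.tags() // -> []string{"latest", "stable"}, in driver list order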
-func (ts *tagStore) tags() ([]string, error) {
- p, err := ts.pm.path(manifestTagPathSpec{
- name: ts.name,
- })
- if err != nil {
- return nil, err
- }
-
- var tags []string
- entries, err := ts.driver.List(p)
- if err != nil {
- switch err := err.(type) {
- case storagedriver.PathNotFoundError:
- return nil, distribution.ErrRepositoryUnknown{Name: ts.name}
- default:
- return nil, err
- }
- }
-
- for _, entry := range entries {
- _, filename := path.Split(entry)
-
- tags = append(tags, filename)
- }
-
- return tags, nil
-}
-
-// exists returns true if the specified manifest tag exists in the repository.
-func (ts *tagStore) exists(tag string) (bool, error) {
- tagPath, err := ts.pm.path(manifestTagCurrentPathSpec{
- name: ts.Name(),
- tag: tag,
- })
- if err != nil {
- return false, err
- }
-
- exists, err := exists(ts.driver, tagPath)
- if err != nil {
- return false, err
- }
-
- return exists, nil
-}
-
-// tag tags the digest with the given tag, updating the store to point at
-// the current tag. The digest must point to a manifest.
-func (ts *tagStore) tag(tag string, revision digest.Digest) error {
- indexEntryPath, err := ts.pm.path(manifestTagIndexEntryLinkPathSpec{
- name: ts.Name(),
- tag: tag,
- revision: revision,
- })
-
- if err != nil {
- return err
- }
-
- currentPath, err := ts.pm.path(manifestTagCurrentPathSpec{
- name: ts.Name(),
- tag: tag,
- })
-
- if err != nil {
- return err
- }
-
- // Link into the index
- if err := ts.blobStore.link(indexEntryPath, revision); err != nil {
- return err
- }
-
- // Overwrite the current link
- return ts.blobStore.link(currentPath, revision)
-}
-
-// resolve the current revision for name and tag.
-func (ts *tagStore) resolve(tag string) (digest.Digest, error) {
- currentPath, err := ts.pm.path(manifestTagCurrentPathSpec{
- name: ts.Name(),
- tag: tag,
- })
-
- if err != nil {
- return "", err
- }
-
- if exists, err := exists(ts.driver, currentPath); err != nil {
- return "", err
- } else if !exists {
- return "", distribution.ErrManifestUnknown{Name: ts.Name(), Tag: tag}
- }
-
- revision, err := ts.blobStore.readlink(currentPath)
- if err != nil {
- return "", err
- }
-
- return revision, nil
-}
-
-// revisions returns all revisions with the specified name and tag.
-func (ts *tagStore) revisions(tag string) ([]digest.Digest, error) {
- manifestTagIndexPath, err := ts.pm.path(manifestTagIndexPathSpec{
- name: ts.Name(),
- tag: tag,
- })
-
- if err != nil {
- return nil, err
- }
-
- // TODO(stevvooe): Need to append digest alg to get listing of revisions.
- manifestTagIndexPath = path.Join(manifestTagIndexPath, "sha256")
-
- entries, err := ts.driver.List(manifestTagIndexPath)
- if err != nil {
- return nil, err
- }
-
- var revisions []digest.Digest
- for _, entry := range entries {
- revisions = append(revisions, digest.NewDigestFromHex("sha256", path.Base(entry)))
- }
-
- return revisions, nil
-}
-
-// delete removes the tag from repository, including the history of all
-// revisions that have the specified tag.
-func (ts *tagStore) delete(tag string) error {
- tagPath, err := ts.pm.path(manifestTagPathSpec{
- name: ts.Name(),
- tag: tag,
- })
- if err != nil {
- return err
- }
-
- return ts.driver.Delete(tagPath)
-}
-
-blob
-mark :205
-data 1397
-package storage
-
-import (
- "errors"
- "fmt"
-
- storageDriver "github.com/docker/distribution/registry/storage/driver"
-)
-
-// ErrSkipDir is used as a return value from a WalkFn to indicate that
-// the directory named in the call is to be skipped. It is not returned
-// as an error by any function.
-var ErrSkipDir = errors.New("skip this directory")
-
-// WalkFn is called once per file by Walk
-// If the returned error is ErrSkipDir and fileInfo refers
-// to a directory, the directory will not be entered and Walk
-// will continue the traversal. Otherwise Walk will return the error.
-type WalkFn func(fileInfo storageDriver.FileInfo) error
-
-// Walk traverses a filesystem defined within driver, starting
-// from the given path, calling f on each file
-func Walk(driver storageDriver.StorageDriver, from string, f WalkFn) error {
- children, err := driver.List(from)
- if err != nil {
- return err
- }
- for _, child := range children {
- fileInfo, err := driver.Stat(child)
- if err != nil {
- return err
- }
- err = f(fileInfo)
- skipDir := (err == ErrSkipDir)
- if err != nil && !skipDir {
- return err
- }
-
- if fileInfo.IsDir() && !skipDir {
- Walk(driver, child, f)
- }
- }
- return nil
-}
-
-// pushError formats an error type given a path and an error
-// and pushes it to a slice of errors
-func pushError(errors []error, path string, err error) []error {
- return append(errors, fmt.Errorf("%s: %s", path, err))
-}
-
-blob
-mark :206
-data 2569
-package storage
-
-import (
- "fmt"
- "testing"
-
- "github.com/docker/distribution/registry/storage/driver"
- "github.com/docker/distribution/registry/storage/driver/inmemory"
-)
-
-func testFS(t *testing.T) (driver.StorageDriver, map[string]string) {
- d := inmemory.New()
- c := []byte("")
- if err := d.PutContent("/a/b/c/d", c); err != nil {
- t.Fatalf("Unable to put to inmemory fs")
- }
- if err := d.PutContent("/a/b/c/e", c); err != nil {
- t.Fatalf("Unable to put to inmemory fs")
- }
-
- expected := map[string]string{
- "/a": "dir",
- "/a/b": "dir",
- "/a/b/c": "dir",
- "/a/b/c/d": "file",
- "/a/b/c/e": "file",
- }
-
- return d, expected
-}
-
-func TestWalkErrors(t *testing.T) {
- d, expected := testFS(t)
- fileCount := len(expected)
- err := Walk(d, "", func(fileInfo driver.FileInfo) error {
- return nil
- })
- if err == nil {
- t.Error("Expected invalid root err")
- }
-
- err = Walk(d, "/", func(fileInfo driver.FileInfo) error {
- // error on the 2nd file
- if fileInfo.Path() == "/a/b" {
- return fmt.Errorf("Early termination")
- }
- delete(expected, fileInfo.Path())
- return nil
- })
- if len(expected) != fileCount-1 {
- t.Error("Walk failed to terminate with error")
- }
- if err != nil {
- t.Error(err.Error())
- }
-
- err = Walk(d, "/nonexistent", func(fileInfo driver.FileInfo) error {
- return nil
- })
- if err == nil {
- t.Errorf("Expected missing file err")
- }
-
-}
-
-func TestWalk(t *testing.T) {
- d, expected := testFS(t)
- err := Walk(d, "/", func(fileInfo driver.FileInfo) error {
- filePath := fileInfo.Path()
- filetype, ok := expected[filePath]
- if !ok {
- t.Fatalf("Unexpected file in walk: %q", filePath)
- }
-
- if fileInfo.IsDir() {
- if filetype != "dir" {
- t.Errorf("Unexpected file type: %q", filePath)
- }
- } else {
- if filetype != "file" {
- t.Errorf("Unexpected file type: %q", filePath)
- }
- }
- delete(expected, filePath)
- return nil
- })
- if len(expected) > 0 {
- t.Errorf("Missed files in walk: %q", expected)
- }
- if err != nil {
- t.Fatalf(err.Error())
- }
-}
-
-func TestWalkSkipDir(t *testing.T) {
- d, expected := testFS(t)
- err := Walk(d, "/", func(fileInfo driver.FileInfo) error {
- filePath := fileInfo.Path()
- if filePath == "/a/b" {
- // skip processing /a/b/c and /a/b/c/d
- return ErrSkipDir
- }
- delete(expected, filePath)
- return nil
- })
- if err != nil
{ - t.Fatalf(err.Error()) - } - if _, ok := expected["/a/b/c"]; !ok { - t.Errorf("/a/b/c not skipped") - } - if _, ok := expected["/a/b/c/d"]; !ok { - t.Errorf("/a/b/c/d not skipped") - } - if _, ok := expected["/a/b/c/e"]; !ok { - t.Errorf("/a/b/c/e not skipped") - } - -} - -blob -mark :207 -data 3045 -package testutil - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "sort" - "strings" -) - -// RequestResponseMap is an ordered mapping from Requests to Responses -type RequestResponseMap []RequestResponseMapping - -// RequestResponseMapping defines a Response to be sent in response to a given -// Request -type RequestResponseMapping struct { - Request Request - Response Response -} - -// TODO(bbland): add support for request headers - -// Request is a simplified http.Request object -type Request struct { - // Method is the http method of the request, for example GET - Method string - - // Route is the http route of this request - Route string - - // QueryParams are the query parameters of this request - QueryParams map[string][]string - - // Body is the byte contents of the http request - Body []byte -} - -func (r Request) String() string { - queryString := "" - if len(r.QueryParams) > 0 { - queryString = "?" - keys := make([]string, 0, len(r.QueryParams)) - for k := range r.QueryParams { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - queryString += strings.Join(r.QueryParams[k], "&") + "&" - } - queryString = queryString[:len(queryString)-1] - } - return fmt.Sprintf("%s %s%s\n%s", r.Method, r.Route, queryString, r.Body) -} - -// Response is a simplified http.Response object -type Response struct { - // Statuscode is the http status code of the Response - StatusCode int - - // Headers are the http headers of this Response - Headers http.Header - - // Body is the response body - Body []byte -} - -// testHandler is an http.Handler with a defined mapping from Request to an -// ordered list of Response objects -type testHandler struct { - responseMap map[string][]Response -} - -// NewHandler returns a new test handler that responds to defined requests -// with specified responses -// Each time a Request is received, the next Response is returned in the -// mapping, until no Responses are defined, at which point a 404 is sent back -func NewHandler(requestResponseMap RequestResponseMap) http.Handler { - responseMap := make(map[string][]Response) - for _, mapping := range requestResponseMap { - responses, ok := responseMap[mapping.Request.String()] - if ok { - responseMap[mapping.Request.String()] = append(responses, mapping.Response) - } else { - responseMap[mapping.Request.String()] = []Response{mapping.Response} - } - } - return &testHandler{responseMap: responseMap} -} - -func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - requestBody, _ := ioutil.ReadAll(r.Body) - request := Request{ - Method: r.Method, - Route: r.URL.Path, - QueryParams: r.URL.Query(), - Body: requestBody, - } - - responses, ok := app.responseMap[request.String()] - - if !ok || len(responses) == 0 { - http.NotFound(w, r) - return - } - - response := responses[0] - app.responseMap[request.String()] = responses[1:] - - responseHeader := w.Header() - for k, v := range response.Headers { - responseHeader[k] = v - } - - w.WriteHeader(response.StatusCode) - - io.Copy(w, bytes.NewReader(response.Body)) -} - -blob -mark :208 -data 2184 -package testutil - -import ( - "archive/tar" - "bytes" - "crypto/rand" - "fmt" - "io" - "io/ioutil" 
- mrand "math/rand"
- "time"
-
- "github.com/docker/docker/pkg/tarsum"
-)
-
-// CreateRandomTarFile creates a random tarfile, returning it as an
-// io.ReadSeeker along with its tarsum. An error is returned if there is a
-// problem generating valid content.
-func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) {
- nFiles := mrand.Intn(10) + 10
- target := &bytes.Buffer{}
- wr := tar.NewWriter(target)
-
- // Perturb this on each iteration of the loop below.
- header := &tar.Header{
- Mode: 0644,
- ModTime: time.Now(),
- Typeflag: tar.TypeReg,
- Uname: "randocalrissian",
- Gname: "cloudcity",
- AccessTime: time.Now(),
- ChangeTime: time.Now(),
- }
-
- for fileNumber := 0; fileNumber < nFiles; fileNumber++ {
- fileSize := mrand.Int63n(1<<20) + 1<<20
-
- header.Name = fmt.Sprint(fileNumber)
- header.Size = fileSize
-
- if err := wr.WriteHeader(header); err != nil {
- return nil, "", err
- }
-
- randomData := make([]byte, fileSize)
-
- // Fill up the buffer with some random data.
- n, err := rand.Read(randomData)
-
- if n != len(randomData) {
- return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData))
- }
-
- if err != nil {
- return nil, "", err
- }
-
- nn, err := io.Copy(wr, bytes.NewReader(randomData))
- if nn != fileSize {
- return nil, "", fmt.Errorf("short copy writing random file to tar")
- }
-
- if err != nil {
- return nil, "", err
- }
-
- if err := wr.Flush(); err != nil {
- return nil, "", err
- }
- }
-
- if err := wr.Close(); err != nil {
- return nil, "", err
- }
-
- reader := bytes.NewReader(target.Bytes())
-
- // A tar builder that supports tarsum inline calculation would be awesome
- // here.
- ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1)
- if err != nil {
- return nil, "", err
- }
-
- nn, err := io.Copy(ioutil.Discard, ts)
- if nn != int64(len(target.Bytes())) {
- return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes()))
- }
-
- if err != nil {
- return nil, "", err
- }
-
- return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil
-}
-
-blob
-mark :209
-data 584
-package version
-
-import (
- "fmt"
- "io"
- "os"
-)
-
-// FprintVersion outputs the version string to the writer, in the following
-// format, followed by a newline:
-//
-// <cmd> <project> <version>
-//
-// For example, a binary "registry" built from github.com/docker/distribution
-// with version "v2.0" would print the following:
-//
-// registry github.com/docker/distribution v2.0
-//
-func FprintVersion(w io.Writer) {
- fmt.Fprintln(w, os.Args[0], Package, Version)
-}
-
-// PrintVersion outputs the version information, from Fprint, to stdout.
-func PrintVersion() {
- FprintVersion(os.Stdout)
-}
-
-blob
-mark :210
-data 477
-package version
-
-// Package is the overall, canonical project import path under which the
-// package was built.
-var Package = "github.com/docker/distribution"
-
-// Version indicates which version of the binary is running. This is set to
-// the latest release tag by hand, always suffixed by "+unknown". During
-// build, it will be replaced by the actual version. The value here will be
-// used if the registry is run after a go get based install.
-var Version = "v2.0.0+unknown"
-
-blob
-mark :211
-data 815
-#!/bin/sh
-
-# This bash script outputs the current, desired content of version.go, using
-# git describe. For best effect, pipe this to the target file. Generally, this
-# only needs to be updated for releases. The actual value will be replaced
The actual value of will be replaced -# during build time if the makefile is used. - -set -e - -cat < 1427143596 -0700 -committer Clayton Coleman 1430833393 -0400 -data 49 -UPSTREAM: add context to ManifestService methods -M 100644 :1 Godeps/_workspace/src/github.com/docker/distribution/.drone.yml -M 100644 :2 Godeps/_workspace/src/github.com/docker/distribution/.gitignore -M 100644 :3 Godeps/_workspace/src/github.com/docker/distribution/.mailmap -M 100644 :4 Godeps/_workspace/src/github.com/docker/distribution/AUTHORS -M 100644 :5 Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md -M 100644 :6 Godeps/_workspace/src/github.com/docker/distribution/Dockerfile -M 100644 :7 Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json -M 100644 :8 Godeps/_workspace/src/github.com/docker/distribution/Godeps/Readme -M 100644 :9 Godeps/_workspace/src/github.com/docker/distribution/LICENSE -M 100644 :10 Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS -M 100644 :11 Godeps/_workspace/src/github.com/docker/distribution/Makefile -M 100644 :12 Godeps/_workspace/src/github.com/docker/distribution/README.md -M 100644 :13 Godeps/_workspace/src/github.com/docker/distribution/ROADMAP.md -M 100644 :14 Godeps/_workspace/src/github.com/docker/distribution/circle.yml -M 100644 :15 Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/list.go -M 100644 :16 Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/main.go -M 100644 :17 Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/pull.go -M 100644 :18 Godeps/_workspace/src/github.com/docker/distribution/cmd/dist/push.go -M 100644 :19 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go -M 100644 :20 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-azure/main.go -M 100644 :21 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-filesystem/main.go -M 100644 :22 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-inmemory/main.go -M 100644 :23 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-storagedriver-s3/main.go -M 100644 :24 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config.yml -M 100644 :25 Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go -M 100644 :26 Godeps/_workspace/src/github.com/docker/distribution/configuration/README.md -M 100644 :27 Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go -M 100644 :28 Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go -M 100644 :29 Godeps/_workspace/src/github.com/docker/distribution/configuration/parser.go -M 100644 :30 Godeps/_workspace/src/github.com/docker/distribution/context/context.go -M 100644 :31 Godeps/_workspace/src/github.com/docker/distribution/context/doc.go -M 100644 :32 Godeps/_workspace/src/github.com/docker/distribution/context/http.go -M 100644 :33 Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go -M 100644 :34 Godeps/_workspace/src/github.com/docker/distribution/context/logger.go -M 100644 :35 Godeps/_workspace/src/github.com/docker/distribution/context/trace.go -M 100644 :36 Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go -M 100644 :37 Godeps/_workspace/src/github.com/docker/distribution/context/util.go -M 
100644 :38 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/README.md -M 100644 :39 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml -M 100644 :40 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile -M 100644 :41 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf -M 100644 :42 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf -M 100644 :43 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf -M 100644 :44 Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf -M 100644 :45 Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go -M 100644 :46 Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go -M 100644 :47 Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go -M 100644 :48 Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable.go -M 100644 :49 Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go -M 100644 :50 Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum.go -M 100644 :51 Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum_test.go -M 100644 :52 Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go -M 100644 :53 Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go -M 100644 :54 Godeps/_workspace/src/github.com/docker/distribution/doc.go -M 100644 :55 Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile -M 100644 :56 Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md -M 100644 :57 Godeps/_workspace/src/github.com/docker/distribution/docs/building.md -M 100644 :58 Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md -M 100644 :59 Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md -M 100644 :60 Godeps/_workspace/src/github.com/docker/distribution/docs/distribution.md -M 100644 :61 Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md -M 100644 :62 Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy -M 100644 :63 Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png -M 100644 :64 Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg -M 100644 :65 Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.gliffy -M 100644 :66 Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.png -M 100644 :67 Godeps/_workspace/src/github.com/docker/distribution/docs/images/registry.svg -M 100644 :68 Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md -M 100644 :69 Godeps/_workspace/src/github.com/docker/distribution/docs/mkdocs.yml -M 100644 :70 Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md -M 100644 :71 Godeps/_workspace/src/github.com/docker/distribution/docs/overview.md -M 100644 :73 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl -M 100644 :72 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md -M 100644 :74 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md -M 100644 :75 
Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md -M 100644 :76 Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md -M 100644 :77 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md -M 100644 :78 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md -M 100644 :79 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md -M 100644 :80 Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md -M 100644 :81 Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md -M 100644 :82 Godeps/_workspace/src/github.com/docker/distribution/errors.go -M 100644 :83 Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go -M 100644 :84 Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go -M 100644 :85 Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks.go -M 100644 :86 Godeps/_workspace/src/github.com/docker/distribution/health/doc.go -M 100644 :87 Godeps/_workspace/src/github.com/docker/distribution/health/health.go -M 100644 :88 Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go -M 100644 :89 Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go -M 100644 :90 Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest_test.go -M 100644 :91 Godeps/_workspace/src/github.com/docker/distribution/manifest/sign.go -M 100644 :92 Godeps/_workspace/src/github.com/docker/distribution/manifest/verify.go -M 100644 :93 Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go -M 100644 :94 Godeps/_workspace/src/github.com/docker/distribution/notifications/endpoint.go -M 100644 :95 Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go -M 100644 :96 Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go -M 100644 :97 Godeps/_workspace/src/github.com/docker/distribution/notifications/http.go -M 100644 :98 Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go -M 100644 :99 Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go -M 100644 :100 Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go -M 100644 :101 Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go -M 100644 :102 Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks.go -M 100644 :103 Godeps/_workspace/src/github.com/docker/distribution/notifications/sinks_test.go -M 100644 :104 Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile -M 100644 :105 Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md -M 100644 :106 Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh -M 100644 :107 Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit -M 100644 :108 Godeps/_workspace/src/github.com/docker/distribution/registry.go -M 100644 :109 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go -M 100644 :110 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/doc.go -M 100644 :111 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go -M 100644 :112 
Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors_test.go -M 100644 :113 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go -M 100644 :114 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go -M 100644 :115 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go -M 100644 :116 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go -M 100644 :117 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go -M 100644 :118 Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go -M 100644 :119 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go -M 100644 :120 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go -M 100644 :121 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go -M 100644 :122 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go -M 100644 :123 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go -M 100644 :124 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go -M 100644 :125 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go -M 100644 :126 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go -M 100644 :127 Godeps/_workspace/src/github.com/docker/distribution/registry/client/client.go -M 100644 :128 Godeps/_workspace/src/github.com/docker/distribution/registry/client/client_test.go -M 100644 :129 Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go -M 100644 :130 Godeps/_workspace/src/github.com/docker/distribution/registry/client/objectstore.go -M 100644 :131 Godeps/_workspace/src/github.com/docker/distribution/registry/client/pull.go -M 100644 :132 Godeps/_workspace/src/github.com/docker/distribution/registry/client/push.go -M 100644 :133 Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go -M 100644 :134 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go -M 100644 :135 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go -M 100644 :136 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go -M 100644 :137 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth.go -M 100644 :138 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/basicauth_prego14.go -M 100644 :139 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go -M 100644 :140 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go -M 100644 :141 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go -M 100644 :142 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go -M 100644 :143 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go -M 100644 :144 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/layer.go -M 100644 :145 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/layerupload.go -M 100644 :146 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go -M 100644 :147 
Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go -M 100644 :148 Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go -M 100644 :149 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go -M 100644 :150 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go -M 100644 :151 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache_test.go -M 100644 :152 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory.go -M 100644 :153 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory_test.go -M 100644 :154 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis.go -M 100644 :155 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis_test.go -M 100644 :156 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/doc.go -M 100644 :157 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go -M 100644 :158 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go -M 100644 :159 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go -M 100644 :160 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go -M 100644 :161 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go -M 100644 :162 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go -M 100644 :163 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go -M 100644 :164 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go -M 100644 :165 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go -M 100644 :166 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go -M 100644 :167 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go -M 100644 :168 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go -M 100644 :169 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/fileinfo.go -M 100644 :170 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go -M 100644 :171 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go -M 100644 :172 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go -M 100644 :173 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go -M 100644 :174 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go -M 100644 :175 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/client.go -M 100644 :176 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/ipc.go -M 100644 :177 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/server.go -M 100644 :178 
Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go -M 100644 :179 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go -M 100644 :180 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go -M 100644 :181 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go -M 100644 :182 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go -M 100644 :183 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go -M 100644 :184 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go -M 100644 :185 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go -M 100644 :186 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go -M 100644 :187 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go -M 100644 :188 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layer_test.go -M 100644 :189 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layercache.go -M 100644 :190 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerreader.go -M 100644 :191 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go -M 100644 :192 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter.go -M 100644 :193 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_nonresumable.go -M 100644 :194 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_resumable.go -M 100644 :195 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go -M 100644 :196 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go -M 100644 :197 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go -M 100644 :198 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go -M 100644 :199 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go -M 100644 :200 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go -M 100644 :201 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go -M 100644 :202 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go -M 100644 :203 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go -M 100644 :204 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go -M 100644 :205 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go -M 100644 :206 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go -M 100644 :207 Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go -M 100644 :208 Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go -M 100644 :209 Godeps/_workspace/src/github.com/docker/distribution/version/print.go -M 100644 :210 Godeps/_workspace/src/github.com/docker/distribution/version/version.go -M 100644 :211 Godeps/_workspace/src/github.com/docker/distribution/version/version.sh - -blob -mark :213 
-data 6725 -package distribution - -import ( - "io" - "net/http" - "time" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "golang.org/x/net/context" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name string) (Repository, error) -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Name returns the name of the repository. - Name() string - - // Manifests returns a reference to this repository's manifest service. - Manifests() ManifestService - - // Layers returns a reference to this repository's layers service. - Layers() LayerService - - // Signatures returns a reference to this repository's signatures service. - Signatures() SignatureService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. - -// ManifestService provides operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest identified by the digest, if it exists. - Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) - - // Delete removes the manifest, if it exists. - Delete(ctx context.Context, dgst digest.Digest) error - - // Put creates or updates the manifest. - Put(ctx context.Context, manifest *manifest.SignedManifest) error - - // TODO(stevvooe): The methods after this message should be moved to a - // discrete TagService, per active proposals. - - // Tags lists the tags under the named repository. - Tags(ctx context.Context) ([]string, error) - - // ExistsByTag returns true if the manifest exists. - ExistsByTag(ctx context.Context, tag string) (bool, error) - - // GetByTag retrieves the named manifest, if it exists. - GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) - - // TODO(stevvooe): There are several changes that need to be done to this - // interface: - // - // 1. Allow explicit tagging with Tag(digest digest.Digest, tag string) - // 2. Support reading tags with a re-entrant reader to avoid large - // allocations in the registry. - // 3. Long-term: Provide All() method that lets one scroll through all of - // the manifest entries. - // 4. Long-term: break out concept of signing from manifests. This is - // really a part of the distribution sprint. - // 5. Long-term: Manifest should be an interface.
This code shouldn't - // really be concerned with the storage format. -} - -// LayerService provides operations on layer files in a backend storage. -type LayerService interface { - // Exists returns true if the layer exists. - Exists(digest digest.Digest) (bool, error) - - // Fetch the layer identified by TarSum. - Fetch(digest digest.Digest) (Layer, error) - - // Delete unlinks the layer from a Repository. - Delete(dgst digest.Digest) error - - // Upload begins a layer upload to repository identified by name, - // returning a handle. - Upload() (LayerUpload, error) - - // Resume continues an in-progress layer upload, returning a handle to the - // upload. The caller should seek to the latest desired upload location - // before proceeding. - Resume(uuid string) (LayerUpload, error) -} - -// Layer provides a readable and seekable layer object. Typically, -// implementations are *not* goroutine safe. -type Layer interface { - // http.ServeContent requires an efficient implementation of - // ReadSeeker.Seek(0, os.SEEK_END). - io.ReadSeeker - io.Closer - - // Digest returns the unique digest of the blob. - Digest() digest.Digest - - // Length returns the length in bytes of the blob. - Length() int64 - - // CreatedAt returns the time this layer was created. - CreatedAt() time.Time - - // Handler returns an HTTP handler which serves the layer content, whether - // by providing a redirect directly to the content, or by serving the - // content itself. - Handler(r *http.Request) (http.Handler, error) -} - -// LayerUpload provides a handle for working with in-progress uploads. -// Instances can be obtained from the LayerService.Upload and -// LayerService.Resume. -type LayerUpload interface { - io.WriteSeeker - io.ReaderFrom - io.Closer - - // UUID returns the identifier for this upload. - UUID() string - - // StartedAt returns the time this layer upload was started. - StartedAt() time.Time - - // Finish marks the upload as completed, returning a valid handle to the - // uploaded layer. The digest is validated against the contents of the - // uploaded layer. - Finish(digest digest.Digest) (Layer, error) - - // Cancel the layer upload process. - Cancel() error -} - -// SignatureService provides operations on signatures. -type SignatureService interface { - // Get retrieves all of the signature blobs for the specified digest. - Get(dgst digest.Digest) ([][]byte, error) - - // Put stores the signature for the provided digest. - Put(dgst digest.Digest, signatures ...[]byte) error -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describes the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Length in bytes of content. - Length int64 `json:"length,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Many of the type relationships - // depend on the simplicity of this type. -} - -blob -mark :214 -data 3650 -// Package cache provides facilities to speed up access to the storage -// backend.
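To make the LayerService/LayerUpload contract concrete, here is a small, illustrative push-side helper written only against the interfaces above; pushLayer and its arguments are hypothetical, not part of the vendored code.

```go
package example

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
)

// pushLayer streams a blob into a new upload and completes it against the
// expected digest, per the LayerUpload contract above.
func pushLayer(repo distribution.Repository, content io.Reader, dgst digest.Digest) (distribution.Layer, error) {
	upload, err := repo.Layers().Upload()
	if err != nil {
		return nil, err
	}

	// LayerUpload is an io.ReaderFrom, so implementations may optimize
	// this copy.
	if _, err := io.Copy(upload, content); err != nil {
		upload.Cancel() // best-effort cleanup; error intentionally ignored here
		return nil, err
	}

	// Finish validates the uploaded bytes against the expected digest and
	// returns a readable Layer handle on success.
	return upload.Finish(dgst)
}
```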
Typically cache implementations deal with internal implementation -// details at the backend level, rather than generalized caches for -// distribution related interfaces. In other words, unless the cache is -// specific to the storage package, it belongs in another package. -package cache - -import ( - "fmt" - - "github.com/docker/distribution/digest" - "golang.org/x/net/context" -) - -// ErrNotFound is returned when a meta item is not found. -var ErrNotFound = fmt.Errorf("not found") - -// LayerMeta describes the backend location and length of layer data. -type LayerMeta struct { - Path string - Length int64 -} - -// LayerInfoCache is a driver-aware cache of layer metadata. Basically, it -// provides a fast cache for checks against repository metadata, avoiding -// round trips to backend storage. Note that this is different from a pure -// layer cache, which would also provide access to backing data. Such -// a cache should be implemented as a middleware, rather than integrated with -// the storage backend. -// -// Note that most implementations rely on the caller to do strict checks on -// repo and dgst arguments, since these are mostly used behind existing -// implementations. -type LayerInfoCache interface { - // Contains returns true if the repository with name contains the layer. - Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) - - // Add includes the layer in the given repository cache. - Add(ctx context.Context, repo string, dgst digest.Digest) error - - // Delete removes the layer from the given repository cache. - Delete(ctx context.Context, repo string, dgst digest.Digest) error - - // Meta provides the location of the layer on the backend and its size. Membership of a - // repository should be tested before using the result, if required. - Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) - - // SetMeta sets the meta data for the given layer. - SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error -} - -// base implements common checks between cache implementations. Note that -// these are not full checks of input, since that should be done by the -// caller.
-type base struct { - LayerInfoCache -} - -func (b *base) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { - if repo == "" { - return false, fmt.Errorf("cache: cannot check for empty repository name") - } - - if dgst == "" { - return false, fmt.Errorf("cache: cannot check for empty digest") - } - - return b.LayerInfoCache.Contains(ctx, repo, dgst) -} - -func (b *base) Add(ctx context.Context, repo string, dgst digest.Digest) error { - if repo == "" { - return fmt.Errorf("cache: cannot add empty repository name") - } - - if dgst == "" { - return fmt.Errorf("cache: cannot add empty digest") - } - - return b.LayerInfoCache.Add(ctx, repo, dgst) -} - -func (b *base) Delete(ctx context.Context, repo string, dgst digest.Digest) error { - if repo == "" { - return fmt.Errorf("cache: cannot delete empty repository name") - } - - if dgst == "" { - return fmt.Errorf("cache: cannot delete empty digest") - } - - return b.LayerInfoCache.Delete(ctx, repo, dgst) -} - -func (b *base) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - if dgst == "" { - return LayerMeta{}, fmt.Errorf("cache: cannot get meta for empty digest") - } - - return b.LayerInfoCache.Meta(ctx, dgst) -} - -func (b *base) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - if dgst == "" { - return fmt.Errorf("cache: cannot set meta for empty digest") - } - - if meta.Path == "" { - return fmt.Errorf("cache: cannot set empty path for meta") - } - - return b.LayerInfoCache.SetMeta(ctx, dgst, meta) -} - -blob -mark :215 -data 1994 -package cache - -import ( - "github.com/docker/distribution/digest" - "golang.org/x/net/context" -) - -// inmemoryLayerInfoCache is a map-based implementation of LayerInfoCache. -type inmemoryLayerInfoCache struct { - membership map[string]map[digest.Digest]struct{} - meta map[digest.Digest]LayerMeta -} - -// NewInMemoryLayerInfoCache provides an implementation of LayerInfoCache that -// stores results in memory. -func NewInMemoryLayerInfoCache() LayerInfoCache { - return &base{&inmemoryLayerInfoCache{ - membership: make(map[string]map[digest.Digest]struct{}), - meta: make(map[digest.Digest]LayerMeta), - }} -} - -func (ilic *inmemoryLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { - members, ok := ilic.membership[repo] - if !ok { - return false, nil - } - - _, ok = members[dgst] - return ok, nil -} - -// Add adds the layer to the in-memory repository blob set. -func (ilic *inmemoryLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { - members, ok := ilic.membership[repo] - if !ok { - members = make(map[digest.Digest]struct{}) - ilic.membership[repo] = members - } - - members[dgst] = struct{}{} - - return nil -} - -func (ilic *inmemoryLayerInfoCache) Delete(ctx context.Context, repo string, dgst digest.Digest) error { - members, ok := ilic.membership[repo] - if !ok { - return nil - } - delete(members, dgst) - return nil -} - -// Meta retrieves the layer meta data from the in-memory map, returning -// ErrNotFound if not found. -func (ilic *inmemoryLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - meta, ok := ilic.meta[dgst] - if !ok { - return LayerMeta{}, ErrNotFound - } - - return meta, nil -} - -// SetMeta sets the meta data for the given digest in the in-memory meta map. -// A map entry is used here since we may store unrelated fields about a layer -// in the future.
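A quick illustration of the LayerInfoCache contract using the in-memory implementation above; the repository name, digest, and meta values are placeholders.

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/storage/cache"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	c := cache.NewInMemoryLayerInfoCache()

	// Placeholder repository name and digest.
	repo := "library/ubuntu"
	dgst := digest.Digest("tarsum.v1+sha256:deadbeef")

	if err := c.Add(ctx, repo, dgst); err != nil {
		panic(err)
	}
	ok, _ := c.Contains(ctx, repo, dgst) // true after Add

	// Meta/SetMeta track backend path and length independently of membership.
	_ = c.SetMeta(ctx, dgst, cache.LayerMeta{Path: "/some/backend/path", Length: 1234})
	meta, _ := c.Meta(ctx, dgst)

	fmt.Println(ok, meta.Path, meta.Length)
}
```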
-func (ilic *inmemoryLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - ilic.meta[dgst] = meta - return nil -} - -blob -mark :216 -data 3718 -package cache - -import ( - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/garyburd/redigo/redis" - "golang.org/x/net/context" -) - -// redisLayerInfoCache provides an implementation of storage.LayerInfoCache -// based on redis. Layer info is stored in two parts. The first provide fast -// access to repository membership through a redis set for each repo. The -// second is a redis hash keyed by the digest of the layer, providing path and -// length information. Note that there is no implied relationship between -// these two caches. The layer may exist in one, both or none and the code -// must be written this way. -type redisLayerInfoCache struct { - pool *redis.Pool - - // TODO(stevvooe): We use a pool because we don't have great control over - // the cache lifecycle to manage connections. A new connection if fetched - // for each operation. Once we have better lifecycle management of the - // request objects, we can change this to a connection. -} - -// NewRedisLayerInfoCache returns a new redis-based LayerInfoCache using the -// provided redis connection pool. -func NewRedisLayerInfoCache(pool *redis.Pool) LayerInfoCache { - return &base{&redisLayerInfoCache{ - pool: pool, - }} -} - -// Contains does a membership check on the repository blob set in redis. This -// is used as an access check before looking up global path information. If -// false is returned, the caller should still check the backend to if it -// exists elsewhere. -func (rlic *redisLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { - conn := rlic.pool.Get() - defer conn.Close() - - ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Contains(%q, %q)", repo, dgst) - return redis.Bool(conn.Do("SISMEMBER", rlic.repositoryBlobSetKey(repo), dgst)) -} - -// Add adds the layer to the redis repository blob set. -func (rlic *redisLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { - conn := rlic.pool.Get() - defer conn.Close() - - ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Add(%q, %q)", repo, dgst) - _, err := conn.Do("SADD", rlic.repositoryBlobSetKey(repo), dgst) - return err -} - -func (rlic *redisLayerInfoCache) Delete(ctx context.Context, repo string, dgst digest.Digest) error { - //TODO - return nil -} - -// Meta retrieves the layer meta data from the redis hash, returning -// ErrUnknownLayer if not found. -func (rlic *redisLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - conn := rlic.pool.Get() - defer conn.Close() - - reply, err := redis.Values(conn.Do("HMGET", rlic.blobMetaHashKey(dgst), "path", "length")) - if err != nil { - return LayerMeta{}, err - } - - if len(reply) < 2 || reply[0] == nil || reply[1] == nil { - return LayerMeta{}, ErrNotFound - } - - var meta LayerMeta - if _, err := redis.Scan(reply, &meta.Path, &meta.Length); err != nil { - return LayerMeta{}, err - } - - return meta, nil -} - -// SetMeta sets the meta data for the given digest using a redis hash. A hash -// is used here since we may store unrelated fields about a layer in the -// future. 
-func (rlic *redisLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - conn := rlic.pool.Get() - defer conn.Close() - - _, err := conn.Do("HMSET", rlic.blobMetaHashKey(dgst), "path", meta.Path, "length", meta.Length) - return err -} - -// repositoryBlobSetKey returns the key for the blob set in the cache. -func (rlic *redisLayerInfoCache) repositoryBlobSetKey(repo string) string { - return "repository::" + repo + "::blobs" -} - -// blobMetaHashKey returns the cache key for immutable blob meta data. -func (rlic *redisLayerInfoCache) blobMetaHashKey(dgst digest.Digest) string { - return "blobs::" + dgst.String() -} - -blob -mark :217 -data 7212 -package storage - -import ( - "expvar" - "sync/atomic" - "time" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" -) - -// cachedLayerService implements the layer service with path-aware caching, -// using a LayerInfoCache interface. -type cachedLayerService struct { - distribution.LayerService // upstream layer service - repository distribution.Repository - ctx context.Context - driver driver.StorageDriver - *blobStore // global blob store - cache cache.LayerInfoCache -} - -// Exists checks for existence of the digest in the cache, immediately -// returning if it exists for the repository. If not, the upstream is checked. -// When a positive result is found, it is written into the cache. -func (lc *cachedLayerService) Exists(dgst digest.Digest) (bool, error) { - ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Exists(%q)", dgst) - now := time.Now() - defer func() { - // TODO(stevvooe): Replace this with a decent context-based metrics solution - ctxu.GetLoggerWithField(lc.ctx, "blob.exists.duration", time.Since(now)). - Infof("(*cachedLayerService).Exists(%q)", dgst) - }() - - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Requests, 1) - available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - if available { - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Hits, 1) - return true, nil - } - -fallback: - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Misses, 1) - exists, err := lc.LayerService.Exists(dgst) - if err != nil { - return exists, err - } - - if exists { - // we can only cache this if the existence is positive. - if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error adding %v@%v to cache: %v", lc.repository.Name(), dgst, err) - } - } - - return exists, err -} - -// Fetch checks for the availability of the layer in the repository via the -// cache. If present, the metadata is resolved and the layer is returned. If -// any operation fails, the layer is read directly from the upstream. The -// results are cached, if possible. -func (lc *cachedLayerService) Fetch(dgst digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Fetch(%q)", dgst) - now := time.Now() - defer func() { - ctxu.GetLoggerWithField(lc.ctx, "blob.fetch.duration", time.Since(now)). 
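Since the redis-backed cache grabs a fresh connection from the pool per operation (see the TODO above), the caller supplies the pool. A minimal sketch of the wiring, assuming a local redis at a placeholder address:

```go
package main

import (
	"time"

	"github.com/docker/distribution/registry/storage/cache"
	"github.com/garyburd/redigo/redis"
)

func main() {
	// Pool settings and address are illustrative.
	pool := &redis.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", "localhost:6379")
		},
	}

	infoCache := cache.NewRedisLayerInfoCache(pool)
	_ = infoCache // handed to the storage registry constructor in practice
}
```

The resulting keys follow the schemes shown above: a `repository::<repo>::blobs` set for membership and a `blobs::<digest>` hash for path and length.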
- Infof("(*layerInfoCache).Fetch(%q)", dgst) - }() - - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Requests, 1) - available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - if available { - // fast path: get the layer info and return - meta, err := lc.cache.Meta(lc.ctx, dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error fetching %v@%v from cache: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Hits, 1) - return newLayerReader(lc.driver, dgst, meta.Path, meta.Length) - } - - // NOTE(stevvooe): Unfortunately, the cache here only makes checks for - // existing layers faster. We'd have to provide more careful - // synchronization with the backend to make the missing case as fast. - -fallback: - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Misses, 1) - layer, err := lc.LayerService.Fetch(dgst) - if err != nil { - return nil, err - } - - // add the layer to the repository - if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx). - Errorf("error caching repository relationship for %v@%v: %v", lc.repository.Name(), dgst, err) - } - - // lookup layer path and add it to the cache, if it succeds. Note that we - // still return the layer even if we have trouble caching it. - if path, err := lc.resolveLayerPath(layer); err != nil { - ctxu.GetLogger(lc.ctx). - Errorf("error resolving path while caching %v@%v: %v", lc.repository.Name(), dgst, err) - } else { - // add the layer to the cache once we've resolved the path. - if err := lc.cache.SetMeta(lc.ctx, dgst, cache.LayerMeta{Path: path, Length: layer.Length()}); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error adding meta for %v@%v to cache: %v", lc.repository.Name(), dgst, err) - } - } - - return layer, err -} - -func (lc *cachedLayerService) Delete(dgst digest.Digest) error { - ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Delete(%q)", dgst) - if err := lc.cache.Delete(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error deleting layer link from cache; repo=%s, layer=%s: %v", lc.repository.Name(), dgst, err) - } - return lc.LayerService.Delete(dgst) -} - -// extractLayerInfo pulls the layerInfo from the layer, attempting to get the -// path information from either the concrete object or by resolving the -// primary blob store path. -func (lc *cachedLayerService) resolveLayerPath(layer distribution.Layer) (path string, err error) { - // try and resolve the type and driver, so we don't have to traverse links - switch v := layer.(type) { - case *layerReader: - // only set path if we have same driver instance. - if v.driver == lc.driver { - return v.path, nil - } - } - - ctxu.GetLogger(lc.ctx).Warnf("resolving layer path during cache lookup (%v@%v)", lc.repository.Name(), layer.Digest()) - // we have to do an expensive stat to resolve the layer location but no - // need to check the link, since we already have layer instance for this - // repository. - bp, err := lc.blobStore.path(layer.Digest()) - if err != nil { - return "", err - } - - return bp, nil -} - -// layerInfoCacheMetrics keeps track of cache metrics for layer info cache -// requests. Note this is kept globally and made available via expvar. For -// more detailed metrics, its recommend to instrument a particular cache -// implementation. 
-var layerInfoCacheMetrics struct { - // Exists tracks calls to the Exists caches. - Exists struct { - Requests uint64 - Hits uint64 - Misses uint64 - } - - // Fetch tracks calls to the fetch caches. - Fetch struct { - Requests uint64 - Hits uint64 - Misses uint64 - } -} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("layerinfo", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date. The - // numbers will always *eventually* be reported correctly. - return layerInfoCacheMetrics - })) -} - -blob -mark :218 -data 4614 -package storage - -import ( - "strings" - "time" - - "code.google.com/p/go-uuid/uuid" - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -type layerStore struct { - repository *repository -} - -func (ls *layerStore) Exists(digest digest.Digest) (bool, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists") - - // Because this implementation just follows blob links, an existence check - // is pretty cheap by starting and closing a fetch. - _, err := ls.Fetch(digest) - - if err != nil { - switch err.(type) { - case distribution.ErrUnknownLayer: - return false, nil - } - - return false, err - } - - return true, nil -} - -func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Fetch") - bp, err := ls.path(dgst) - if err != nil { - return nil, err - } - - fr, err := newFileReader(ls.repository.driver, bp) - if err != nil { - return nil, err - } - - return &layerReader{ - fileReader: *fr, - digest: dgst, - }, nil -} - -func (ls *layerStore) Delete(dgst digest.Digest) error { - lp, err := ls.linkPath(dgst) - if err != nil { - return err - } - - lp = strings.TrimSuffix(lp, "/link") - - return ls.repository.driver.Delete(lp) -} - -// Upload begins a layer upload, returning a handle. If the layer upload -// is already in progress or the layer has already been uploaded, this -// will return an error. -func (ls *layerStore) Upload() (distribution.LayerUpload, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Upload") - - // NOTE(stevvooe): Consider the issues with allowing concurrent upload of - // the same two layers. Should it be disallowed? For now, we allow both - // parties to proceed and the first one uploads the layer.
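Because the init() above publishes the counters through the standard expvar package, any process that links this storage package and serves HTTP exposes them at /debug/vars. A sketch; the blank import stands in for whatever program actually pulls the package in, and the listen address is illustrative.

```go
package main

import (
	"expvar"
	"fmt"
	"net/http"

	_ "github.com/docker/distribution/registry/storage" // runs the init() above
)

func main() {
	// Importing expvar registers a JSON dump of all published vars at
	// /debug/vars on http.DefaultServeMux.
	go http.ListenAndServe("localhost:6060", nil)

	if registry := expvar.Get("registry"); registry != nil {
		// Renders the nested registry -> cache -> storage -> layerinfo maps.
		fmt.Println(registry.String())
	}
}
```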
- - uuid := uuid.New() - startedAt := time.Now().UTC() - - path, err := ls.repository.registry.pm.path(uploadDataPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - // Write a startedat file for this upload - if err := ls.repository.driver.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - return nil, err - } - - return ls.newLayerUpload(uuid, path, startedAt) -} - -// Resume continues an in progress layer upload, returning the current -// state of the upload. -func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume") - startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtBytes, err := ls.repository.driver.GetContent(startedAtPath) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return nil, distribution.ErrLayerUploadUnknown - default: - return nil, err - } - } - - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return nil, err - } - - path, err := ls.repository.pm.path(uploadDataPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - return ls.newLayerUpload(uuid, path, startedAt) -} - -// newLayerUpload allocates a new upload controller with the given state. -func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (distribution.LayerUpload, error) { - fw, err := newFileWriter(ls.repository.driver, path) - if err != nil { - return nil, err - } - - lw := &layerWriter{ - layerStore: ls, - uuid: uuid, - startedAt: startedAt, - bufferedFileWriter: *fw, - } - - lw.setupResumableDigester() - - return lw, nil -} - -func (ls *layerStore) linkPath(dgst digest.Digest) (string, error) { - return ls.repository.registry.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst}) -} - -func (ls *layerStore) path(dgst digest.Digest) (string, error) { - // We must traverse this path through the link to enforce ownership. - layerLinkPath, err := ls.linkPath(dgst) - if err != nil { - return "", err - } - - blobPath, err := ls.repository.blobStore.resolve(layerLinkPath) - - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return "", distribution.ErrUnknownLayer{ - FSLayer: manifest.FSLayer{BlobSum: dgst}, - } - default: - return "", err - } - } - - return blobPath, nil -} - -commit 902855ed3aa5587ec1d2ed737a885b079cd7131d -mark :219 -author Andy Goldstein 1430158986 -0400 -committer Andy Goldstein 1432140076 -0400 -data 101 -UPSTREAM(docker/distribution): add layer unlinking - -Add ability to unlink a layer from a repository. 
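Putting Upload and Resume together from the client's perspective: the UUID round-trips through the caller, the startedat file anchors Resume, and the caller seeks to the end before appending, per the LayerUpload documentation. The helper below is hypothetical, written only against the interfaces shown.

```go
package example

import (
	"io"
	"os"

	"github.com/docker/distribution"
)

// resumeAndAppend reopens an in-progress upload by UUID and appends more data.
func resumeAndAppend(ls distribution.LayerService, uuid string, more io.Reader) error {
	upload, err := ls.Resume(uuid)
	if err != nil {
		// Yields distribution.ErrLayerUploadUnknown when the startedat
		// file for this UUID no longer exists.
		return err
	}

	// Per the LayerUpload contract, position the writer before proceeding.
	if _, err := upload.Seek(0, os.SEEK_END); err != nil {
		return err
	}

	_, err = io.Copy(upload, more)
	return err
}
```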
-from :212 -M 100644 :213 Godeps/_workspace/src/github.com/docker/distribution/registry.go -M 100644 :214 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go -M 100644 :215 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory.go -M 100644 :216 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis.go -M 100644 :217 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layercache.go -M 100644 :218 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go - -blob -mark :220 -data 6813 -package distribution - -import ( - "io" - "net/http" - "time" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "golang.org/x/net/context" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name string) (Repository, error) - - Blobs() BlobService -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Name returns the name of the repository. - Name() string - - // Manifests returns a reference to this repository's manifest service. - Manifests() ManifestService - - // Layers returns a reference to this repository's layers service. - Layers() LayerService - - // Signatures returns a reference to this repository's signatures service. - Signatures() SignatureService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. - -// ManifestService provides operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the identified by the digest, if it exists. - Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) - - // Delete removes the manifest, if it exists. - Delete(ctx context.Context, dgst digest.Digest) error - - // Put creates or updates the manifest. - Put(ctx context.Context, manifest *manifest.SignedManifest) error - - // TODO(stevvooe): The methods after this message should be moved to a - // discrete TagService, per active proposals. - - // Tags lists the tags under the named repository. - Tags(ctx context.Context) ([]string, error) - - // ExistsByTag returns true if the manifest exists. 
- ExistsByTag(ctx context.Context, tag string) (bool, error) - - // GetByTag retrieves the named manifest, if it exists. - GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) - - // TODO(stevvooe): There are several changes that need to be done to this - // interface: - // - // 1. Allow explicit tagging with Tag(digest digest.Digest, tag string) - // 2. Support reading tags with a re-entrant reader to avoid large - // allocations in the registry. - // 3. Long-term: Provide All() method that lets one scroll through all of - // the manifest entries. - // 4. Long-term: break out concept of signing from manifests. This is - // really a part of the distribution sprint. - // 5. Long-term: Manifest should be an interface. This code shouldn't - // really be concerned with the storage format. -} - -// LayerService provides operations on layer files in a backend storage. -type LayerService interface { - // Exists returns true if the layer exists. - Exists(digest digest.Digest) (bool, error) - - // Fetch the layer identifed by TarSum. - Fetch(digest digest.Digest) (Layer, error) - - // Delete unlinks the layer from a Repository. - Delete(dgst digest.Digest) error - - // Upload begins a layer upload to repository identified by name, - // returning a handle. - Upload() (LayerUpload, error) - - // Resume continues an in progress layer upload, returning a handle to the - // upload. The caller should seek to the latest desired upload location - // before proceeding. - Resume(uuid string) (LayerUpload, error) -} - -// Layer provides a readable and seekable layer object. Typically, -// implementations are *not* goroutine safe. -type Layer interface { - // http.ServeContent requires an efficient implementation of - // ReadSeeker.Seek(0, os.SEEK_END). - io.ReadSeeker - io.Closer - - // Digest returns the unique digest of the blob. - Digest() digest.Digest - - // Length returns the length in bytes of the blob. - Length() int64 - - // CreatedAt returns the time this layer was created. - CreatedAt() time.Time - - // Handler returns an HTTP handler which serves the layer content, whether - // by providing a redirect directly to the content, or by serving the - // content itself. - Handler(r *http.Request) (http.Handler, error) -} - -// LayerUpload provides a handle for working with in-progress uploads. -// Instances can be obtained from the LayerService.Upload and -// LayerService.Resume. -type LayerUpload interface { - io.WriteSeeker - io.ReaderFrom - io.Closer - - // UUID returns the identifier for this upload. - UUID() string - - // StartedAt returns the time this layer upload was started. - StartedAt() time.Time - - // Finish marks the upload as completed, returning a valid handle to the - // uploaded layer. The digest is validated against the contents of the - // uploaded layer. - Finish(digest digest.Digest) (Layer, error) - - // Cancel the layer upload process. - Cancel() error -} - -// SignatureService provides operations on signatures. -type SignatureService interface { - // Get retrieves all of the signature blobs for the specified digest. - Get(dgst digest.Digest) ([][]byte, error) - - // Put stores the signature for the provided digest. - Put(dgst digest.Digest, signatures ...[]byte) error -} - -type BlobService interface { - Delete(dgst digest.Digest) error -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. 
Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Length in bytes of content. - Length int64 `json:"length,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -blob -mark :221 -data 19334 -package handlers - -import ( - "expvar" - "fmt" - "math/rand" - "net" - "net/http" - "os" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/notifications" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - registrymiddleware "github.com/docker/distribution/registry/middleware/registry" - repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/factory" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" - "github.com/garyburd/redigo/redis" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// App is a global registry application object. Shared resources can be placed -// on this object that will be accessible from all requests. Any writable -// fields should be protected. -type App struct { - context.Context - - Config configuration.Configuration - - router *mux.Router // main application router, configured with dispatchers - driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. - registry distribution.Namespace // registry is the primary registry backend for the app instance. - accessController auth.AccessController // main access controller for application - - // events contains notification related configuration. - events struct { - sink notifications.Sink - source notifications.SourceRecord - } - - redis *redis.Pool -} - -// NewApp takes a configuration and returns a configured app, ready to serve -// requests. The app only implements ServeHTTP and can be wrapped in other -// handlers accordingly. -func NewApp(ctx context.Context, configuration configuration.Configuration) *App { - app := &App{ - Config: configuration, - Context: ctx, - router: v2.RouterWithPrefix(configuration.HTTP.Prefix), - } - - app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) - - // Register the handler dispatchers. 
- app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { - return http.HandlerFunc(apiBase) - }) - app.register(v2.RouteNameManifest, imageManifestDispatcher) - app.register(v2.RouteNameTags, tagsDispatcher) - app.register(v2.RouteNameBlob, layerDispatcher) - app.register(v2.RouteNameBlobUpload, layerUploadDispatcher) - app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher) - - var err error - app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) - - if err != nil { - // TODO(stevvooe): Move the creation of a service into a protected - // method, where this is created lazily. Its status can be queried via - // a health check. - panic(err) - } - - startUploadPurger(app.driver, ctxu.GetLogger(app)) - - app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) - if err != nil { - panic(err) - } - - app.configureEvents(&configuration) - app.configureRedis(&configuration) - - // configure storage caches - if cc, ok := configuration.Storage["cache"]; ok { - switch cc["layerinfo"] { - case "redis": - if app.redis == nil { - panic("redis configuration required to use for layerinfo cache") - } - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis)) - ctxu.GetLogger(app).Infof("using redis layerinfo cache") - case "inmemory": - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewInMemoryLayerInfoCache()) - ctxu.GetLogger(app).Infof("using inmemory layerinfo cache") - default: - if cc["layerinfo"] != "" { - ctxu.GetLogger(app).Warnf("unkown cache type %q, caching disabled", configuration.Storage["cache"]) - } - } - } - - if app.registry == nil { - // configure the registry if no cache section is available. - app.registry = storage.NewRegistryWithDriver(app.driver, nil) - } - - app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) - if err != nil { - panic(err) - } - - authType := configuration.Auth.Type() - - if authType != "" { - accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) - if err != nil { - panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) - } - app.accessController = accessController - } - - return app -} - -func (app *App) Registry() distribution.Namespace { - return app.registry -} - -// register a handler with the application, by route name. The handler will be -// passed through the application filters and context will be constructed at -// request time. -func (app *App) register(routeName string, dispatch dispatchFunc) { - - // TODO(stevvooe): This odd dispatcher/route registration is by-product of - // some limitations in the gorilla/mux router. We are using it to keep - // routing consistent between the client and server, but we may want to - // replace it with manual routing and structure-based dispatch for better - // control over the request execution. - - app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) -} - -// configureEvents prepares the event sink for action. -func (app *App) configureEvents(configuration *configuration.Configuration) { - // Configure all of the endpoint sinks. 
- var sinks []notifications.Sink - for _, endpoint := range configuration.Notifications.Endpoints { - if endpoint.Disabled { - ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) - continue - } - - ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) - endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ - Timeout: endpoint.Timeout, - Threshold: endpoint.Threshold, - Backoff: endpoint.Backoff, - Headers: endpoint.Headers, - }) - - sinks = append(sinks, endpoint) - } - - // NOTE(stevvooe): Moving to a new queueing implementation is as easy as - // replacing broadcaster with a rabbitmq implementation. It's recommended - // that the registry instances also act as the workers to keep deployment - // simple. - app.events.sink = notifications.NewBroadcaster(sinks...) - - // Populate registry event source - hostname, err := os.Hostname() - if err != nil { - hostname = configuration.HTTP.Addr - } else { - // try to pick the port off the config - _, port, err := net.SplitHostPort(configuration.HTTP.Addr) - if err == nil { - hostname = net.JoinHostPort(hostname, port) - } - } - - app.events.source = notifications.SourceRecord{ - Addr: hostname, - InstanceID: ctxu.GetStringValue(app, "instance.id"), - } -} - -func (app *App) configureRedis(configuration *configuration.Configuration) { - if configuration.Redis.Addr == "" { - ctxu.GetLogger(app).Infof("redis not configured") - return - } - - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - // TODO(stevvooe): Yet another use case for contextual timing. - ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) - - done := func(err error) { - logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", - ctxu.Since(ctx, "redis.connect.startedat")) - if err != nil { - logger.Errorf("redis: error connecting: %v", err) - } else { - logger.Infof("redis: connect %v", configuration.Redis.Addr) - } - } - - conn, err := redis.DialTimeout("tcp", - configuration.Redis.Addr, - configuration.Redis.DialTimeout, - configuration.Redis.ReadTimeout, - configuration.Redis.WriteTimeout) - if err != nil { - ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", - configuration.Redis.Addr, err) - done(err) - return nil, err - } - - // authorize the connection - if configuration.Redis.Password != "" { - if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - // select the database to use - if configuration.Redis.DB != 0 { - if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - done(nil) - return conn, nil - }, - MaxIdle: configuration.Redis.Pool.MaxIdle, - MaxActive: configuration.Redis.Pool.MaxActive, - IdleTimeout: configuration.Redis.Pool.IdleTimeout, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - // TODO(stevvooe): We can probably do something more interesting - // here with the health package. - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not avialable, proceed without cache. 
- } - - app.redis = pool - - // setup expvar - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { - return map[string]interface{}{ - "Config": configuration.Redis, - "Active": app.redis.ActiveCount(), - } - })) -} - -func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() // ensure that request body is always closed. - - // Instantiate an http context here so we can track the error codes - // returned by the request router. - ctx := defaultContextManager.context(app, w, r) - defer func() { - ctxu.GetResponseLogger(ctx).Infof("response completed") - }() - defer defaultContextManager.release(ctx) - - // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. - var err error - w, err = ctxu.GetResponseWriter(ctx) - if err != nil { - ctxu.GetLogger(ctx).Warnf("response writer not found in context") - } - - // Set a header with the Docker Distribution API Version for all responses. - w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") - app.router.ServeHTTP(w, r) -} - -// dispatchFunc takes a context and request and returns a constructed handler -// for the route. The dispatcher will use this to dynamically create request -// specific handlers for each endpoint without creating a new router for each -// request. -type dispatchFunc func(ctx *Context, r *http.Request) http.Handler - -// TODO(stevvooe): dispatchers should probably have some validation error -// chain with proper error reporting. - -// dispatcher returns a handler that constructs a request specific context and -// handler, using the dispatch factory function. -func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - context := app.context(w, r) - - if err := app.authorized(w, r, context); err != nil { - ctxu.GetLogger(context).Errorf("error authorizing context: %v", err) - return - } - - // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) - - if app.nameRequired(r) { - repository, err := app.registry.Repository(context, getName(context)) - - if err != nil { - ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) - - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - context.Errors.Push(v2.ErrorCodeNameUnknown, err) - case distribution.ErrRepositoryNameInvalid: - context.Errors.Push(v2.ErrorCodeNameInvalid, err) - } - - w.WriteHeader(http.StatusBadRequest) - serveJSON(w, context.Errors) - return - } - - // assign and decorate the authorized repository with an event bridge. - context.Repository = notifications.Listen( - repository, - app.eventBridge(context, r)) - - context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) - if err != nil { - ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) - serveJSON(w, context.Errors) - return - } - } - - dispatch(context, r).ServeHTTP(w, r) - - // Automated error response handling here. Handlers may return their - // own errors if they need different behavior (such as range errors - // for layer upload). 
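A dispatchFunc, then, is just a factory producing a per-request handler. A hypothetical dispatcher in the style of tagsDispatcher follows; it is a sketch only, would have to live in package handlers (where Context is defined), and assumes fmt and net/http are imported:

// pingDispatcher is illustrative, not part of the vendored code.
func pingDispatcher(ctx *Context, r *http.Request) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		fmt.Fprint(w, `{"ping":"pong"}`)
	})
}

// It would be registered like the built-in routes:
//   app.register(routeName, pingDispatcher)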
- if context.Errors.Len() > 0 { - if context.Value("http.response.status") == 0 { - // TODO(stevvooe): Getting this value from the context is a - // bit of a hack. We can further address with some of our - // future refactoring. - w.WriteHeader(http.StatusBadRequest) - } - serveJSON(w, context.Errors) - } - }) -} - -// context constructs the context object for the application. This only be -// called once per request. -func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := defaultContextManager.context(app, w, r) - ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, - "vars.name", - "vars.reference", - "vars.digest", - "vars.uuid")) - - context := &Context{ - App: app, - Context: ctx, - urlBuilder: v2.NewURLBuilderFromRequest(r), - } - - return context -} - -// authorized checks if the request can proceed with access to the requested -// repository. If it succeeds, the context may access the requested -// repository. An error will be returned if access is not available. -func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { - ctxu.GetLogger(context).Debug("authorizing request") - repo := getName(context) - - if app.accessController == nil { - return nil // access controller is not enabled. - } - - var accessRecords []auth.Access - - if repo != "" { - accessRecords = appendAccessRecords(accessRecords, r.Method, repo) - } else { - // Only allow the name not to be set on the base route. - if app.nameRequired(r) { - // For this to be properly secured, repo must always be set for a - // resource that may make a modification. The only condition under - // which name is not set and we still allow access is when the - // base route is accessed. This section prevents us from making - // that mistake elsewhere in the code, allowing any operation to - // proceed. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusForbidden) - - var errs v2.Errors - errs.Push(v2.ErrorCodeUnauthorized) - serveJSON(w, errs) - return fmt.Errorf("forbidden: no repository name") - } - } - - ctx, err := app.accessController.Authorized(context.Context, accessRecords...) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - w.Header().Set("Content-Type", "application/json; charset=utf-8") - err.ServeHTTP(w, r) - - var errs v2.Errors - errs.Push(v2.ErrorCodeUnauthorized, accessRecords) - serveJSON(w, errs) - default: - // This condition is a potential security problem either in - // the configuration or whatever is backing the access - // controller. Just return a bad request with no information - // to avoid exposure. The request should not proceed. - ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) - w.WriteHeader(http.StatusBadRequest) - } - - return err - } - - // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context - // should be replaced by another, rather than replacing the context on a - // mutable object. - context.Context = ctx - return nil -} - -// eventBridge returns a bridge for the current request, configured with the -// correct actor and source. 
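Concretely, the records that authorized hands to the access controller are plain values; a GET against repository "foo/bar" yields a single pull record, mirroring what appendAccessRecords below constructs. The repository name here is illustrative:

package example

import "github.com/docker/distribution/registry/auth"

// pullAccess shows the record shape produced for GET/HEAD requests.
var pullAccess = auth.Access{
	Resource: auth.Resource{Type: "repository", Name: "foo/bar"},
	Action:   "pull",
}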
-func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { - actor := notifications.ActorRecord{ - Name: getUserName(ctx, r), - } - request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) - - return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) -} - -// nameRequired returns true if the route requires a name. -func (app *App) nameRequired(r *http.Request) bool { - route := mux.CurrentRoute(r) - return route == nil || route.GetName() != v2.RouteNameBase -} - -// apiBase implements a simple yes-man for doing overall checks against the -// api. This can support auth roundtrips to support docker login. -func apiBase(w http.ResponseWriter, r *http.Request) { - const emptyJSON = "{}" - // Provide a simple /v2/ 200 OK response with empty json response. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) - - fmt.Fprint(w, emptyJSON) -} - -// appendAccessRecords checks the method and adds the appropriate Access records to the records list. -func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { - resource := auth.Resource{ - Type: "repository", - Name: repo, - } - - switch method { - case "GET", "HEAD": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }) - case "POST", "PUT", "PATCH": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }, - auth.Access{ - Resource: resource, - Action: "push", - }) - case "DELETE": - // DELETE access requires full admin rights, which is represented - // as "*". This may not be ideal. - records = append(records, - auth.Access{ - Resource: resource, - Action: "*", - }) - } - return records -} - -// applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { - for _, mw := range middlewares { - rmw, err := registrymiddleware.Get(mw.Name, mw.Options, registry) - if err != nil { - return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) - } - registry = rmw - } - return registry, nil - -} - -// applyRepoMiddleware wraps a repository with the configured middlewares -func applyRepoMiddleware(repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { - for _, mw := range middlewares { - rmw, err := repositorymiddleware.Get(mw.Name, mw.Options, repository) - if err != nil { - return nil, err - } - repository = rmw - } - return repository, nil -} - -// applyStorageMiddleware wraps a storage driver with the configured middlewares -func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { - for _, mw := range middlewares { - smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) - if err != nil { - return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) - } - driver = smw - } - return driver, nil -} - -// startUploadPurger schedules a goroutine which will periodically -// check upload directories for old files and delete them -func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger) { - rand.Seed(time.Now().Unix()) - jitter := time.Duration(rand.Int()%60) * time.Minute - - // Start with reasonable defaults - // 
TODO:(richardscothern) make configurable - purgeAge := time.Duration(7 * 24 * time.Hour) - timeBetweenPurges := time.Duration(1 * 24 * time.Hour) - - go func() { - log.Infof("Starting upload purge in %s", jitter) - time.Sleep(jitter) - - for { - storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAge), true) - log.Infof("Starting upload purge in %s", timeBetweenPurges) - time.Sleep(timeBetweenPurges) - } - }() - -} - -blob -mark :222 -data 4588 -package storage - -import ( - "fmt" - "strings" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" -) - -// TODO(stevvooe): Currently, the blobStore implementation used by the -// manifest store. The layer store should be refactored to better leverage the -// blobStore, reducing duplicated code. - -// blobStore implements a generalized blob store over a driver, supporting the -// read side and link management. This object is intentionally a leaky -// abstraction, providing utility methods that support creating and traversing -// backend links. -type blobStore struct { - driver storagedriver.StorageDriver - pm *pathMapper - ctx context.Context -} - -var _ distribution.BlobService = &blobStore{} - -func (bs *blobStore) Delete(dgst digest.Digest) error { - found, err := bs.exists(dgst) - if err != nil { - return err - } - - if !found { - // TODO if the blob doesn't exist, should this be an error? - return nil - } - - path, err := bs.path(dgst) - - if err != nil { - return err - } - - path = strings.TrimSuffix(path, "/data") - - return bs.driver.Delete(path) -} - -// exists reports whether or not the path exists. If the driver returns error -// other than storagedriver.PathNotFound, an error may be returned. -func (bs *blobStore) exists(dgst digest.Digest) (bool, error) { - path, err := bs.path(dgst) - - if err != nil { - return false, err - } - - ok, err := exists(bs.driver, path) - if err != nil { - return false, err - } - - return ok, nil -} - -// get retrieves the blob by digest, returning it a byte slice. This should -// only be used for small objects. -func (bs *blobStore) get(dgst digest.Digest) ([]byte, error) { - bp, err := bs.path(dgst) - if err != nil { - return nil, err - } - - return bs.driver.GetContent(bp) -} - -// link links the path to the provided digest by writing the digest into the -// target file. -func (bs *blobStore) link(path string, dgst digest.Digest) error { - if exists, err := bs.exists(dgst); err != nil { - return err - } else if !exists { - return fmt.Errorf("cannot link non-existent blob") - } - - // The contents of the "link" file are the exact string contents of the - // digest, which is specified in that package. - return bs.driver.PutContent(path, []byte(dgst)) -} - -// linked reads the link at path and returns the content. -func (bs *blobStore) linked(path string) ([]byte, error) { - linked, err := bs.readlink(path) - if err != nil { - return nil, err - } - - return bs.get(linked) -} - -// readlink returns the linked digest at path. 
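Since a link file's entire content is the digest string, linking and resolving reduce to PutContent/GetContent plus a parse. A small sketch of that byte-level round trip; the roundTrip helper and its package are hypothetical:

package example

import "github.com/docker/distribution/digest"

// roundTrip mimics link/readlink at the byte level: link stores the exact
// digest string, and readlink parses it back out of the file content.
func roundTrip(dgst digest.Digest) (digest.Digest, error) {
	content := []byte(dgst) // what link writes via PutContent
	return digest.ParseDigest(string(content))
}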
-func (bs *blobStore) readlink(path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(path) - if err != nil { - return "", err - } - - linked, err := digest.ParseDigest(string(content)) - if err != nil { - return "", err - } - - if exists, err := bs.exists(linked); err != nil { - return "", err - } else if !exists { - return "", fmt.Errorf("link %q invalid: blob %s does not exist", path, linked) - } - - return linked, nil -} - -// resolve reads the digest link at path and returns the blob store link. -func (bs *blobStore) resolve(path string) (string, error) { - dgst, err := bs.readlink(path) - if err != nil { - return "", err - } - - return bs.path(dgst) -} - -// put stores the content p in the blob store, calculating the digest. If the -// content is already present, only the digest will be returned. This should -// only be used for small objects, such as manifests. -func (bs *blobStore) put(p []byte) (digest.Digest, error) { - dgst, err := digest.FromBytes(p) - if err != nil { - ctxu.GetLogger(bs.ctx).Errorf("error digesting content: %v, %s", err, string(p)) - return "", err - } - - bp, err := bs.path(dgst) - if err != nil { - return "", err - } - - // If the content already exists, just return the digest. - if exists, err := bs.exists(dgst); err != nil { - return "", err - } else if exists { - return dgst, nil - } - - return dgst, bs.driver.PutContent(bp, p) -} - -// path returns the canonical path for the blob identified by digest. The blob -// may or may not exist. -func (bs *blobStore) path(dgst digest.Digest) (string, error) { - bp, err := bs.pm.path(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return "", err - } - - return bp, nil -} - -// exists provides a utility method to test whether or not -func exists(driver storagedriver.StorageDriver, path string) (bool, error) { - if _, err := driver.Stat(path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return false, nil - default: - return false, err - } - } - - return true, nil -} - -blob -mark :223 -data 3681 -package storage - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" -) - -// registry is the top-level implementation of Registry for use in the storage -// package. All instances should descend from this object. -type registry struct { - driver storagedriver.StorageDriver - pm *pathMapper - blobStore *blobStore - layerInfoCache cache.LayerInfoCache -} - -// NewRegistryWithDriver creates a new registry instance from the provided -// driver. The resulting registry may be shared by multiple goroutines but is -// cheap to allocate. -func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Namespace { - bs := &blobStore{ - driver: driver, - pm: defaultPathMapper, - } - - return ®istry{ - driver: driver, - blobStore: bs, - - // TODO(sday): This should be configurable. - pm: defaultPathMapper, - layerInfoCache: layerInfoCache, - } -} - -// Scope returns the namespace scope for a registry. The registry -// will only serve repositories contained within this scope. -func (reg *registry) Scope() distribution.Scope { - return distribution.GlobalScope -} - -// Repository returns an instance of the repository tied to the registry. 
-// Instances should not be shared between goroutines but are cheap to -// allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) { - if err := v2.ValidateRespositoryName(name); err != nil { - return nil, distribution.ErrRepositoryNameInvalid{ - Name: name, - Reason: err, - } - } - - return &repository{ - ctx: ctx, - registry: reg, - name: name, - }, nil -} - -// repository provides name-scoped access to various services. -type repository struct { - *registry - ctx context.Context - name string -} - -// Name returns the name of the repository. -func (repo *repository) Name() string { - return repo.name -} - -// Manifests returns an instance of ManifestService. Instantiation is cheap and -// may be context sensitive in the future. The instance should be used similar -// to a request local. -func (repo *repository) Manifests() distribution.ManifestService { - return &manifestStore{ - repository: repo, - revisionStore: &revisionStore{ - repository: repo, - }, - tagStore: &tagStore{ - repository: repo, - }, - } -} - -// Layers returns an instance of the LayerService. Instantiation is cheap and -// may be context sensitive in the future. The instance should be used similar -// to a request local. -func (repo *repository) Layers() distribution.LayerService { - ls := &layerStore{ - repository: repo, - } - - if repo.registry.layerInfoCache != nil { - // TODO(stevvooe): This is not the best place to setup a cache. We would - // really like to decouple the cache from the backend but also have the - // manifeset service use the layer service cache. For now, we can simply - // integrate the cache directly. The main issue is that we have layer - // access and layer data coupled in a single object. Work is already under - // way to decouple this. - - return &cachedLayerService{ - LayerService: ls, - repository: repo, - ctx: repo.ctx, - driver: repo.driver, - blobStore: repo.blobStore, - cache: repo.registry.layerInfoCache, - } - } - - return ls -} - -func (repo *repository) Signatures() distribution.SignatureService { - return &signatureStore{ - repository: repo, - } -} - -func (reg *registry) Blobs() distribution.BlobService { - return reg.blobStore -} - -commit 902855ed3aa5587ec1d2ed737a885b079cd7131d -mark :224 -author Andy Goldstein 1432138622 -0400 -committer Andy Goldstein 1432140532 -0400 -data 126 -UPSTREAM(docker/distribution): add BlobService - -Add Blobs() to Registry. -Add BlobService with the ability to Delete() a blob. 
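The practical effect of this commit: blob deletion becomes reachable straight from the namespace. A minimal sketch, with the package and helper names hypothetical:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
)

// deleteBlob removes a blob through the new Namespace.Blobs() accessor.
func deleteBlob(ns distribution.Namespace, dgst digest.Digest) error {
	return ns.Blobs().Delete(dgst)
}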
-from :219 -M 100644 :220 Godeps/_workspace/src/github.com/docker/distribution/registry.go -M 100644 :221 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go -M 100644 :222 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go -M 100644 :223 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go - -blob -mark :225 -data 20217 -package handlers - -import ( - "expvar" - "fmt" - "math/rand" - "net" - "net/http" - "os" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/notifications" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - registrymiddleware "github.com/docker/distribution/registry/middleware/registry" - repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/factory" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" - "github.com/garyburd/redigo/redis" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// App is a global registry application object. Shared resources can be placed -// on this object that will be accessible from all requests. Any writable -// fields should be protected. -type App struct { - context.Context - - Config configuration.Configuration - - router *mux.Router // main application router, configured with dispatchers - driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. - registry distribution.Namespace // registry is the primary registry backend for the app instance. - accessController auth.AccessController // main access controller for application - - // events contains notification related configuration. - events struct { - sink notifications.Sink - source notifications.SourceRecord - } - - redis *redis.Pool -} - -// NewApp takes a configuration and returns a configured app, ready to serve -// requests. The app only implements ServeHTTP and can be wrapped in other -// handlers accordingly. -func NewApp(ctx context.Context, configuration configuration.Configuration) *App { - app := &App{ - Config: configuration, - Context: ctx, - router: v2.RouterWithPrefix(configuration.HTTP.Prefix), - } - - app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) - - // Register the handler dispatchers. - app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { - return http.HandlerFunc(apiBase) - }) - app.register(v2.RouteNameManifest, imageManifestDispatcher) - app.register(v2.RouteNameTags, tagsDispatcher) - app.register(v2.RouteNameBlob, layerDispatcher) - app.register(v2.RouteNameBlobUpload, layerUploadDispatcher) - app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher) - - var err error - app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) - - if err != nil { - // TODO(stevvooe): Move the creation of a service into a protected - // method, where this is created lazily. Its status can be queried via - // a health check. 
- panic(err) - } - - startUploadPurger(app.driver, ctxu.GetLogger(app)) - - app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) - if err != nil { - panic(err) - } - - app.configureEvents(&configuration) - app.configureRedis(&configuration) - - // configure storage caches - if cc, ok := configuration.Storage["cache"]; ok { - switch cc["layerinfo"] { - case "redis": - if app.redis == nil { - panic("redis configuration required to use for layerinfo cache") - } - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis)) - ctxu.GetLogger(app).Infof("using redis layerinfo cache") - case "inmemory": - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewInMemoryLayerInfoCache()) - ctxu.GetLogger(app).Infof("using inmemory layerinfo cache") - default: - if cc["layerinfo"] != "" { - ctxu.GetLogger(app).Warnf("unkown cache type %q, caching disabled", configuration.Storage["cache"]) - } - } - } - - if app.registry == nil { - // configure the registry if no cache section is available. - app.registry = storage.NewRegistryWithDriver(app.driver, nil) - } - - app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) - if err != nil { - panic(err) - } - - authType := configuration.Auth.Type() - - if authType != "" { - accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) - if err != nil { - panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) - } - app.accessController = accessController - } - - return app -} - -func (app *App) Registry() distribution.Namespace { - return app.registry -} - -type customAccessRecordsFunc func(*http.Request) []auth.Access - -func NoCustomAccessRecords(*http.Request) []auth.Access { - return []auth.Access{} -} - -func NameNotRequired(*http.Request) bool { - return false -} - -func NameRequired(*http.Request) bool { - return true -} - -// register a handler with the application, by route name. The handler will be -// passed through the application filters and context will be constructed at -// request time. -func (app *App) register(routeName string, dispatch dispatchFunc) { - app.RegisterRoute(app.router.GetRoute(routeName), dispatch, app.nameRequired, NoCustomAccessRecords) -} - -func (app *App) RegisterRoute(route *mux.Route, dispatch dispatchFunc, nameRequired nameRequiredFunc, accessRecords customAccessRecordsFunc) { - // TODO(stevvooe): This odd dispatcher/route registration is by-product of - // some limitations in the gorilla/mux router. We are using it to keep - // routing consistent between the client and server, but we may want to - // replace it with manual routing and structure-based dispatch for better - // control over the request execution. - route.Handler(app.dispatcher(dispatch, nameRequired, accessRecords)) -} - -func (app *App) NewRoute() *mux.Route { - return app.router.NewRoute() -} - -// configureEvents prepares the event sink for action. -func (app *App) configureEvents(configuration *configuration.Configuration) { - // Configure all of the endpoint sinks. 
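The exported RegisterRoute and NewRoute hooks above are what let an embedding application graft on custom routes with their own naming and auth requirements. A hypothetical fragment of such wiring; pruneDispatcher and the "admin" resource type are made up for illustration:

// Hypothetical wiring by an embedder:
app.RegisterRoute(
	app.NewRoute().Path("/admin/prune"), // gorilla/mux route
	pruneDispatcher,                     // an illustrative dispatchFunc
	NameNotRequired,                     // no repository name on this route
	func(*http.Request) []auth.Access {
		return []auth.Access{{Resource: auth.Resource{Type: "admin"}, Action: "*"}}
	},
)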
- var sinks []notifications.Sink - for _, endpoint := range configuration.Notifications.Endpoints { - if endpoint.Disabled { - ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) - continue - } - - ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) - endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ - Timeout: endpoint.Timeout, - Threshold: endpoint.Threshold, - Backoff: endpoint.Backoff, - Headers: endpoint.Headers, - }) - - sinks = append(sinks, endpoint) - } - - // NOTE(stevvooe): Moving to a new queueing implementation is as easy as - // replacing broadcaster with a rabbitmq implementation. It's recommended - // that the registry instances also act as the workers to keep deployment - // simple. - app.events.sink = notifications.NewBroadcaster(sinks...) - - // Populate registry event source - hostname, err := os.Hostname() - if err != nil { - hostname = configuration.HTTP.Addr - } else { - // try to pick the port off the config - _, port, err := net.SplitHostPort(configuration.HTTP.Addr) - if err == nil { - hostname = net.JoinHostPort(hostname, port) - } - } - - app.events.source = notifications.SourceRecord{ - Addr: hostname, - InstanceID: ctxu.GetStringValue(app, "instance.id"), - } -} - -func (app *App) configureRedis(configuration *configuration.Configuration) { - if configuration.Redis.Addr == "" { - ctxu.GetLogger(app).Infof("redis not configured") - return - } - - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - // TODO(stevvooe): Yet another use case for contextual timing. - ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) - - done := func(err error) { - logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", - ctxu.Since(ctx, "redis.connect.startedat")) - if err != nil { - logger.Errorf("redis: error connecting: %v", err) - } else { - logger.Infof("redis: connect %v", configuration.Redis.Addr) - } - } - - conn, err := redis.DialTimeout("tcp", - configuration.Redis.Addr, - configuration.Redis.DialTimeout, - configuration.Redis.ReadTimeout, - configuration.Redis.WriteTimeout) - if err != nil { - ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", - configuration.Redis.Addr, err) - done(err) - return nil, err - } - - // authorize the connection - if configuration.Redis.Password != "" { - if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - // select the database to use - if configuration.Redis.DB != 0 { - if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - done(nil) - return conn, nil - }, - MaxIdle: configuration.Redis.Pool.MaxIdle, - MaxActive: configuration.Redis.Pool.MaxActive, - IdleTimeout: configuration.Redis.Pool.IdleTimeout, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - // TODO(stevvooe): We can probably do something more interesting - // here with the health package. - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not avialable, proceed without cache. 
- } - - app.redis = pool - - // setup expvar - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { - return map[string]interface{}{ - "Config": configuration.Redis, - "Active": app.redis.ActiveCount(), - } - })) -} - -func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() // ensure that request body is always closed. - - // Instantiate an http context here so we can track the error codes - // returned by the request router. - ctx := defaultContextManager.context(app, w, r) - defer func() { - ctxu.GetResponseLogger(ctx).Infof("response completed") - }() - defer defaultContextManager.release(ctx) - - // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. - var err error - w, err = ctxu.GetResponseWriter(ctx) - if err != nil { - ctxu.GetLogger(ctx).Warnf("response writer not found in context") - } - - // Set a header with the Docker Distribution API Version for all responses. - w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") - app.router.ServeHTTP(w, r) -} - -// dispatchFunc takes a context and request and returns a constructed handler -// for the route. The dispatcher will use this to dynamically create request -// specific handlers for each endpoint without creating a new router for each -// request. -type dispatchFunc func(ctx *Context, r *http.Request) http.Handler - -// TODO(stevvooe): dispatchers should probably have some validation error -// chain with proper error reporting. - -// dispatcher returns a handler that constructs a request specific context and -// handler, using the dispatch factory function. -func (app *App) dispatcher(dispatch dispatchFunc, nameRequired nameRequiredFunc, accessRecords customAccessRecordsFunc) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - context := app.context(w, r) - - if err := app.authorized(w, r, context, nameRequired, accessRecords(r)); err != nil { - ctxu.GetLogger(context).Errorf("error authorizing context: %v", err) - return - } - - // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) - - if nameRequired(r) { - repository, err := app.registry.Repository(context, getName(context)) - - if err != nil { - ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) - - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - context.Errors.Push(v2.ErrorCodeNameUnknown, err) - case distribution.ErrRepositoryNameInvalid: - context.Errors.Push(v2.ErrorCodeNameInvalid, err) - } - - w.WriteHeader(http.StatusBadRequest) - serveJSON(w, context.Errors) - return - } - - // assign and decorate the authorized repository with an event bridge. - context.Repository = notifications.Listen( - repository, - app.eventBridge(context, r)) - - context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) - if err != nil { - ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) - serveJSON(w, context.Errors) - return - } - } - - dispatch(context, r).ServeHTTP(w, r) - - // Automated error response handling here. Handlers may return their - // own errors if they need different behavior (such as range errors - // for layer upload). 
- if context.Errors.Len() > 0 { - if context.Value("http.response.status") == 0 { - // TODO(stevvooe): Getting this value from the context is a - // bit of a hack. We can further address with some of our - // future refactoring. - w.WriteHeader(http.StatusBadRequest) - } - serveJSON(w, context.Errors) - } - }) -} - -// context constructs the context object for the application. This only be -// called once per request. -func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := defaultContextManager.context(app, w, r) - ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, - "vars.name", - "vars.reference", - "vars.digest", - "vars.uuid")) - - context := &Context{ - App: app, - Context: ctx, - urlBuilder: v2.NewURLBuilderFromRequest(r), - } - - return context -} - -// authorized checks if the request can proceed with access to the requested -// repository. If it succeeds, the context may access the requested -// repository. An error will be returned if access is not available. -func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context, nameRequired nameRequiredFunc, customAccessRecords []auth.Access) error { - ctxu.GetLogger(context).Debug("authorizing request") - repo := getName(context) - - if app.accessController == nil { - return nil // access controller is not enabled. - } - - var accessRecords []auth.Access - accessRecords = append(accessRecords, customAccessRecords...) - - if repo != "" { - accessRecords = appendAccessRecords(accessRecords, r.Method, repo) - } - - if len(accessRecords) == 0 { - // Only allow the name not to be set on the base route. - if nameRequired(r) { - // For this to be properly secured, repo must always be set for a - // resource that may make a modification. The only condition under - // which name is not set and we still allow access is when the - // base route is accessed. This section prevents us from making - // that mistake elsewhere in the code, allowing any operation to - // proceed. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusForbidden) - - var errs v2.Errors - errs.Push(v2.ErrorCodeUnauthorized) - serveJSON(w, errs) - return fmt.Errorf("forbidden: no repository name") - } - } - - ctx, err := app.accessController.Authorized(context.Context, accessRecords...) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - w.Header().Set("Content-Type", "application/json; charset=utf-8") - err.ServeHTTP(w, r) - - var errs v2.Errors - errs.Push(v2.ErrorCodeUnauthorized, accessRecords) - serveJSON(w, errs) - default: - // This condition is a potential security problem either in - // the configuration or whatever is backing the access - // controller. Just return a bad request with no information - // to avoid exposure. The request should not proceed. - ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) - w.WriteHeader(http.StatusBadRequest) - } - - return err - } - - // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context - // should be replaced by another, rather than replacing the context on a - // mutable object. - context.Context = ctx - return nil -} - -// eventBridge returns a bridge for the current request, configured with the -// correct actor and source. 
-func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { - actor := notifications.ActorRecord{ - Name: getUserName(ctx, r), - } - request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) - - return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) -} - -type nameRequiredFunc func(*http.Request) bool - -// nameRequired returns true if the route requires a name. -func (app *App) nameRequired(r *http.Request) bool { - route := mux.CurrentRoute(r) - return route == nil || route.GetName() != v2.RouteNameBase -} - -// apiBase implements a simple yes-man for doing overall checks against the -// api. This can support auth roundtrips to support docker login. -func apiBase(w http.ResponseWriter, r *http.Request) { - const emptyJSON = "{}" - // Provide a simple /v2/ 200 OK response with empty json response. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) - - fmt.Fprint(w, emptyJSON) -} - -// appendAccessRecords checks the method and adds the appropriate Access records to the records list. -func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { - resource := auth.Resource{ - Type: "repository", - Name: repo, - } - - switch method { - case "GET", "HEAD": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }) - case "POST", "PUT", "PATCH": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }, - auth.Access{ - Resource: resource, - Action: "push", - }) - case "DELETE": - // DELETE access requires full admin rights, which is represented - // as "*". This may not be ideal. - records = append(records, - auth.Access{ - Resource: resource, - Action: "*", - }) - } - return records -} - -// applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { - for _, mw := range middlewares { - rmw, err := registrymiddleware.Get(mw.Name, mw.Options, registry) - if err != nil { - return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) - } - registry = rmw - } - return registry, nil - -} - -// applyRepoMiddleware wraps a repository with the configured middlewares -func applyRepoMiddleware(repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { - for _, mw := range middlewares { - rmw, err := repositorymiddleware.Get(mw.Name, mw.Options, repository) - if err != nil { - return nil, err - } - repository = rmw - } - return repository, nil -} - -// applyStorageMiddleware wraps a storage driver with the configured middlewares -func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { - for _, mw := range middlewares { - smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) - if err != nil { - return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) - } - driver = smw - } - return driver, nil -} - -// startUploadPurger schedules a goroutine which will periodically -// check upload directories for old files and delete them -func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger) { - rand.Seed(time.Now().Unix()) - jitter := time.Duration(rand.Int()%60) * time.Minute - - // 
Start with reasonable defaults - // TODO:(richardscothern) make configurable - purgeAge := time.Duration(7 * 24 * time.Hour) - timeBetweenPurges := time.Duration(1 * 24 * time.Hour) - - go func() { - log.Infof("Starting upload purge in %s", jitter) - time.Sleep(jitter) - - for { - storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAge), true) - log.Infof("Starting upload purge in %s", timeBetweenPurges) - time.Sleep(timeBetweenPurges) - } - }() - -} - -commit 902855ed3aa5587ec1d2ed737a885b079cd7131d -mark :226 -author Andy Goldstein 1431103588 -0400 -committer Andy Goldstein 1432140647 -0400 -data 116 -UPSTREAM(docker/distribution): custom routes/auth - -Add support for custom routes and custom auth records per route. -from :224 -M 100644 :225 Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go - -blob -mark :227 -data 3857 -package storage - -import ( - "fmt" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -type manifestStore struct { - repository *repository - - revisionStore *revisionStore - tagStore *tagStore -} - -var _ distribution.ManifestService = &manifestStore{} - -func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Exists") - return ms.revisionStore.exists(dgst) -} - -func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Get") - return ms.revisionStore.get(dgst) -} - -func (ms *manifestStore) Put(ctx context.Context, manifest *manifest.SignedManifest) error { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Put") - - // TODO(stevvooe): Add check here to see if the revision is already - // present in the repository. If it is, we should merge the signatures, do - // a shallow verify (or a full one, doesn't matter) and return an error - // indicating what happened. - - // Verify the manifest. - if err := ms.verifyManifest(manifest); err != nil { - return err - } - - // Store the revision of the manifest - revision, err := ms.revisionStore.put(manifest) - if err != nil { - return err - } - - // Now, tag the manifest - return ms.tagStore.tag(manifest.Tag, revision) -} - -// Delete removes the revision of the specified manfiest. -func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Delete") - return ms.revisionStore.delete(dgst) -} - -func (ms *manifestStore) Tags(ctx context.Context) ([]string, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Tags") - return ms.tagStore.tags() -} - -func (ms *manifestStore) ExistsByTag(ctx context.Context, tag string) (bool, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).ExistsByTag") - return ms.tagStore.exists(tag) -} - -func (ms *manifestStore) GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).GetByTag") - dgst, err := ms.tagStore.resolve(tag) - if err != nil { - return nil, err - } - - return ms.revisionStore.get(dgst) -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. 
It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. -func (ms *manifestStore) verifyManifest(mnfst *manifest.SignedManifest) error { - var errs distribution.ErrManifestVerification - if mnfst.Name != ms.repository.Name() { - // TODO(stevvooe): This needs to be an exported error - errs = append(errs, fmt.Errorf("repository name does not match manifest name")) - } - - if _, err := manifest.Verify(mnfst); err != nil { - switch err { - case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: - errs = append(errs, distribution.ErrManifestUnverified{}) - default: - if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, distribution.ErrManifestUnverified{}) - } else { - errs = append(errs, err) - } - } - } - - for _, fsLayer := range mnfst.FSLayers { - exists, err := ms.repository.Layers().Exists(fsLayer.BlobSum) - if err != nil { - errs = append(errs, err) - } - - if !exists { - errs = append(errs, distribution.ErrUnknownLayer{FSLayer: fsLayer}) - } - } - - if len(errs) != 0 { - // TODO(stevvooe): These need to be recoverable by a caller. - return errs - } - - return nil -} - -commit 902855ed3aa5587ec1d2ed737a885b079cd7131d -mark :228 -author Andy Goldstein 1432138976 -0400 -committer Andy Goldstein 1432140650 -0400 -data 90 -UPSTREAM(docker/distribution): manifest deletions - -Implement Delete in the manifestStore. -from :226 -M 100644 :227 Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks_test.go b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks_test.go new file mode 100644 index 000000000000..4e49d1182798 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/health/checks/checks_test.go @@ -0,0 +1,25 @@ +package checks + +import ( + "testing" +) + +func TestFileChecker(t *testing.T) { + if err := FileChecker("/tmp").Check(); err == nil { + t.Errorf("/tmp was expected as exists") + } + + if err := FileChecker("NoSuchFileFromMoon").Check(); err != nil { + t.Errorf("NoSuchFileFromMoon was expected as not exists, error:%v", err) + } +} + +func TestHTTPChecker(t *testing.T) { + if err := HTTPChecker("https://www.google.cybertron").Check(); err == nil { + t.Errorf("Google on Cybertron was expected as not exists") + } + + if err := HTTPChecker("https://www.google.pt").Check(); err != nil { + t.Errorf("Google at Portugal was expected as exists, error:%v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/health.go b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go index 512539c1c15b..ba9549192b5f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/health/health.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/health/health.go @@ -2,9 +2,13 @@ package health import ( "encoding/json" + "fmt" "net/http" "sync" "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" ) var ( @@ -140,7 +144,7 @@ func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int } // CheckStatus returns a map with all the current health check errors -func CheckStatus() map[string]string { +func CheckStatus() map[string]string { // 
TODO(stevvooe) this needs a proper type mutex.RLock() defer mutex.RUnlock() statusKeys := make(map[string]string) @@ -174,13 +178,13 @@ func RegisterFunc(name string, check func() error) { // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker // from an arbitrary func() error -func RegisterPeriodicFunc(name string, check func() error, period time.Duration) { +func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { Register(name, PeriodicChecker(CheckFunc(check), period)) } // RegisterPeriodicThresholdFunc allows the convenience of registering a // PeriodicChecker from an arbitrary func() error -func RegisterPeriodicThresholdFunc(name string, check func() error, period time.Duration, threshold int) { +func RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) } @@ -189,20 +193,61 @@ func RegisterPeriodicThresholdFunc(name string, check func() error, period time. // Returns 503 if any Error status exists, 200 otherwise func StatusHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - checksStatus := CheckStatus() + checks := CheckStatus() + status := http.StatusOK + // If there is an error, return 503 - if len(checksStatus) != 0 { - w.WriteHeader(http.StatusServiceUnavailable) + if len(checks) != 0 { + status = http.StatusServiceUnavailable + } + + statusResponse(w, r, status, checks) + } else { + http.NotFound(w, r) + } +} + +// Handler returns a handler that will return 503 response code if the health +// checks have failed. If everything is okay with the health checks, the +// handler will pass through to the provided handler. Use this handler to +// disable a web application when the health checks fail. +func Handler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + checks := CheckStatus() + if len(checks) != 0 { + errcode.ServeJSON(w, errcode.ErrorCodeUnavailable. + WithDetail("health check failed: please see /debug/health")) + return } - err := json.NewEncoder(w).Encode(checksStatus) - // Parsing of the JSON failed. Returning generic error message + handler.ServeHTTP(w, r) // pass through + }) +} + +// statusResponse completes the request with a response describing the health +// of the service. 
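Note the reordered registration signatures above: the period (and threshold) now come before the CheckFunc. A small usage sketch; the database handle and wiring are hypothetical:

package example

import (
	"database/sql"
	"net/http"
	"time"

	"github.com/docker/distribution/health"
)

func wire(db *sql.DB, app http.Handler) http.Handler {
	// New argument order: name, period, then the check itself.
	health.RegisterPeriodicFunc("database", 10*time.Second, func() error {
		return db.Ping()
	})

	// Gate the whole application: 503 while any registered check fails.
	return health.Handler(app)
}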
+func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) { + p, err := json.Marshal(checks) + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status: %v", err) + p, err = json.Marshal(struct { + ServerError string `json:"server_error"` + }{ + ServerError: "Could not parse error message", + }) + status = http.StatusInternalServerError + if err != nil { - w.Write([]byte("{server_error: 'Could not parse error message'}")) + context.GetLogger(context.Background()).Errorf("error serializing health status failure message: %v", err) + return } - } else { - w.WriteHeader(http.StatusNotFound) + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.WriteHeader(status) + if _, err := w.Write(p); err != nil { + context.GetLogger(context.Background()).Errorf("error writing health status response body: %v", err) } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go b/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go index 7989f0b28b0f..3228cb8015e6 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/health/health_test.go @@ -2,6 +2,7 @@ package health import ( "errors" + "fmt" "net/http" "net/http/httptest" "testing" @@ -45,3 +46,62 @@ func TestReturns503IfThereAreErrorChecks(t *testing.T) { t.Errorf("Did not get a 503.") } } + +// TestHealthHandler ensures that our handler implementation correct protects +// the web application when things aren't so healthy. +func TestHealthHandler(t *testing.T) { + // clear out existing checks. + registeredChecks = make(map[string]Checker) + + // protect an http server + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + + // wrap it in our health handler + handler = Handler(handler) + + // use this swap check status + updater := NewStatusUpdater() + Register("test_check", updater) + + // now, create a test server + server := httptest.NewServer(handler) + + checkUp := func(t *testing.T, message string) { + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("error getting success status: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusNoContent) + } + // NOTE(stevvooe): we really don't care about the body -- the format is + // not standardized or supported, yet. + } + + checkDown := func(t *testing.T, message string) { + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("error getting down status: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusServiceUnavailable { + t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusServiceUnavailable) + } + } + + // server should be up + checkUp(t, "initial health check") + + // now, we fail the health check + updater.Update(fmt.Errorf("the server is now out of commission")) + checkDown(t, "server should be down") // should be down + + // bring server back up + updater.Update(nil) + checkUp(t, "when server is back up") // now we should be back up. 
+} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go index 666b6b36bf44..48467d48e200 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifest.go @@ -8,7 +8,7 @@ import ( ) // TODO(stevvooe): When we rev the manifest format, the contents of this -// package should me moved to manifest/v1. +// package should be moved to manifest/v1. const ( // ManifestMediaType specifies the mediaType for the current version. Note diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go index baa90a5bf978..b97925a51494 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go @@ -4,11 +4,11 @@ import ( "net/http" "time" - "code.google.com/p/go-uuid/uuid" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/uuid" ) type bridge struct { @@ -53,31 +53,31 @@ func NewRequestRecord(id string, r *http.Request) RequestRecord { } } -func (b *bridge) ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error { +func (b *bridge) ManifestPushed(repo string, sm *manifest.SignedManifest) error { return b.createManifestEventAndWrite(EventActionPush, repo, sm) } -func (b *bridge) ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error { +func (b *bridge) ManifestPulled(repo string, sm *manifest.SignedManifest) error { return b.createManifestEventAndWrite(EventActionPull, repo, sm) } -func (b *bridge) ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error { +func (b *bridge) ManifestDeleted(repo string, sm *manifest.SignedManifest) error { return b.createManifestEventAndWrite(EventActionDelete, repo, sm) } -func (b *bridge) LayerPushed(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionPush, repo, layer) +func (b *bridge) BlobPushed(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPush, repo, desc) } -func (b *bridge) LayerPulled(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionPull, repo, layer) +func (b *bridge) BlobPulled(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPull, repo, desc) } -func (b *bridge) LayerDeleted(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionDelete, repo, layer) +func (b *bridge) BlobDeleted(repo string, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionDelete, repo, desc) } -func (b *bridge) createManifestEventAndWrite(action string, repo distribution.Repository, sm *manifest.SignedManifest) error { +func (b *bridge) createManifestEventAndWrite(action string, repo string, sm *manifest.SignedManifest) error { manifestEvent, err := b.createManifestEvent(action, repo, sm) if err != nil { return err @@ -86,10 +86,10 @@ func (b *bridge) createManifestEventAndWrite(action string, repo distribution.Re return 
b.sink.Write(*manifestEvent) } -func (b *bridge) createManifestEvent(action string, repo distribution.Repository, sm *manifest.SignedManifest) (*Event, error) { +func (b *bridge) createManifestEvent(action string, repo string, sm *manifest.SignedManifest) (*Event, error) { event := b.createEvent(action) event.Target.MediaType = manifest.ManifestMediaType - event.Target.Repository = repo.Name() + event.Target.Repository = repo p, err := sm.Payload() if err != nil { @@ -97,15 +97,13 @@ func (b *bridge) createManifestEvent(action string, repo distribution.Repository } event.Target.Length = int64(len(p)) - + event.Target.Size = int64(len(p)) event.Target.Digest, err = digest.FromBytes(p) if err != nil { return nil, err } - // TODO(stevvooe): Currently, the is the "tag" url: once the digest url is - // implemented, this should be replaced. - event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, sm.Tag) + event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, event.Target.Digest.String()) if err != nil { return nil, err } @@ -113,8 +111,8 @@ func (b *bridge) createManifestEvent(action string, repo distribution.Repository return event, nil } -func (b *bridge) createLayerEventAndWrite(action string, repo distribution.Repository, layer distribution.Layer) error { - event, err := b.createLayerEvent(action, repo, layer) +func (b *bridge) createBlobEventAndWrite(action string, repo string, desc distribution.Descriptor) error { + event, err := b.createBlobEvent(action, repo, desc) if err != nil { return err } @@ -122,18 +120,14 @@ func (b *bridge) createLayerEventAndWrite(action string, repo distribution.Repos return b.sink.Write(*event) } -func (b *bridge) createLayerEvent(action string, repo distribution.Repository, layer distribution.Layer) (*Event, error) { +func (b *bridge) createBlobEvent(action string, repo string, desc distribution.Descriptor) (*Event, error) { event := b.createEvent(action) - event.Target.MediaType = layerMediaType - event.Target.Repository = repo.Name() - - event.Target.Length = layer.Length() - - dgst := layer.Digest() - event.Target.Digest = dgst + event.Target.Descriptor = desc + event.Target.Length = desc.Size + event.Target.Repository = repo var err error - event.Target.URL, err = b.ub.BuildBlobURL(repo.Name(), dgst) + event.Target.URL, err = b.ub.BuildBlobURL(repo, desc.Digest) if err != nil { return nil, err } @@ -154,7 +148,7 @@ func (b *bridge) createEvent(action string) *Event { // createEvent returns a new event, timestamped, with the specified action. func createEvent(action string) *Event { return &Event{ - ID: uuid.New(), + ID: uuid.Generate().String(), Timestamp: time.Now(), Action: action, } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go new file mode 100644 index 000000000000..fbf557d8a017 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go @@ -0,0 +1,166 @@ +package notifications + +import ( + "testing" + + "github.com/docker/distribution/digest" + + "github.com/docker/libtrust" + + "github.com/docker/distribution/manifest" + + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/uuid" +) + +var ( + // common environment for expected manifest events. 
+ + repo = "test/repo" + source = SourceRecord{ + Addr: "remote.test", + InstanceID: uuid.Generate().String(), + } + ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/")) + + actor = ActorRecord{ + Name: "test", + } + request = RequestRecord{} + m = manifest.Manifest{ + Name: repo, + Tag: "latest", + } + + sm *manifest.SignedManifest + payload []byte + dgst digest.Digest +) + +func TestEventBridgeManifestPulled(t *testing.T) { + + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPull, events...) + + return nil + })) + + if err := l.ManifestPulled(repo, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPushed(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPush, events...) + + return nil + })) + + if err := l.ManifestPushed(repo, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestDeleted(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionDelete, events...) + + return nil + })) + + if err := l.ManifestDeleted(repo, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func createTestEnv(t *testing.T, fn testSinkFn) Listener { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("error generating private key: %v", err) + } + + sm, err = manifest.Sign(&m, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + payload, err = sm.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + dgst, err = digest.FromBytes(payload) + if err != nil { + t.Fatalf("error digesting manifest payload: %v", err) + } + + return NewBridge(ub, source, actor, request, fn) +} + +func checkCommonManifest(t *testing.T, action string, events ...Event) { + checkCommon(t, events...) + + event := events[0] + if event.Action != action { + t.Fatalf("unexpected event action: %q != %q", event.Action, action) + } + + u, err := ub.BuildManifestURL(repo, dgst.String()) + if err != nil { + t.Fatalf("error building expected url: %v", err) + } + + if event.Target.URL != u { + t.Fatalf("incorrect url passed: %q != %q", event.Target.URL, u) + } +} + +func checkCommon(t *testing.T, events ...Event) { + if len(events) != 1 { + t.Fatalf("unexpected number of events: %v != 1", len(events)) + } + + event := events[0] + + if event.Source != source { + t.Fatalf("source not equal: %#v != %#v", event.Source, source) + } + + if event.Request != request { + t.Fatalf("request not equal: %#v != %#v", event.Request, request) + } + + if event.Actor != actor { + t.Fatalf("request not equal: %#v != %#v", event.Actor, actor) + } + + if event.Target.Digest != dgst { + t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst) + } + + if event.Target.Length != int64(len(payload)) { + t.Fatalf("unexpected target length: %v != %v", event.Target.Length, len(payload)) + } + + if event.Target.Repository != repo { + t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo) + } + +} + +type testSinkFn func(events ...Event) error + +func (tsf testSinkFn) Write(events ...Event) error { + return tsf(events...) 
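The bridge now identifies repositories by plain name strings and targets blobs via distribution.Descriptor, emitting events that carry the new size field alongside the legacy length. A sketch of driving a bridge by hand with a throwaway sink; the registry URL, repository name, and payload are invented, and the final NewBridge argument is assumed to satisfy the package's sink contract the same way testSinkFn does above.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/notifications"
	"github.com/docker/distribution/registry/api/v2"
)

// logSink prints each event envelope instead of delivering it anywhere.
type logSink struct{}

func (logSink) Write(events ...notifications.Event) error {
	p, err := json.MarshalIndent(events, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(p))
	return nil
}

func (logSink) Close() error { return nil }

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com/") // invented endpoint
	if err != nil {
		panic(err)
	}

	listener := notifications.NewBridge(ub,
		notifications.SourceRecord{Addr: "registry-1.example.com"},
		notifications.ActorRecord{Name: "admin"},
		notifications.RequestRecord{},
		logSink{},
	)

	payload := []byte("example blob payload")
	dgst, err := digest.FromBytes(payload)
	if err != nil {
		panic(err)
	}

	// Repositories are plain names now; the serialized event carries both
	// "size" and the backwards-compatible "length".
	err = listener.BlobPushed("library/example", distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(payload)),
		Digest:    dgst,
	})
	if err != nil {
		panic(err)
	}
}
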
+} + +func (tsf testSinkFn) Close() error { return nil } + +func mustUB(ub *v2.URLBuilder, err error) *v2.URLBuilder { + if err != nil { + panic(err) + } + + return ub +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go index a898021b9716..97030026490c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go @@ -54,6 +54,10 @@ type Event struct { distribution.Descriptor + // Length in bytes of content. Same as Size field in Descriptor. + // Provided for backwards compatibility. + Length int64 `json:"length,omitempty"` + // Repository identifies the named repository. Repository string `json:"repository,omitempty"` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go index c413ce79bd46..ac4dfd93f0e8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go @@ -22,8 +22,9 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { "action": "push", "target": { "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "length": 1, + "size": 1, "digest": "sha256:0123456789abcdef0", + "length": 1, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, @@ -47,8 +48,9 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { "action": "push", "target": { "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "length": 2, + "size": 2, "digest": "tarsum.v2+sha256:0123456789abcdef1", + "length": 2, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, @@ -72,8 +74,9 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { "action": "push", "target": { "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "length": 3, + "size": 3, "digest": "tarsum.v2+sha256:0123456789abcdef2", + "length": 3, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, @@ -115,7 +118,8 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { manifestPush = prototype manifestPush.ID = "asdf-asdf-asdf-asdf-0" manifestPush.Target.Digest = "sha256:0123456789abcdef0" - manifestPush.Target.Length = int64(1) + manifestPush.Target.Length = 1 + manifestPush.Target.Size = 1 manifestPush.Target.MediaType = manifest.ManifestMediaType manifestPush.Target.Repository = "library/test" manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" @@ -125,6 +129,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { layerPush0.ID = "asdf-asdf-asdf-asdf-1" layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1" layerPush0.Target.Length = 2 + layerPush0.Target.Size = 2 layerPush0.Target.MediaType = layerMediaType layerPush0.Target.Repository = "library/test" layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" @@ -134,6 +139,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { layerPush1.ID = "asdf-asdf-asdf-asdf-2" layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2" layerPush1.Target.Length = 3 + layerPush1.Target.Size = 3 layerPush1.Target.MediaType = layerMediaType layerPush1.Target.Repository = "library/test" layerPush1.Target.URL = 
"http://example.com/v2/library/test/manifests/latest" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go index e585c1d877f5..b86fa8a468a1 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go @@ -1,39 +1,41 @@ package notifications import ( + "net/http" + "github.com/Sirupsen/logrus" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "golang.org/x/net/context" ) // ManifestListener describes a set of methods for listening to events related to manifests. type ManifestListener interface { - ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error - ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error + ManifestPushed(repo string, sm *manifest.SignedManifest) error + ManifestPulled(repo string, sm *manifest.SignedManifest) error // TODO(stevvooe): Please note that delete support is still a little shaky // and we'll need to propagate these in the future. - ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error + ManifestDeleted(repo string, sm *manifest.SignedManifest) error } -// LayerListener describes a listener that can respond to layer related events. -type LayerListener interface { - LayerPushed(repo distribution.Repository, layer distribution.Layer) error - LayerPulled(repo distribution.Repository, layer distribution.Layer) error +// BlobListener describes a listener that can respond to layer related events. +type BlobListener interface { + BlobPushed(repo string, desc distribution.Descriptor) error + BlobPulled(repo string, desc distribution.Descriptor) error // TODO(stevvooe): Please note that delete support is still a little shaky // and we'll need to propagate these in the future. - LayerDeleted(repo distribution.Repository, layer distribution.Layer) error + BlobDeleted(repo string, desc distribution.Descriptor) error } // Listener combines all repository events into a single interface. type Listener interface { ManifestListener - LayerListener + BlobListener } type repositoryListener struct { @@ -49,17 +51,21 @@ func Listen(repo distribution.Repository, listener Listener) distribution.Reposi } } -func (rl *repositoryListener) Manifests() distribution.ManifestService { +func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifests, err := rl.Repository.Manifests(ctx, options...) 
+ if err != nil { + return nil, err + } return &manifestServiceListener{ - ManifestService: rl.Repository.Manifests(), + ManifestService: manifests, parent: rl, - } + }, nil } -func (rl *repositoryListener) Layers() distribution.LayerService { - return &layerServiceListener{ - LayerService: rl.Repository.Layers(), - parent: rl, +func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { + return &blobServiceListener{ + BlobStore: rl.Repository.Blobs(ctx), + parent: rl, } } @@ -68,10 +74,10 @@ type manifestServiceListener struct { parent *repositoryListener } -func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) { - sm, err := msl.ManifestService.Get(ctx, dgst) +func (msl *manifestServiceListener) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + sm, err := msl.ManifestService.Get(dgst) if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository, sm); err != nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { logrus.Errorf("error dispatching manifest pull to listener: %v", err) } } @@ -79,11 +85,11 @@ func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest) return sm, err } -func (msl *manifestServiceListener) Put(ctx context.Context, sm *manifest.SignedManifest) error { - err := msl.ManifestService.Put(ctx, sm) +func (msl *manifestServiceListener) Put(sm *manifest.SignedManifest) error { + err := msl.ManifestService.Put(sm) if err == nil { - if err := msl.parent.listener.ManifestPushed(msl.parent.Repository, sm); err != nil { + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Name(), sm); err != nil { logrus.Errorf("error dispatching manifest push to listener: %v", err) } } @@ -91,10 +97,10 @@ func (msl *manifestServiceListener) Put(ctx context.Context, sm *manifest.Signed return err } -func (msl *manifestServiceListener) GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) { - sm, err := msl.ManifestService.GetByTag(ctx, tag) +func (msl *manifestServiceListener) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + sm, err := msl.ManifestService.GetByTag(tag, options...) 
if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository, sm); err != nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { logrus.Errorf("error dispatching manifest pull to listener: %v", err) } } @@ -102,51 +108,98 @@ func (msl *manifestServiceListener) GetByTag(ctx context.Context, tag string) (* return sm, err } -type layerServiceListener struct { - distribution.LayerService +type blobServiceListener struct { + distribution.BlobStore parent *repositoryListener } -func (lsl *layerServiceListener) Fetch(dgst digest.Digest) (distribution.Layer, error) { - layer, err := lsl.LayerService.Fetch(dgst) +var _ distribution.BlobStore = &blobServiceListener{} + +func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + p, err := bsl.BlobStore.Get(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return p, err +} + +func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rc, err := bsl.BlobStore.Open(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return rc, err +} + +func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return err +} + +func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + desc, err := bsl.BlobStore.Put(ctx, mediaType, p) if err == nil { - if err := lsl.parent.listener.LayerPulled(lsl.parent.Repository, layer); err != nil { - logrus.Errorf("error dispatching layer pull to listener: %v", err) + if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Name(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } - return layer, err + return desc, err } -func (lsl *layerServiceListener) Upload() (distribution.LayerUpload, error) { - lu, err := lsl.LayerService.Upload() - return lsl.decorateUpload(lu), err +func (bsl *blobServiceListener) Create(ctx context.Context) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Create(ctx) + return bsl.decorateWriter(wr), err } -func (lsl *layerServiceListener) Resume(uuid string) (distribution.LayerUpload, error) { - lu, err := lsl.LayerService.Resume(uuid) - return lsl.decorateUpload(lu), err +func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + 
wr, err := bsl.BlobStore.Resume(ctx, id) + return bsl.decorateWriter(wr), err } -func (lsl *layerServiceListener) decorateUpload(lu distribution.LayerUpload) distribution.LayerUpload { - return &layerUploadListener{ - LayerUpload: lu, - parent: lsl, +func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter { + return &blobWriterListener{ + BlobWriter: wr, + parent: bsl, } } -type layerUploadListener struct { - distribution.LayerUpload - parent *layerServiceListener +type blobWriterListener struct { + distribution.BlobWriter + parent *blobServiceListener } -func (lul *layerUploadListener) Finish(dgst digest.Digest) (distribution.Layer, error) { - layer, err := lul.LayerUpload.Finish(dgst) +func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + committed, err := bwl.BlobWriter.Commit(ctx, desc) if err == nil { - if err := lul.parent.parent.listener.LayerPushed(lul.parent.parent.Repository, layer); err != nil { - logrus.Errorf("error dispatching layer push to listener: %v", err) + if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Name(), committed); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) } } - return layer, err + return committed, err } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go index db16ad36e1c1..48b2708eed02 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go @@ -6,22 +6,23 @@ import ( "testing" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" - "golang.org/x/net/context" ) func TestListener(t *testing.T) { - registry := storage.NewRegistryWithDriver(inmemory.New(), cache.NewInMemoryLayerInfoCache()) + ctx := context.Background() + registry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, false) tl := &testListener{ ops: make(map[string]int), } - ctx := context.Background() + repository, err := registry.Repository(ctx, "foo/bar") if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -29,7 +30,7 @@ func TestListener(t *testing.T) { repository = Listen(repository, tl) // Now take the registry through a number of operations - checkExerciseRepository(t, ctx, repository) + checkExerciseRepository(t, repository) expectedOps := map[string]int{ "manifest:push": 1, @@ -50,45 +51,45 @@ type testListener struct { ops map[string]int } -func (tl *testListener) ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error { +func (tl *testListener) ManifestPushed(repo string, sm *manifest.SignedManifest) error { tl.ops["manifest:push"]++ return nil } -func (tl *testListener) ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error { +func (tl *testListener) 
ManifestPulled(repo string, sm *manifest.SignedManifest) error { tl.ops["manifest:pull"]++ return nil } -func (tl *testListener) ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error { +func (tl *testListener) ManifestDeleted(repo string, sm *manifest.SignedManifest) error { tl.ops["manifest:delete"]++ return nil } -func (tl *testListener) LayerPushed(repo distribution.Repository, layer distribution.Layer) error { +func (tl *testListener) BlobPushed(repo string, desc distribution.Descriptor) error { tl.ops["layer:push"]++ return nil } -func (tl *testListener) LayerPulled(repo distribution.Repository, layer distribution.Layer) error { +func (tl *testListener) BlobPulled(repo string, desc distribution.Descriptor) error { tl.ops["layer:pull"]++ return nil } -func (tl *testListener) LayerDeleted(repo distribution.Repository, layer distribution.Layer) error { +func (tl *testListener) BlobDeleted(repo string, desc distribution.Descriptor) error { tl.ops["layer:delete"]++ return nil } // checkExerciseRegistry takes the registry through all of its operations, // carrying out generic checks. -func checkExerciseRepository(t *testing.T, ctx context.Context, repository distribution.Repository) { +func checkExerciseRepository(t *testing.T, repository distribution.Repository) { // TODO(stevvooe): This would be a nice testutil function. Basically, it // takes the registry through a common set of operations. This could be // used to make cross-cutting updates by changing internals that affect // update counts. Basically, it would make writing tests a lot easier. - + ctx := context.Background() tag := "thetag" m := manifest.Manifest{ Versioned: manifest.Versioned{ @@ -98,27 +99,28 @@ func checkExerciseRepository(t *testing.T, ctx context.Context, repository distr Tag: tag, } - layers := repository.Layers() + blobs := repository.Blobs(ctx) for i := 0; i < 2; i++ { rs, ds, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating test layer: %v", err) } dgst := digest.Digest(ds) - upload, err := layers.Upload() + + wr, err := blobs.Create(ctx) if err != nil { t.Fatalf("error creating layer upload: %v", err) } // Use the resumes, as well! 
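Taken together, the listener changes swap LayerService decoration for BlobStore decoration and pass repository names as strings, so a Listener implementation reduces to six small callbacks. A sketch of an operation counter wrapped around a repository with Listen; the package and metric names are arbitrary.

package listeners

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/notifications"
)

// opCounter tallies repository operations by kind.
type opCounter struct {
	ops map[string]int
}

func (c *opCounter) ManifestPushed(repo string, sm *manifest.SignedManifest) error {
	c.ops["manifest:push"]++
	return nil
}

func (c *opCounter) ManifestPulled(repo string, sm *manifest.SignedManifest) error {
	c.ops["manifest:pull"]++
	return nil
}

func (c *opCounter) ManifestDeleted(repo string, sm *manifest.SignedManifest) error {
	c.ops["manifest:delete"]++
	return nil
}

func (c *opCounter) BlobPushed(repo string, desc distribution.Descriptor) error {
	c.ops["blob:push"]++
	return nil
}

func (c *opCounter) BlobPulled(repo string, desc distribution.Descriptor) error {
	c.ops["blob:pull"]++
	return nil
}

func (c *opCounter) BlobDeleted(repo string, desc distribution.Descriptor) error {
	c.ops["blob:delete"]++
	return nil
}

// instrument wraps a repository so all manifest and blob traffic is counted.
func instrument(repo distribution.Repository) (distribution.Repository, *opCounter) {
	c := &opCounter{ops: make(map[string]int)}
	return notifications.Listen(repo, c), c
}
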
- upload, err = layers.Resume(upload.UUID()) + wr, err = blobs.Resume(ctx, wr.ID()) if err != nil { t.Fatalf("error resuming layer upload: %v", err) } - io.Copy(upload, rs) + io.Copy(wr, rs) - if _, err := upload.Finish(dgst); err != nil { + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { t.Fatalf("unexpected error finishing upload: %v", err) } @@ -126,9 +128,11 @@ func checkExerciseRepository(t *testing.T, ctx context.Context, repository distr BlobSum: dgst, }) - // Then fetch the layers - if _, err := layers.Fetch(dgst); err != nil { + // Then fetch the blobs + if rc, err := blobs.Open(ctx, dgst); err != nil { t.Fatalf("error fetching layer: %v", err) + } else { + defer rc.Close() } } @@ -142,9 +146,12 @@ func checkExerciseRepository(t *testing.T, ctx context.Context, repository distr t.Fatalf("unexpected error signing manifest: %v", err) } - manifests := repository.Manifests() + manifests, err := repository.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } - if err := manifests.Put(ctx, sm); err != nil { + if err = manifests.Put(sm); err != nil { t.Fatalf("unexpected error putting the manifest: %v", err) } @@ -158,7 +165,7 @@ func checkExerciseRepository(t *testing.T, ctx context.Context, repository distr t.Fatalf("unexpected error digesting manifest payload: %v", err) } - fetchedByManifest, err := manifests.Get(ctx, dgst) + fetchedByManifest, err := manifests.Get(dgst) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } @@ -167,7 +174,7 @@ func checkExerciseRepository(t *testing.T, ctx context.Context, repository distr t.Fatalf("retrieved unexpected manifest: %v", err) } - fetched, err := manifests.GetByTag(ctx, tag) + fetched, err := manifests.GetByTag(tag) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh old mode 100644 new mode 100755 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit old mode 100644 new mode 100755 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry.go index dcff2a946eca..609ae010ee08 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry.go @@ -1,13 +1,9 @@ package distribution import ( - "io" - "net/http" - "time" - + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "golang.org/x/net/context" ) // Scope defines the set of items that match a namespace. @@ -40,19 +36,34 @@ type Namespace interface { // reference. Repository(ctx context.Context, name string) (Repository, error) + // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. + Repositories(ctx context.Context, repos []string, last string) (n int, err error) + + // Blobs returns a reference to registry's blob service. 
Blobs() BlobService } +// ManifestServiceOption is a function argument for Manifest Service methods +type ManifestServiceOption func(ManifestService) error + // Repository is a named collection of manifests and layers. type Repository interface { // Name returns the name of the repository. Name() string // Manifests returns a reference to this repository's manifest service. - Manifests() ManifestService + // with the supplied options applied. + Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) + + // Blobs returns a reference to this repository's blob service. + Blobs(ctx context.Context) BlobStore - // Layers returns a reference to this repository's layers service. - Layers() LayerService + // TODO(stevvooe): The above BlobStore return can probably be relaxed to + // be a BlobService for use with clients. This will allow such + // implementations to avoid implementing ServeBlob. // Signatures returns a reference to this repository's signatures service. Signatures() SignatureService @@ -65,28 +76,31 @@ type Repository interface { // ManifestService provides operations on image manifests. type ManifestService interface { // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) + Exists(dgst digest.Digest) (bool, error) // Get retrieves the identified by the digest, if it exists. - Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) + Get(dgst digest.Digest) (*manifest.SignedManifest, error) + + // Enumerate returns an array of manifest revisions in repository. + Enumerate() ([]digest.Digest, error) // Delete removes the manifest, if it exists. - Delete(ctx context.Context, dgst digest.Digest) error + Delete(dgst digest.Digest) error // Put creates or updates the manifest. - Put(ctx context.Context, manifest *manifest.SignedManifest) error + Put(manifest *manifest.SignedManifest) error // TODO(stevvooe): The methods after this message should be moved to a // discrete TagService, per active proposals. // Tags lists the tags under the named repository. - Tags(ctx context.Context) ([]string, error) + Tags() ([]string, error) // ExistsByTag returns true if the manifest exists. - ExistsByTag(ctx context.Context, tag string) (bool, error) + ExistsByTag(tag string) (bool, error) // GetByTag retrieves the named manifest, if it exists. - GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) + GetByTag(tag string, options ...ManifestServiceOption) (*manifest.SignedManifest, error) // TODO(stevvooe): There are several changes that need to be done to this // interface: @@ -102,103 +116,19 @@ type ManifestService interface { // really be concerned with the storage format. } -// LayerService provides operations on layer files in a backend storage. -type LayerService interface { - // Exists returns true if the layer exists. - Exists(digest digest.Digest) (bool, error) - - // Fetch the layer identifed by TarSum. - Fetch(digest digest.Digest) (Layer, error) - - // Delete unlinks the layer from a Repository. - Delete(dgst digest.Digest) error - - // Upload begins a layer upload to repository identified by name, - // returning a handle. - Upload() (LayerUpload, error) - - // Resume continues an in progress layer upload, returning a handle to the - // upload. The caller should seek to the latest desired upload location - // before proceeding. - Resume(uuid string) (LayerUpload, error) -} - -// Layer provides a readable and seekable layer object. 
Typically, -// implementations are *not* goroutine safe. -type Layer interface { - // http.ServeContent requires an efficient implementation of - // ReadSeeker.Seek(0, os.SEEK_END). - io.ReadSeeker - io.Closer - - // Digest returns the unique digest of the blob. - Digest() digest.Digest - - // Length returns the length in bytes of the blob. - Length() int64 - - // CreatedAt returns the time this layer was created. - CreatedAt() time.Time - - // Handler returns an HTTP handler which serves the layer content, whether - // by providing a redirect directly to the content, or by serving the - // content itself. - Handler(r *http.Request) (http.Handler, error) -} - -// LayerUpload provides a handle for working with in-progress uploads. -// Instances can be obtained from the LayerService.Upload and -// LayerService.Resume. -type LayerUpload interface { - io.WriteSeeker - io.ReaderFrom - io.Closer - - // UUID returns the identifier for this upload. - UUID() string - - // StartedAt returns the time this layer upload was started. - StartedAt() time.Time - - // Finish marks the upload as completed, returning a valid handle to the - // uploaded layer. The digest is validated against the contents of the - // uploaded layer. - Finish(digest digest.Digest) (Layer, error) - - // Cancel the layer upload process. - Cancel() error -} - // SignatureService provides operations on signatures. type SignatureService interface { // Get retrieves all of the signature blobs for the specified digest. Get(dgst digest.Digest) ([][]byte, error) + // Enumerate retrieves all signature digests for given manifest revision, + // if it exists. + Enumerate(dgst digest.Digest) ([]digest.Digest, error) + // Put stores the signature for the provided digest. Put(dgst digest.Digest, signatures ...[]byte) error -} - -type BlobService interface { - Delete(dgst digest.Digest) error -} -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Length in bytes of content. - Length int64 `json:"length,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. + // Delete removes a signature link from particular manifest revision, if + // it exists. + Delete(revision, dgst digest.Digest) error } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 000000000000..fdaddbcf8e66 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,259 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. 
The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + return ec.Descriptor().Value +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returned the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithDetail creates a new Error struct based on the passed-in info and +// set the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", + strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), + e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often captilized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable decription of the error condition + // included in API responses. 
+ Message string + + // Description provides a complete account of the errors purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the value by the string error code. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors. +func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr.(type) { + case ErrorCode: + err = daErr.(ErrorCode).WithDetail(nil) + case Error: + err = daErr.(Error) + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was setup and they forgot to set the + // Message field (meaning its "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Error's w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Error's w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go new file mode 100644 index 000000000000..27fb1cec7aab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go @@ -0,0 +1,179 @@ +package errcode + +import ( + "encoding/json" + "net/http" + "reflect" + "testing" +) + +// TestErrorCodes ensures that error code format, mappings and +// marshaling/unmarshaling. round trips are stable. 
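The errcode package splits a stable, registered identity (group plus Value) from per-instance data: WithArgs substitutes into the registered Message template and WithDetail attaches structured detail, while an Errors slice marshals to the {"errors":[...]} envelope. A small sketch under those rules; the group name and QUOTA_EXCEEDED code are invented for illustration and are not registry codes.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// Registration normally happens at package init; this group and code are
// invented for the example.
var errorCodeQuotaExceeded = errcode.Register("example.errors", errcode.ErrorDescriptor{
	Value:          "QUOTA_EXCEEDED",
	Message:        "repository %q exceeds its storage quota",
	Description:    `Returned when a push would exceed the repository quota.`,
	HTTPStatusCode: http.StatusForbidden,
})

func main() {
	var errs errcode.Errors

	// WithArgs formats the registered Message; WithDetail attaches data that
	// survives (un)marshaling.
	errs = append(errs, errorCodeQuotaExceeded.WithArgs("library/example"))
	errs = append(errs, errcode.ErrorCodeUnsupported.WithDetail("resumable uploads disabled"))

	p, err := json.Marshal(errs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(p)) // {"errors":[{"code":"QUOTA_EXCEEDED",...},{"code":"UNSUPPORTED",...}]}
}
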
+func TestErrorCodes(t *testing.T) { + if len(errorCodeToDescriptors) == 0 { + t.Fatal("errors aren't loaded!") + } + + for ec, desc := range errorCodeToDescriptors { + if ec != desc.Code { + t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code) + } + + if idToDescriptors[desc.Value].Code != ec { + t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec) + } + + if ec.Message() != desc.Message { + t.Fatalf("ec.Message doesn't mtach desc.Message: %q != %q", ec.Message(), desc.Message) + } + + // Test (de)serializing the ErrorCode + p, err := json.Marshal(ec) + if err != nil { + t.Fatalf("couldn't marshal ec %v: %v", ec, err) + } + + if len(p) <= 0 { + t.Fatalf("expected content in marshaled before for error code %v", ec) + } + + // First, unmarshal to interface and ensure we have a string. + var ecUnspecified interface{} + if err := json.Unmarshal(p, &ecUnspecified); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", ec, err) + } + + if _, ok := ecUnspecified.(string); !ok { + t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified) + } + + // Now, unmarshal with the error code type and ensure they are equal + var ecUnmarshaled ErrorCode + if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", ec, err) + } + + if ecUnmarshaled != ec { + t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) + } + } + +} + +// TestErrorsManagement does a quick check of the Errors type to ensure that +// members are properly pushed and marshaled. +var ErrorCodeTest1 = Register("v2.errors", ErrorDescriptor{ + Value: "TEST1", + Message: "test error 1", + Description: `Just a test message #1.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var ErrorCodeTest2 = Register("v2.errors", ErrorDescriptor{ + Value: "TEST2", + Message: "test error 2", + Description: `Just a test message #2.`, + HTTPStatusCode: http.StatusNotFound, +}) + +var ErrorCodeTest3 = Register("v2.errors", ErrorDescriptor{ + Value: "TEST3", + Message: "Sorry %q isn't valid", + Description: `Just a test message #3.`, + HTTPStatusCode: http.StatusNotFound, +}) + +func TestErrorsManagement(t *testing.T) { + var errs Errors + + errs = append(errs, ErrorCodeTest1) + errs = append(errs, ErrorCodeTest2.WithDetail( + map[string]interface{}{"digest": "sometestblobsumdoesntmatter"})) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE")) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")) + + p, err := json.Marshal(errs) + + if err != nil { + t.Fatalf("error marashaling errors: %v", err) + } + + expectedJSON := `{"errors":[` + + `{"code":"TEST1","message":"test error 1"},` + + `{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` + + `]}` + + if string(p) != expectedJSON { + t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) + } + + // Now test the reverse + var unmarshaled Errors + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + } + + if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) + } + + // Test the arg substitution stuff + e1 := 
unmarshaled[3].(Error) + exp1 := `Sorry "BOOGIE" isn't valid` + if e1.Message != exp1 { + t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1) + } + + exp1 = "test3: " + exp1 + if e1.Error() != exp1 { + t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1) + } + + // Test again with a single value this time + errs = Errors{ErrorCodeUnknown} + expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" + p, err = json.Marshal(errs) + + if err != nil { + t.Fatalf("error marashaling errors: %v", err) + } + + if string(p) != expectedJSON { + t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) + } + + // Now test the reverse + unmarshaled = nil + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + } + + if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) + } + + // Verify that calling WithArgs() more than once does the right thing. + // Meaning creates a new Error and uses the ErrorCode Message + e1 = ErrorCodeTest3.WithArgs("test1") + e2 := e1.WithArgs("test2") + if &e1 == &e2 { + t.Fatalf("args: e2 and e1 should not be the same, but they are") + } + if e2.Message != `Sorry "test2" isn't valid` { + t.Fatalf("e2 had wrong message: %q", e2.Message) + } + + // Verify that calling WithDetail() more than once does the right thing. + // Meaning creates a new Error and overwrites the old detail field + e1 = ErrorCodeTest3.WithDetail("stuff1") + e2 = e1.WithDetail("stuff2") + if &e1 == &e2 { + t.Fatalf("detail: e2 and e1 should not be the same, but they are") + } + if e2.Detail != `stuff2` { + t.Fatalf("e2 had wrong detail: %q", e2.Detail) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go new file mode 100644 index 000000000000..49a64a86eb50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/handler.go @@ -0,0 +1,44 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. 
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + if err := json.NewEncoder(w).Encode(err); err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 000000000000..f3062ffaffcd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,116 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request is not authorized. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "access to the requested resource is not authorized", + Description: `The access controller denied access for the operation on + a resource. Often this will be accompanied by a 401 Unauthorized + response status.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeUnavailable provides a common error to report unavialability + // of a service or endpoint. 
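ServeJSON is the bridge from these error values to HTTP: it wraps a bare ErrorCoder in an Errors envelope, takes the status code from the first error's descriptor, and falls back to 500 when none is set. A sketch of a handler built on it, reusing the ErrorCodeUnavailable code registered below; the maintenance flag and listen address are assumptions.

package main

import (
	"log"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

var maintenance = true // illustrative flag

func handler(w http.ResponseWriter, r *http.Request) {
	if maintenance {
		// Serialized as {"errors":[{"code":"UNAVAILABLE",...}]} with the 503
		// status pulled from the code's descriptor.
		errcode.ServeJSON(w, errcode.ErrorCodeUnavailable.WithDetail("scheduled maintenance"))
		return
	}
	w.Write([]byte("ok"))
}

func main() {
	log.Fatal(http.ListenAndServe(":5000", http.HandlerFunc(handler)))
}
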
+ ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go index d7c4a880cb8f..c5630fed2c2c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -5,6 +5,7 @@ import ( "regexp" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" ) var ( @@ -16,12 +17,12 @@ var ( Description: `Name of the target repository.`, } - tagParameterDescriptor = ParameterDescriptor{ - Name: "tag", + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", Type: "string", Format: TagNameRegexp.String(), Required: true, - Description: `Tag of the target manifiest.`, + Description: `Tag or digest of the target manifest.`, } uuidParameterDescriptor = ParameterDescriptor{ @@ -86,6 +87,30 @@ var ( Format: "", } + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. 
If not present, all entries will be returned.", + Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + unauthorizedResponse = ResponseDescriptor{ Description: "The client does not have access to the repository.", StatusCode: http.StatusUnauthorized, @@ -98,8 +123,8 @@ var ( Format: "", }, }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -119,8 +144,8 @@ var ( Format: "", }, }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -171,13 +196,8 @@ const ( var APIDescriptor = struct { // RouteDescriptors provides a list of the routes available in the API. RouteDescriptors []RouteDescriptor - - // ErrorDescriptors provides a list of the error codes and their - // associated documentation and metadata. - ErrorDescriptors []ErrorDescriptor }{ RouteDescriptors: routeDescriptors, - ErrorDescriptors: errorDescriptors, } // RouteDescriptor describes a route specified by name. @@ -273,9 +293,12 @@ type ResponseDescriptor struct { // Headers covers any headers that may be returned from the response. Headers []ParameterDescriptor + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + // ErrorCodes enumerates the error codes that may be returned along with // the response. - ErrorCodes []ErrorCode + ErrorCodes []errcode.ErrorCode // Body describes the body of the response, if any. Body BodyDescriptor @@ -317,30 +340,6 @@ type ParameterDescriptor struct { Examples []string } -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCodes provides a list of status under which this error - // condition may arise. If it is empty, the error condition may be seen - // for any status code.
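The ErrorDescriptor struct being removed here survives in the errcode package, where descriptors are bound to codes at registration time instead of kept in a parallel slice. A sketch of how another package would register its own error group under the new scheme; the group name, value, and message are invented:

```go
package myapi

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// Register allocates the next integer code under a process-wide lock and
// panics on duplicate values, so registrations belong in package-level var
// blocks that run once at init time.
var ErrorCodeWidgetUnknown = errcode.Register("myapi", errcode.ErrorDescriptor{
	Value:          "WIDGET_UNKNOWN",
	Message:        "widget unknown to server",
	Description:    `Returned when the requested widget is not found.`,
	HTTPStatusCode: http.StatusNotFound,
})
```

With APIDescriptor no longer carrying ErrorDescriptors, documentation generators would presumably enumerate errcode.GetGroupNames and errcode.GetErrorCodeGroup instead.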
- HTTPStatusCodes []int -} - var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBase, @@ -374,8 +373,8 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, }, { @@ -399,6 +398,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "Fetch the tags under the repository identified by `name`.", Requests: []RequestDescriptor{ { + Name: "Tags", + Description: "Return all tags for the repository", Headers: []ParameterDescriptor{ hostHeader, authHeader, @@ -438,7 +439,62 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, + }, + }, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, }, }, @@ -449,8 +505,8 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, }, }, @@ -463,7 +519,7 @@ var routeDescriptors = []RouteDescriptor{ Name: RouteNameManifest, Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", - Description: "Create, update and retrieve manifests.", + Description: "Create, update, delete and retrieve manifests.", Methods: []MethodDescriptor{ { Method: "GET", @@ -476,11 +532,11 @@ var routeDescriptors = []RouteDescriptor{ }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, - tagParameterDescriptor, + referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { - Description: "The manifest idenfied by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + Description: "The manifest identified by `name` and `reference`. 
The contents can be used to identify and resolve resources required to run the specified image.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ digestHeader, @@ -495,7 +551,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The name or reference was invalid.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, @@ -511,14 +567,14 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, }, { Description: "The named manifest is not known to the registry.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeManifestUnknown, }, @@ -542,7 +598,7 @@ var routeDescriptors = []RouteDescriptor{ }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, - tagParameterDescriptor, + referenceParameterDescriptor, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -551,7 +607,7 @@ var routeDescriptors = []RouteDescriptor{ Successes: []ResponseDescriptor{ { Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusAccepted, + StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", @@ -573,7 +629,7 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, ErrorCodeManifestInvalid, @@ -588,15 +644,15 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, }, { Name: "Missing Layer(s)", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ @@ -625,21 +681,29 @@ var routeDescriptors = []RouteDescriptor{ Format: "", }, }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, + { + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, }, }, }, }, { Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Description: "Delete the manifest identified by `name` and `reference`. 
Note that a manifest can _only_ be deleted by `digest`.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ @@ -648,7 +712,7 @@ var routeDescriptors = []RouteDescriptor{ }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, - tagParameterDescriptor, + referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { @@ -657,10 +721,10 @@ var routeDescriptors = []RouteDescriptor{ }, Failures: []ResponseDescriptor{ { - Name: "Invalid Name or Tag", - Description: "The specified `name` or `tag` were invalid and the delete was unable to proceed.", + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, @@ -680,8 +744,8 @@ var routeDescriptors = []RouteDescriptor{ Format: "", }, }, - ErrorCodes: []ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -690,9 +754,9 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: "Unknown Manifest", - Description: "The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeManifestUnknown, }, @@ -701,6 +765,14 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, }, }, }, @@ -712,9 +784,8 @@ var routeDescriptors = []RouteDescriptor{ Name: RouteNameBlob, Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", - Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by digest.", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", Methods: []MethodDescriptor{ - { Method: "GET", Description: "Retrieve the blob from the registry identified by `digest`. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", @@ -765,7 +836,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, @@ -782,7 +853,7 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, @@ -834,7 +905,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, @@ -846,7 +917,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponse, { StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, @@ -863,6 +934,70 @@ var routeDescriptors = []RouteDescriptor{ }, }, }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + // TODO(stevvooe): We may want to add a PUT request here to // kickoff an upload of a blob, integrated with the blob upload // API. @@ -872,7 +1007,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUpload, Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/", - Entity: "Intiate Blob Upload", + Entity: "Initiate Blob Upload", Description: "Initiate a blob upload. 
This endpoint can be used to create resumable uploads or monolithic uploads.", Methods: []MethodDescriptor{ { @@ -926,12 +1061,20 @@ var routeDescriptors = []RouteDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, unauthorizedResponsePush, + { + Name: "Not allowed", + Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, }, }, { @@ -970,7 +1113,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, @@ -1024,7 +1167,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, @@ -1038,7 +1181,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1096,7 +1239,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, @@ -1110,7 +1253,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1175,7 +1318,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, @@ -1189,7 +1332,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1266,10 +1409,11 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeUnsupported, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -1280,7 +1424,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. 
The upload must be restarted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1321,7 +1465,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "An error was encountered processing the delete. The client may ignore this error.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, @@ -1334,7 +1478,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1348,145 +1492,83 @@ var routeDescriptors = []RouteDescriptor{ }, }, }, -} - -// ErrorDescriptors provides a list of HTTP API Error codes that may be -// encountered when interacting with the registry API. -var errorDescriptors = []ErrorDescriptor{ - { - Code: ErrorCodeUnknown, - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - }, - { - Code: ErrorCodeUnsupported, - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - }, - { - Code: ErrorCodeUnauthorized, - Value: "UNAUTHORIZED", - Message: "access to the requested resource is not authorized", - Description: `The access controller denied access for the operation on - a resource. Often this will be accompanied by a 401 Unauthorized - response status.`, - }, - { - Code: ErrorCodeDigestInvalid, - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeSizeInvalid, - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. 
If they do not match, this error - will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeNameInvalid, - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeTagInvalid, - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeNameUnknown, - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeManifestUnknown, - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeManifestInvalid, - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeManifestUnverified, - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeBlobUnknown, - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - - { - Code: ErrorCodeBlobUploadUnknown, - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, { - Code: ErrorCodeBlobUploadInvalid, - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCodes: []int{http.StatusNotFound}, + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. 
Applications can only determine if a repository is available but not if it is not available.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, json list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch Complete", + Description: "Request an unabridged list of repositories available.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a json response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + }, + }, + }, + }, + }, + }, }, } -var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor -var idToDescriptors map[string]ErrorDescriptor var routeDescriptorsMap map[string]RouteDescriptor func init() { - errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(errorDescriptors)) - idToDescriptors = make(map[string]ErrorDescriptor, len(errorDescriptors)) routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - for _, descriptor := range errorDescriptors { - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - } for _, descriptor := range routeDescriptors { routeDescriptorsMap[descriptor.Name] = descriptor } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go index cbae020efb1a..ece52a2cd090 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go @@ -1,194 +1,136 @@ package v2 import ( - "fmt" - "strings" -) - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -const ( - // ErrorCodeUnknown is a catch-all for errors not defined below. - ErrorCodeUnknown ErrorCode = iota + "net/http" - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported + "github.com/docker/distribution/registry/api/errcode" +) - // ErrorCodeUnauthorized is returned if a request is not authorized. - ErrorCodeUnauthorized +const errGroup = "registry.api.v2" +var ( // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. 
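Since the catalog endpoint described above is new, a sketch of a client walking it page by page may help. The registry host and page size are invented; only the response shape and the Link header behavior come from the descriptors above.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Fetch one page of the catalog; "n" bounds the page size.
	resp, err := http.Get("https://registry.example.com/v2/_catalog?n=100")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var page struct {
		Repositories []string `json:"repositories"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
		log.Fatal(err)
	}
	fmt.Println(page.Repositories)

	// When more results remain, the response carries an RFC 5988 header such
	// as: Link: </v2/_catalog?last=foo%2Fbar&n=100>; rel="next"
	if link := resp.Header.Get("Link"); link != "" {
		fmt.Println("next page:", link)
	}
}
```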
- ErrorCodeDigestInvalid + ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCode: http.StatusBadRequest, + }) // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - // size does not match the content length. - ErrorCodeSizeInvalid + ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) // ErrorCodeNameInvalid is returned when the name in the manifest does not // match the provided name. - ErrorCodeNameInvalid + ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCode: http.StatusBadRequest, + }) // ErrorCodeTagInvalid is returned when the tag in the manifest does not // match the provided tag. - ErrorCodeTagInvalid + ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown + ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCode: http.StatusNotFound, + }) // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown + ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag is unknown to the repository.`, + HTTPStatusCode: http.StatusNotFound, + }) // ErrorCodeManifestInvalid returned when an image manifest is invalid, // typically during a PUT operation. This error encompasses all errors // encountered during manifest validation that aren't signature errors. - ErrorCodeManifestInvalid + ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }) // ErrorCodeManifestUnverified is returned when the manifest fails // signature verfication. 
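To make the converted codes concrete, here is roughly how one of them is used and what it serializes to. The digest value is made up; WithDetail is the helper exercised by the errcode tests earlier in this diff.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	// WithDetail wraps the registered code in an Error carrying
	// request-specific detail.
	errs := errcode.Errors{
		v2.ErrorCodeDigestInvalid.WithDetail(map[string]string{
			"digest": "sha256:0123456789abcdef", // invented digest
		}),
	}
	p, _ := json.Marshal(errs)
	fmt.Println(string(p))
	// {"errors":[{"code":"DIGEST_INVALID","message":"provided digest did not
	// match uploaded content","detail":{"digest":"sha256:0123456789abcdef"}}]}
}
```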
- ErrorCodeManifestUnverified + ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }) // ErrorCodeBlobUnknown is returned when a blob is unknown to the // registry. This can happen when the manifest references a nonexistent // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown + ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }) // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown + ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }) // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid -) - -// ParseErrorCode attempts to parse the error code string, returning -// ErrorCodeUnknown if the error is not known. -func ParseErrorCode(s string) ErrorCode { - desc, ok := idToDescriptors[s] - - if !ok { - return ErrorCodeUnknown - } - - return desc.Code -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail interface{} `json:"detail,omitempty"` -} - -// Error returns a human readable representation of the error. 
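For callers migrating off the push-style helpers removed here, errcode.Errors appears to be a plain slice of errors, as the Errors literal in the errcode tests above suggests; a hedged before-and-after sketch with invented variable names:

```go
package main

import (
	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	name := "Bad^Name" // invented input

	// Old v2 style, removed in this diff:
	//
	//   var errs v2.Errors
	//   errs.Push(v2.ErrorCodeNameInvalid, name)
	//
	// New style: append Error values to the slice-based envelope.
	var errs errcode.Errors
	errs = append(errs, v2.ErrorCodeNameInvalid.WithDetail(name))
	_ = errs
}
```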
-func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors struct { - Errors []Error `json:"errors,omitempty"` -} - -// Push pushes an error on to the error stack, with the optional detail -// argument. It is a programming error (ie panic) to push more than one -// detail at a time. -func (errs *Errors) Push(code ErrorCode, details ...interface{}) { - if len(details) > 1 { - panic("please specify zero or one detail items for this error") - } - - var detail interface{} - if len(details) > 0 { - detail = details[0] - } - - if err, ok := detail.(error); ok { - detail = err.Error() - } - - errs.PushErr(Error{ - Code: code, - Message: code.Message(), - Detail: detail, + ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, }) -} - -// PushErr pushes an error interface onto the error stack. -func (errs *Errors) PushErr(err error) { - switch err.(type) { - case Error: - errs.Errors = append(errs.Errors, err.(Error)) - default: - errs.Errors = append(errs.Errors, Error{Message: err.Error()}) - } -} - -func (errs *Errors) Error() string { - switch errs.Len() { - case 0: - return "" - case 1: - return errs.Errors[0].Error() - default: - msg := "errors:\n" - for _, err := range errs.Errors { - msg += err.Error() + "\n" - } - return msg - } -} - -// Clear clears the errors. -func (errs *Errors) Clear() { - errs.Errors = errs.Errors[:0] -} - -// Len returns the current number of errors. -func (errs *Errors) Len() int { - return len(errs.Errors) -} +) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors_test.go deleted file mode 100644 index 9cc831c44018..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package v2 - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/docker/distribution/digest" -) - -// TestErrorCodes ensures that error code format, mappings and -// marshaling/unmarshaling. round trips are stable. -func TestErrorCodes(t *testing.T) { - for _, desc := range errorDescriptors { - if desc.Code.String() != desc.Value { - t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value) - } - - if desc.Code.Message() != desc.Message { - t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message) - } - - // Serialize the error code using the json library to ensure that we - // get a string and it works round trip. - p, err := json.Marshal(desc.Code) - - if err != nil { - t.Fatalf("error marshaling error code %v: %v", desc.Code, err) - } - - if len(p) <= 0 { - t.Fatalf("expected content in marshaled before for error code %v", desc.Code) - } - - // First, unmarshal to interface and ensure we have a string. 
- var ecUnspecified interface{} - if err := json.Unmarshal(p, &ecUnspecified); err != nil { - t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err) - } - - if _, ok := ecUnspecified.(string); !ok { - t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified) - } - - // Now, unmarshal with the error code type and ensure they are equal - var ecUnmarshaled ErrorCode - if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { - t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err) - } - - if ecUnmarshaled != desc.Code { - t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code) - } - } -} - -// TestErrorsManagement does a quick check of the Errors type to ensure that -// members are properly pushed and marshaled. -func TestErrorsManagement(t *testing.T) { - var errs Errors - - errs.Push(ErrorCodeDigestInvalid) - errs.Push(ErrorCodeBlobUnknown, - map[string]digest.Digest{"digest": "sometestblobsumdoesntmatter"}) - - p, err := json.Marshal(errs) - - if err != nil { - t.Fatalf("error marashaling errors: %v", err) - } - - expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}" - - if string(p) != expectedJSON { - t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) - } - - errs.Clear() - errs.Push(ErrorCodeUnknown) - expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" - p, err = json.Marshal(errs) - - if err != nil { - t.Fatalf("error marashaling errors: %v", err) - } - - if string(p) != expectedJSON { - t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) - } -} - -// TestMarshalUnmarshal ensures that api errors can round trip through json -// without losing information. -func TestMarshalUnmarshal(t *testing.T) { - - var errors Errors - - for _, testcase := range []struct { - description string - err Error - }{ - { - description: "unknown error", - err: Error{ - - Code: ErrorCodeUnknown, - Message: ErrorCodeUnknown.Descriptor().Message, - }, - }, - { - description: "unknown manifest", - err: Error{ - Code: ErrorCodeManifestUnknown, - Message: ErrorCodeManifestUnknown.Descriptor().Message, - }, - }, - { - description: "unknown manifest", - err: Error{ - Code: ErrorCodeBlobUnknown, - Message: ErrorCodeBlobUnknown.Descriptor().Message, - Detail: map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"}, - }, - }, - } { - fatalf := func(format string, args ...interface{}) { - t.Fatalf(testcase.description+": "+format, args...) - } - - unexpectedErr := func(err error) { - fatalf("unexpected error: %v", err) - } - - p, err := json.Marshal(testcase.err) - if err != nil { - unexpectedErr(err) - } - - var unmarshaled Error - if err := json.Unmarshal(p, &unmarshaled); err != nil { - unexpectedErr(err) - } - - if !reflect.DeepEqual(unmarshaled, testcase.err) { - fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err) - } - - // Roll everything up into an error response envelope. 
- errors.PushErr(testcase.err) - } - - p, err := json.Marshal(errors) - if err != nil { - t.Fatalf("unexpected error marshaling error envelope: %v", err) - } - - var unmarshaled Errors - if err := json.Unmarshal(p, &unmarshaled); err != nil { - t.Fatalf("unexpected error unmarshaling error envelope: %v", err) - } - - if !reflect.DeepEqual(unmarshaled, errors) { - t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go index e4a98861cbad..14b7ea60a6ef 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names.go @@ -6,19 +6,10 @@ import ( "strings" ) -// TODO(stevvooe): Move these definitions back to an exported package. While -// they are used with v2 definitions, their relevance expands beyond. -// "distribution/names" is a candidate package. +// TODO(stevvooe): Move these definitions to the future "reference" package. +// While they are used with v2 definitions, their relevance expands beyond. const ( - // RepositoryNameComponentMinLength is the minimum number of characters in a - // single repository name slash-delimited component - RepositoryNameComponentMinLength = 2 - - // RepositoryNameMinComponents is the minimum number of slash-delimited - // components that a repository name must have - RepositoryNameMinComponents = 1 - // RepositoryNameTotalLengthMax is the maximum total number of characters in // a repository name RepositoryNameTotalLengthMax = 255 @@ -40,17 +31,13 @@ var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentReg // TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) -// TODO(stevvooe): Contribute these exports back to core, so they are shared. +// TagNameAnchoredRegexp matches valid tag names, anchored at the start and +// end of the matched string. +var TagNameAnchoredRegexp = regexp.MustCompile("^" + TagNameRegexp.String() + "$") var ( - // ErrRepositoryNameComponentShort is returned when a repository name - // contains a component which is shorter than - // RepositoryNameComponentMinLength - ErrRepositoryNameComponentShort = fmt.Errorf("respository name component must be %v or more characters", RepositoryNameComponentMinLength) - - // ErrRepositoryNameMissingComponents is returned when a repository name - // contains fewer than RepositoryNameMinComponents components - ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents) + // ErrRepositoryNameEmpty is returned for empty, invalid repository names. + ErrRepositoryNameEmpty = fmt.Errorf("repository name must have at least one component") // ErrRepositoryNameLong is returned when a repository name is longer than // RepositoryNameTotalLengthMax @@ -61,7 +48,7 @@ var ( ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String()) ) -// ValidateRespositoryName ensures the repository name is valid for use in the +// ValidateRepositoryName ensures the repository name is valid for use in the // registry. This function accepts a superset of what might be accepted by // docker core or docker hub. 
If the name does not pass validation, an error, // describing the conditions, is returned. @@ -75,22 +62,18 @@ var ( // // The result of the production, known as the "namespace", should be limited // to 255 characters. -func ValidateRespositoryName(name string) error { +func ValidateRepositoryName(name string) error { + if name == "" { + return ErrRepositoryNameEmpty + } + if len(name) > RepositoryNameTotalLengthMax { return ErrRepositoryNameLong } components := strings.Split(name, "/") - if len(components) < RepositoryNameMinComponents { - return ErrRepositoryNameMissingComponents - } - for _, component := range components { - if len(component) < RepositoryNameComponentMinLength { - return ErrRepositoryNameComponentShort - } - if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { return ErrRepositoryNameComponentInvalid } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go index de6a168f0f65..656ae8466fdf 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/names_test.go @@ -1,15 +1,28 @@ package v2 import ( + "strconv" "strings" "testing" ) -func TestRepositoryNameRegexp(t *testing.T) { - for _, testcase := range []struct { +var ( + // regexpTestcases is a unified set of testcases for + // TestValidateRepositoryName and TestRepositoryNameRegexp. + // Some of them are valid inputs for one and not the other. + regexpTestcases = []struct { + // input is the repository name or name component testcase input string - err error + // err is the error expected from ValidateRepositoryName, or nil + err error + // invalid should be true if the testcase is *not* expected to + // match RepositoryNameRegexp + invalid bool }{ + { + input: "", + err: ErrRepositoryNameEmpty, + }, { input: "short", }, @@ -30,15 +43,50 @@ func TestRepositoryNameRegexp(t *testing.T) { }, { input: "a/a/a/b/b", - err: ErrRepositoryNameComponentShort, }, { - input: "a/a/a/a/", - err: ErrRepositoryNameComponentShort, + input: "a/a/a/a/", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "a//a/a", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "a", + }, + { + input: "a/aa", + }, + { + input: "aa/a", + }, + { + input: "a/aa/a", + }, + { + input: "foo.com/", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + // TODO: this testcase should be valid once we switch to + // the reference package. 
+ input: "foo.com:8080/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo.com/bar", }, { input: "foo.com/bar/baz", }, + { + input: "foo.com/bar/baz/quux", + }, { input: "blog.foo.com/bar/baz", }, @@ -46,8 +94,9 @@ func TestRepositoryNameRegexp(t *testing.T) { input: "asdf", }, { - input: "asdf$$^/aa", - err: ErrRepositoryNameComponentInvalid, + input: "asdf$$^/aa", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { input: "aa-a/aa", @@ -59,12 +108,9 @@ func TestRepositoryNameRegexp(t *testing.T) { input: "a-a/a-a", }, { - input: "a", - err: ErrRepositoryNameComponentShort, - }, - { - input: "a-/a/a/a", - err: ErrRepositoryNameComponentInvalid, + input: "a-/a/a/a", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { input: strings.Repeat("a", 255), @@ -73,14 +119,81 @@ func TestRepositoryNameRegexp(t *testing.T) { input: strings.Repeat("a", 256), err: ErrRepositoryNameLong, }, - } { + { + input: "-foo/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo/bar-", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo-/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo/-bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "_foo/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo/bar_", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "____/____", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "_docker/_docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "docker_/docker_", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "b.gcr.io/test.example.com/my-app", // embedded domain component + }, + // TODO(stevvooe): The following is a punycode domain name that we may + // want to allow in the future. Currently, this is not allowed but we + // may want to change this in the future. Adding this here as invalid + // for the time being. + { + input: "xn--n3h.com/myimage", // http://☃.com in punycode + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "xn--7o8h.com/myimage", // http://🐳.com in punycode + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + } +) +// TestValidateRepositoryName tests the ValidateRepositoryName function, +// which uses RepositoryNameComponentAnchoredRegexp for validation +func TestValidateRepositoryName(t *testing.T) { + for _, testcase := range regexpTestcases { failf := func(format string, v ...interface{}) { - t.Logf(testcase.input+": "+format, v...) + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) t.Fail() } - if err := ValidateRespositoryName(testcase.input); err != testcase.err { + if err := ValidateRepositoryName(testcase.input); err != testcase.err { if testcase.err != nil { if err != nil { failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) @@ -98,3 +211,21 @@ func TestRepositoryNameRegexp(t *testing.T) { } } } + +func TestRepositoryNameRegexp(t *testing.T) { + for _, testcase := range regexpTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ +func TestRepositoryNameRegexp(t *testing.T) { + for _, testcase := range regexpTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + matches := RepositoryNameRegexp.FindString(testcase.input) == testcase.input + if matches == testcase.invalid { + if testcase.invalid { + failf("expected invalid repository name %s", testcase.input) + } else { + failf("expected valid repository name %s", testcase.input) + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go index 69f9d9012a43..5b80d5be76a5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes.go @@ -11,10 +11,12 @@ const ( RouteNameBlob = "blob" RouteNameBlobUpload = "blob-upload" RouteNameBlobUploadChunk = "blob-upload-chunk" + RouteNameCatalog = "catalog" ) var allEndpoints = []string{ RouteNameManifest, + RouteNameCatalog, RouteNameTags, RouteNameBlob, RouteNameBlobUpload, diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go index fb268336f96f..b8d724dfe1b9 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go @@ -66,6 +66,27 @@ func TestRouter(t *testing.T) { "name": "foo/bar", }, }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/baz/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar/baz", + }, + }, { RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", @@ -263,6 +284,7 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee } if testcase.StatusCode != http.StatusOK { + resp.Body.Close() // We don't care about json response. continue } @@ -291,6 +313,8 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) } + + resp.Body.Close() } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go index 60aad5659d06..4297439400b4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go @@ -100,6 +100,18 @@ func (ub *URLBuilder) BuildBaseURL() (string, error) { return baseURL.String(), nil } +// BuildCatalogURL constructs a url to get a catalog of repositories +func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameCatalog) + + catalogURL, err := route.URL() + if err != nil { + return "", err + } + + return appendValuesURL(catalogURL, values...).String(), nil +} +
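A sketch of driving the new BuildCatalogURL with pagination values. The registry root is invented, and NewURLBuilderFromString is assumed from the pre-existing portion of urls.go rather than this hunk.

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com")
	if err != nil {
		log.Fatal(err)
	}

	values := url.Values{}
	values.Set("n", "100")
	values.Set("last", "library/ubuntu")

	catalogURL, err := ub.BuildCatalogURL(values)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(catalogURL)
	// https://registry.example.com/v2/_catalog?last=library%2Fubuntu&n=100
}
```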
// BuildTagsURL constructs a url to list the tags in the named repository. func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { route := ub.cloneRoute(RouteNameTags) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go index ec82b46977df..862c8d28cd4f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go @@ -34,7 +34,7 @@ import ( "fmt" "net/http" - "golang.org/x/net/context" + "github.com/docker/distribution/context" ) // UserInfo carries information about @@ -61,12 +61,12 @@ type Access struct { // header values based on the error. type Challenge interface { error - // ServeHTTP prepares the request to conduct the appropriate challenge - // response. For most implementations, simply calling ServeHTTP should be - // sufficient. Because no body is written, users may write a custom body after - // calling ServeHTTP, but any headers must be written before the call and may - // be overwritten. - ServeHTTP(w http.ResponseWriter, r *http.Request) + + // SetHeaders prepares the request to conduct a challenge response by + // adding an HTTP challenge header on the response message. Callers + // are expected to set the appropriate HTTP status code (e.g. 401) + // themselves. + SetHeaders(w http.ResponseWriter) } // AccessController controls access to registry resources based on a request diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go new file mode 100644 index 000000000000..5ac3d84a7afb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go @@ -0,0 +1,102 @@ +// Package htpasswd provides a simple authentication scheme that checks for the +// user credential hash in an htpasswd formatted file in a configuration-determined +// location. +// +// This authentication method MUST be used under TLS, as a simple token-replay attack is possible. +package htpasswd + +import ( + "errors" + "fmt" + "net/http" + "os" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +var ( + // ErrInvalidCredential is returned when the auth token does not authenticate correctly. + ErrInvalidCredential = errors.New("invalid authorization credential") +
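The Challenge change in auth.go above shifts status-code writing to the caller. A sketch of the consuming side; the handler wiring and the Access field layout are assumptions not shown in this diff, while Authorized, Challenge, and SetHeaders match the interfaces above.

```go
package authexample

import (
	"net/http"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/auth"
)

// authorize shows the new contract: on a challenge, set the headers and then
// write the status yourself, since SetHeaders no longer writes anything else.
func authorize(ctx context.Context, w http.ResponseWriter, ac auth.AccessController, repo string) (context.Context, bool) {
	ctx, err := ac.Authorized(ctx, auth.Access{
		Resource: auth.Resource{Type: "repository", Name: repo}, // assumed fields
		Action:   "pull",
	})
	if err != nil {
		if ch, ok := err.(auth.Challenge); ok {
			ch.SetHeaders(w) // e.g. WWW-Authenticate: Basic realm="..."
			w.WriteHeader(http.StatusUnauthorized)
		} else {
			w.WriteHeader(http.StatusInternalServerError)
		}
		return nil, false
	}
	return ctx, true
}
```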
+ ErrAuthenticationFailure = errors.New("authentication failured") +) + +type accessController struct { + realm string + htpasswd *htpasswd +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) + } + + path, present := options["path"] + if _, ok := path.(string); !present || !ok { + return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) + } + + f, err := os.Open(path.(string)) + if err != nil { + return nil, err + } + defer f.Close() + + h, err := newHTPasswd(f) + if err != nil { + return nil, err + } + + return &accessController{realm: realm.(string), htpasswd: h}, nil +} + +func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + username, password, ok := req.BasicAuth() + if !ok { + return nil, &challenge{ + realm: ac.realm, + err: ErrInvalidCredential, + } + } + + if err := ac.htpasswd.authenticateUser(username, password); err != nil { + context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) + return nil, &challenge{ + realm: ac.realm, + err: ErrAuthenticationFailure, + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil +} + +// challenge implements the auth.Challenge interface. +type challenge struct { + realm string + err error +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets the basic challenge header on the response. +func (ch challenge) SetHeaders(w http.ResponseWriter) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("basic authentication challenge: %#v", ch) +} + +func init() { + auth.Register("htpasswd", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go new file mode 100644 index 000000000000..db0405475819 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go @@ -0,0 +1,122 @@ +package htpasswd + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +func TestBasicAccessController(t *testing.T) { + testRealm := "The-Shire" + testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} + testPasswords := []string{"baggins", "baggins", "새주", "공주님"} + testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= + frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W + MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 + DeokMan:공주님` + + tempFile, err := ioutil.TempFile("", "htpasswd-test") + if err != nil { + t.Fatal("could not create temporary htpasswd file") + } + if _, err = tempFile.WriteString(testHtpasswdContent); err != nil { + t.Fatal("could not write temporary htpasswd file") + } + + options := map[string]interface{}{ + "realm": testRealm, + "path": tempFile.Name(), + } + ctx := context.Background() + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal("error creating access 
controller") + } + + tempFile.Close() + + var userNumber = 0 + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithRequest(ctx, r) + authCtx, err := accessController.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.SetHeaders(w) + w.WriteHeader(http.StatusUnauthorized) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("basic accessController did not set auth.user context") + } + + if userInfo.Name != testUsers[userNumber] { + t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + client := &http.Client{ + CheckRedirect: nil, + } + + req, _ := http.NewRequest("GET", server.URL, nil) + resp, err := client.Do(req) + + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + nonbcrypt := map[string]struct{}{ + "bilbo": {}, + "DeokMan": {}, + } + + for i := 0; i < len(testUsers); i++ { + userNumber = i + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("error allocating new request: %v", err) + } + + req.SetBasicAuth(testUsers[i], testPasswords[i]) + + resp, err = client.Do(req) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + if _, ok := nonbcrypt[testUsers[i]]; ok { + // these are not allowed. + // Request should be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i]) + } + } else { + // Request should be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) + } + } + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go new file mode 100644 index 000000000000..494ad0a76880 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go @@ -0,0 +1,80 @@ +package htpasswd + +import ( + "bufio" + "fmt" + "io" + "strings" + + "golang.org/x/crypto/bcrypt" +) + +// htpasswd holds a path to a system .htpasswd file and the machinery to parse +// it. Only bcrypt hash entries are supported. +type htpasswd struct { + entries map[string][]byte // maps username to password byte slice. +} + +// newHTPasswd parses the reader and returns an htpasswd or an error. +func newHTPasswd(rd io.Reader) (*htpasswd, error) { + entries, err := parseHTPasswd(rd) + if err != nil { + return nil, err + } + + return &htpasswd{entries: entries}, nil +} + +// AuthenticateUser checks a given user:password credential against the +// receiving HTPasswd's file. If the check passes, nil is returned. 
+func (htpasswd *htpasswd) authenticateUser(username string, password string) error {
+	credentials, ok := htpasswd.entries[username]
+	if !ok {
+		// timing attack paranoia
+		bcrypt.CompareHashAndPassword([]byte{}, []byte(password))
+
+		return ErrAuthenticationFailure
+	}
+
+	err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password))
+	if err != nil {
+		return ErrAuthenticationFailure
+	}
+
+	return nil
+}
+
+// parseHTPasswd parses the contents of htpasswd. This will read all the
+// entries in the file, whether or not they are needed. An error is returned
+// if any syntax errors are encountered or if the reader fails.
+func parseHTPasswd(rd io.Reader) (map[string][]byte, error) {
+	entries := map[string][]byte{}
+	scanner := bufio.NewScanner(rd)
+	var line int
+	for scanner.Scan() {
+		line++ // 1-based line numbering
+		t := strings.TrimSpace(scanner.Text())
+
+		if len(t) < 1 {
+			continue
+		}
+
+		// lines that *begin* with a '#' are considered comments
+		if t[0] == '#' {
+			continue
+		}
+
+		i := strings.Index(t, ":")
+		if i < 0 || i >= len(t) {
+			return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text())
+		}
+
+		entries[t[:i]] = []byte(t[i+1:])
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return entries, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go
new file mode 100644
index 000000000000..309c359ad9f2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go
@@ -0,0 +1,85 @@
+package htpasswd
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestParseHTPasswd(t *testing.T) {
+
+	for _, tc := range []struct {
+		desc    string
+		input   string
+		err     error
+		entries map[string][]byte
+	}{
+		{
+			desc: "basic example",
+			input: `
+# This is a comment in a basic example.
+bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=
+frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W
+MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2
+DeokMan:공주님
+`,
+			entries: map[string][]byte{
+				"bilbo":   []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="),
+				"frodo":   []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"),
+				"MiShil":  []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"),
+				"DeokMan": []byte("공주님"),
+			},
+		},
+		{
+			desc: "ensures comments are filtered",
+			input: `
+# asdf:asdf
+`,
+		},
+		{
+			desc: "ensure midline hash is not comment",
+			input: `
+asdf:as#df
+`,
+			entries: map[string][]byte{
+				"asdf": []byte("as#df"),
+			},
+		},
+		{
+			desc: "ensure invalid entry returns a parse error",
+			input: `
+# A valid comment
+valid:entry
+asdf
+`,
+			err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`),
+		},
+	} {
+
+		entries, err := parseHTPasswd(strings.NewReader(tc.input))
+		if err != tc.err {
+			if tc.err == nil {
+				t.Fatalf("%s: unexpected error: %v", tc.desc, err)
+			} else {
+				if err.Error() != tc.err.Error() { // use string equality here.
+ t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err) + } + } + } + + if tc.err != nil { + continue // don't test output + } + + // allow empty and nil to be equal + if tc.entries == nil { + tc.entries = map[string][]byte{} + } + + if !reflect.DeepEqual(entries, tc.entries) { + t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) + } + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go index 39318d1a3978..2b801d946e18 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go @@ -12,9 +12,8 @@ import ( "net/http" "strings" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" ) // accessController provides a simple implementation of auth.AccessController @@ -44,7 +43,7 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, // Authorized simply checks for the existence of the authorization header, // responding with a bearer challenge if it doesn't exist. func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := ctxu.GetRequest(ctx) + req, err := context.GetRequest(ctx) if err != nil { return nil, err } @@ -75,7 +74,10 @@ type challenge struct { scope string } -func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { +var _ auth.Challenge = challenge{} + +// SetHeaders sets a simple bearer challenge on the response. 
+func (ch challenge) SetHeaders(w http.ResponseWriter) {
 	header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service)
 
 	if ch.scope != "" {
@@ -83,10 +85,9 @@ func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	w.Header().Set("WWW-Authenticate", header)
-	w.WriteHeader(http.StatusUnauthorized)
 }
 
-func (ch *challenge) Error() string {
+func (ch challenge) Error() string {
 	return fmt.Sprintf("silly authentication challenge: %#v", ch)
 }
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go
index d579e8780338..8b5ecb80170e 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go
@@ -21,7 +21,8 @@ func TestSillyAccessController(t *testing.T) {
 		if err != nil {
 			switch err := err.(type) {
 			case auth.Challenge:
-				err.ServeHTTP(w, r)
+				err.SetHeaders(w)
+				w.WriteHeader(http.StatusUnauthorized)
 				return
 			default:
 				t.Fatalf("unexpected error authorizing request: %v", err)
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go
index 4547336a45e3..5b1ff7caa98e 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go
@@ -11,10 +11,9 @@ import (
 	"os"
 	"strings"
 
-	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/registry/auth"
 	"github.com/docker/libtrust"
-	"golang.org/x/net/context"
 )
 
 // accessSet maps a typed, named resource to
@@ -82,20 +81,22 @@ type authChallenge struct {
 	accessSet accessSet
 }
 
+var _ auth.Challenge = authChallenge{}
+
 // Error returns the internal error string for this authChallenge.
-func (ac *authChallenge) Error() string {
+func (ac authChallenge) Error() string {
 	return ac.err.Error()
 }
 
 // Status returns the HTTP Response Status Code for this authChallenge.
-func (ac *authChallenge) Status() int {
+func (ac authChallenge) Status() int {
 	return http.StatusUnauthorized
 }
 
 // challengeParams constructs the value to be used in
 // the WWW-Authenticate response challenge header.
 // See https://tools.ietf.org/html/rfc6750#section-3
-func (ac *authChallenge) challengeParams() string {
+func (ac authChallenge) challengeParams() string {
 	str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service)
 
 	if scope := ac.accessSet.scopeParam(); scope != "" {
@@ -111,16 +112,9 @@
 	return str
 }
 
-// SetHeader sets the WWW-Authenticate value for the given header.
-func (ac *authChallenge) SetHeader(header http.Header) {
-	header.Add("WWW-Authenticate", ac.challengeParams())
-}
-
-// ServeHttp handles writing the challenge response
-// by setting the challenge header and status code.
-func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ac.SetHeader(w.Header())
-	w.WriteHeader(ac.Status())
+// SetHeaders sets the WWW-Authenticate value for the response.
+func (ac authChallenge) SetHeaders(w http.ResponseWriter) {
+	w.Header().Add("WWW-Authenticate", ac.challengeParams())
 }
 
 // accessController implements the auth.AccessController interface.
@@ -225,7 +219,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.
 		accessSet: newAccessSet(accessItems...),
 	}
 
-	req, err := ctxu.GetRequest(ctx)
+	req, err := context.GetRequest(ctx)
 	if err != nil {
 		return nil, err
 	}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go
new file mode 100644
index 000000000000..7d8f1d957685
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/api_version.go
@@ -0,0 +1,58 @@
+package auth
+
+import (
+	"net/http"
+	"strings"
+)
+
+// APIVersion represents a version of an API including its
+// type and version number.
+type APIVersion struct {
+	// Type refers to the name of a specific API specification
+	// such as "registry"
+	Type string
+
+	// Version is the version of the API specification implemented.
+	// This may omit the revision number and only include
+	// the major and minor version, such as "2.0"
+	Version string
+}
+
+// String returns the string formatted API Version
+func (v APIVersion) String() string {
+	return v.Type + "/" + v.Version
+}
+
+// APIVersions gets the API versions out of an HTTP response using the provided
+// version header as the key for the HTTP header.
+func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
+	versions := []APIVersion{}
+	if versionHeader != "" {
+		for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
+			for _, version := range strings.Fields(supportedVersions) {
+				versions = append(versions, ParseAPIVersion(version))
+			}
+		}
+	}
+	return versions
+}
+
+// ParseAPIVersion parses an API version string into an APIVersion
+// Format (Expected, not enforced):
+// API version string = <API type> '/' <API version>
+// API type = [a-z][a-z0-9]*
+// API version = [0-9]+(\.[0-9]+)?
+// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
+func ParseAPIVersion(versionStr string) APIVersion {
+	idx := strings.IndexRune(versionStr, '/')
+	if idx == -1 {
+		return APIVersion{
+			Type:    "unknown",
+			Version: versionStr,
+		}
+	}
+	return APIVersion{
+		Type:    strings.ToLower(versionStr[:idx]),
+		Version: versionStr[idx+1:],
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go
new file mode 100644
index 000000000000..a6ad45d85439
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go
@@ -0,0 +1,219 @@
+package auth
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
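Before the ChallengeManager interface below, it may help to see the intended round trip: ping the /v2/ root, feed the response to the manager, then query it. A hedged sketch (the endpoint is an assumption; the ping helper in the session tests later in this diff does essentially the same thing):

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	manager := auth.NewSimpleChallengeManager()

	// An unauthenticated GET of the assumed registry root; a 401 response
	// carries the WWW-Authenticate challenges we want to record.
	resp, err := http.Get("http://localhost:5000/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if err := manager.AddResponse(resp); err != nil {
		panic(err)
	}

	// Challenges are keyed by the URL that produced the response.
	challenges, err := manager.GetChallenges("http://localhost:5000/v2/")
	if err != nil {
		panic(err)
	}
	for _, c := range challenges {
		fmt.Println(c.Scheme, c.Parameters)
	}
}

+// ChallengeManager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.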
+type ChallengeManager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint string) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleChallengeManager returns an instance of
+// ChallengeManager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleChallengeManager() ChallengeManager {
+	return simpleChallengeManager{}
+}
+
+type simpleChallengeManager map[string][]Challenge
+
+func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) {
+	challenges := m[endpoint]
+	return challenges, nil
+}
+
+func (m simpleChallengeManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	m[urlCopy.String()] = challenges
+
+	return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
+ return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go new file mode 100644 index 000000000000..9b6a5adc9d2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go @@ -0,0 +1,38 @@ +package auth + +import ( + "net/http" + "testing" +) + +func TestAuthChallengeParse(t *testing.T) { + header := http.Header{} + header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) + + challenges := parseAuthHeader(header) + if len(challenges) != 1 { + t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) + } + challenge := challenges[0] + + if expected := "bearer"; challenge.Scheme != expected { + t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) + } + + if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) + } + + if expected := "registry.example.com"; challenge.Parameters["service"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) + } + + if expected := "fun"; challenge.Parameters["other"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) + } + + if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", 
challenge.Parameters["slashed"], expected) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go new file mode 100644 index 000000000000..27a2aa719106 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go @@ -0,0 +1,256 @@ +package auth + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/transport" +) + +// AuthenticationHandler is an interface for authorizing a request from +// params from a "WWW-Authenicate" header for a single scheme. +type AuthenticationHandler interface { + // Scheme returns the scheme as expected from the "WWW-Authenicate" header. + Scheme() string + + // AuthorizeRequest adds the authorization header to a request (if needed) + // using the parameters from "WWW-Authenticate" method. The parameters + // values depend on the scheme. + AuthorizeRequest(req *http.Request, params map[string]string) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) +} + +// NewAuthorizer creates an authorizer which can handle multiple authentication +// schemes. The handlers are tried in order, the higher priority authentication +// methods should be first. The challengeMap holds a list of challenges for +// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). +func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { + return &endpointAuthorizer{ + challenges: manager, + handlers: handlers, + } +} + +type endpointAuthorizer struct { + challenges ChallengeManager + handlers []AuthenticationHandler + transport http.RoundTripper +} + +func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { + v2Root := strings.Index(req.URL.Path, "/v2/") + if v2Root == -1 { + return nil + } + + ping := url.URL{ + Host: req.URL.Host, + Scheme: req.URL.Scheme, + Path: req.URL.Path[:v2Root+4], + } + + pingEndpoint := ping.String() + + challenges, err := ea.challenges.GetChallenges(pingEndpoint) + if err != nil { + return err + } + + if len(challenges) > 0 { + for _, handler := range ea.handlers { + for _, challenge := range challenges { + if challenge.Scheme != handler.Scheme() { + continue + } + if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { + return err + } + } + } + } + + return nil +} + +type tokenHandler struct { + header http.Header + creds CredentialStore + scope tokenScope + transport http.RoundTripper + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time +} + +// tokenScope represents the scope at which a token will be requested. +// This represents a specific action on a registry resource. +type tokenScope struct { + Resource string + Scope string + Actions []string +} + +func (ts tokenScope) String() string { + return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) +} + +// NewTokenHandler creates a new AuthenicationHandler which supports +// fetching tokens from a remote token server. 
+func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
+	return &tokenHandler{
+		transport: transport,
+		creds:     creds,
+		scope: tokenScope{
+			Resource: "repository",
+			Scope:    scope,
+			Actions:  actions,
+		},
+	}
+}
+
+func (th *tokenHandler) client() *http.Client {
+	return &http.Client{
+		Transport: th.transport,
+		Timeout:   15 * time.Second,
+	}
+}
+
+func (th *tokenHandler) Scheme() string {
+	return "bearer"
+}
+
+func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
+	if err := th.refreshToken(params); err != nil {
+		return err
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache))
+
+	return nil
+}
+
+func (th *tokenHandler) refreshToken(params map[string]string) error {
+	th.tokenLock.Lock()
+	defer th.tokenLock.Unlock()
+	now := time.Now()
+	if now.After(th.tokenExpiration) {
+		token, err := th.fetchToken(params)
+		if err != nil {
+			return err
+		}
+		th.tokenCache = token
+		th.tokenExpiration = now.Add(time.Minute)
+	}
+
+	return nil
+}
+
+type tokenResponse struct {
+	Token string `json:"token"`
+}
+
+func (th *tokenHandler) fetchToken(params map[string]string) (token string, err error) {
+	//log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username)
+	realm, ok := params["realm"]
+	if !ok {
+		return "", errors.New("no realm specified for token auth challenge")
+	}
+
+	// TODO(dmcgowan): Handle empty scheme
+
+	realmURL, err := url.Parse(realm)
+	if err != nil {
+		return "", fmt.Errorf("invalid token auth challenge realm: %s", err)
+	}
+
+	req, err := http.NewRequest("GET", realmURL.String(), nil)
+	if err != nil {
+		return "", err
+	}
+
+	reqParams := req.URL.Query()
+	service := params["service"]
+	scope := th.scope.String()
+
+	if service != "" {
+		reqParams.Add("service", service)
+	}
+
+	for _, scopeField := range strings.Fields(scope) {
+		reqParams.Add("scope", scopeField)
+	}
+
+	if th.creds != nil {
+		username, password := th.creds.Basic(realmURL)
+		if username != "" && password != "" {
+			reqParams.Add("account", username)
+			req.SetBasicAuth(username, password)
+		}
+	}
+
+	req.URL.RawQuery = reqParams.Encode()
+
+	resp, err := th.client().Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if !client.SuccessStatus(resp.StatusCode) {
+		return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode))
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	tr := new(tokenResponse)
+	if err = decoder.Decode(tr); err != nil {
+		return "", fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	if tr.Token == "" {
+		return "", errors.New("authorization server did not include a token in the response")
+	}
+
+	return tr.Token, nil
+}
+
+type basicHandler struct {
+	creds CredentialStore
+}
+
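A CredentialStore implementation is left to the caller. A minimal static one, mirroring the testCredentialStore used in the session tests later in this diff (the type name and values are illustrative):

package main

import (
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
)

// staticCredentialStore returns the same basic credentials for every URL.
type staticCredentialStore struct {
	username, password string
}

func (s staticCredentialStore) Basic(*url.URL) (string, string) {
	return s.username, s.password
}

func main() {
	creds := staticCredentialStore{username: "user", password: "secret"}
	_ = auth.NewBasicHandler(creds) // combine with NewAuthorizer as above
}

+// NewBasicHandler creates a new authentication handler which adds
+// basic authentication credentials to a request.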
+func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return errors.New("no basic auth credentials") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go new file mode 100644 index 000000000000..1b4754abf9c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go @@ -0,0 +1,311 @@ +package auth + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/testutil" +) + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +type testAuthenticationWrapper struct { + headers http.Header + authCheck func(string) bool + next http.Handler +} + +func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + if auth == "" || !w.authCheck(auth) { + h := rw.Header() + for k, values := range w.headers { + h[k] = values + } + rw.WriteHeader(http.StatusUnauthorized) + return + } + w.next.ServeHTTP(rw, r) +} + +func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { + h := testutil.NewHandler(rrm) + wrapper := &testAuthenticationWrapper{ + + headers: http.Header(map[string][]string{ + "X-API-Version": {"registry/2.0"}, + "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, + "WWW-Authenticate": {authenticate}, + }), + authCheck: authCheck, + next: h, + } + + s := httptest.NewServer(wrapper) + return s.URL, s.Close +} + +// ping pings the provided endpoint to determine its required authorization challenges. +// If a version header is provided, the versions will be returned. 
+func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) { + resp, err := http.Get(endpoint) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return nil, err + } + + return APIVersions(resp, versionHeader), err +} + +type testCredentialStore struct { + username string + password string +} + +func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { + return tcs.username, tcs.password +} + +func TestEndpointAuthorizeToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"badtoken"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := NewSimpleChallengeManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + badCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e2, c2 := testServerWithAuth(m, authenicate, badCheck) + defer c2() + + challengeManager2 := NewSimpleChallengeManager() + versions, err = ping(challengeManager2, e+"/v2/", "x-multi-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 3 { + t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) + } + if check := (APIVersion{Type: "trust", Version: 
"1.0"}); versions[2] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) + } + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func TestEndpointAuthorizeTokenBasic(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + basicCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} + +func TestEndpointAuthorizeBasic(t *testing.T) { + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + username := "user1" + password := "funSecretPa$$word" + authenicate := fmt.Sprintf("Basic realm=localhost") + validCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != 
nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go new file mode 100644 index 000000000000..c7eee4e8c68a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go @@ -0,0 +1,176 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type httpBlobUpload struct { + statter distribution.BlobStatter + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header. + offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { + newOffset := hbu.offset + + switch whence { + case os.SEEK_CUR: + newOffset += 
int64(offset) + case os.SEEK_END: + newOffset += int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + hbu.offset = newOffset + + return hbu.offset, nil +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer_test.go new file mode 100644 index 000000000000..099dca4f01f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer_test.go @@ -0,0 +1,211 @@ +package client + +import ( + "bytes" + "fmt" + "net/http" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" +) + +// Test implements distribution.BlobWriter +var _ distribution.BlobWriter = &httpBlobUpload{} + +func TestUploadReadFrom(t *testing.T) { + _, b := newRandomBlob(64) + repo := "test/upload/readfrom" + locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + }), + }, + }, + // Test Valid case + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {"0-63"}, + }), + }, + }, + // Test invalid range + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {""}, + }), + }, + }, + // Test 404 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + 
Response: testutil.Response{ + StatusCode: http.StatusNotFound, + }, + }, + // Test 400 valid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte(` + { "errors": + [ + { + "code": "BLOB_UPLOAD_INVALID", + "message": "blob upload invalid", + "detail": "more detail" + } + ] + } `), + }, + }, + // Test 400 invalid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte("something bad happened"), + }, + }, + // Test 500 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusInternalServerError, + }, + }, + }) + + e, c := testServer(m) + defer c() + + blobUpload := &httpBlobUpload{ + client: &http.Client{}, + } + + // Valid case + blobUpload.location = e + locationPath + n, err := blobUpload.ReadFrom(bytes.NewReader(b)) + if err != nil { + t.Fatalf("Error calling ReadFrom: %s", err) + } + if n != 64 { + t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) + } + + // Bad range + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when bad range received") + } + + // 404 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("Wrong error thrown: %s, expected %s", err, distribution.ErrBlobUploadUnknown) + } + + // 400 valid json + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(errcode.Errors); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if len(uploadErr) != 1 { + t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr)) + } else { + v2Err, ok := uploadErr[0].(errcode.Error) + if !ok { + t.Fatalf("Not an 'Error' type: %#v", uploadErr[0]) + } + if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { + t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) + } + if expected := "blob upload invalid"; v2Err.Message != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected) + } + if expected := "more detail"; v2Err.Detail.(string) != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected) + } + } + + // 400 invalid json + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else { + respStr := string(uploadErr.Response) + if expected := "something bad happened"; respStr != expected { + t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected) + } + } + + // 500 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if expected := "500 " + 
http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected { + t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/client.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/client.go deleted file mode 100644 index 36be960d1154..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/client.go +++ /dev/null @@ -1,573 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "regexp" - "strconv" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/v2" -) - -// Client implements the client interface to the registry http api -type Client interface { - // GetImageManifest returns an image manifest for the image at the given - // name, tag pair. - GetImageManifest(name, tag string) (*manifest.SignedManifest, error) - - // PutImageManifest uploads an image manifest for the image at the given - // name, tag pair. - PutImageManifest(name, tag string, imageManifest *manifest.SignedManifest) error - - // DeleteImage removes the image at the given name, tag pair. - DeleteImage(name, tag string) error - - // ListImageTags returns a list of all image tags with the given repository - // name. - ListImageTags(name string) ([]string, error) - - // BlobLength returns the length of the blob stored at the given name, - // digest pair. - // Returns a length value of -1 on error or if the blob does not exist. - BlobLength(name string, dgst digest.Digest) (int, error) - - // GetBlob returns the blob stored at the given name, digest pair in the - // form of an io.ReadCloser with the length of this blob. - // A nonzero byteOffset can be provided to receive a partial blob beginning - // at the given offset. - GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) - - // InitiateBlobUpload starts a blob upload in the given repository namespace - // and returns a unique location url to use for other blob upload methods. - InitiateBlobUpload(name string) (string, error) - - // GetBlobUploadStatus returns the byte offset and length of the blob at the - // given upload location. - GetBlobUploadStatus(location string) (int, int, error) - - // UploadBlob uploads a full blob to the registry. - UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error - - // UploadBlobChunk uploads a blob chunk with a given length and startByte to - // the registry. - // FinishChunkedBlobUpload must be called to finalize this upload. - UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error - - // FinishChunkedBlobUpload completes a chunked blob upload at a given - // location. - FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error - - // CancelBlobUpload deletes all content at the unfinished blob upload - // location and invalidates any future calls to this blob upload. - CancelBlobUpload(location string) error -} - -var ( - patternRangeHeader = regexp.MustCompile("bytes=0-(\\d+)/(\\d+)") -) - -// New returns a new Client which operates against a registry with the -// given base endpoint -// This endpoint should not include /v2/ or any part of the url after this. 
-func New(endpoint string) (Client, error) { - ub, err := v2.NewURLBuilderFromString(endpoint) - if err != nil { - return nil, err - } - - return &clientImpl{ - endpoint: endpoint, - ub: ub, - }, nil -} - -// clientImpl is the default implementation of the Client interface -type clientImpl struct { - endpoint string - ub *v2.URLBuilder -} - -// TODO(bbland): use consistent route generation between server and client - -func (r *clientImpl) GetImageManifest(name, tag string) (*manifest.SignedManifest, error) { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return nil, err - } - - response, err := http.Get(manifestURL) - if err != nil { - return nil, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - break - case response.StatusCode == http.StatusNotFound: - return nil, &ImageManifestNotFoundError{Name: name, Tag: tag} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, err - } - return nil, &errs - default: - return nil, &UnexpectedHTTPStatusError{Status: response.Status} - } - - decoder := json.NewDecoder(response.Body) - - manifest := new(manifest.SignedManifest) - err = decoder.Decode(manifest) - if err != nil { - return nil, err - } - return manifest, nil -} - -func (r *clientImpl) PutImageManifest(name, tag string, manifest *manifest.SignedManifest) error { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(manifest.Raw)) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted: - return nil - case response.StatusCode >= 400 && response.StatusCode < 500: - var errors v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errors) - if err != nil { - return err - } - - return &errors - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) DeleteImage(name, tag string) error { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return err - } - - deleteRequest, err := http.NewRequest("DELETE", manifestURL, nil) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(deleteRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - break - case response.StatusCode == http.StatusNotFound: - return &ImageManifestNotFoundError{Name: name, Tag: tag} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } - - return nil -} - -func (r *clientImpl) ListImageTags(name string) ([]string, error) { - tagsURL, err := r.ub.BuildTagsURL(name) - if err != nil { - return nil, err - } - - response, err := http.Get(tagsURL) - if err != nil { - return nil, err - } - defer 
response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - break - case response.StatusCode == http.StatusNotFound: - return nil, &RepositoryNotFoundError{Name: name} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, err - } - return nil, &errs - default: - return nil, &UnexpectedHTTPStatusError{Status: response.Status} - } - - tags := struct { - Tags []string `json:"tags"` - }{} - - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&tags) - if err != nil { - return nil, err - } - - return tags.Tags, nil -} - -func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { - blobURL, err := r.ub.BuildBlobURL(name, dgst) - if err != nil { - return -1, err - } - - response, err := http.Head(blobURL) - if err != nil { - return -1, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - lengthHeader := response.Header.Get("Content-Length") - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return -1, err - } - return int(length), nil - case response.StatusCode == http.StatusNotFound: - return -1, nil - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return -1, err - } - return -1, &errs - default: - return -1, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) { - blobURL, err := r.ub.BuildBlobURL(name, dgst) - if err != nil { - return nil, 0, err - } - - getRequest, err := http.NewRequest("GET", blobURL, nil) - if err != nil { - return nil, 0, err - } - - getRequest.Header.Add("Range", fmt.Sprintf("%d-", byteOffset)) - response, err := http.DefaultClient.Do(getRequest) - if err != nil { - return nil, 0, err - } - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - lengthHeader := response.Header.Get("Content-Length") - length, err := strconv.ParseInt(lengthHeader, 10, 0) - if err != nil { - return nil, 0, err - } - return response.Body, int(length), nil - case response.StatusCode == http.StatusNotFound: - response.Body.Close() - return nil, 0, &BlobNotFoundError{Name: name, Digest: dgst} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, 0, err - } - return nil, 0, &errs - default: - response.Body.Close() - return nil, 0, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) InitiateBlobUpload(name string) (string, error) { - uploadURL, err := r.ub.BuildBlobUploadURL(name) - if err != nil { - return "", err - } - - postRequest, err := http.NewRequest("POST", uploadURL, nil) - if err != nil { - return "", err - } - - response, err := http.DefaultClient.Do(postRequest) - if err != nil { - return "", err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusAccepted: - return response.Header.Get("Location"), nil - // case response.StatusCode == 
http.StatusNotFound: - // return - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return "", err - } - return "", &errs - default: - return "", &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) { - response, err := http.Get(location) - if err != nil { - return 0, 0, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - return parseRangeHeader(response.Header.Get("Range")) - case response.StatusCode == http.StatusNotFound: - return 0, 0, &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return 0, 0, err - } - return 0, 0, &errs - default: - return 0, 0, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error { - defer blob.Close() - - putRequest, err := http.NewRequest("PUT", location, blob) - if err != nil { - return err - } - - values := putRequest.URL.Query() - values.Set("digest", dgst.String()) - putRequest.URL.RawQuery = values.Encode() - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", fmt.Sprint(length)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusCreated: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error { - defer blobChunk.Close() - - putRequest, err := http.NewRequest("PUT", location, blobChunk) - if err != nil { - return err - } - - endByte := startByte + length - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", fmt.Sprint(length)) - putRequest.Header.Set("Content-Range", - fmt.Sprintf("%d-%d/%d", startByte, endByte, endByte)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusAccepted: - return nil - case response.StatusCode == http.StatusRequestedRangeNotSatisfiable: - lastValidRange, blobSize, err := parseRangeHeader(response.Header.Get("Range")) - if err != nil { - return err - } - return &BlobUploadInvalidRangeError{ - Location: location, - LastValidRange: lastValidRange, - BlobSize: blobSize, - } - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := 
json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error { - putRequest, err := http.NewRequest("PUT", location, nil) - if err != nil { - return err - } - - values := putRequest.URL.Query() - values.Set("digest", dgst.String()) - putRequest.URL.RawQuery = values.Encode() - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", "0") - putRequest.Header.Set("Content-Range", - fmt.Sprintf("%d-%d/%d", length, length, length)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusCreated: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) CancelBlobUpload(location string) error { - deleteRequest, err := http.NewRequest("DELETE", location, nil) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(deleteRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -// parseRangeHeader parses out the offset and length from a returned Range -// header -func parseRangeHeader(byteRangeHeader string) (int, int, error) { - submatches := patternRangeHeader.FindStringSubmatch(byteRangeHeader) - if submatches == nil || len(submatches) < 3 { - return 0, 0, fmt.Errorf("Malformed Range header") - } - - offset, err := strconv.Atoi(submatches[1]) - if err != nil { - return 0, 0, err - } - length, err := strconv.Atoi(submatches[2]) - if err != nil { - return 0, 0, err - } - return offset, length, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/client_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/client_test.go deleted file mode 100644 index 2c1d1cc20274..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/client_test.go +++ /dev/null @@ -1,440 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "sync" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/testutil" -) - -type testBlob struct { - digest digest.Digest - contents []byte -} - -func TestRangeHeaderParser(t *testing.T) { - const ( - malformedRangeHeader = "bytes=0-A/C" - emptyRangeHeader = 
"" - rFirst = 100 - rSecond = 200 - ) - - var ( - wellformedRangeHeader = fmt.Sprintf("bytes=0-%d/%d", rFirst, rSecond) - ) - - if _, _, err := parseRangeHeader(malformedRangeHeader); err == nil { - t.Fatalf("malformedRangeHeader: error expected, got nil") - } - - if _, _, err := parseRangeHeader(emptyRangeHeader); err == nil { - t.Fatalf("emptyRangeHeader: error expected, got nil") - } - - first, second, err := parseRangeHeader(wellformedRangeHeader) - if err != nil { - t.Fatalf("wellformedRangeHeader: unexpected error %v", err) - } - - if first != rFirst || second != rSecond { - t.Fatalf("Range has been parsed unproperly: %d/%d", first, second) - } - -} - -func TestPush(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: "tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - uploadLocations := make([]string, len(testBlobs)) - blobs := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, blob := range testBlobs { - // TODO(bbland): this is returning the same location for all uploads, - // because we can't know which blob will get which location. - // It's sort of okay because we're using unique digests, but this needs - // to change at some point. - uploadLocations[i] = fmt.Sprintf("/v2/%s/blobs/test-uuid", name) - blobs[i] = manifest.FSLayer{BlobSum: blob.digest} - history[i] = manifest.History{V1Compatibility: blob.digest.String()} - } - - m := &manifest.SignedManifest{ - Manifest: manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - }, - } - var err error - m.Raw, err = json.Marshal(m) - - blobRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) - for i, blob := range testBlobs { - blobRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + name + "/blobs/uploads/", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Location": {uploadLocations[i]}, - }), - }, - } - blobRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: uploadLocations[i], - QueryParams: map[string][]string{ - "digest": {blob.digest.String()}, - }, - Body: blob.contents, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - }, - } - } - - handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + name + "/manifests/" + tag, - Body: m.Raw, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - }, - })) - var server *httptest.Server - - // HACK(stevvooe): Super hack to follow: the request response map approach - // above does not let us correctly format the location header to the - // server url. This handler intercepts and re-writes the location header - // to the server url. 
- - hack := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w = &headerInterceptingResponseWriter{ResponseWriter: w, serverURL: server.URL} - handler.ServeHTTP(w, r) - }) - - server = httptest.NewServer(hack) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - writer, err := l.Writer() - if err != nil { - t.Fatal(err) - } - - writer.SetSize(len(blob.contents)) - writer.Write(blob.contents) - writer.Close() - } - - objectStore.WriteManifest(name, tag, m) - - err = Push(client, objectStore, name, tag) - if err != nil { - t.Fatal(err) - } -} - -func TestPull(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: "tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - blobs := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, blob := range testBlobs { - blobs[i] = manifest.FSLayer{BlobSum: blob.digest} - history[i] = manifest.History{V1Compatibility: blob.digest.String()} - } - - m := &manifest.SignedManifest{ - Manifest: manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - }, - } - manifestBytes, err := json.Marshal(m) - - blobRequestResponseMappings := make([]testutil.RequestResponseMapping, len(testBlobs)) - for i, blob := range testBlobs { - blobRequestResponseMappings[i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents, - }, - } - } - - handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/manifests/" + tag, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: manifestBytes, - }, - })) - server := httptest.NewServer(handler) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - err = Pull(client, objectStore, name, tag) - if err != nil { - t.Fatal(err) - } - - m, err = objectStore.Manifest(name, tag) - if err != nil { - t.Fatal(err) - } - - mBytes, err := json.Marshal(m) - if err != nil { - t.Fatal(err) - } - - if string(mBytes) != string(manifestBytes) { - t.Fatal("Incorrect manifest") - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - reader, err := l.Reader() - if err != nil { - t.Fatal(err) - } - defer reader.Close() - - blobBytes, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatal(err) - } - - if string(blobBytes) != string(blob.contents) { - t.Fatal("Incorrect blob") - } - } -} - -func TestPullResume(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: 
"tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - layers := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, layer := range testBlobs { - layers[i] = manifest.FSLayer{BlobSum: layer.digest} - history[i] = manifest.History{V1Compatibility: layer.digest.String()} - } - - m := &manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: layers, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - } - manifestBytes, err := json.Marshal(m) - - layerRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) - for i, blob := range testBlobs { - layerRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents[:len(blob.contents)/2], - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(blob.contents))}, - }), - }, - } - layerRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents[len(blob.contents)/2:], - }, - } - } - - for i := 0; i < 3; i++ { - layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/manifests/" + tag, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: manifestBytes, - }, - }) - } - - handler := testutil.NewHandler(layerRequestResponseMappings) - server := httptest.NewServer(handler) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - for attempts := 0; attempts < 3; attempts++ { - err = Pull(client, objectStore, name, tag) - if err == nil { - break - } - } - - if err != nil { - t.Fatal(err) - } - - sm, err := objectStore.Manifest(name, tag) - if err != nil { - t.Fatal(err) - } - - mBytes, err := json.Marshal(sm) - if err != nil { - t.Fatal(err) - } - - if string(mBytes) != string(manifestBytes) { - t.Fatal("Incorrect manifest") - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - reader, err := l.Reader() - if err != nil { - t.Fatal(err) - } - defer reader.Close() - - layerBytes, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatal(err) - } - - if string(layerBytes) != string(blob.contents) { - t.Fatal("Incorrect blob") - } - } -} - -// headerInterceptingResponseWriter is a hacky workaround to re-write the -// location header to have the server url. 
-type headerInterceptingResponseWriter struct { - http.ResponseWriter - serverURL string -} - -func (hirw *headerInterceptingResponseWriter) WriteHeader(status int) { - location := hirw.Header().Get("Location") - if location != "" { - hirw.Header().Set("Location", hirw.serverURL+location) - } - - hirw.ResponseWriter.WriteHeader(status) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go index 3e89e674f6de..7305c021cf56 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go @@ -1,79 +1,68 @@ package client import ( + "encoding/json" "fmt" + "io" + "io/ioutil" + "net/http" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" ) -// RepositoryNotFoundError is returned when making an operation against a -// repository that does not exist in the registry. -type RepositoryNotFoundError struct { - Name string -} - -func (e *RepositoryNotFoundError) Error() string { - return fmt.Sprintf("No repository found with Name: %s", e.Name) -} - -// ImageManifestNotFoundError is returned when making an operation against a -// given image manifest that does not exist in the registry. -type ImageManifestNotFoundError struct { - Name string - Tag string -} - -func (e *ImageManifestNotFoundError) Error() string { - return fmt.Sprintf("No manifest found with Name: %s, Tag: %s", - e.Name, e.Tag) -} - -// BlobNotFoundError is returned when making an operation against a given image -// layer that does not exist in the registry. -type BlobNotFoundError struct { - Name string - Digest digest.Digest +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type UnexpectedHTTPStatusError struct { + Status string } -func (e *BlobNotFoundError) Error() string { - return fmt.Sprintf("No blob found with Name: %s, Digest: %s", - e.Name, e.Digest) +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) } -// BlobUploadNotFoundError is returned when making a blob upload operation against an -// invalid blob upload location url. -// This may be the result of using a cancelled, completed, or stale upload -// location. -type BlobUploadNotFoundError struct { - Location string +// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. +type UnexpectedHTTPResponseError struct { + ParseErr error + Response []byte } -func (e *BlobUploadNotFoundError) Error() string { - return fmt.Sprintf("No blob upload found at Location: %s", e.Location) +func (e *UnexpectedHTTPResponseError) Error() string { + return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) } -// BlobUploadInvalidRangeError is returned when attempting to upload an image -// blob chunk that is out of order. -// This provides the known BlobSize and LastValidRange which can be used to -// resume the upload. 
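The rewritten errors.go interleaved here drops these client-defined sentinel types: 4xx responses are now decoded into errcode.Errors from the response body, so callers inspect registry error codes instead of asserting on concrete structs. A caller-side sketch, assuming ms is a manifest service and sm a signed manifest obtained elsewhere:

    if err := ms.Put(sm); err != nil {
        // parseHTTPErrorResponse returns an errcode.Errors value on 4xx.
        if errs, ok := err.(errcode.Errors); ok {
            for _, e := range errs {
                // inspect individual registry error codes here
                _ = e
            }
        }
    }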
-type BlobUploadInvalidRangeError struct { - Location string - LastValidRange int - BlobSize int -} +func parseHTTPErrorResponse(r io.Reader) error { + var errors errcode.Errors + body, err := ioutil.ReadAll(r) + if err != nil { + return err + } -func (e *BlobUploadInvalidRangeError) Error() string { - return fmt.Sprintf( - "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Blob Size: %d", - e.Location, e.LastValidRange, e.BlobSize) + if err := json.Unmarshal(body, &errors); err != nil { + return &UnexpectedHTTPResponseError{ + ParseErr: err, + Response: body, + } + } + return errors } -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string +func handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == 401 { + err := parseHTTPErrorResponse(resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + return parseHTTPErrorResponse(resp.Body) + } + return &UnexpectedHTTPStatusError{Status: resp.Status} } -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). +func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/objectstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/objectstore.go deleted file mode 100644 index 5969c9d28313..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/objectstore.go +++ /dev/null @@ -1,239 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "sync" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -var ( - // ErrLayerAlreadyExists is returned when attempting to create a layer with - // a tarsum that is already in use. - ErrLayerAlreadyExists = fmt.Errorf("Layer already exists") - - // ErrLayerLocked is returned when attempting to write to a layer which is - // currently being written to. - ErrLayerLocked = fmt.Errorf("Layer locked") -) - -// ObjectStore is an interface which is designed to approximate the docker -// engine storage. This interface is subject to change to conform to the -// future requirements of the engine. -type ObjectStore interface { - // Manifest retrieves the image manifest stored at the given repository name - // and tag - Manifest(name, tag string) (*manifest.SignedManifest, error) - - // WriteManifest stores an image manifest at the given repository name and - // tag - WriteManifest(name, tag string, manifest *manifest.SignedManifest) error - - // Layer returns a handle to a layer for reading and writing - Layer(dgst digest.Digest) (Layer, error) -} - -// Layer is a generic image layer interface. -// A Layer may not be written to if it is already complete. -type Layer interface { - // Reader returns a LayerReader or an error if the layer has not been - // written to or is currently being written to. - Reader() (LayerReader, error) - - // Writer returns a LayerWriter or an error if the layer has been fully - // written to or is currently being written to. 
- Writer() (LayerWriter, error) - - // Wait blocks until the Layer can be read from. - Wait() error -} - -// LayerReader is a read-only handle to a Layer, which exposes the CurrentSize -// and full Size in addition to implementing the io.ReadCloser interface. -type LayerReader interface { - io.ReadCloser - - // CurrentSize returns the number of bytes written to the underlying Layer - CurrentSize() int - - // Size returns the full size of the underlying Layer - Size() int -} - -// LayerWriter is a write-only handle to a Layer, which exposes the CurrentSize -// and full Size in addition to implementing the io.WriteCloser interface. -// SetSize must be called on this LayerWriter before it can be written to. -type LayerWriter interface { - io.WriteCloser - - // CurrentSize returns the number of bytes written to the underlying Layer - CurrentSize() int - - // Size returns the full size of the underlying Layer - Size() int - - // SetSize sets the full size of the underlying Layer. - // This must be called before any calls to Write - SetSize(int) error -} - -// memoryObjectStore is an in-memory implementation of the ObjectStore interface -type memoryObjectStore struct { - mutex *sync.Mutex - manifestStorage map[string]*manifest.SignedManifest - layerStorage map[digest.Digest]Layer -} - -func (objStore *memoryObjectStore) Manifest(name, tag string) (*manifest.SignedManifest, error) { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - manifest, ok := objStore.manifestStorage[name+":"+tag] - if !ok { - return nil, fmt.Errorf("No manifest found with Name: %q, Tag: %q", name, tag) - } - return manifest, nil -} - -func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *manifest.SignedManifest) error { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - objStore.manifestStorage[name+":"+tag] = manifest - return nil -} - -func (objStore *memoryObjectStore) Layer(dgst digest.Digest) (Layer, error) { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - layer, ok := objStore.layerStorage[dgst] - if !ok { - layer = &memoryLayer{cond: sync.NewCond(new(sync.Mutex))} - objStore.layerStorage[dgst] = layer - } - - return layer, nil -} - -type memoryLayer struct { - cond *sync.Cond - contents []byte - expectedSize int - writing bool -} - -func (ml *memoryLayer) Reader() (LayerReader, error) { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents == nil { - return nil, fmt.Errorf("Layer has not been written to yet") - } - if ml.writing { - return nil, ErrLayerLocked - } - - return &memoryLayerReader{ml: ml, reader: bytes.NewReader(ml.contents)}, nil -} - -func (ml *memoryLayer) Writer() (LayerWriter, error) { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents != nil { - if ml.writing { - return nil, ErrLayerLocked - } - if ml.expectedSize == len(ml.contents) { - return nil, ErrLayerAlreadyExists - } - } else { - ml.contents = make([]byte, 0) - } - - ml.writing = true - return &memoryLayerWriter{ml: ml, buffer: bytes.NewBuffer(ml.contents)}, nil -} - -func (ml *memoryLayer) Wait() error { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents == nil { - return fmt.Errorf("No writer to wait on") - } - - for ml.writing { - ml.cond.Wait() - } - - return nil -} - -type memoryLayerReader struct { - ml *memoryLayer - reader *bytes.Reader -} - -func (mlr *memoryLayerReader) Read(p []byte) (int, error) { - return mlr.reader.Read(p) -} - -func (mlr *memoryLayerReader) Close() error { - return nil -} - -func (mlr *memoryLayerReader) CurrentSize() 
int { - return len(mlr.ml.contents) -} - -func (mlr *memoryLayerReader) Size() int { - return mlr.ml.expectedSize -} - -type memoryLayerWriter struct { - ml *memoryLayer - buffer *bytes.Buffer -} - -func (mlw *memoryLayerWriter) Write(p []byte) (int, error) { - if mlw.ml.expectedSize == 0 { - return 0, fmt.Errorf("Must set size before writing to layer") - } - wrote, err := mlw.buffer.Write(p) - mlw.ml.contents = mlw.buffer.Bytes() - return wrote, err -} - -func (mlw *memoryLayerWriter) Close() error { - mlw.ml.cond.L.Lock() - defer mlw.ml.cond.L.Unlock() - - return mlw.close() -} - -func (mlw *memoryLayerWriter) close() error { - mlw.ml.writing = false - mlw.ml.cond.Broadcast() - return nil -} - -func (mlw *memoryLayerWriter) CurrentSize() int { - return len(mlw.ml.contents) -} - -func (mlw *memoryLayerWriter) Size() int { - return mlw.ml.expectedSize -} - -func (mlw *memoryLayerWriter) SetSize(size int) error { - if !mlw.ml.writing { - return fmt.Errorf("Layer is closed for writing") - } - mlw.ml.expectedSize = size - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/pull.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/pull.go deleted file mode 100644 index 385158db18e7..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/pull.go +++ /dev/null @@ -1,151 +0,0 @@ -package client - -import ( - "fmt" - "io" - - log "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/manifest" -) - -// simultaneousLayerPullWindow is the size of the parallel layer pull window. -// A layer may not be pulled until the layer preceding it by the length of the -// pull window has been successfully pulled. -const simultaneousLayerPullWindow = 4 - -// Pull implements a client pull workflow for the image defined by the given -// name and tag pair, using the given ObjectStore for local manifest and layer -// storage -func Pull(c Client, objectStore ObjectStore, name, tag string) error { - manifest, err := c.GetImageManifest(name, tag) - if err != nil { - return err - } - log.WithField("manifest", manifest).Info("Pulled manifest") - - if len(manifest.FSLayers) != len(manifest.History) { - return fmt.Errorf("Length of history not equal to number of layers") - } - if len(manifest.FSLayers) == 0 { - return fmt.Errorf("Image has no layers") - } - - errChans := make([]chan error, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - errChans[i] = make(chan error) - } - - // To avoid leaking goroutines we must notify - // pullLayer goroutines about a cancelation, - // otherwise they will lock forever. - cancelCh := make(chan struct{}) - - // Iterate over each layer in the manifest, simultaneously pulling no more - // than simultaneousLayerPullWindow layers at a time. If an error is - // received from a layer pull, we abort the pull.
- for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPullWindow; i++ { - dependentLayer := i - simultaneousLayerPullWindow - if dependentLayer >= 0 { - err := <-errChans[dependentLayer] - if err != nil { - log.WithField("error", err).Warn("Pull aborted") - close(cancelCh) - return err - } - } - - if i < len(manifest.FSLayers) { - go func(i int) { - select { - case errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]): - case <-cancelCh: // no chance to receive until cancelCh is closed - } - }(i) - } - } - - err = objectStore.WriteManifest(name, tag, manifest) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "manifest": manifest, - }).Warn("Unable to write image manifest") - return err - } - - return nil -} - -func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer manifest.FSLayer) error { - log.WithField("layer", fsLayer).Info("Pulling layer") - - layer, err := objectStore.Layer(fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to write local layer") - return err - } - - layerWriter, err := layer.Writer() - if err == ErrLayerAlreadyExists { - log.WithField("layer", fsLayer).Info("Layer already exists") - return nil - } - if err == ErrLayerLocked { - log.WithField("layer", fsLayer).Info("Layer download in progress, waiting") - layer.Wait() - return nil - } - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to write local layer") - return err - } - defer layerWriter.Close() - - if layerWriter.CurrentSize() > 0 { - log.WithFields(log.Fields{ - "layer": fsLayer, - "currentSize": layerWriter.CurrentSize(), - "size": layerWriter.Size(), - }).Info("Layer partially downloaded, resuming") - } - - layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, layerWriter.CurrentSize()) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to download layer") - return err - } - defer layerReader.Close() - - layerWriter.SetSize(layerWriter.CurrentSize() + length) - - _, err = io.Copy(layerWriter, layerReader) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to download layer") - return err - } - if layerWriter.CurrentSize() != layerWriter.Size() { - log.WithFields(log.Fields{ - "size": layerWriter.Size(), - "currentSize": layerWriter.CurrentSize(), - "layer": fsLayer, - }).Warn("Layer invalid size") - return fmt.Errorf( - "Wrote incorrect number of bytes for layer %v. Expected %d, Wrote %d", - fsLayer, layerWriter.Size(), layerWriter.CurrentSize(), - ) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/push.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/push.go deleted file mode 100644 index c26bd174ce97..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/push.go +++ /dev/null @@ -1,137 +0,0 @@ -package client - -import ( - "fmt" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/manifest" -) - -// simultaneousLayerPushWindow is the size of the parallel layer push window. -// A layer may not be pushed until the layer preceding it by the length of the -// push window has been successfully pushed.
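Both the deleted pull and push paths use the same bounded-window pipeline: iteration i first drains the error channel of item i-window before launching item i, so at most window goroutines run at once and any failure aborts before further work starts. A condensed, generic sketch of the pattern under invented names (items, process); buffered channels let it skip the cancelCh handshake the original needs for its unbuffered sends:

    func processWindowed(items []string, window int, process func(string) error) error {
        errChans := make([]chan error, len(items))
        for i := range items {
            // Buffered so a worker can send its result and exit
            // even if the caller has already returned on an error.
            errChans[i] = make(chan error, 1)
        }
        for i := 0; i < len(items)+window; i++ {
            if j := i - window; j >= 0 {
                if err := <-errChans[j]; err != nil {
                    return err // an earlier item failed; abort
                }
            }
            if i < len(items) {
                go func(i int) { errChans[i] <- process(items[i]) }(i)
            }
        }
        return nil
    }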
-const simultaneousLayerPushWindow = 4 - -type pushFunction func(fsLayer manifest.FSLayer) error - -// Push implements a client push workflow for the image defined by the given -// name and tag pair, using the given ObjectStore for local manifest and layer -// storage -func Push(c Client, objectStore ObjectStore, name, tag string) error { - manifest, err := objectStore.Manifest(name, tag) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "name": name, - "tag": tag, - }).Info("No image found") - return err - } - - errChans := make([]chan error, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - errChans[i] = make(chan error) - } - - cancelCh := make(chan struct{}) - - // Iterate over each layer in the manifest, simultaneously pushing no more - // than simultaneousLayerPushWindow layers at a time. If an error is - // received from a layer push, we abort the push. - for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPushWindow; i++ { - dependentLayer := i - simultaneousLayerPushWindow - if dependentLayer >= 0 { - err := <-errChans[dependentLayer] - if err != nil { - log.WithField("error", err).Warn("Push aborted") - close(cancelCh) - return err - } - } - - if i < len(manifest.FSLayers) { - go func(i int) { - select { - case errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i]): - case <-cancelCh: // recv broadcast notification about cancelation - } - }(i) - } - } - - err = c.PutImageManifest(name, tag, manifest) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "manifest": manifest, - }).Warn("Unable to upload manifest") - return err - } - - return nil -} - -func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer manifest.FSLayer) error { - log.WithField("layer", fsLayer).Info("Pushing layer") - - layer, err := objectStore.Layer(fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err - } - - layerReader, err := layer.Reader() - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err - } - defer layerReader.Close() - - if layerReader.CurrentSize() != layerReader.Size() { - log.WithFields(log.Fields{ - "layer": fsLayer, - "currentSize": layerReader.CurrentSize(), - "size": layerReader.Size(), - }).Warn("Local layer incomplete") - return fmt.Errorf("Local layer incomplete") - } - - length, err := c.BlobLength(name, fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to check existence of remote layer") - return err - } - if length >= 0 { - log.WithField("layer", fsLayer).Info("Layer already exists") - return nil - } - - location, err := c.InitiateBlobUpload(name) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err - } - - err = c.UploadBlob(location, layerReader, int(layerReader.CurrentSize()), fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go new file mode 100644 index 000000000000..e7685b4daef5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go @@ 
-0,0 +1,569 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" +) + +// Registry provides an interface for calling Repositories, which returns a catalog of repositories. +type Registry interface { + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// NewRegistry creates a registry namespace which can be used to get a listing of repositories +func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { + ub, err := v2.NewURLBuilderFromString(baseURL) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + Timeout: 1 * time.Minute, + } + + return &registry{ + client: client, + ub: ub, + context: ctx, + }, nil +} + +type registry struct { + client *http.Client + ub *v2.URLBuilder + context context.Context +} + +// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size +// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there +// are no more entries. +func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { + var numFilled int + var returnErr error + + values := buildCatalogValues(len(entries), last) + u, err := r.ub.BuildCatalogURL(values) + if err != nil { + return 0, err + } + + resp, err := r.client.Get(u) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } + + for cnt := range ctlg.Repositories { + entries[cnt] = ctlg.Repositories[cnt] + } + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF + } + } else { + return 0, handleErrorResponse(resp) + } + + return numFilled, returnErr +} + +// NewRepository creates a new Repository for the given repository name and base URL +func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + if err := v2.ValidateRepositoryName(name); err != nil { + return nil, err + } + + ub, err := v2.NewURLBuilderFromString(baseURL) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + context: ctx, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name string +} + +func (r *repository) Name() string { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.Name(), + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.Name(), + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(),
statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.Name(), + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Signatures() distribution.SignatureService { + ms, _ := r.Manifests(r.context) + return &signatures{ + manifests: ms, + } +} + +type signatures struct { + manifests distribution.ManifestService +} + +func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) { + m, err := s.manifests.Get(dgst) + if err != nil { + return nil, err + } + return m.Signatures() +} + +func (s *signatures) Enumerate(dgst digest.Digest) ([]digest.Digest, error) { + panic("not implemented") +} + +func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { + panic("not implemented") +} + +func (s *signatures) Delete(revision, dgst digest.Digest) error { + panic("not implemented") +} + +type manifests struct { + name string + ub *v2.URLBuilder + client *http.Client + etags map[string]string +} + +func (ms *manifests) Tags() ([]string, error) { + u, err := ms.ub.BuildTagsURL(ms.name) + if err != nil { + return nil, err + } + + resp, err := ms.client.Get(u) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return nil, err + } + + return tagsResponse.Tags, nil + } else if resp.StatusCode == http.StatusNotFound { + return nil, nil + } + return nil, handleErrorResponse(resp) +} + +func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { + // Call by Tag endpoint since the API uses the same + // URL endpoint for tags and digests. + return ms.ExistsByTag(dgst.String()) +} + +func (ms *manifests) ExistsByTag(tag string) (bool, error) { + u, err := ms.ub.BuildManifestURL(ms.name, tag) + if err != nil { + return false, err + } + + resp, err := ms.client.Head(u) + if err != nil { + return false, err + } + + if SuccessStatus(resp.StatusCode) { + return true, nil + } else if resp.StatusCode == http.StatusNotFound { + return false, nil + } + return false, handleErrorResponse(resp) +} + +func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + // Call by Tag endpoint since the API uses the same + // URL endpoint for tags and digests. + return ms.GetByTag(dgst.String()) +} + +// AddEtagToTag allows a client to supply an eTag to GetByTag which will be +// used for a conditional HTTP request. If the eTag matches, a nil manifest +// and nil error will be returned. etag is automatically quoted when added to +// this map. 
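The option defined next is the client half of HTTP conditional GETs. A usage sketch under assumed names (savedEtag was captured from an earlier response; the tag "latest" is invented):

    ms, err := repo.Manifests(ctx)
    if err != nil {
        // handle error
    }
    sm, err := ms.GetByTag("latest", AddEtagToTag("latest", savedEtag))
    if err != nil {
        // handle error
    }
    if sm == nil {
        // HTTP 304 Not Modified: the cached manifest is still current.
    }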
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { + return func(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[tag] = fmt.Sprintf(`"%s"`, etag) + return nil + } + return fmt.Errorf("etag options is a client-only option") + } +} + +func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + for _, option := range options { + err := option(ms) + if err != nil { + return nil, err + } + } + + u, err := ms.ub.BuildManifestURL(ms.name, tag) + if err != nil { + return nil, err + } + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + if _, ok := ms.etags[tag]; ok { + req.Header.Set("If-None-Match", ms.etags[tag]) + } + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, nil + } else if SuccessStatus(resp.StatusCode) { + var sm manifest.SignedManifest + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&sm); err != nil { + return nil, err + } + return &sm, nil + } + return nil, handleErrorResponse(resp) +} + +func (ms *manifests) Enumerate() ([]digest.Digest, error) { + panic("not implemented") +} + +func (ms *manifests) Put(m *manifest.SignedManifest) error { + manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) + if err != nil { + return err + } + + // todo(richardscothern): do something with options here when they become applicable + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw)) + if err != nil { + return err + } + + resp, err := ms.client.Do(putRequest) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + // TODO(dmcgowan): make use of digest header + return nil + } + return handleErrorResponse(resp) +} + +func (ms *manifests) Delete(dgst digest.Digest) error { + u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return handleErrorResponse(resp) +} + +type blobs struct { + name string + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, source string) (string, error) { + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + if locationURL.Scheme == "" { + sourceURL, err := url.Parse(source) + if err != nil { + return "", err + } + locationURL = &url.URL{ + Scheme: sourceURL.Scheme, + Host: sourceURL.Host, + Path: location, + } + location = locationURL.String() + } + return location, nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + desc, err := bs.Stat(ctx, dgst) + if err != nil { + return nil, err + } + reader, err := bs.Open(ctx, desc.Digest) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + stat, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return nil, err + } + + 
blobURL, err := bs.ub.BuildBlobURL(bs.name, stat.Digest) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil +} + +func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.New() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { + u, err := bs.ub.BuildBlobUploadURL(bs.name) + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + } + return nil, handleErrorResponse(resp) +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +func (bs *blobs) Enumerate(ctx context.Context, digests []digest.Digest, last string) (n int, err error) { + panic("not implemented") +} + +type blobStatter struct { + name string + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + u, err := bs.ub.BuildBlobURL(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + } else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, handleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != 
nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return handleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go new file mode 100644 index 000000000000..8a7a598e6862 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go @@ -0,0 +1,858 @@ +package client + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/distribution/uuid" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/testutil" +) + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +func newRandomBlob(size int) (digest.Digest, []byte) { + b := make([]byte, size) + if n, err := rand.Read(b); err != nil { + panic(err) + } else if n != size { + panic("unable to read enough bytes") + } + + dgst, err := digest.FromBytes(b) + if err != nil { + panic(err) + } + + return dgst, b +} + +func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) +} + +func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { + headers := map[string][]string{ + "Content-Length": {strconv.Itoa(len(content))}, + "Content-Type": {"application/json; charset=utf-8"}, + } + if link != "" { + headers["Link"] = append(headers["Link"], link) + } + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: route, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(headers), + }, + }) +} + +func TestBlobDelete(t *testing.T) { + dgst, _ := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo := "test.example.com/repo1" + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) 
+ + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + err = l.Delete(ctx, dgst) + if err != nil { + t.Errorf("Error deleting blob: %s", err.Error()) + } + +} + +func TestBlobFetch(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + b, err := l.Get(ctx, d1) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(b, b1) != 0 { + t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) + } + + // TODO(dmcgowan): Test for unknown blob case +} + +func TestBlobExists(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + stat, err := l.Stat(ctx, d1) + if err != nil { + t.Fatal(err) + } + + if stat.Digest != d1 { + t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) + } + + if stat.Size != int64(len(b1)) { + t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1)) + } + + // TODO(dmcgowan): Test error cases and ErrBlobUnknown case +} + +func TestBlobUploadChunked(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + chunks := [][]byte{ + b1[0:256], + b1[256:512], + b1[512:513], + b1[513:1024], + } + repo := "test.example.com/uploadrepo" + uuids := []string{uuid.Generate().String()} + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[0]}, + "Docker-Upload-UUID": {uuids[0]}, + "Range": {"0-0"}, + }), + }, + }) + offset := 0 + for i, chunk := range chunks { + uuids = append(uuids, uuid.Generate().String()) + newOffset := offset + len(chunk) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo + "/blobs/uploads/" + uuids[i], + Body: chunk, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[i+1]}, + "Docker-Upload-UUID": {uuids[i+1]}, + "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, + }), + }, + }) + offset = newOffset + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/blobs/uploads/" + uuids[len(uuids)-1], + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: 
testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(offset)}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uuids[0] { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) + } + + for _, chunk := range chunks { + n, err := upload.Write(chunk) + if err != nil { + t.Fatal(err) + } + if n != len(chunk) { + t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) + } + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func TestBlobUploadMonolithic(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo := "test.example.com/uploadrepo" + uploadID := uuid.Generate().String() + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Range": {"0-0"}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Body: b1, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(b1))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uploadID { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) + } + + n, err := upload.ReadFrom(bytes.NewReader(b1)) + if err != nil { + t.Fatal(err) + } + if n != int64(len(b1)) { + t.Fatalf("Unexpected ReadFrom length: %d; 
expected: %d", n, len(b1)) + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*manifest.SignedManifest, digest.Digest) { + blobs := make([]manifest.FSLayer, blobCount) + history := make([]manifest.History, blobCount) + + for i := 0; i < blobCount; i++ { + dgst, blob := newRandomBlob((i % 5) * 16) + + blobs[i] = manifest.FSLayer{BlobSum: dgst} + history[i] = manifest.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} + } + + m := &manifest.SignedManifest{ + Manifest: manifest.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + }, + } + manifestBytes, err := json.Marshal(m) + if err != nil { + panic(err) + } + dgst, err := digest.FromBytes(manifestBytes) + if err != nil { + panic(err) + } + + m.Raw = manifestBytes + + return m, dgst +} + +func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { + actualDigest, _ := digest.FromBytes(content) + getReqWithEtag := testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + reference, + Headers: http.Header(map[string][]string{ + "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, + }), + } + + var getRespWithEtag testutil.Response + if actualDigest.String() == dgst { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusNotModified, + Body: []byte{}, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + } + } else { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + } + + } + *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) +} + +func addTestManifest(repo, reference string, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + +} + +func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { + if m1.Name != m2.Name { + return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) + } + if m1.Tag != m2.Tag { + return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) + } + if len(m1.FSLayers) != len(m2.FSLayers) { + return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) + } 
+ for i := range m1.FSLayers { + if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { + return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) + } + } + if len(m1.History) != len(m2.History) { + return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) + } + for i := range m1.History { + if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { + return fmt.Errorf("blobsum does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) + } + } + return nil +} + +func TestManifestFetch(t *testing.T) { + ctx := context.Background() + repo := "test.example.com/repo" + m1, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addTestManifest(repo, dgst.String(), m1.Raw, &m) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + ok, err := ms.Exists(dgst) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("Manifest does not exist") + } + + manifest, err := ms.Get(dgst) + if err != nil { + t.Fatal(err) + } + if err := checkEqualManifest(manifest, m1); err != nil { + t.Fatal(err) + } +} + +func TestManifestFetchWithEtag(t *testing.T) { + repo := "test.example.com/repo/by/tag" + m1, d1 := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addTestManifestWithEtag(repo, "latest", m1.Raw, &m, d1.String()) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + m2, err := ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) + if err != nil { + t.Fatal(err) + } + if m2 != nil { + t.Fatal("Expected empty manifest for matching etag") + } +} + +func TestManifestDelete(t *testing.T) { + repo := "test.example.com/repo/delete" + _, dgst1 := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst2 := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo + "/manifests/" + dgst1.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if err := ms.Delete(dgst1); err != nil { + t.Fatal(err) + } + if err := ms.Delete(dgst2); err == nil { + t.Fatal("Expected error deleting unknown manifest") + } + // TODO(dmcgowan): Check for specific unknown error +} + +func TestManifestPut(t *testing.T) { + repo := "test.example.com/repo/delete" + m1, dgst := newRandomSchemaV1Manifest(repo, "other", 6) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/manifests/other", + Body: m1.Raw, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + + e, c := 
testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if err := ms.Put(m1); err != nil { + t.Fatal(err) + } + + // TODO(dmcgowan): Check for invalid input error +} + +func TestManifestTags(t *testing.T) { + repo := "test.example.com/repo/tags/list" + tagsList := []byte(strings.TrimSpace(` +{ + "name": "test.example.com/repo/tags/list", + "tags": [ + "tag1", + "tag2", + "funtag" + ] +} + `)) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/tags/list", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: tagsList, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(tagsList))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + tags, err := ms.Tags() + if err != nil { + t.Fatal(err) + } + + if len(tags) != 3 { + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } + // TODO(dmcgowan): Check array + + // TODO(dmcgowan): Check for error cases +} + +func TestManifestUnauthorized(t *testing.T) { + repo := "test.example.com/repo" + _, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusUnauthorized, + Body: []byte("garbage"), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = ms.Get(dgst) + if err == nil { + t.Fatal("Expected error fetching manifest") + } + v2Err, ok := err.(errcode.Error) + if !ok { + t.Fatalf("Unexpected error type: %#v", err) + } + if v2Err.Code != errcode.ErrorCodeUnauthorized { + t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) + } + if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { + t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) + } +} + +func TestCatalog(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=5", + []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 5) + + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 3 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestCatalogInParts(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=2", + []byte("{\"repositories\":[\"bar\", \"baz\"]}"), + "", &m) + addTestCatalog( + "/v2/_catalog?last=baz&n=2", + []byte("{\"repositories\":[\"foo\"]}"), + "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 2) + + r, err := 
NewRegistry(context.Background(), e, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx := context.Background()
+	numFilled, err := r.Repositories(ctx, entries, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if numFilled != 2 {
+		t.Fatalf("Got wrong number of repos")
+	}
+
+	numFilled, err = r.Repositories(ctx, entries, "baz")
+	if err != io.EOF {
+		t.Fatal(err)
+	}
+
+	if numFilled != 1 {
+		t.Fatalf("Got wrong number of repos")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go
new file mode 100644
index 000000000000..b2e74ddb85de
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go
@@ -0,0 +1,173 @@
+package transport
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset,
+// a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, size int64) ReadSeekCloser {
+	return &httpReadSeeker{
+		client: client,
+		url:    url,
+		size:   size,
+	}
+}
+
+type httpReadSeeker struct {
+	client *http.Client
+	url    string
+
+	size int64
+
+	rc     io.ReadCloser // remote read closer
+	brd    *bufio.Reader // internal buffered io
+	offset int64
+	err    error
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	rd, err := hrs.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	hrs.offset += int64(n)
+
+	// Simulate io.EOF error if we reach filesize.
+	if err == nil && hrs.offset >= hrs.size {
+		err = io.EOF
+	}
+
+	return n, err
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	var err error
+	newOffset := hrs.offset
+
+	switch whence {
+	case os.SEEK_CUR:
+		newOffset += int64(offset)
+	case os.SEEK_END:
+		newOffset = hrs.size + int64(offset)
+	case os.SEEK_SET:
+		newOffset = int64(offset)
+	}
+
+	if newOffset < 0 {
+		err = errors.New("cannot seek to negative position")
+	} else {
+		if hrs.offset != newOffset {
+			hrs.reset()
+		}
+
+		// No problems, set the offset.
+		hrs.offset = newOffset
+	}
+
+	return hrs.offset, err
+}
+
+func (hrs *httpReadSeeker) Close() error {
+	if hrs.err != nil {
+		return hrs.err
+	}
+
+	// close and release reader chain
+	if hrs.rc != nil {
+		hrs.rc.Close()
+	}
+
+	hrs.rc = nil
+	hrs.brd = nil
+
+	hrs.err = errors.New("httpLayer: closed")
+
+	return nil
+}
+
+func (hrs *httpReadSeeker) reset() {
+	if hrs.err != nil {
+		return
+	}
+	if hrs.rc != nil {
+		hrs.rc.Close()
+		hrs.rc = nil
+	}
+}
+
+func (hrs *httpReadSeeker) reader() (io.Reader, error) {
+	if hrs.err != nil {
+		return nil, hrs.err
+	}
+
+	if hrs.rc != nil {
+		return hrs.brd, nil
+	}
+
+	// If the offset is greater than or equal to size, return an empty, noop reader.
+	if hrs.offset >= hrs.size {
+		return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
+	}
+
+	req, err := http.NewRequest("GET", hrs.url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if hrs.offset > 0 {
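+		// A Range header of the form "bytes=<start>-" (RFC 7233) asks the
+		// server to start the response body at byte <start>, which is what
+		// lets a Seek followed by a Read resume a partial download instead
+		// of re-fetching the whole blob.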
+		// If we are at a different offset, issue a range request from there.
+		req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.offset))
+		// TODO: get context in here
+		// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
+	}
+
+	resp, err := hrs.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Normally would use client.SuccessStatus, but that would be a cyclic
+	// import
+	if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
+		hrs.rc = resp.Body
+	} else {
+		defer resp.Body.Close()
+		return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
+	}
+
+	if hrs.brd == nil {
+		hrs.brd = bufio.NewReader(hrs.rc)
+	} else {
+		hrs.brd.Reset(hrs.rc)
+	}
+
+	return hrs.brd, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go
new file mode 100644
index 000000000000..30e45fab0f73
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/transport.go
@@ -0,0 +1,147 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+	"sync"
+)
+
+// RequestModifier represents an object which will do an in-place
+// modification of an HTTP request.
+type RequestModifier interface {
+	ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a new RequestModifier which will
+// add the given headers to a request.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+	return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+	for k, s := range http.Header(h) {
+		req.Header[k] = append(req.Header[k], s...)
+	}
+
+	return nil
+}
+
+// NewTransport creates a new transport which will apply modifiers to
+// the request on a RoundTrip call.
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+	return &transport{
+		Modifiers: modifiers,
+		Base:      base,
+	}
+}
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request
+type transport struct {
+	Modifiers []RequestModifier
+	Base      http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip applies the registered request modifiers to a clone of the
+// incoming request, delegates to the base RoundTripper, and records the
+// modified request so an in-flight call can later be canceled.
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req2 := cloneRequest(req)
+	for _, modifier := range t.Modifiers {
+		if err := modifier.ModifyRequest(req2); err != nil {
+			return nil, err
+		}
+	}
+
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
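+// Because the wrapped RoundTripper only ever sees the modified clone,
+// the modReq lookup below translates the caller's original request into
+// the one actually in flight before canceling it.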
+func (t *transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go index 1e31477f7a58..4c700e062324 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go @@ -13,12 +13,15 @@ import ( "os" "path" "reflect" + "regexp" + "strconv" "strings" "testing" "github.com/docker/distribution/configuration" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" @@ -30,7 +33,7 @@ import ( // TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified // 200 OK response. 
func TestCheckAPI(t *testing.T) { - env := newTestEnv(t) + env := newTestEnv(t, false) baseURL, err := env.builder.BuildBaseURL() if err != nil { @@ -59,6 +62,152 @@ func TestCheckAPI(t *testing.T) { } } +// TestCatalogAPI tests the /v2/_catalog endpoint +func TestCatalogAPI(t *testing.T) { + chunkLen := 2 + env := newTestEnv(t, false) + + values := url.Values{ + "last": []string{""}, + "n": []string{strconv.Itoa(chunkLen)}} + + catalogURL, err := env.builder.BuildCatalogURL(values) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + // ----------------------------------- + // try to get an empty catalog + resp, err := http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + var ctlg struct { + Repositories []string `json:"repositories"` + } + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // we haven't pushed anything to the registry yet + if len(ctlg.Repositories) != 0 { + t.Fatalf("repositories has unexpected values") + } + + if resp.Header.Get("Link") != "" { + t.Fatalf("repositories has more data when none expected") + } + + // ----------------------------------- + // push something to the registry and try again + images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} + + for _, image := range images { + createRepository(env, t, image, "sometag") + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != chunkLen { + t.Fatalf("repositories has unexpected values") + } + + for _, image := range images[:chunkLen] { + if !contains(ctlg.Repositories, image) { + t.Fatalf("didn't find our repository '%s' in the catalog", image) + } + } + + link := resp.Header.Get("Link") + if link == "" { + t.Fatalf("repositories has less data than expected") + } + + newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) + + // ----------------------------------- + // get the last chunk of data + + catalogURL, err = env.builder.BuildCatalogURL(newValues) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != 1 { + t.Fatalf("repositories has unexpected values") + } + + lastImage := images[len(images)-1] + if !contains(ctlg.Repositories, lastImage) { + t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) + } + + link = resp.Header.Get("Link") + if link != "" { + t.Fatalf("catalog has unexpected data") + } +} + +func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { + re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") + matches := re.FindStringSubmatch(urlStr) + + if len(matches) != 2 { + t.Fatalf("Catalog link address response 
was incorrect") + } + linkURL, _ := url.Parse(matches[1]) + urlValues := linkURL.Query() + + if urlValues.Get("n") != strconv.Itoa(numEntries) { + t.Fatalf("Catalog link entry size is incorrect") + } + + if urlValues.Get("last") != last { + t.Fatal("Catalog link last entry is incorrect") + } + + return urlValues +} + +func contains(elems []string, e string) bool { + for _, elem := range elems { + if elem == e { + return true + } + } + return false +} + func TestURLPrefix(t *testing.T) { config := configuration.Configuration{ Storage: configuration.Storage{ @@ -90,18 +239,16 @@ func TestURLPrefix(t *testing.T) { "Content-Type": []string{"application/json; charset=utf-8"}, "Content-Length": []string{"2"}, }) - } -// TestLayerAPI conducts a full test of the of the layer api. -func TestLayerAPI(t *testing.T) { - // TODO(stevvooe): This test code is complete junk but it should cover the - // complete flow. This must be broken down and checked against the - // specification *before* we submit the final to docker core. - env := newTestEnv(t) +type blobArgs struct { + imageName string + layerFile io.ReadSeeker + layerDigest digest.Digest + tarSumStr string +} - imageName := "foo/bar" - // "build" our layer file +func makeBlobArgs(t *testing.T) blobArgs { layerFile, tarSumStr, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) @@ -109,6 +256,66 @@ func TestLayerAPI(t *testing.T) { layerDigest := digest.Digest(tarSumStr) + args := blobArgs{ + imageName: "foo/bar", + layerFile: layerFile, + layerDigest: layerDigest, + tarSumStr: tarSumStr, + } + return args +} + +// TestBlobAPI conducts a full test of the of the blob api. +func TestBlobAPI(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeBlobArgs(t) + testBlobAPI(t, env, args) + + deleteEnabled = true + env = newTestEnv(t, deleteEnabled) + args = makeBlobArgs(t) + testBlobAPI(t, env, args) + +} + +func TestBlobDelete(t *testing.T) { + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + + args := makeBlobArgs(t) + env = testBlobAPI(t, env, args) + testBlobDelete(t, env, args) +} + +func TestBlobDeleteDisabled(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeBlobArgs(t) + + imageName := args.imageName + layerDigest := args.layerDigest + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting when disabled: %v", err) + } + + checkResponse(t, "status of disabled delete", resp, http.StatusMethodNotAllowed) +} + +func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { + // TODO(stevvooe): This test code is complete junk but it should cover the + // complete flow. This must be broken down and checked against the + // specification *before* we submit the final to docker core. 
+ imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + // ----------------------------------- // Test fetch for non-existent content layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) @@ -213,9 +420,17 @@ func TestLayerAPI(t *testing.T) { // Now, push just a chunk layerFile.Seek(0, 0) + canonicalDigester := digest.Canonical.New() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + layerFile.Seek(0, 0) uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) finishUpload(t, env.builder, imageName, uploadURLBase, dgst) + // ------------------------ // Use a head request to see if the layer exists. resp, err = http.Head(layerURL) @@ -226,7 +441,7 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "checking head on existing layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{layerDigest.String()}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, }) // ---------------- @@ -239,7 +454,7 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "fetching layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{layerDigest.String()}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, }) // Verify the body @@ -263,15 +478,229 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) + // Cache headers + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, canonicalDigest)}, + "Cache-Control": []string{"max-age=31536000"}, + }) + + // Matching etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + + // Non-matching etag, gives 200 + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", "") + resp, err = http.DefaultClient.Do(req) + checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) + // Missing tests: // - Upload the same tarsum file under and different repository and // ensure the content remains uncorrupted. 
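The cache-header and If-None-Match checks just above exercise standard HTTP revalidation: the registry hands back the blob's digest as an entity tag, and a client that re-sends it gets a bodyless 304 instead of the layer. A small client-side sketch of that round trip (the function name and caching policy are illustrative, not part of the registry API):

```go
package conditional

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// fetchIfChanged revalidates a cached copy of url: it sends the saved
// entity tag in If-None-Match and treats 304 as "cache still good",
// returning (nil, nil) in that case.
func fetchIfChanged(url, cachedETag string) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("If-None-Match", cachedETag)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusNotModified:
		return nil, nil // server copy still matches; no body was sent
	case http.StatusOK:
		return ioutil.ReadAll(resp.Body) // fresh content; resp carries a new ETag
	default:
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
}
```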
+ return env } -func TestManifestAPI(t *testing.T) { - env := newTestEnv(t) +func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { + // Upload a layer + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf(err.Error()) + } + // --------------- + // Delete a layer + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Try and get it back + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound) + + // Delete already deleted layer + resp, err = httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusNotFound) + + // ---------------- + // Attempt to delete a layer with an invalid digest + badURL := strings.Replace(layerURL, "tarsum", "trsum", 1) + resp, err = httpDelete(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest) + + // ---------------- + // Reupload previously deleted blob + layerFile.Seek(0, os.SEEK_SET) + + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + layerFile.Seek(0, os.SEEK_SET) + canonicalDigester := digest.Canonical.New() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + // ------------------------ + // Use a head request to see if it exists + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) +} + +func TestDeleteDisabled(t *testing.T) { + env := newTestEnv(t, false) imageName := "foo/bar" + // "build" our layer file + layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + layerDigest := digest.Digest(tarSumStr) + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf("Error building blob URL") + } + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) +} + +func httpDelete(url string) (*http.Response, error) { + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + // defer 
resp.Body.Close() + return resp, err +} + +type manifestArgs struct { + imageName string + signedManifest *manifest.SignedManifest + dgst digest.Digest +} + +func makeManifestArgs(t *testing.T) manifestArgs { + args := manifestArgs{ + imageName: "foo/bar", + } + + return args +} + +func TestManifestAPI(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeManifestArgs(t) + testManifestAPI(t, env, args) + + deleteEnabled = true + env = newTestEnv(t, deleteEnabled) + args = makeManifestArgs(t) + testManifestAPI(t, env, args) +} + +func TestManifestDelete(t *testing.T) { + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + args := makeManifestArgs(t) + env, args = testManifestAPI(t, env, args) + testManifestDelete(t, env, args) +} + +func TestManifestDeleteDisabled(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeManifestArgs(t) + testManifestDeleteDisabled(t, env, args) +} + +func testManifestDeleteDisabled(t *testing.T, env *testEnv, args manifestArgs) *testEnv { + imageName := args.imageName + manifestURL, err := env.builder.BuildManifestURL(imageName, digest.DigestSha256EmptyTar) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + resp, err := httpDelete(manifestURL) + if err != nil { + t.Fatalf("unexpected error deleting manifest %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) + return nil +} + +func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, manifestArgs) { + imageName := args.imageName tag := "thetag" manifestURL, err := env.builder.BuildManifestURL(imageName, tag) @@ -329,7 +758,7 @@ func TestManifestAPI(t *testing.T) { _, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestUnverified, v2.ErrorCodeBlobUnknown, v2.ErrorCodeDigestInvalid) - expectedCounts := map[v2.ErrorCode]int{ + expectedCounts := map[errcode.ErrorCode]int{ v2.ErrorCodeManifestUnverified: 1, v2.ErrorCodeBlobUnknown: 2, v2.ErrorCodeDigestInvalid: 2, @@ -374,11 +803,14 @@ func TestManifestAPI(t *testing.T) { dgst, err := digest.FromBytes(payload) checkErr(t, err, "digesting manifest") + args.signedManifest = signedManifest + args.dgst = dgst + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) checkErr(t, err, "building manifest url") resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, @@ -387,7 +819,7 @@ func TestManifestAPI(t *testing.T) { // -------------------- // Push by digest -- should get same result resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, @@ -404,6 +836,7 @@ func TestManifestAPI(t *testing.T) { checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, 
}) var fetchedManifest manifest.SignedManifest @@ -425,6 +858,7 @@ func TestManifestAPI(t *testing.T) { checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedManifestByDigest manifest.SignedManifest @@ -437,6 +871,33 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("manifests do not match") } + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + // Ensure that the tag is listed. resp, err = http.Get(tagsURL) if err != nil { @@ -465,6 +926,70 @@ func TestManifestAPI(t *testing.T) { if tagsResponse.Tags[0] != tag { t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) } + + return env, args +} + +func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + dgst := args.dgst + signedManifest := args.signedManifest + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + // --------------- + // Delete by digest + resp, err := httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Attempt to fetch deleted manifest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching deleted manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + + // --------------- + // Delete already deleted manifest by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "re-deleting manifest by digest") + + checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) + + // -------------------- + // Re-upload manifest by digest + resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to fetch re-uploaded deleted digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching re-uploaded manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to delete an unknown manifest + unknownDigest := "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + unknownManifestDigestURL, err := 
env.builder.BuildManifestURL(imageName, unknownDigest) + checkErr(t, err, "building unknown manifest url") + + resp, err = httpDelete(unknownManifestDigestURL) + checkErr(t, err, "deleting unknown manifest by digest") + checkResponse(t, "deleting unknown manifest", resp, http.StatusNotFound) + } type testEnv struct { @@ -476,10 +1001,26 @@ type testEnv struct { builder *v2.URLBuilder } -func newTestEnv(t *testing.T) *testEnv { +func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + }, + Proxy: configuration.Proxy{ + RemoteURL: "http://example.com", + }, + } + + return newTestEnvWithConfig(t, &config) + +} + +func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { config := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, }, } @@ -593,9 +1134,9 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges // pushLayer pushes the layer content returning the url on success. func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { - digester := digest.NewCanonicalDigester() + digester := digest.Canonical.New() - resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, &digester)) + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) } @@ -658,9 +1199,9 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp uploadURL := u.String() - digester := digest.NewCanonicalDigester() + digester := digest.Canonical.New() - req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester)) + req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) if err != nil { t.Fatalf("unexpected error creating new request: %v", err) } @@ -704,18 +1245,18 @@ func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus // checkBodyHasErrorCodes ensures the body is an error body and has the // expected error codes, returning the error structure, the json slice and a // count of the errors by code.
-func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...v2.ErrorCode) (v2.Errors, []byte, map[v2.ErrorCode]int) { +func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) { p, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("unexpected error reading body %s: %v", msg, err) } - var errs v2.Errors + var errs errcode.Errors if err := json.Unmarshal(p, &errs); err != nil { t.Fatalf("unexpected error decoding error response: %v", err) } - if len(errs.Errors) == 0 { + if len(errs) == 0 { t.Fatalf("expected errors in response") } @@ -726,8 +1267,8 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error // resp.Header.Get("Content-Type")) // } - expected := map[v2.ErrorCode]struct{}{} - counts := map[v2.ErrorCode]int{} + expected := map[errcode.ErrorCode]struct{}{} + counts := map[errcode.ErrorCode]int{} // Initialize map with zeros for expected for _, code := range errorCodes { @@ -735,11 +1276,15 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error counts[code] = 0 } - for _, err := range errs.Errors { - if _, ok := expected[err.Code]; !ok { - t.Fatalf("unexpected error code %v encountered during %s: %s ", err.Code, msg, string(p)) + for _, e := range errs { + err, ok := e.(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", e) + } + if _, ok := expected[err.ErrorCode()]; !ok { + t.Fatalf("unexpected error code %v encountered during %s: %s ", err.ErrorCode(), msg, string(p)) } - counts[err.Code]++ + counts[err.ErrorCode()]++ } // Ensure that counts of expected errors were all non-zero @@ -772,14 +1317,14 @@ func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { for _, v := range vs { if v == "*" { // Just ensure there is some value. 
- if len(resp.Header[k]) > 0 { + if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { continue } } - for _, hv := range resp.Header[k] { + for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { if hv != v { - t.Fatalf("%v header value not matched in response: %q != %q", k, hv, v) + t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) } } } @@ -791,3 +1336,110 @@ func checkErr(t *testing.T, err error, msg string) { t.Fatalf("unexpected error %s: %v", msg, err) } } + +func createRepository(env *testEnv, t *testing.T, imageName string, tag string) { + unsignedManifest := &manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName, + Tag: tag, + FSLayers: []manifest.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + signedManifest, err := manifest.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + payload, err := signedManifest.Payload() + checkErr(t, err, "getting manifest payload") + + dgst, err := digest.FromBytes(payload) + checkErr(t, err, "digesting manifest") + + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building manifest url") + + resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) +} + +// Test mutation operations on a registry configured as a cache. Ensure that they return +// appropriate errors. 
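+// A registry configured with a remote proxy URL (configuration.Proxy.RemoteURL)
+// acts as a read-only pull-through mirror, so each local write below is
+// expected to be rejected with ErrorCodeUnsupported rather than applied.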
+func TestRegistryAsCacheMutationAPIs(t *testing.T) { + deleteEnabled := true + env := newTestEnvMirror(t, deleteEnabled) + + imageName := "foo/bar" + tag := "latest" + manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + // Manifest upload + unsignedManifest := &manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName, + Tag: tag, + FSLayers: []manifest.FSLayer{}, + } + resp := putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) + checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Manifest Delete + resp, err = httpDelete(manifestURL) + checkResponse(t, "deleting signed manifest from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob upload initialization + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err = http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob Delete + blobURL, err := env.builder.BuildBlobURL(imageName, digest.DigestSha256EmptyTar) + resp, err = httpDelete(blobURL) + checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go index 004d81fc61eb..787f08e0dd87 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go @@ -1,6 +1,7 @@ package handlers import ( + cryptorand "crypto/rand" "expvar" "fmt" "math/rand" @@ -9,16 +10,21 @@ import ( "os" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/health" "github.com/docker/distribution/notifications" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" registrymiddleware "github.com/docker/distribution/registry/middleware/registry" repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" + "github.com/docker/distribution/registry/proxy" "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + rediscache "github.com/docker/distribution/registry/storage/cache/redis" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" @@ -27,6 +33,10 @@ import ( "golang.org/x/net/context" ) +// randomSecretSize is the number of random bytes to generate if no secret +// was specified. +const randomSecretSize = 32 + // App is a global registry application object. 
Shared resources can be placed // on this object that will be accessible from all requests. Any writable // fields should be protected. @@ -47,6 +57,9 @@ type App struct { } redis *redis.Pool + + // true if this registry is configured as a pull through cache + isCache bool } // NewApp takes a configuration and returns a configured app, ready to serve @@ -57,6 +70,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App Config: configuration, Context: ctx, router: v2.RouterWithPrefix(configuration.HTTP.Prefix), + isCache: configuration.Proxy.RemoteURL != "", } app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) @@ -66,14 +80,14 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App return http.HandlerFunc(apiBase) }) app.register(v2.RouteNameManifest, imageManifestDispatcher) + app.register(v2.RouteNameCatalog, catalogDispatcher) app.register(v2.RouteNameTags, tagsDispatcher) - app.register(v2.RouteNameBlob, layerDispatcher) - app.register(v2.RouteNameBlobUpload, layerUploadDispatcher) - app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher) + app.register(v2.RouteNameBlob, blobDispatcher) + app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) var err error app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) - if err != nil { // TODO(stevvooe): Move the creation of a service into a protected // method, where this is created lazily. Its status can be queried via @@ -92,41 +106,76 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App } - startUploadPurger(app.driver, ctxu.GetLogger(app), purgeConfig) + startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) if err != nil { panic(err) } + app.configureSecret(&configuration) app.configureEvents(&configuration) app.configureRedis(&configuration) + app.configureLogHook(&configuration) + + // configure deletion + var deleteEnabled bool + if d, ok := configuration.Storage["delete"]; ok { + e, ok := d["enabled"] + if ok { + if deleteEnabled, ok = e.(bool); !ok { + deleteEnabled = false + } + } + } + + // configure redirects + var redirectDisabled bool + if redirectConfig, ok := configuration.Storage["redirect"]; ok { + v := redirectConfig["disable"] + switch v := v.(type) { + case bool: + redirectDisabled = v + default: + panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) + } + + if redirectDisabled { + ctxu.GetLogger(app).Infof("backend redirection disabled") + } + } // configure storage caches if cc, ok := configuration.Storage["cache"]; ok { - switch cc["layerinfo"] { + v, ok := cc["blobdescriptor"] + if !ok { + // Backwards compatible: "layerinfo" == "blobdescriptor" + v = cc["layerinfo"] + } + + switch v { case "redis": if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis)) - ctxu.GetLogger(app).Infof("using redis layerinfo cache") + app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled, !redirectDisabled, app.isCache, false) + ctxu.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": - app.registry = storage.NewRegistryWithDriver(app.driver, 
cache.NewInMemoryLayerInfoCache()) - ctxu.GetLogger(app).Infof("using inmemory layerinfo cache") + app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled, !redirectDisabled, app.isCache, false) + ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: - if cc["layerinfo"] != "" { - ctxu.GetLogger(app).Warnf("unkown cache type %q, caching disabled", configuration.Storage["cache"]) + if v != "" { + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) } } } if app.registry == nil { // configure the registry if no cache section is available. - app.registry = storage.NewRegistryWithDriver(app.driver, nil) + app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled, !redirectDisabled, app.isCache, false) } - app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) + app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) if err != nil { panic(err) } @@ -139,13 +188,34 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) } app.accessController = accessController + ctxu.GetLogger(app).Debugf("configured %q access controller", authType) + } + + // configure as a pull through cache + if configuration.Proxy.RemoteURL != "" { + app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) + if err != nil { + panic(err.Error()) + } + app.isCache = true + ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) } return app } -func (app *App) Registry() distribution.Namespace { - return app.registry +// RegisterHealthChecks is an awful hack to defer health check registration +// control to callers. This should only ever be called once per registry +// process, typically in a main function. The correct way would be register +// health checks outside of app, since multiple apps may exist in the same +// process. Because the configuration and app are tightly coupled, +// implementing this properly will require a refactor. This method may panic +// if called twice in the same process. +func (app *App) RegisterHealthChecks() { + health.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), 10*time.Second, 3, func() error { + _, err := app.driver.List(app, "/") // "/" should always exist + return err // any error will be treated as failure + }) } type customAccessRecordsFunc func(*http.Request) []auth.Access @@ -169,6 +239,11 @@ func (app *App) register(routeName string, dispatch dispatchFunc) { app.RegisterRoute(app.router.GetRoute(routeName), dispatch, app.nameRequired, NoCustomAccessRecords) } +// Namespace returns a namespace instance representing application's registry storage. +func (app *App) Namespace() distribution.Namespace { + return app.registry +} + func (app *App) RegisterRoute(route *mux.Route, dispatch dispatchFunc, nameRequired nameRequiredFunc, accessRecords customAccessRecordsFunc) { // TODO(stevvooe): This odd dispatcher/route registration is by-product of // some limitations in the gorilla/mux router. We are using it to keep @@ -309,14 +384,62 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { })) } +// configureLogHook prepares logging hook parameters. 
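Before configureLogHook continues below, note the pattern in the delete and redirect parsing above: optional flags are dug out of loosely typed YAML maps with type assertions. A minimal sketch of that shape, assuming a plain map[string]interface{} parameter bag and a hypothetical boolParam helper (unlike this sketch, the patch panics on a mistyped redirect flag rather than defaulting):

```go
package main

import "fmt"

// boolParam pulls an optional boolean out of a loosely typed parameter
// map, defaulting to false when the key is missing or mistyped. This
// mirrors the deleteEnabled parsing above.
func boolParam(params map[string]interface{}, key string) bool {
	if v, ok := params[key]; ok {
		if b, ok := v.(bool); ok {
			return b
		}
	}
	return false
}

func main() {
	storage := map[string]map[string]interface{}{
		"delete":   {"enabled": true},
		"redirect": {"disable": "yes"}, // mistyped: treated as false here
	}
	fmt.Println(boolParam(storage["delete"], "enabled"))   // true
	fmt.Println(boolParam(storage["redirect"], "disable")) // false
}
```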
+func (app *App) configureLogHook(configuration *configuration.Configuration) { + entry, ok := ctxu.GetLogger(app).(*log.Entry) + if !ok { + // somehow, we are not using logrus + return + } + + logger := entry.Logger + + for _, configHook := range configuration.Log.Hooks { + if !configHook.Disabled { + switch configHook.Type { + case "mail": + hook := &logHook{} + hook.LevelsParam = configHook.Levels + hook.Mail = &mailer{ + Addr: configHook.MailOptions.SMTP.Addr, + Username: configHook.MailOptions.SMTP.Username, + Password: configHook.MailOptions.SMTP.Password, + Insecure: configHook.MailOptions.SMTP.Insecure, + From: configHook.MailOptions.From, + To: configHook.MailOptions.To, + } + logger.Hooks.Add(hook) + default: + } + } + } +} + +// configureSecret creates a random secret if a secret wasn't included in the +// configuration. +func (app *App) configureSecret(configuration *configuration.Configuration) { + if configuration.HTTP.Secret == "" { + var secretBytes [randomSecretSize]byte + if _, err := cryptorand.Read(secretBytes[:]); err != nil { + panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) + } + configuration.HTTP.Secret = string(secretBytes[:]) + ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") + } +} + func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() // ensure that request body is always closed. // Instantiate an http context here so we can track the error codes // returned by the request router. ctx := defaultContextManager.context(app, w, r) + defer func() { - ctxu.GetResponseLogger(ctx).Infof("response completed") + status, ok := ctx.Value("http.response.status").(int) + if ok && status >= 200 && status <= 399 { + ctxu.GetResponseLogger(ctx).Infof("response completed") + } }() defer defaultContextManager.release(ctx) @@ -363,13 +486,14 @@ func (app *App) dispatcher(dispatch dispatchFunc, nameRequired nameRequiredFunc, switch err := err.(type) { case distribution.ErrRepositoryUnknown: - context.Errors.Push(v2.ErrorCodeNameUnknown, err) + context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) case distribution.ErrRepositoryNameInvalid: - context.Errors.Push(v2.ErrorCodeNameInvalid, err) + context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) } - w.WriteHeader(http.StatusBadRequest) - serveJSON(w, context.Errors) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } return } @@ -378,44 +502,57 @@ func (app *App) dispatcher(dispatch dispatchFunc, nameRequired nameRequiredFunc, repository, app.eventBridge(context, r)) - context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) + context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) if err != nil { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) - serveJSON(w, context.Errors) + context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + 
ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } return } } dispatch(context, r).ServeHTTP(w, r) - // Automated error response handling here. Handlers may return their // own errors if they need different behavior (such as range errors // for layer upload). if context.Errors.Len() > 0 { - if context.Value("http.response.status") == 0 { - // TODO(stevvooe): Getting this value from the context is a - // bit of a hack. We can further address with some of our - // future refactoring. - w.WriteHeader(http.StatusBadRequest) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } + app.logError(context, context.Errors) - serveJSON(w, context.Errors) } }) } -func (app *App) logError(context context.Context, errors v2.Errors) { - for _, e := range errors.Errors { - c := ctxu.WithValue(context, "err.code", e.Code) - c = ctxu.WithValue(c, "err.message", e.Message) - c = ctxu.WithValue(c, "err.detail", e.Detail) +func (app *App) logError(context context.Context, errors errcode.Errors) { + for _, e1 := range errors { + var c ctxu.Context + + switch e1.(type) { + case errcode.Error: + e, _ := e1.(errcode.Error) + c = ctxu.WithValue(context, "err.code", e.Code) + c = ctxu.WithValue(c, "err.message", e.Code.Message()) + c = ctxu.WithValue(c, "err.detail", e.Detail) + case errcode.ErrorCode: + e, _ := e1.(errcode.ErrorCode) + c = ctxu.WithValue(context, "err.code", e) + c = ctxu.WithValue(c, "err.message", e.Message()) + default: + // just normal go 'error' + c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown) + c = ctxu.WithValue(c, "err.message", e1.Error()) + } + c = ctxu.WithLogger(c, ctxu.GetLogger(c, "err.code", "err.message", "err.detail")) - ctxu.GetLogger(c).Errorf("An error occured") + ctxu.GetResponseLogger(c).Errorf("response completed with error") } } @@ -466,26 +603,24 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // base route is accessed. This section prevents us from making // that mistake elsewhere in the code, allowing any operation to // proceed. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusForbidden) - - var errs v2.Errors - errs.Push(v2.ErrorCodeUnauthorized) - serveJSON(w, errs) + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } return fmt.Errorf("forbidden: no repository name") } + accessRecords = appendCatalogAccessRecord(accessRecords, r) } ctx, err := app.accessController.Authorized(context.Context, accessRecords...) if err != nil { switch err := err.(type) { case auth.Challenge: - w.Header().Set("Content-Type", "application/json; charset=utf-8") - err.ServeHTTP(w, r) + // Add the appropriate WWW-Auth header + err.SetHeaders(w) - var errs v2.Errors - errs.Push(v2.ErrorCodeUnauthorized, accessRecords) - serveJSON(w, errs) + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } default: // This condition is a potential security problem either in // the configuration or whatever is backing the access @@ -521,7 +656,8 @@ type nameRequiredFunc func(*http.Request) bool // nameRequired returns true if the route requires a name. 
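The logError rewrite above replaces the old field-pushing calls with a type switch over the heterogeneous error slice; nameRequired continues below. A stripped-down sketch of the same shape, using a local codedError stand-in rather than the errcode types:

```go
package main

import (
	"errors"
	"fmt"
)

// codedError stands in for errcode.Error: an error carrying a
// machine-readable code alongside its message.
type codedError struct {
	Code    string
	Message string
}

func (e codedError) Error() string { return e.Code + ": " + e.Message }

// logOne picks log fields based on the concrete error type, falling back
// to an unknown code for plain errors, as logError does above.
func logOne(err error) {
	switch e := err.(type) {
	case codedError:
		fmt.Printf("err.code=%s err.message=%s\n", e.Code, e.Message)
	default:
		fmt.Printf("err.code=UNKNOWN err.message=%s\n", e.Error())
	}
}

func main() {
	logOne(codedError{Code: "MANIFEST_UNKNOWN", Message: "manifest unknown"})
	logOne(errors.New("backend unreachable"))
}
```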
func (app *App) nameRequired(r *http.Request) bool { route := mux.CurrentRoute(r) - return route == nil || route.GetName() != v2.RouteNameBase + routeName := route.GetName() + return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) } // apiBase implements a simple yes-man for doing overall checks against the @@ -571,10 +707,30 @@ func appendAccessRecords(records []auth.Access, method string, repo string) []au return records } +// Add the access record for the catalog if it's our current route +func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access { + route := mux.CurrentRoute(r) + routeName := route.GetName() + + if routeName == v2.RouteNameCatalog { + resource := auth.Resource{ + Type: "registry", + Name: "catalog", + } + + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return accessRecords +} + // applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { +func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { for _, mw := range middlewares { - rmw, err := registrymiddleware.Get(mw.Name, mw.Options, registry) + rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry) if err != nil { return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) } @@ -585,9 +741,9 @@ func applyRegistryMiddleware(registry distribution.Namespace, middlewares []conf } // applyRepoMiddleware wraps a repository with the configured middlewares -func applyRepoMiddleware(repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { +func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { for _, mw := range middlewares { - rmw, err := repositorymiddleware.Get(mw.Name, mw.Options, repository) + rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository) if err != nil { return nil, err } @@ -626,7 +782,7 @@ func badPurgeUploadConfig(reason string) { // startUploadPurger schedules a goroutine which will periodically // check upload directories for old files and delete them -func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { +func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { if config["enabled"] == false { return } @@ -681,7 +837,7 @@ func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logge time.Sleep(jitter) for { - storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) + storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) log.Infof("Starting upload purge in %s", intervalDuration) time.Sleep(intervalDuration) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go index d0b9174d47ac..bba7539c5f6b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go +++ 
b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go @@ -9,11 +9,12 @@ import ( "testing" "github.com/docker/distribution/configuration" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "golang.org/x/net/context" ) @@ -24,12 +25,13 @@ import ( // tested individually. func TestAppDispatcher(t *testing.T) { driver := inmemory.New() + ctx := context.Background() app := &App{ Config: configuration.Configuration{}, - Context: context.Background(), + Context: ctx, router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()), + registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, false), } server := httptest.NewServer(app) router := v2.Router() @@ -193,14 +195,18 @@ func TestNewApp(t *testing.T) { t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) } - var errs v2.Errors + var errs errcode.Errors dec := json.NewDecoder(req.Body) if err := dec.Decode(&errs); err != nil { t.Fatalf("error decoding error response: %v", err) } - if errs.Errors[0].Code != v2.ErrorCodeUnauthorized { - t.Fatalf("unexpected error code: %v != %v", errs.Errors[0].Code, v2.ErrorCodeUnauthorized) + err2, ok := errs[0].(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", errs[0]) + } + if err2.ErrorCode() != errcode.ErrorCodeUnauthorized { + t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized) } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go new file mode 100644 index 000000000000..4a923aa51e77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blob.go @@ -0,0 +1,94 @@ +package handlers + +import ( + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// blobDispatcher uses the request context to build a blobHandler. +func blobDispatcher(ctx *Context, r *http.Request) http.Handler { + dgst, err := getDigest(ctx) + if err != nil { + + if err == errDigestNotAvailable { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + blobHandler := &blobHandler{ + Context: ctx, + Digest: dgst, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(blobHandler.GetBlob), + "HEAD": http.HandlerFunc(blobHandler.GetBlob), + "DELETE": http.HandlerFunc(blobHandler.DeleteBlob), + } +} + +// blobHandler serves http blob requests. 
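The app_test.go change above now decodes the response body into errcode.Errors and asserts an ErrorCoder interface instead of reading errs.Errors[0].Code; the blobHandler type described by the closing comment follows below. For orientation, a self-contained sketch of decoding the v2 error envelope with only the standard library (the struct is a local approximation of the wire format, not the errcode types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// errorEnvelope approximates the v2 registry error body:
// {"errors":[{"code":"...","message":"...","detail":...}]}
type errorEnvelope struct {
	Errors []struct {
		Code    string          `json:"code"`
		Message string          `json:"message"`
		Detail  json.RawMessage `json:"detail,omitempty"`
	} `json:"errors"`
}

func main() {
	body := []byte(`{"errors":[{"code":"UNAUTHORIZED","message":"authentication required"}]}`)

	var env errorEnvelope
	if err := json.Unmarshal(body, &env); err != nil {
		panic(err)
	}
	fmt.Println(env.Errors[0].Code) // UNAUTHORIZED
}
```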
+type blobHandler struct { + *Context + + Digest digest.Digest +} + +// GetBlob fetches the binary data from backend storage and returns it in the +// response. +func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("GetBlob") + blobs := bh.Repository.Blobs(bh) + desc, err := blobs.Stat(bh, bh.Digest) + if err != nil { + if err == distribution.ErrBlobUnknown { + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) + } else { + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { + context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// DeleteBlob deletes a layer blob +func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("DeleteBlob") + + blobs := bh.Repository.Blobs(bh) + err := blobs.Delete(bh, bh.Digest) + if err != nil { + switch err { + case distribution.ErrUnsupported: + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) + return + case distribution.ErrBlobUnknown: + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) + return + default: + bh.Errors = append(bh.Errors, err) + context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) + return + } + } + + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusAccepted) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go new file mode 100644 index 000000000000..bbb70b59d712 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go @@ -0,0 +1,334 @@ +package handlers + +import ( + "fmt" + "net/http" + "net/url" + "os" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// blobUploadDispatcher constructs and returns the blob upload handler for the +// given request context.
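GetBlob above stats the blob before serving it, so an unknown digest maps to BLOB_UNKNOWN rather than a generic failure; the upload dispatcher named by the comment above follows. A rough stdlib analogue of the stat-then-serve pattern, using the filesystem as a stand-in blob store (the route, port, and digest query parameter are all illustrative, not the registry's routing):

```go
package main

import (
	"log"
	"net/http"
	"os"
	"path/filepath"
)

// serveBlob checks for the blob before serving it, so a missing digest
// can be reported as a specific 404 instead of a generic 500.
func serveBlob(root string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		dgst := r.URL.Query().Get("digest") // the real route parses the path
		path := filepath.Join(root, filepath.Base(dgst))

		if _, err := os.Stat(path); os.IsNotExist(err) {
			http.Error(w, "blob unknown", http.StatusNotFound)
			return
		} else if err != nil {
			http.Error(w, "internal error", http.StatusInternalServerError)
			return
		}

		// ServeFile handles HEAD and Range requests, much as ServeBlob does.
		http.ServeFile(w, r, path)
	}
}

func main() {
	http.Handle("/blob", serveBlob(os.TempDir()))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```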
+func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { + buh := &blobUploadHandler{ + Context: ctx, + UUID: getUploadUUID(ctx), + } + + handler := http.Handler(handlers.MethodHandler{ + "POST": http.HandlerFunc(buh.StartBlobUpload), + "GET": http.HandlerFunc(buh.GetUploadStatus), + "HEAD": http.HandlerFunc(buh.GetUploadStatus), + "PATCH": http.HandlerFunc(buh.PatchBlobData), + "PUT": http.HandlerFunc(buh.PutBlobUploadComplete), + "DELETE": http.HandlerFunc(buh.CancelBlobUpload), + }) + + if buh.UUID != "" { + state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) + if err != nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + buh.State = state + + if state.Name != ctx.Repository.Name() { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + + if state.UUID != buh.UUID { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + + blobs := ctx.Repository.Blobs(buh) + upload, err := blobs.Resume(buh, buh.UUID) + if err != nil { + ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) + if err == distribution.ErrBlobUploadUnknown { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + }) + } + buh.Upload = upload + + if state.Offset > 0 { + // Seek the blob upload to the correct spot if it's non-zero. + // These error conditions should be rare and demonstrate real + // problems. We basically cancel the upload and tell the client to + // start over. + if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil { + defer upload.Close() + ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + upload.Cancel(buh) + }) + } else if nn != buh.State.Offset { + defer upload.Close() + ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, buh.State.Offset) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + upload.Cancel(buh) + }) + } + } + + handler = closeResources(handler, buh.Upload) + } + + return handler +} + +// blobUploadHandler handles the http blob upload process. +type blobUploadHandler struct { + *Context + + // UUID identifies the upload instance for the current request. Using UUID + // to key blob writers since this implementation uses UUIDs. + UUID string + + Upload distribution.BlobWriter + + State blobUploadState +} + +// StartBlobUpload begins the blob upload process and allocates a server-side +// blob writer session.
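The dispatcher above trusts the _state form value only because it is HMAC-signed with the HTTP secret, which is why a mismatched name or UUID is rejected as an invalid upload; StartBlobUpload, introduced by the comment above, follows. A compact sketch of that sign-then-verify round trip with a simplified state struct (pack, unpack, and uploadState are local stand-ins for the hmac.go helpers that appear later in this patch):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
)

// uploadState is a simplified stand-in for blobUploadState.
type uploadState struct {
	Name   string `json:"name"`
	UUID   string `json:"uuid"`
	Offset int64  `json:"offset"`
}

// pack serializes the state and prepends an HMAC-SHA256 over the JSON,
// producing a URL-safe token the client can echo back.
func pack(secret []byte, st uploadState) (string, error) {
	p, err := json.Marshal(st)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, secret)
	mac.Write(p)
	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
}

// unpack rejects the token unless the embedded MAC matches, so tampered
// state (say, a bumped offset) never reaches the upload logic.
func unpack(secret []byte, token string) (uploadState, error) {
	var st uploadState
	raw, err := base64.URLEncoding.DecodeString(token)
	if err != nil || len(raw) < sha256.Size {
		return st, errors.New("invalid token")
	}
	mac := hmac.New(sha256.New, secret)
	mac.Write(raw[sha256.Size:])
	if !hmac.Equal(mac.Sum(nil), raw[:sha256.Size]) {
		return st, errors.New("invalid token signature")
	}
	return st, json.Unmarshal(raw[sha256.Size:], &st)
}

func main() {
	secret := []byte("supersecret")
	token, _ := pack(secret, uploadState{Name: "foo/bar", UUID: "abcd-1234", Offset: 42})
	st, err := unpack(secret, token)
	fmt.Println(st, err)
}
```

This is also why configureSecret earlier warns about load balancing: a token minted by one registry instance must verify on whichever instance receives the next chunk, so http.secret has to be shared.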
+func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + blobs := buh.Repository.Blobs(buh) + upload, err := blobs.Create(buh) + + if err != nil { + if err == distribution.ErrUnsupported { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + } else { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + buh.Upload = upload + defer buh.Upload.Close() + + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) + w.WriteHeader(http.StatusAccepted) +} + +// GetUploadStatus returns the status of a given upload, identified by id. +func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + // TODO(dmcgowan): Set last argument to false in blobUploadResponse when + // resumable upload is supported. This will enable returning a non-zero + // range for clients to begin uploading at an offset. + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.WriteHeader(http.StatusNoContent) +} + +// PatchBlobData writes data to an upload. +func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + ct := r.Header.Get("Content-Type") + if ct != "" && ct != "application/octet-stream" { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) + // TODO(dmcgowan): encode error + return + } + + // TODO(dmcgowan): support Content-Range header to seek and write range + + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + if err := buh.blobUploadResponse(w, r, false); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// PutBlobUploadComplete takes the final request of a blob upload. The +// request may include all the blob data or no blob data. Any data +// provided is received and verified. If successful, the blob is linked +// into the blob store and 201 Created is returned with the canonical +// url of the blob. +func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + + if dgstStr == "" { + // no digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) + return + } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + // no digest? return error, but allow retry. 
+ buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) + return + } + + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ + Digest: dgst, + + // TODO(stevvooe): This isn't wildly important yet, but we should + // really set the length and mediatype. For now, we can let the + // backend take care of this. + }) + + if err != nil { + switch err := err.(type) { + case distribution.ErrBlobInvalidDigest: + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + default: + switch err { + case distribution.ErrUnsupported: + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + default: + ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + } + + // Clean up the backend blob data if there was an error. + if err := buh.Upload.Cancel(buh); err != nil { + // If the cleanup fails, all we can do is observe and report. + ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) + } + + return + } + + // Build our canonical blob url + blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) + if err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Location", blobURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + w.WriteHeader(http.StatusCreated) +} + +// CancelBlobUpload cancels an in-progress upload of a blob. +func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + if err := buh.Upload.Cancel(buh); err != nil { + ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + w.WriteHeader(http.StatusNoContent) +} + +// blobUploadResponse provides a standard response for blob upload and +// chunk requests. This sets the correct headers but the response status is +// left to the caller. The fresh argument is used to ensure that new blob +// uploads always start at a 0 offset. This allows disabling resumable push by +// always returning a 0 offset on check status. +func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { + + var offset int64 + if !fresh { + var err error + offset, err = buh.Upload.Seek(0, os.SEEK_CUR) + if err != nil { + ctxu.GetLogger(buh).Errorf("unable to get current offset of blob upload: %v", err) + return err + } + } + + // TODO(stevvooe): Need a better way to manage the upload state automatically.
+ buh.State.Name = buh.Repository.Name() + buh.State.UUID = buh.Upload.ID() + buh.State.Offset = offset + buh.State.StartedAt = buh.Upload.StartedAt() + + token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) + return err + } + + uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( + buh.Repository.Name(), buh.Upload.ID(), + url.Values{ + "_state": []string{token}, + }) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload url: %s", err) + return err + } + + endRange := offset + if endRange > 0 { + endRange = endRange - 1 + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.Header().Set("Location", uploadURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go new file mode 100644 index 000000000000..6ec1fe55018b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/catalog.go @@ -0,0 +1,95 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/gorilla/handlers" +) + +const maximumReturnedEntries = 100 + +func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { + catalogHandler := &catalogHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(catalogHandler.GetCatalog), + } +} + +type catalogHandler struct { + *Context +} + +type catalogAPIResponse struct { + Repositories []string `json:"repositories"` +} + +func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { + var moreEntries = true + + q := r.URL.Query() + lastEntry := q.Get("last") + maxEntries, err := strconv.Atoi(q.Get("n")) + if err != nil || maxEntries < 0 { + maxEntries = maximumReturnedEntries + } + + repos := make([]string, maxEntries) + + filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) + if err == io.EOF { + moreEntries = false + } else if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + // Add a link header if there are more entries to retrieve + if moreEntries { + lastEntry = repos[len(repos)-1] + urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) + if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + w.Header().Set("Link", urlStr) + } + + enc := json.NewEncoder(w) + if err := enc.Encode(catalogAPIResponse{ + Repositories: repos[0:filled], + }); err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// Use the original URL from the request to create a new URL for +// the link header +func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { + calledURL, err := url.Parse(origURL) + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("n", strconv.Itoa(maxEntries)) + v.Add("last", lastEntry) + + calledURL.RawQuery = v.Encode() + + calledURL.Fragment = "" + urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) + + return urlStr, nil +} diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go index 0df5534682f8..85a171237595 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go @@ -8,6 +8,7 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "golang.org/x/net/context" ) @@ -27,7 +28,7 @@ type Context struct { // Errors is a collection of errors encountered during the request to be // returned to the client API. If errors are added to the collection, the // handler *must not* start the response via http.ResponseWriter. - Errors v2.Errors + Errors errcode.Errors urlBuilder *v2.URLBuilder diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go index f2879137be67..a4f3abcc0b14 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go @@ -1,24 +1,13 @@ package handlers import ( - "encoding/json" + "errors" "io" "net/http" -) - -// serveJSON marshals v and sets the content-type header to -// 'application/json'. If a different status code is required, call -// ResponseWriter.WriteHeader before this function. -func serveJSON(w http.ResponseWriter, v interface{}) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - enc := json.NewEncoder(w) - - if err := enc.Encode(v); err != nil { - return err - } - return nil -} + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" +) // closeResources closes all the provided resources after running the target // handler. @@ -30,3 +19,44 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { handler.ServeHTTP(w, r) }) } + +// copyFullPayload copies the payload of an HTTP request to destWriter. If it +// receives less content than expected, and the client disconnected during the +// upload, it avoids sending a 400 error to keep the logs cleaner. +func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { + // Get a channel that tells us if the client disconnects + var clientClosed <-chan bool + if notifier, ok := responseWriter.(http.CloseNotifier); ok { + clientClosed = notifier.CloseNotify() + } else { + ctxu.GetLogger(context).Warn("the ResponseWriter does not implement CloseNotifier") + } + + // Read in the data, if any. + copied, err := io.Copy(destWriter, r.Body) + if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { + // Didn't receive as much content as expected. Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. + select { + case <-clientClosed: + // Set the response code to "499 Client Closed Request" + // Even though the connection has already been closed, + // this causes the logger to pick up a 499 error + // instead of showing 0 for the HTTP status.
+ responseWriter.WriteHeader(499) + + ctxu.GetLogger(context).Error("client disconnected during " + action) + return errors.New("client disconnected") + default: + } + } + + if err != nil { + ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) + *errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err)) + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go index e17ececa2878..1725d240b2bd 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac.go @@ -9,9 +9,9 @@ import ( "time" ) -// layerUploadState captures the state serializable state of the layer upload. -type layerUploadState struct { - // name is the primary repository under which the layer will be linked. +// blobUploadState captures the serializable state of the blob upload. +type blobUploadState struct { + // name is the primary repository under which the blob will be linked. Name string // UUID identifies the upload. @@ -26,10 +26,10 @@ type layerUploadState struct { type hmacKey string -// unpackUploadState unpacks and validates the layer upload state from the +// unpackUploadState unpacks and validates the blob upload state from the // token, using the hmacKey secret. -func (secret hmacKey) unpackUploadState(token string) (layerUploadState, error) { - var state layerUploadState +func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) { + var state blobUploadState tokenBytes, err := base64.URLEncoding.DecodeString(token) if err != nil { @@ -59,7 +59,7 @@ func (secret hmacKey) unpackUploadState(token string) (layerUploadState, error) // packUploadState packs the upload state signed with and hmac digest using // the hmacKey secret, encoding to url safe base64. The resulting token can be // used to share data with minimized risk of external tampering.
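An aside on copyFullPayload, which finishes just above (the hmac.go signature documented by the trailing comment continues below): the helper distinguishes a client disconnect from a malformed request and records the non-standard 499 status so logs do not blame the client with a 400. A minimal stdlib sketch of that detection, assuming the pre-context-era http.CloseNotifier mechanism the patch itself uses; the route, port, and upload handler are illustrative:

```go
package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func upload(w http.ResponseWriter, r *http.Request) {
	// CloseNotify yields a channel that fires if the client goes away.
	var clientClosed <-chan bool
	if notifier, ok := w.(http.CloseNotifier); ok {
		clientClosed = notifier.CloseNotify()
	}

	copied, err := io.Copy(ioutil.Discard, r.Body)
	short := r.ContentLength > 0 && copied < r.ContentLength

	if clientClosed != nil && (err != nil || short) {
		select {
		case <-clientClosed:
			// The client went away mid-upload: record 499 ("client
			// closed request") instead of a misleading 400.
			w.WriteHeader(499)
			log.Println("client disconnected during upload")
			return
		default:
		}
	}
	if err != nil {
		http.Error(w, "error reading payload", http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusAccepted)
}

func main() {
	http.HandleFunc("/upload", upload)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```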
-func (secret hmacKey) packUploadState(lus layerUploadState) (string, error) { +func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { mac := hmac.New(sha256.New, []byte(secret)) p, err := json.Marshal(lus) if err != nil { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go index cce2cd492e3e..366c7279e771 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hmac_test.go @@ -2,7 +2,7 @@ package handlers import "testing" -var layerUploadStates = []layerUploadState{ +var blobUploadStates = []blobUploadState{ { Name: "hello", UUID: "abcd-1234-qwer-0987", @@ -45,7 +45,7 @@ var secrets = []string{ func TestLayerUploadTokens(t *testing.T) { secret := hmacKey("supersecret") - for _, testcase := range layerUploadStates { + for _, testcase := range blobUploadStates { token, err := secret.packUploadState(testcase) if err != nil { t.Fatal(err) @@ -56,7 +56,7 @@ func TestLayerUploadTokens(t *testing.T) { t.Fatal(err) } - assertLayerUploadStateEquals(t, testcase, lus) + assertBlobUploadStateEquals(t, testcase, lus) } } @@ -68,7 +68,7 @@ func TestHMACValidation(t *testing.T) { secret2 := hmacKey(secret) badSecret := hmacKey("DifferentSecret") - for _, testcase := range layerUploadStates { + for _, testcase := range blobUploadStates { token, err := secret1.packUploadState(testcase) if err != nil { t.Fatal(err) @@ -79,7 +79,7 @@ func TestHMACValidation(t *testing.T) { t.Fatal(err) } - assertLayerUploadStateEquals(t, testcase, lus) + assertBlobUploadStateEquals(t, testcase, lus) _, err = badSecret.unpackUploadState(token) if err == nil { @@ -104,7 +104,7 @@ func TestHMACValidation(t *testing.T) { } } -func assertLayerUploadStateEquals(t *testing.T, expected layerUploadState, received layerUploadState) { +func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) { if expected.Name != received.Name { t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go new file mode 100644 index 000000000000..7bbab4f8a39c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/hooks.go @@ -0,0 +1,53 @@ +package handlers + +import ( + "bytes" + "errors" + "fmt" + "strings" + "text/template" + + "github.com/Sirupsen/logrus" +) + +// logHook is for hooking panics in the web application +type logHook struct { + LevelsParam []string + Mail *mailer +} + +// Fire forwards an error to LogHook +func (hook *logHook) Fire(entry *logrus.Entry) error { + addr := strings.Split(hook.Mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) + + html := ` + {{.Message}} + + {{range $key, $value := .Data}} + {{$key}}: {{$value}} + {{end}} + ` + b := bytes.NewBuffer(make([]byte, 0)) + t := template.Must(template.New("mail body").Parse(html)) + if err := t.Execute(b, entry); err != nil { + return err + } + body := fmt.Sprintf("%s", b) + + return hook.Mail.sendMail(subject, body) +} + +// Levels contains hook levels to be caught +func (hook *logHook) Levels()
[]logrus.Level { + levels := []logrus.Level{} + for _, v := range hook.LevelsParam { + lv, _ := logrus.ParseLevel(v) + levels = append(levels, lv) + } + return levels +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go index ed2159726912..f4f0db8906e4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go @@ -1,6 +1,7 @@ package handlers import ( + "bytes" "encoding/json" "fmt" "net/http" @@ -10,6 +11,7 @@ import ( ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" "golang.org/x/net/context" @@ -49,22 +51,25 @@ type imageManifestHandler struct { // GetImageManifest fetches the image manifest from the storage backend, if it exists. func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("GetImageManifest") - manifests := imh.Repository.Manifests() - - var ( - sm *manifest.SignedManifest - err error - ) + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + var sm *manifest.SignedManifest if imh.Tag != "" { - sm, err = manifests.GetByTag(imh.Context, imh.Tag) + sm, err = manifests.GetByTag(imh.Tag) } else { - sm, err = manifests.Get(imh.Context, imh.Digest) + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + sm, err = manifests.Get(imh.Digest) } if err != nil { - imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) - w.WriteHeader(http.StatusNotFound) + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } @@ -72,8 +77,11 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http if imh.Digest == "" { dgst, err := digestManifest(imh, sm) if err != nil { - imh.Errors.Push(v2.ErrorCodeDigestInvalid, err) - w.WriteHeader(http.StatusBadRequest) + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + return + } + if etagMatch(r, dgst.String()) { + w.WriteHeader(http.StatusNotModified) return } @@ -83,26 +91,43 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw))) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) w.Write(sm.Raw) } +func etagMatch(r *http.Request, etag string) bool { + for _, headerVal := range r.Header["If-None-Match"] { + if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted + return true + } + } + return false +} + // PutImageManifest validates and stores and image in the registry. 
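The etagMatch helper above lets manifest GETs short-circuit to 304 Not Modified when the client already holds the current digest; PutImageManifest, named by the trailing comment, follows. From the client side, the conditional fetch looks roughly like this sketch (the registry URL and digest are placeholders):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder values; a real client would remember the Etag (the
	// manifest digest) from an earlier 200 response.
	url := "http://localhost:5000/v2/foo/bar/manifests/latest"
	etag := `"sha256:0000000000000000000000000000000000000000000000000000000000000000"`

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("If-None-Match", etag) // quoted or unquoted both match

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotModified {
		fmt.Println("cached manifest is still current")
		return
	}
	fmt.Println("fetch and re-cache, new Etag:", resp.Header.Get("Etag"))
}
```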
func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("PutImageManifest") - manifests := imh.Repository.Manifests() - dec := json.NewDecoder(r.Body) + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var jsonBuf bytes.Buffer + if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } var manifest manifest.SignedManifest - if err := dec.Decode(&manifest); err != nil { - imh.Errors.Push(v2.ErrorCodeManifestInvalid, err) - w.WriteHeader(http.StatusBadRequest) + if err := json.Unmarshal(jsonBuf.Bytes(), &manifest); err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return } dgst, err := digestManifest(imh, &manifest) if err != nil { - imh.Errors.Push(v2.ErrorCodeDigestInvalid, err) - w.WriteHeader(http.StatusBadRequest) + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) return } @@ -110,8 +135,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http if imh.Tag != "" { if manifest.Tag != imh.Tag { ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag) - imh.Errors.Push(v2.ErrorCodeTagInvalid) - w.WriteHeader(http.StatusBadRequest) + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid) return } @@ -119,42 +143,41 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http } else if imh.Digest != "" { if dgst != imh.Digest { ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", dgst, imh.Digest) - imh.Errors.Push(v2.ErrorCodeDigestInvalid) - w.WriteHeader(http.StatusBadRequest) + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) return } } else { - imh.Errors.Push(v2.ErrorCodeTagInvalid, "no tag or digest specified") - w.WriteHeader(http.StatusBadRequest) + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) return } - if err := manifests.Put(imh.Context, &manifest); err != nil { + if err := manifests.Put(&manifest); err != nil { // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. + if err == distribution.ErrUnsupported { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + } switch err := err.(type) { case distribution.ErrManifestVerification: for _, verificationError := range err { switch verificationError := verificationError.(type) { - case distribution.ErrUnknownLayer: - imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer) + case distribution.ErrManifestBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(verificationError.Digest)) case distribution.ErrManifestUnverified: - imh.Errors.Push(v2.ErrorCodeManifestUnverified) + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) default: if verificationError == digest.ErrDigestInvalidFormat { - // TODO(stevvooe): We need to really need to move all - // errors to types. Its much more straightforward. 
- imh.Errors.Push(v2.ErrorCodeDigestInvalid) + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) } else { - imh.Errors.PushErr(verificationError) + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError) } } } default: - imh.Errors.PushErr(err) + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } - w.WriteHeader(http.StatusBadRequest) return } @@ -169,20 +192,39 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http w.Header().Set("Location", location) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.WriteHeader(http.StatusAccepted) + w.WriteHeader(http.StatusCreated) } -// DeleteImageManifest removes the image with the given tag from the registry. +// DeleteImageManifest removes the manifest with the given digest from the registry. func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("DeleteImageManifest") - // TODO(stevvooe): Unfortunately, at this point, manifest deletes are - // unsupported. There are issues with schema version 1 that make removing - // tag index entries a serious problem in eventually consistent storage. - // Once we work out schema version 2, the full deletion system will be - // worked out and we can add support back. - imh.Errors.Push(v2.ErrorCodeUnsupported) - w.WriteHeader(http.StatusBadRequest) + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + err = manifests.Delete(imh.Digest) + if err != nil { + switch err { + case digest.ErrDigestUnsupported: + case digest.ErrDigestInvalidFormat: + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + return + case distribution.ErrBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + case distribution.ErrUnsupported: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + default: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) + return + } + } + + w.WriteHeader(http.StatusAccepted) } // digestManifest takes a digest of the given manifest. This belongs somewhere diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/layer.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/layer.go deleted file mode 100644 index b8230135ab53..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/layer.go +++ /dev/null @@ -1,74 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// layerDispatcher uses the request context to build a layerHandler. 
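DeleteImageManifest above answers a successful delete with 202 Accepted and maps unknown digests, malformed digests, and delete-disabled registries to distinct errors; the removal of the old layer.go handlers continues below. In the spirit of the tests' httpDelete helper, a client-side sketch (the URL and digest are placeholders; a registry without delete enabled returns the UNSUPPORTED error instead):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Manifests are deleted by digest, not by tag.
	url := "http://localhost:5000/v2/foo/bar/manifests/sha256:0000000000000000000000000000000000000000000000000000000000000000"

	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusAccepted:
		fmt.Println("manifest deleted")
	case http.StatusNotFound:
		fmt.Println("manifest unknown")
	default:
		fmt.Println("delete refused:", resp.Status)
	}
}
```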
-func layerDispatcher(ctx *Context, r *http.Request) http.Handler { - dgst, err := getDigest(ctx) - if err != nil { - - if err == errDigestNotAvailable { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) - }) - } - - layerHandler := &layerHandler{ - Context: ctx, - Digest: dgst, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(layerHandler.GetLayer), - "HEAD": http.HandlerFunc(layerHandler.GetLayer), - } -} - -// layerHandler serves http layer requests. -type layerHandler struct { - *Context - - Digest digest.Digest -} - -// GetLayer fetches the binary data from backend storage returns it in the -// response. -func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(lh).Debug("GetImageLayer") - layers := lh.Repository.Layers() - layer, err := layers.Fetch(lh.Digest) - - if err != nil { - switch err := err.(type) { - case distribution.ErrUnknownLayer: - w.WriteHeader(http.StatusNotFound) - lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer) - default: - lh.Errors.Push(v2.ErrorCodeUnknown, err) - } - return - } - - handler, err := layer.Handler(r) - if err != nil { - ctxu.GetLogger(lh).Debugf("unexpected error getting layer HTTP handler: %s", err) - lh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - handler.ServeHTTP(w, r) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/layerupload.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/layerupload.go deleted file mode 100644 index 1591d98dcc12..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/layerupload.go +++ /dev/null @@ -1,344 +0,0 @@ -package handlers - -import ( - "fmt" - "io" - "net/http" - "net/url" - "os" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// layerUploadDispatcher constructs and returns the layer upload handler for -// the given request context. 
-// layerUploadDispatcher constructs and returns the layer upload handler for
-// the given request context.
-func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
-	luh := &layerUploadHandler{
-		Context: ctx,
-		UUID:    getUploadUUID(ctx),
-	}
-
-	handler := http.Handler(handlers.MethodHandler{
-		"POST":   http.HandlerFunc(luh.StartLayerUpload),
-		"GET":    http.HandlerFunc(luh.GetUploadStatus),
-		"HEAD":   http.HandlerFunc(luh.GetUploadStatus),
-		"PATCH":  http.HandlerFunc(luh.PatchLayerData),
-		"PUT":    http.HandlerFunc(luh.PutLayerUploadComplete),
-		"DELETE": http.HandlerFunc(luh.CancelLayerUpload),
-	})
-
-	if luh.UUID != "" {
-		state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
-		if err != nil {
-			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err)
-				w.WriteHeader(http.StatusBadRequest)
-				luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
-			})
-		}
-		luh.State = state
-
-		if state.Name != ctx.Repository.Name() {
-			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, luh.Repository.Name())
-				w.WriteHeader(http.StatusBadRequest)
-				luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
-			})
-		}
-
-		if state.UUID != luh.UUID {
-			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, luh.UUID)
-				w.WriteHeader(http.StatusBadRequest)
-				luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
-			})
-		}
-
-		layers := ctx.Repository.Layers()
-		upload, err := layers.Resume(luh.UUID)
-		if err != nil {
-			ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err)
-			if err == distribution.ErrLayerUploadUnknown {
-				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-					w.WriteHeader(http.StatusNotFound)
-					luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err)
-				})
-			}
-
-			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				w.WriteHeader(http.StatusInternalServerError)
-				luh.Errors.Push(v2.ErrorCodeUnknown, err)
-			})
-		}
-		luh.Upload = upload
-
-		if state.Offset > 0 {
-			// Seek the layer upload to the correct spot if it's non-zero.
-			// These error conditions should be rare and indicate real
-			// problems. We basically cancel the upload and tell the client to
-			// start over.
-			if nn, err := upload.Seek(luh.State.Offset, os.SEEK_SET); err != nil {
-				defer upload.Close()
-				ctxu.GetLogger(ctx).Infof("error seeking layer upload: %v", err)
-				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-					w.WriteHeader(http.StatusBadRequest)
-					luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
-					upload.Cancel()
-				})
-			} else if nn != luh.State.Offset {
-				defer upload.Close()
-				ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, luh.State.Offset)
-				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-					w.WriteHeader(http.StatusBadRequest)
-					luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
-					upload.Cancel()
-				})
-			}
-		}
-
-		handler = closeResources(handler, luh.Upload)
-	}
-
-	return handler
-}
-
-// layerUploadHandler handles the http layer upload process.
-type layerUploadHandler struct {
-	*Context
-
-	// UUID identifies the upload instance for the current request.
-	UUID string
-
-	Upload distribution.LayerUpload
-
-	State layerUploadState
-}
-
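From the client's perspective the method table above implements the v2 push flow: POST opens a session, PATCH streams data, and a final PUT carrying a digest query parameter commits the blob. A compressed sketch of that flow, assuming registry, blob, and dgst are in scope and "bytes"/"net/http" are imported (error handling elided for brevity):

// 1. Open an upload session; the response carries a Location URL that
//    already embeds the signed _state token.
resp, _ := http.Post(registry+"/v2/foo/bar/blobs/uploads/", "", nil)
location := resp.Header.Get("Location")

// 2. Stream data; each PATCH returns an updated Location/_state.
req, _ := http.NewRequest("PATCH", location, bytes.NewReader(blob))
req.Header.Set("Content-Type", "application/octet-stream")
resp, _ = http.DefaultClient.Do(req)
location = resp.Header.Get("Location")

// 3. Commit with the expected digest; the server verifies before linking.
//    The "&" assumes Location already carries a ?_state=... query string.
req, _ = http.NewRequest("PUT", location+"&digest="+dgst, nil)
resp, _ = http.DefaultClient.Do(req) // expect 201 Created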
-// StartLayerUpload begins the layer upload process and allocates a server-
-// side upload session.
-func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) {
-	layers := luh.Repository.Layers()
-	upload, err := layers.Upload()
-	if err != nil {
-		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
-		luh.Errors.Push(v2.ErrorCodeUnknown, err)
-		return
-	}
-
-	luh.Upload = upload
-	defer luh.Upload.Close()
-
-	if err := luh.layerUploadResponse(w, r, true); err != nil {
-		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
-		luh.Errors.Push(v2.ErrorCodeUnknown, err)
-		return
-	}
-
-	w.Header().Set("Docker-Upload-UUID", luh.Upload.UUID())
-	w.WriteHeader(http.StatusAccepted)
-}
-
-// GetUploadStatus returns the status of a given upload, identified by uuid.
-func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) {
-	if luh.Upload == nil {
-		w.WriteHeader(http.StatusNotFound)
-		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
-		return
-	}
-
-	// TODO(dmcgowan): Set last argument to false in layerUploadResponse when
-	// resumable upload is supported. This will enable returning a non-zero
-	// range for clients to begin uploading at an offset.
-	if err := luh.layerUploadResponse(w, r, true); err != nil {
-		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
-		luh.Errors.Push(v2.ErrorCodeUnknown, err)
-		return
-	}
-
-	w.Header().Set("Docker-Upload-UUID", luh.UUID)
-	w.WriteHeader(http.StatusNoContent)
-}
-
-// PatchLayerData writes data to an upload.
-func (luh *layerUploadHandler) PatchLayerData(w http.ResponseWriter, r *http.Request) {
-	if luh.Upload == nil {
-		w.WriteHeader(http.StatusNotFound)
-		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
-		return
-	}
-
-	ct := r.Header.Get("Content-Type")
-	if ct != "" && ct != "application/octet-stream" {
-		w.WriteHeader(http.StatusBadRequest)
-		// TODO(dmcgowan): encode error
-		return
-	}
-
-	// TODO(dmcgowan): support Content-Range header to seek and write range
-
-	// Copy the data
-	if _, err := io.Copy(luh.Upload, r.Body); err != nil {
-		ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err)
-		w.WriteHeader(http.StatusInternalServerError)
-		luh.Errors.Push(v2.ErrorCodeUnknown, err)
-		return
-	}
-
-	if err := luh.layerUploadResponse(w, r, false); err != nil {
-		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
-		luh.Errors.Push(v2.ErrorCodeUnknown, err)
-		return
-	}
-
-	w.WriteHeader(http.StatusAccepted)
-}
-
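The PUT handler that follows finishes an upload by parsing the client-supplied digest and handing it to Finish for verification. A small, self-contained sketch of that digest round-trip with the vendored digest package (the payload here is illustrative):

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("example blob data")

	// Compute a digest the same way the registry verifies one.
	dgst, err := digest.FromBytes(payload)
	if err != nil {
		panic(err)
	}

	// Parse a client-supplied digest string; malformed input fails here,
	// which is the case the handler maps to DIGEST_INVALID.
	parsed, err := digest.ParseDigest(dgst.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == dgst) // true
}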
-// PutLayerUploadComplete takes the final request of a layer upload. The
-// request may include all the layer data or no layer data. Any data
-// provided is received and verified. If successful, the layer is linked
-// into the blob store and 201 Created is returned with the canonical
-// url of the layer.
-func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *http.Request) {
-	if luh.Upload == nil {
-		w.WriteHeader(http.StatusNotFound)
-		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
-		return
-	}
-
-	dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters!
-
-	if dgstStr == "" {
-		// no digest? return error, but allow retry.
-		w.WriteHeader(http.StatusBadRequest)
-		luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest missing")
-		return
-	}
-
-	dgst, err := digest.ParseDigest(dgstStr)
-	if err != nil {
-		// invalid digest? return error, but allow retry.
-		w.WriteHeader(http.StatusNotFound)
-		luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest parsing failed")
-		return
-	}
-
-	// TODO(stevvooe): Consider checking the error on this copy.
-	// Theoretically, problems should be detected during verification but we
-	// may miss a root cause.
-
-	// Read in the data, if any.
-	if _, err := io.Copy(luh.Upload, r.Body); err != nil {
-		ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err)
-		w.WriteHeader(http.StatusInternalServerError)
-		luh.Errors.Push(v2.ErrorCodeUnknown, err)
-		return
-	}
-
-	layer, err := luh.Upload.Finish(dgst)
-	if err != nil {
-		switch err := err.(type) {
-		case distribution.ErrLayerInvalidDigest:
-			w.WriteHeader(http.StatusBadRequest)
-			luh.Errors.Push(v2.ErrorCodeDigestInvalid, err)
-		default:
-			ctxu.GetLogger(luh).Errorf("unknown error completing upload: %#v", err)
-			w.WriteHeader(http.StatusInternalServerError)
-			luh.Errors.Push(v2.ErrorCodeUnknown, err)
-		}
-
-		// Clean up the backend layer data if there was an error.
-		if err := luh.Upload.Cancel(); err != nil {
-			// If the cleanup fails, all we can do is observe and report.
-			ctxu.GetLogger(luh).Errorf("error canceling upload after error: %v", err)
-		}
-
-		return
-	}
-
-	// Build our canonical layer url
-	layerURL, err := luh.urlBuilder.BuildBlobURL(luh.Repository.Name(), layer.Digest())
-	if err != nil {
-		luh.Errors.Push(v2.ErrorCodeUnknown, err)
-		w.WriteHeader(http.StatusInternalServerError)
-		return
-	}
-
-	w.Header().Set("Location", layerURL)
-	w.Header().Set("Content-Length", "0")
-	w.Header().Set("Docker-Content-Digest", layer.Digest().String())
-	w.WriteHeader(http.StatusCreated)
-}
-
-// CancelLayerUpload cancels an in-progress upload of a layer.
-func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) {
-	if luh.Upload == nil {
-		w.WriteHeader(http.StatusNotFound)
-		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
-		return
-	}
-
-	w.Header().Set("Docker-Upload-UUID", luh.UUID)
-	if err := luh.Upload.Cancel(); err != nil {
-		ctxu.GetLogger(luh).Errorf("error encountered canceling upload: %v", err)
-		w.WriteHeader(http.StatusInternalServerError)
-		luh.Errors.PushErr(err)
-	}
-
-	w.WriteHeader(http.StatusNoContent)
-}
-
-// layerUploadResponse provides a standard request for uploading layers and
-// chunk responses. This sets the correct headers but the response status is
-// left to the caller. The fresh argument is used to ensure that new layer
-// uploads always start at a 0 offset. This allows disabling resumable push
-// by always returning a 0 offset on check status.
-func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error {
-
-	var offset int64
-	if !fresh {
-		var err error
-		offset, err = luh.Upload.Seek(0, os.SEEK_CUR)
-		if err != nil {
-			ctxu.GetLogger(luh).Errorf("unable to get current offset of layer upload: %v", err)
-			return err
-		}
-	}
-
-	// TODO(stevvooe): Need a better way to manage the upload state automatically.
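// A worked example of the response this helper assembles (values
// illustrative): after 100 bytes have been written, offset = 100 and the
// Range header built below is "0-99"; on a fresh upload offset stays 0,
// so the client sees Range "0-0" and restarts from the beginning, which
// is how resumable push is deliberately kept disabled for now.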
-	luh.State.Name = luh.Repository.Name()
-	luh.State.UUID = luh.Upload.UUID()
-	luh.State.Offset = offset
-	luh.State.StartedAt = luh.Upload.StartedAt()
-
-	token, err := hmacKey(luh.Config.HTTP.Secret).packUploadState(luh.State)
-	if err != nil {
-		ctxu.GetLogger(luh).Infof("error building upload state token: %s", err)
-		return err
-	}
-
-	uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL(
-		luh.Repository.Name(), luh.Upload.UUID(),
-		url.Values{
-			"_state": []string{token},
-		})
-	if err != nil {
-		ctxu.GetLogger(luh).Infof("error building upload url: %s", err)
-		return err
-	}
-
-	endRange := offset
-	if endRange > 0 {
-		endRange = endRange - 1
-	}
-
-	w.Header().Set("Docker-Upload-UUID", luh.UUID)
-	w.Header().Set("Location", uploadURL)
-	w.Header().Set("Content-Length", "0")
-	w.Header().Set("Range", fmt.Sprintf("0-%d", endRange))
-
-	return nil
-}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go
new file mode 100644
index 000000000000..39244909de4c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/mail.go
@@ -0,0 +1,45 @@
+package handlers
+
+import (
+	"errors"
+	"net/smtp"
+	"strings"
+)
+
+// mailer provides fields of email configuration for sending.
+type mailer struct {
+	Addr, Username, Password, From string
+	Insecure                       bool
+	To                             []string
+}
+
+// sendMail allows users to send email, but only if the mail parameters are configured correctly.
+func (mail *mailer) sendMail(subject, message string) error {
+	addr := strings.Split(mail.Addr, ":")
+	if len(addr) != 2 {
+		return errors.New("Invalid Mail Address")
+	}
+	host := addr[0]
+	msg := []byte("To:" + strings.Join(mail.To, ";") +
+		"\r\nFrom: " + mail.From +
+		"\r\nSubject: " + subject +
+		"\r\nContent-Type: text/plain\r\n\r\n" +
+		message)
+	auth := smtp.PlainAuth(
+		"",
+		mail.Username,
+		mail.Password,
+		host,
+	)
+	err := smtp.SendMail(
+		mail.Addr,
+		auth,
+		mail.From,
+		mail.To,
+		msg,
+	)
+	if err != nil {
+		return err
+	}
+	return nil
+}
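A short usage sketch for the mailer above; the SMTP host, credentials, and recipients are placeholders, and this assumes "log" from the standard library is imported:

m := &mailer{
	Addr:     "smtp.example.com:587", // must be host:port; sendMail splits on ":"
	Username: "registry",
	Password: "secret",
	From:     "registry@example.com",
	To:       []string{"ops@example.com"},
}
if err := m.sendMail("registry alert", "uploads are failing"); err != nil {
	log.Printf("could not send mail: %v", err)
}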
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go
index f2a94e0aeaa2..547255857cc3 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go
@@ -5,6 +5,7 @@ import (
 	"net/http"
 
 	"github.com/docker/distribution"
+	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/distribution/registry/api/v2"
 	"github.com/gorilla/handlers"
 )
@@ -33,16 +34,19 @@ type tagsAPIResponse struct {
 
 // GetTags returns a json list of tags for a specific image name.
 func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
 	defer r.Body.Close()
-	manifests := th.Repository.Manifests()
+	manifests, err := th.Repository.Manifests(th)
+	if err != nil {
+		th.Errors = append(th.Errors, err)
+		return
+	}
 
-	tags, err := manifests.Tags(th.Context)
+	tags, err := manifests.Tags()
 	if err != nil {
 		switch err := err.(type) {
 		case distribution.ErrRepositoryUnknown:
-			w.WriteHeader(404)
-			th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Repository.Name()})
+			th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name()}))
		default:
-			th.Errors.PushErr(err)
+			th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
 		}
 		return
 	}
@@ -54,7 +58,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
 		Name: th.Repository.Name(),
 		Tags: tags,
 	}); err != nil {
-		th.Errors.PushErr(err)
+		th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
 		return
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go
new file mode 100644
index 000000000000..b93a7a63ffcc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go
@@ -0,0 +1,74 @@
+package listener
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"time"
+)
+
+// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
+// connections. It's used by ListenAndServe and ListenAndServeTLS so
+// dead TCP connections (e.g. closing laptop mid-download) eventually
+// go away.
+// it is a plain copy-paste from net/http/server.go
+type tcpKeepAliveListener struct {
+	*net.TCPListener
+}
+
+func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+	tc, err := ln.AcceptTCP()
+	if err != nil {
+		return
+	}
+	tc.SetKeepAlive(true)
+	tc.SetKeepAlivePeriod(3 * time.Minute)
+	return tc, nil
+}
+
+// NewListener announces on laddr and net. Accepted values of the net are
+// 'unix' and 'tcp'
+func NewListener(net, laddr string) (net.Listener, error) {
+	switch net {
+	case "unix":
+		return newUnixListener(laddr)
+	case "tcp", "": // an empty net means tcp
+		return newTCPListener(laddr)
+	default:
+		return nil, fmt.Errorf("unknown address type %s", net)
+	}
+}
+
+func newUnixListener(laddr string) (net.Listener, error) {
+	fi, err := os.Stat(laddr)
+	if err == nil {
+		// the file exists.
+		// try to remove it if it's a socket
+		if !isSocket(fi.Mode()) {
+			return nil, fmt.Errorf("file %s exists and is not a socket", laddr)
+		}
+
+		if err := os.Remove(laddr); err != nil {
+			return nil, err
+		}
+	} else if !os.IsNotExist(err) {
+		// we can't do stat on the file.
+		// it means we cannot remove it
+		return nil, err
+	}
+
+	return net.Listen("unix", laddr)
+}
+
+func isSocket(m os.FileMode) bool {
+	return m&os.ModeSocket != 0
+}
+
+func newTCPListener(laddr string) (net.Listener, error) {
+	ln, err := net.Listen("tcp", laddr)
+	if err != nil {
+		return nil, err
+	}
+
+	return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go
index 048603b877e2..7535c6db5ee4 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go
@@ -4,11 +4,12 @@ import (
 	"fmt"
 
 	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
 )
 
 // InitFunc is the type of a RegistryMiddleware factory function and is
 // used to register the constructor for different RegistryMiddleware backends.
-type InitFunc func(registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)
+type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)
 
 var middlewares map[string]InitFunc
 
@@ -28,10 +29,10 @@ func Register(name string, initFunc InitFunc) error {
 }
 
 // Get constructs a RegistryMiddleware with the given options using the named backend.
-func Get(name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) {
+func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) {
 	if middlewares != nil {
 		if initFunc, exists := middlewares[name]; exists {
-			return initFunc(registry, options)
+			return initFunc(ctx, registry, options)
 		}
 	}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go
index d6330fc400a5..27b42aecfcb4 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/repository/middleware.go
@@ -4,11 +4,12 @@ import (
 	"fmt"
 
 	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
 )
 
 // InitFunc is the type of a RepositoryMiddleware factory function and is
 // used to register the constructor for different RepositoryMiddleware backends.
-type InitFunc func(repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error)
+type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error)
 
 var middlewares map[string]InitFunc
 
@@ -28,10 +29,10 @@ func Register(name string, initFunc InitFunc) error {
 }
 
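Both middleware registries now thread a context.Context through InitFunc. A hedged sketch of what a conforming repository middleware registration might look like; the middleware name and behavior here are hypothetical:

package mymiddleware

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	middleware "github.com/docker/distribution/registry/middleware/repository"
)

func init() {
	// Register a no-op middleware; the ctx-aware InitFunc signature
	// introduced by this bump is the point of interest.
	middleware.Register("noop", func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) {
		context.GetLogger(ctx).Debugf("wrapping repository %s", repository.Name())
		return repository, nil
	})
}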
 // Get constructs a RepositoryMiddleware with the given options using the named backend.
-func Get(name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) {
+func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) {
 	if middlewares != nil {
 		if initFunc, exists := middlewares[name]; exists {
-			return initFunc(repository, options)
+			return initFunc(ctx, repository, options)
 		}
 	}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go
new file mode 100644
index 000000000000..e4bec75a5edc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go
@@ -0,0 +1,54 @@
+package proxy
+
+import (
+	"net/http"
+	"net/url"
+
+	"github.com/docker/distribution/registry/client/auth"
+)
+
+const tokenURL = "https://auth.docker.io/token"
+
+type userpass struct {
+	username string
+	password string
+}
+
+type credentials struct {
+	creds map[string]userpass
+}
+
+func (c credentials) Basic(u *url.URL) (string, string) {
+	up := c.creds[u.String()]
+
+	return up.username, up.password
+}
+
+// ConfigureAuth authorizes with the upstream registry
+func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) {
+	if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil {
+		return nil, err
+	}
+
+	creds := map[string]userpass{
+		tokenURL: {
+			username: username,
+			password: password,
+		},
+	}
+	return credentials{creds: creds}, nil
+}
+
+func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error {
+	resp, err := http.Get(endpoint)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if err := manager.AddResponse(resp); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go
new file mode 100644
index 000000000000..c3df333187f9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go
@@ -0,0 +1,218 @@
+package proxy
+
+import (
+	"io"
+	"net/http"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/proxy/scheduler"
+)
+
+// todo(richardscothern): from cache control header or config file
+const blobTTL = time.Duration(24 * 7 * time.Hour)
+
+type proxyBlobStore struct {
+	localStore  distribution.BlobStore
+	remoteStore distribution.BlobService
+	scheduler   *scheduler.TTLExpirationScheduler
+}
+
+var _ distribution.BlobStore = proxyBlobStore{}
+
+type inflightBlob struct {
+	refCount int
+	bw       distribution.BlobWriter
+}
+
+// inflight tracks currently downloading blobs
+var inflight = make(map[digest.Digest]*inflightBlob)
+
+// mu protects inflight
+var mu sync.Mutex
+
+func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) {
+	w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
+	w.Header().Set("Content-Type", mediaType)
+	w.Header().Set("Docker-Content-Digest", digest.String())
+	w.Header().Set("Etag", digest.String())
+}
+
+func (pbs proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	desc, err := pbs.localStore.Stat(ctx, dgst)
+	if err != nil && err != distribution.ErrBlobUnknown {
+		return err
+	}
+
+	if err == nil {
+		proxyMetrics.BlobPush(uint64(desc.Size))
+		return pbs.localStore.ServeBlob(ctx, w, r, dgst)
+	}
+
+	desc, err = pbs.remoteStore.Stat(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	bw, isNew, cleanup, err := getOrCreateBlobWriter(ctx, pbs.localStore, desc)
+	if err != nil {
+		return err
+	}
+	defer cleanup()
+
+	if isNew {
+		go func() {
+			err := streamToStorage(ctx, remoteReader, desc, bw)
+			if err != nil {
+				context.GetLogger(ctx).Error(err)
+			}
+
+			proxyMetrics.BlobPull(uint64(desc.Size))
+		}()
+		err := streamToClient(ctx, w, desc, bw)
+		if err != nil {
+			return err
+		}
+
+		proxyMetrics.BlobPush(uint64(desc.Size))
+		pbs.scheduler.AddBlob(dgst.String(), blobTTL)
+		return nil
+	}
+
+	err = streamToClient(ctx, w, desc, bw)
+	if err != nil {
+		return err
+	}
+	proxyMetrics.BlobPush(uint64(desc.Size))
+	return nil
+}
+
+type cleanupFunc func()
+
+// getOrCreateBlobWriter will track which blobs are currently being downloaded and enable clients requesting
+// the same blob concurrently to read from the existing stream.
+func getOrCreateBlobWriter(ctx context.Context, blobs distribution.BlobService, desc distribution.Descriptor) (distribution.BlobWriter, bool, cleanupFunc, error) {
+	mu.Lock()
+	defer mu.Unlock()
+	dgst := desc.Digest
+
+	cleanup := func() {
+		mu.Lock()
+		defer mu.Unlock()
+		inflight[dgst].refCount--
+
+		if inflight[dgst].refCount == 0 {
+			defer delete(inflight, dgst)
+			_, err := inflight[dgst].bw.Commit(ctx, desc)
+			if err != nil {
+				// There is a narrow race here where Commit can be called while this blob's TTL is expiring
+				// and it's being removed from storage. In that case, the client stream will continue
+				// uninterrupted and the blob will be pulled through on the next request, so just log it
+				context.GetLogger(ctx).Errorf("Error committing blob: %q", err)
+			}
+
+		}
+	}
+
+	var bw distribution.BlobWriter
+	_, ok := inflight[dgst]
+	if ok {
+		bw = inflight[dgst].bw
+		inflight[dgst].refCount++
+		return bw, false, cleanup, nil
+	}
+
+	var err error
+	bw, err = blobs.Create(ctx)
+	if err != nil {
+		return nil, false, nil, err
+	}
+
+	inflight[dgst] = &inflightBlob{refCount: 1, bw: bw}
+	return bw, true, cleanup, nil
+}
+
+func streamToStorage(ctx context.Context, remoteReader distribution.ReadSeekCloser, desc distribution.Descriptor, bw distribution.BlobWriter) error {
+	_, err := io.CopyN(bw, remoteReader, desc.Size)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func streamToClient(ctx context.Context, w http.ResponseWriter, desc distribution.Descriptor, bw distribution.BlobWriter) error {
+	setResponseHeaders(w, desc.Size, desc.MediaType, desc.Digest)
+
+	reader, err := bw.Reader()
+	if err != nil {
+		return err
+	}
+	defer reader.Close()
+	teeReader := io.TeeReader(reader, w)
+	buf := make([]byte, 32768)
+	var soFar int64
+	for {
+		rd, err := teeReader.Read(buf)
+		if err == nil || err == io.EOF {
+			soFar += int64(rd)
+			if soFar < desc.Size {
+				// buffer underflow, keep trying
+				continue
+			}
+			return nil
+		}
+		return err
+	}
+}
+
+func (pbs proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	desc, err := pbs.localStore.Stat(ctx, dgst)
+	if err == nil {
+		return desc, err
+	}
+
+	if err != distribution.ErrBlobUnknown {
+		return distribution.Descriptor{}, err
+	}
+
+	return pbs.remoteStore.Stat(ctx, dgst)
+}
+
+// Unsupported functions
+func (pbs proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	return distribution.Descriptor{}, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Enumerate(ctx context.Context, digests []digest.Digest, last string) (n int, err error) {
+	return 0, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
+	return distribution.ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go
new file mode 100644
index 000000000000..c7877dc3d1d5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go
@@ -0,0 +1,236 @@
+package proxy
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/proxy/scheduler"
+	"github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type statsBlobStore struct { + stats map[string]int + blobs distribution.BlobStore +} + +func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + sbs.stats["put"]++ + return sbs.blobs.Put(ctx, mediaType, p) +} + +func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + sbs.stats["get"]++ + return sbs.blobs.Get(ctx, dgst) +} + +func (sbs statsBlobStore) Enumerate(ctx context.Context, digests []digest.Digest, last string) (n int, err error) { + sbs.stats["enumerate"]++ + return sbs.blobs.Enumerate(ctx, digests, last) +} + +func (sbs statsBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { + sbs.stats["create"]++ + return sbs.blobs.Create(ctx) +} + +func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + sbs.stats["resume"]++ + return sbs.blobs.Resume(ctx, id) +} + +func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + sbs.stats["open"]++ + return sbs.blobs.Open(ctx, dgst) +} + +func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + sbs.stats["serveblob"]++ + return sbs.blobs.ServeBlob(ctx, w, r, dgst) +} + +func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + sbs.stats["stat"]++ + return sbs.blobs.Stat(ctx, dgst) +} + +func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + sbs.stats["delete"]++ + return sbs.blobs.Delete(ctx, dgst) +} + +type testEnv struct { + inRemote []distribution.Descriptor + store proxyBlobStore + ctx context.Context +} + +func (te testEnv) LocalStats() *map[string]int { + ls := te.store.localStore.(statsBlobStore).stats + return &ls +} + +func (te testEnv) RemoteStats() *map[string]int { + rs := te.store.remoteStore.(statsBlobStore).stats + return &rs +} + +// Populate remote store and record the digests +func makeTestEnv(t *testing.T, name string) testEnv { + ctx := context.Background() + + localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true, false) + localRepo, err := localRegistry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false, false) + truthRepo, err := truthRegistry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: truthRepo.Blobs(ctx), + } + + localBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: localRepo.Blobs(ctx), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + + proxyBlobStore := proxyBlobStore{ + remoteStore: truthBlobs, + localStore: localBlobs, + scheduler: s, + } + + te := testEnv{ + store: proxyBlobStore, + ctx: ctx, + } + return te +} + +func populate(t *testing.T, te *testEnv, blobCount int) { + var inRemote []distribution.Descriptor + for i := 0; i < blobCount; i++ { + bytes := []byte(fmt.Sprintf("blob%d", i)) + + desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) + if err != nil { + t.Errorf("Put in 
store") + } + inRemote = append(inRemote, desc) + } + + te.inRemote = inRemote + +} + +func TestProxyStoreStat(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + remoteBlobCount := 1 + populate(t, &te, remoteBlobCount) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Stat - touches both stores + for _, d := range te.inRemote { + _, err := te.store.Stat(te.ctx, d.Digest) + if err != nil { + t.Fatalf("Error stating proxy store") + } + } + + if (*localStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected local stat count") + } + + if (*remoteStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected remote stat count") + } +} + +func TestProxyStoreServe(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + remoteBlobCount := 1 + populate(t, &te, remoteBlobCount) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Serveblob - pulls through blobs + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl, err := digest.FromBytes(w.Body.Bytes()) + if err != nil { + t.Fatalf("Error making digest from blob") + } + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + if (*localStats)["stat"] != remoteBlobCount && (*localStats)["create"] != remoteBlobCount { + t.Fatalf("unexpected local stats") + } + if (*remoteStats)["stat"] != remoteBlobCount && (*remoteStats)["open"] != remoteBlobCount { + t.Fatalf("unexpected local stats") + } + + // Serveblob - blobs come from local + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl, err := digest.FromBytes(w.Body.Bytes()) + if err != nil { + t.Fatalf("Error making digest from blob") + } + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + // Stat to find local, but no new blobs were created + if (*localStats)["stat"] != remoteBlobCount*2 && (*localStats)["create"] != remoteBlobCount*2 { + t.Fatalf("unexpected local stats") + } + + // Remote unchanged + if (*remoteStats)["stat"] != remoteBlobCount && (*remoteStats)["open"] != remoteBlobCount { + fmt.Printf("\tlocal=%#v, \n\tremote=%#v\n", localStats, remoteStats) + t.Fatalf("unexpected local stats") + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go new file mode 100644 index 000000000000..5e458cb067a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go @@ -0,0 +1,158 @@ +package proxy + +import ( + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config +const repositoryTTL = time.Duration(24 * 7 * time.Hour) + +type proxyManifestStore struct { + ctx context.Context + localManifests distribution.ManifestService + remoteManifests distribution.ManifestService + repositoryName string + 
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go
new file mode 100644
index 000000000000..5e458cb067a4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go
@@ -0,0 +1,158 @@
+package proxy
+
+import (
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/proxy/scheduler"
+)
+
+// todo(richardscothern): from cache control header or config
+const repositoryTTL = time.Duration(24 * 7 * time.Hour)
+
+type proxyManifestStore struct {
+	ctx             context.Context
+	localManifests  distribution.ManifestService
+	remoteManifests distribution.ManifestService
+	repositoryName  string
+	scheduler       *scheduler.TTLExpirationScheduler
+}
+
+var _ distribution.ManifestService = &proxyManifestStore{}
+
+func (pms proxyManifestStore) Exists(dgst digest.Digest) (bool, error) {
+	exists, err := pms.localManifests.Exists(dgst)
+	if err != nil {
+		return false, err
+	}
+	if exists {
+		return true, nil
+	}
+
+	return pms.remoteManifests.Exists(dgst)
+}
+
+func (pms proxyManifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
+	sm, err := pms.localManifests.Get(dgst)
+	if err == nil {
+		proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
+		return sm, err
+	}
+
+	sm, err = pms.remoteManifests.Get(dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	proxyMetrics.ManifestPull(uint64(len(sm.Raw)))
+	err = pms.localManifests.Put(sm)
+	if err != nil {
+		return nil, err
+	}
+
+	// Schedule the repo for removal
+	pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL)
+
+	// Ensure the manifest blob is cleaned up
+	pms.scheduler.AddBlob(dgst.String(), repositoryTTL)
+
+	proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
+
+	return sm, err
+}
+
+func (pms proxyManifestStore) Tags() ([]string, error) {
+	return pms.localManifests.Tags()
+}
+
+func (pms proxyManifestStore) ExistsByTag(tag string) (bool, error) {
+	exists, err := pms.localManifests.ExistsByTag(tag)
+	if err != nil {
+		return false, err
+	}
+	if exists {
+		return true, nil
+	}
+
+	return pms.remoteManifests.ExistsByTag(tag)
+}
+
+func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) {
+	var localDigest digest.Digest
+
+	localManifest, err := pms.localManifests.GetByTag(tag, options...)
+	switch err.(type) {
+	case distribution.ErrManifestUnknown, distribution.ErrManifestUnknownRevision:
+		goto fromremote
+	case nil:
+		break
+	default:
+		return nil, err
+	}
+
+	localDigest, err = manifestDigest(localManifest)
+	if err != nil {
+		return nil, err
+	}
+
+fromremote:
+	var sm *manifest.SignedManifest
+	sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String()))
+	if err != nil {
+		return nil, err
+	}
+
+	if sm == nil {
+		context.GetLogger(pms.ctx).Debugf("Local manifest for %q is latest, dgst=%s", tag, localDigest.String())
+		return localManifest, nil
+	}
+	context.GetLogger(pms.ctx).Debugf("Updated manifest for %q, dgst=%s", tag, localDigest.String())
+
+	err = pms.localManifests.Put(sm)
+	if err != nil {
+		return nil, err
+	}
+
+	dgst, err := manifestDigest(sm)
+	if err != nil {
+		return nil, err
+	}
+	pms.scheduler.AddBlob(dgst.String(), repositoryTTL)
+	pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL)
+
+	proxyMetrics.ManifestPull(uint64(len(sm.Raw)))
+	proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
+
+	return sm, err
+}
+
+func manifestDigest(sm *manifest.SignedManifest) (digest.Digest, error) {
+	payload, err := sm.Payload()
+	if err != nil {
+		return "", err
+
+	}
+
+	dgst, err := digest.FromBytes(payload)
+	if err != nil {
+		return "", err
+	}
+
+	return dgst, nil
+}
+
+func (pms proxyManifestStore) Put(manifest *manifest.SignedManifest) error {
+	return distribution.ErrUnsupported
+}
+
+func (pms proxyManifestStore) Delete(dgst digest.Digest) error {
+	return distribution.ErrUnsupported
+}
+
+func (pms proxyManifestStore) Enumerate() ([]digest.Digest, error) {
+	return nil, distribution.ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go
new file mode 100644
index 000000000000..cb64e066285a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go
@@ -0,0 +1,240 @@
+package proxy
+
+import (
+	"io"
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/registry/proxy/scheduler"
+	"github.com/docker/distribution/registry/storage"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+	"github.com/docker/distribution/testutil"
+	"github.com/docker/libtrust"
+)
+
+type statsManifest struct {
+	manifests distribution.ManifestService
+	stats     map[string]int
+}
+
+type manifestStoreTestEnv struct {
+	manifestDigest digest.Digest // digest of the signed manifest in the local storage
+	manifests      proxyManifestStore
+}
+
+func (te manifestStoreTestEnv) LocalStats() *map[string]int {
+	ls := te.manifests.localManifests.(statsManifest).stats
+	return &ls
+}
+
+func (te manifestStoreTestEnv) RemoteStats() *map[string]int {
+	rs := te.manifests.remoteManifests.(statsManifest).stats
+	return &rs
+}
+
+func (sm statsManifest) Delete(dgst digest.Digest) error {
+	sm.stats["delete"]++
+	return sm.manifests.Delete(dgst)
+}
+
+func (sm statsManifest) Exists(dgst digest.Digest) (bool, error) {
+	sm.stats["exists"]++
+	return sm.manifests.Exists(dgst)
+}
+
+func (sm statsManifest) ExistsByTag(tag string) (bool, error) {
+	sm.stats["existbytag"]++
+	return sm.manifests.ExistsByTag(tag)
+}
+
+func (sm statsManifest) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
+	sm.stats["get"]++
+	return sm.manifests.Get(dgst)
+}
+
+func (sm statsManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) {
+	sm.stats["getbytag"]++
+	return sm.manifests.GetByTag(tag, options...)
+}
+
+func (sm statsManifest) Enumerate() ([]digest.Digest, error) {
+	sm.stats["enumerate"]++
+	return sm.manifests.Enumerate()
+}
+
+func (sm statsManifest) Put(manifest *manifest.SignedManifest) error {
+	sm.stats["put"]++
+	return sm.manifests.Put(manifest)
+}
+
+func (sm statsManifest) Tags() ([]string, error) {
+	sm.stats["tags"]++
+	return sm.manifests.Tags()
+}
+
+func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv {
+	ctx := context.Background()
+	truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false, false)
+	truthRepo, err := truthRegistry.Repository(ctx, name)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	tr, err := truthRepo.Manifests(ctx)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	truthManifests := statsManifest{
+		manifests: tr,
+		stats:     make(map[string]int),
+	}
+
+	manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag)
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+
+	localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true, false)
+	localRepo, err := localRegistry.Repository(ctx, name)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	lr, err := localRepo.Manifests(ctx)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	localManifests := statsManifest{
+		manifests: lr,
+		stats:     make(map[string]int),
+	}
+
+	s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")
+	return &manifestStoreTestEnv{
+		manifestDigest: manifestDigest,
+		manifests: proxyManifestStore{
+			ctx:             ctx,
+			localManifests:  localManifests,
+			remoteManifests: truthManifests,
+			scheduler:       s,
+		},
+	}
+}
+
+func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) {
+	m := manifest.Manifest{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 1,
+		},
+		Name: name,
+		Tag:  tag,
+	}
+
+	for i := 0; i < 2; i++ {
+		wr, err := repository.Blobs(ctx).Create(ctx)
+		if err != nil {
+			t.Fatalf("unexpected error creating test upload: %v", err)
+		}
+
+		rs, ts, err := testutil.CreateRandomTarFile()
+		if err != nil {
+			t.Fatalf("unexpected error generating test layer file")
+		}
+		dgst := digest.Digest(ts)
+		if _, err := io.Copy(wr, rs); err != nil {
+			t.Fatalf("unexpected error copying to upload: %v", err)
+		}
+
+		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
+			t.Fatalf("unexpected error finishing upload: %v", err)
+		}
+	}
+
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	sm, err := manifest.Sign(&m, pk)
+	if err != nil {
+		t.Fatalf("error signing manifest: %v", err)
+	}
+
+	ms, err := repository.Manifests(ctx)
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+	if err := ms.Put(sm); err != nil {
+		t.Fatalf("unexpected error putting manifest: %v", err)
+	}
+	pl, err := sm.Payload()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return digest.FromBytes(pl)
+}
+
+// TestProxyManifests contains basic acceptance tests
+// for the pull-through behavior
+func TestProxyManifests(t *testing.T) {
+	name := "foo/bar"
+	env := newManifestStoreTestEnv(t, name, "latest")
+
+	localStats := env.LocalStats()
+	remoteStats := env.RemoteStats()
+
+	// Stat - must check local and remote
+	exists, err := env.manifests.ExistsByTag("latest")
+	if err != nil {
+		t.Fatalf("Error checking existence")
+	}
+
+	if !exists {
+		t.Errorf("Unexpected non-existent manifest")
+	}
+
+	if (*localStats)["existbytag"] != 1 && (*remoteStats)["existbytag"] != 1 {
+		t.Errorf("Unexpected exists count")
+	}
+
+	// Get - should succeed and pull manifest into local
+	_, err = env.manifests.Get(env.manifestDigest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 {
+		t.Errorf("Unexpected get count")
+	}
+
+	if (*localStats)["put"] != 1 {
+		t.Errorf("Expected local put")
+	}
+
+	// Stat - should only go to local
+	exists, err = env.manifests.ExistsByTag("latest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("Unexpected non-existent manifest")
+	}
+
+	if (*localStats)["existbytag"] != 2 && (*remoteStats)["existbytag"] != 1 {
+		t.Errorf("Unexpected exists count")
+	}
+
+	// Get - should get from remote, to test freshness
+	_, err = env.manifests.Get(env.manifestDigest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if (*remoteStats)["get"] != 2 && (*remoteStats)["existbytag"] != 1 && (*localStats)["put"] != 1 {
+		t.Errorf("Unexpected get count")
+	}
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go
new file mode 100644
index 000000000000..d3d84d786365
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymetrics.go
@@ -0,0 +1,74 @@
+package proxy
+
+import (
+	"expvar"
+	"sync/atomic"
+)
+
+// Metrics is used to hold metric counters
+// related to the proxy
+type Metrics struct {
+	Requests    uint64
+	Hits        uint64
+	Misses      uint64
+	BytesPulled uint64
+	BytesPushed uint64
+}
+
+type proxyMetricsCollector struct {
+	blobMetrics     Metrics
+	manifestMetrics Metrics
+}
+
+// BlobPull tracks metrics about blobs pulled into the cache
+func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) {
+	atomic.AddUint64(&pmc.blobMetrics.Misses, 1)
+	atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled)
+}
+
+// BlobPush tracks metrics about blobs pushed to clients
+func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) {
+	atomic.AddUint64(&pmc.blobMetrics.Requests, 1)
+	atomic.AddUint64(&pmc.blobMetrics.Hits, 1)
+	atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed)
+}
+
+// ManifestPull tracks metrics related to Manifests pulled into the cache
+func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) {
+	atomic.AddUint64(&pmc.manifestMetrics.Misses, 1)
+	atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled)
+}
+
+// ManifestPush tracks metrics about manifests pushed to clients
+func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) {
+	atomic.AddUint64(&pmc.manifestMetrics.Requests, 1)
+	atomic.AddUint64(&pmc.manifestMetrics.Hits, 1)
+	atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed)
+}
+
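The collector is published through expvar, so a running process exposes it on the standard /debug/vars endpoint wherever the expvar handler is mounted. A sketch of reading it back out of process; the address is hypothetical, and the field layout follows from the init() below:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Metrics matches the exported shape published by the collector above.
type Metrics struct {
	Requests, Hits, Misses, BytesPulled, BytesPushed uint64
}

func main() {
	resp, err := http.Get("http://localhost:5001/debug/vars") // hypothetical debug addr
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var vars struct {
		Registry struct {
			Proxy struct {
				Blobs     Metrics `json:"blobs"`
				Manifests Metrics `json:"manifests"`
			} `json:"proxy"`
		} `json:"registry"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		panic(err)
	}
	fmt.Printf("blob hits=%d misses=%d\n",
		vars.Registry.Proxy.Blobs.Hits, vars.Registry.Proxy.Blobs.Misses)
}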
+// proxyMetrics tracks metrics about the proxy cache. This is
+// kept globally and made available via expvar.
+var proxyMetrics = &proxyMetricsCollector{}
+
+func init() {
+	registry := expvar.Get("registry")
+	if registry == nil {
+		registry = expvar.NewMap("registry")
+	}
+
+	pm := registry.(*expvar.Map).Get("proxy")
+	if pm == nil {
+		pm = &expvar.Map{}
+		pm.(*expvar.Map).Init()
+		registry.(*expvar.Map).Set("proxy", pm)
+	}
+
+	pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} {
+		return proxyMetrics.blobMetrics
+	}))
+
+	pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} {
+		return proxyMetrics.manifestMetrics
+	}))
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go
new file mode 100644
index 000000000000..44849a2b7557
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go
@@ -0,0 +1,144 @@
+package proxy
+
+import (
+	"net/http"
+	"net/url"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/configuration"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/distribution/registry/proxy/scheduler"
+	"github.com/docker/distribution/registry/storage"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// proxyingRegistry fetches content from a remote registry and caches it locally
+type proxyingRegistry struct {
+	embedded distribution.Namespace // provides local registry functionality
+
+	scheduler *scheduler.TTLExpirationScheduler
+
+	remoteURL        string
+	credentialStore  auth.CredentialStore
+	challengeManager auth.ChallengeManager
+}
+
+// NewRegistryPullThroughCache creates a registry acting as a pull through cache
+func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) {
+	_, err := url.Parse(config.RemoteURL)
+	if err != nil {
+		return nil, err
+	}
+
+	v := storage.NewVacuum(ctx, driver)
+
+	s := scheduler.New(ctx, driver, "/scheduler-state.json")
+	s.OnBlobExpire(func(digest string) error {
+		return v.RemoveBlob(digest)
+	})
+	s.OnManifestExpire(func(repoName string) error {
+		return v.RemoveRepository(repoName)
+	})
+	err = s.Start()
+	if err != nil {
+		return nil, err
+	}
+
+	challengeManager := auth.NewSimpleChallengeManager()
+	cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager)
+	if err != nil {
+		return nil, err
+	}
+
+	return &proxyingRegistry{
+		embedded:         registry,
+		scheduler:        s,
+		challengeManager: challengeManager,
+		credentialStore:  cs,
+		remoteURL:        config.RemoteURL,
+	}, nil
+}
+
+func (pr *proxyingRegistry) Scope() distribution.Scope {
+	return distribution.GlobalScope
+}
+
+func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
+	return pr.embedded.Repositories(ctx, repos, last)
+}
+
+func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distribution.Repository, error) {
+	tr := transport.NewTransport(http.DefaultTransport,
+		auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name, "pull")))
+
+	localRepo, err := pr.embedded.Repository(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+	localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification)
+	if err != nil {
+		return nil, err
+	}
+
+	remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr)
+	if err != nil {
+		return nil, err
+	}
+
+	remoteManifests, err := remoteRepo.Manifests(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return &proxiedRepository{
+		blobStore: proxyBlobStore{
+			localStore:  localRepo.Blobs(ctx),
+			remoteStore: remoteRepo.Blobs(ctx),
+			scheduler:   pr.scheduler,
+		},
+		manifests: proxyManifestStore{
+			repositoryName:  name,
+			localManifests:  localManifests, // Options?
+			remoteManifests: remoteManifests,
+			ctx:             ctx,
+			scheduler:       pr.scheduler,
+		},
+		name:       name,
+		signatures: localRepo.Signatures(),
+	}, nil
+}
+
+func (pr *proxyingRegistry) Blobs() distribution.BlobService {
+	// TODO
+	return nil
+}
+
+// proxiedRepository uses proxying blob and manifest services to serve content
+// locally, pulling it through from the remote and caching it locally if it
+// doesn't already exist
+type proxiedRepository struct {
+	blobStore  distribution.BlobStore
+	manifests  distribution.ManifestService
+	name       string
+	signatures distribution.SignatureService
+}
+
+func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	// options
+	return pr.manifests, nil
+}
+
+func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore {
+	return pr.blobStore
+}
+
+func (pr *proxiedRepository) Name() string {
+	return pr.name
+}
+
+func (pr *proxiedRepository) Signatures() distribution.SignatureService {
+	return pr.signatures
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go
new file mode 100644
index 000000000000..056b148ad6af
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go
@@ -0,0 +1,250 @@
+package scheduler
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// onTTLExpiryFunc is called when a repository's TTL expires
+type expiryFunc func(string) error
+
+const (
+	entryTypeBlob = iota
+	entryTypeManifest
+)
+
+// schedulerEntry represents an entry in the scheduler
+// fields are exported for serialization
+type schedulerEntry struct {
+	Key       string    `json:"Key"`
+	Expiry    time.Time `json:"ExpiryData"`
+	EntryType int       `json:"EntryType"`
+}
+
+// New returns a new instance of the scheduler
+func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler {
+	return &TTLExpirationScheduler{
+		entries:         make(map[string]schedulerEntry),
+		addChan:         make(chan schedulerEntry),
+		stopChan:        make(chan bool),
+		driver:          driver,
+		pathToStateFile: path,
+		ctx:             ctx,
+		stopped:         true,
+	}
+}
+
+// TTLExpirationScheduler is a scheduler used to perform actions
+// when TTLs expire
+type TTLExpirationScheduler struct {
+	entries  map[string]schedulerEntry
+	addChan  chan schedulerEntry
+	stopChan chan bool
+
+	driver          driver.StorageDriver
+	ctx             context.Context
+	pathToStateFile string
+
+	stopped bool
+
+	onBlobExpire     expiryFunc
+	onManifestExpire expiryFunc
+}
+
+// addChan allows more TTLs to be pushed to the scheduler
+type addChan chan schedulerEntry
+
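A minimal usage sketch for this scheduler, wired the same way NewRegistryPullThroughCache does above; the in-memory driver, state path, and digest are illustrative:

package main

import (
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/proxy/scheduler"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")

	// Register the expiry callback before starting; Start restores any
	// persisted entries from the state file.
	s.OnBlobExpire(func(dgst string) error {
		// a vacuum/delete of the local blob would go here
		return nil
	})
	if err := s.Start(); err != nil {
		panic(err)
	}

	// Schedule a (hypothetical) blob digest for cleanup in 24 hours.
	if err := s.AddBlob("sha256:0123abc", 24*time.Hour); err != nil {
		panic(err)
	}
}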
+// stopChan allows the scheduler to be stopped - used for testing.
+type stopChan chan bool
+
+// OnBlobExpire registers the function called when a scheduled blob's TTL expires
+func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) {
+	ttles.onBlobExpire = f
+}
+
+// OnManifestExpire registers the function called when a scheduled manifest's TTL expires
+func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) {
+	ttles.onManifestExpire = f
+}
+
+// AddBlob schedules a blob cleanup after ttl expires
+func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error {
+	if ttles.stopped {
+		return fmt.Errorf("scheduler not started")
+	}
+	ttles.add(dgst, ttl, entryTypeBlob)
+	return nil
+}
+
+// AddManifest schedules a manifest cleanup after ttl expires
+func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error {
+	if ttles.stopped {
+		return fmt.Errorf("scheduler not started")
+	}
+
+	ttles.add(repoName, ttl, entryTypeManifest)
+	return nil
+}
+
+// Start starts the scheduler
+func (ttles *TTLExpirationScheduler) Start() error {
+	return ttles.start()
+}
+
+func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) {
+	entry := schedulerEntry{
+		Key:       key,
+		Expiry:    time.Now().Add(ttl),
+		EntryType: eType,
+	}
+	ttles.addChan <- entry
+}
+
+func (ttles *TTLExpirationScheduler) stop() {
+	ttles.stopChan <- true
+}
+
+func (ttles *TTLExpirationScheduler) start() error {
+	err := ttles.readState()
+	if err != nil {
+		return err
+	}
+
+	if !ttles.stopped {
+		return fmt.Errorf("Scheduler already started")
+	}
+
+	context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
+	ttles.stopped = false
+	go ttles.mainloop()
+
+	return nil
+}
+
+// mainloop uses a select statement to listen for events. Most of its time
+// is spent in waiting on a TTL to expire but can be interrupted when TTLs
+// are added.
+func (ttles *TTLExpirationScheduler) mainloop() {
+	for {
+		if ttles.stopped {
+			return
+		}
+
+		nextEntry, ttl := nextExpiringEntry(ttles.entries)
+		if len(ttles.entries) == 0 {
+			context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Nothing to do, sleeping...")
+		} else {
+			context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Sleeping for %s until cleanup of %s", ttl, nextEntry.Key)
+		}
+
+		select {
+		case <-time.After(ttl):
+			var f expiryFunc
+
+			switch nextEntry.EntryType {
+			case entryTypeBlob:
+				f = ttles.onBlobExpire
+			case entryTypeManifest:
+				f = ttles.onManifestExpire
+			default:
+				f = func(repoName string) error {
+					return fmt.Errorf("Unexpected scheduler entry type")
+				}
+			}
+
+			if err := f(nextEntry.Key); err != nil {
+				context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", nextEntry.Key, err)
+			}
+
+			delete(ttles.entries, nextEntry.Key)
+			if err := ttles.writeState(); err != nil {
+				context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
+			}
+		case entry := <-ttles.addChan:
+			context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now()))
+			ttles.entries[entry.Key] = entry
+			if err := ttles.writeState(); err != nil {
+				context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
+			}
+			break
+
+		case <-ttles.stopChan:
+			if err := ttles.writeState(); err != nil {
+				context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
+			}
+			ttles.stopped = true
+		}
+	}
+}
+
+func nextExpiringEntry(entries map[string]schedulerEntry) (*schedulerEntry, time.Duration) {
+	if len(entries) == 0 {
+		return nil, 24 * time.Hour
+	}
+
+	// todo:(richardscothern) this is a primitive o(n) algorithm
+	// but n will never be *that* big and it's all in memory. Investigate
+func nextExpiringEntry(entries map[string]schedulerEntry) (*schedulerEntry, time.Duration) {
+	if len(entries) == 0 {
+		return nil, 24 * time.Hour
+	}
+
+	// TODO(richardscothern): this is a primitive O(n) algorithm, but n will
+	// never be *that* big and it's all in memory. Investigate
+	// time.AfterFunc for heap-based expiries.
+
+	first := true
+	var nextEntry schedulerEntry
+	for _, entry := range entries {
+		if first {
+			nextEntry = entry
+			first = false
+			continue
+		}
+		if entry.Expiry.Before(nextEntry.Expiry) {
+			nextEntry = entry
+		}
+	}
+
+	// Dates may be in the past if the scheduler has been restarted;
+	// set their TTL to 0 so they fire immediately.
+	if nextEntry.Expiry.Before(time.Now()) {
+		nextEntry.Expiry = time.Now()
+		return &nextEntry, 0
+	}
+
+	return &nextEntry, nextEntry.Expiry.Sub(time.Now())
+}
+
+func (ttles *TTLExpirationScheduler) writeState() error {
+	jsonBytes, err := json.Marshal(ttles.entries)
+	if err != nil {
+		return err
+	}
+
+	return ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes)
+}
+
+func (ttles *TTLExpirationScheduler) readState() error {
+	if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil {
+		switch err := err.(type) {
+		case driver.PathNotFoundError:
+			return nil
+		default:
+			return err
+		}
+	}
+
+	bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(bytes, &ttles.entries)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
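The TODO in `nextExpiringEntry` points at `time.AfterFunc` or heap-based expiries as the eventual replacement for the linear scan. A hedged sketch of the heap variant, not part of this patch, might look like the following: a min-heap keyed on expiry turns "find next expiring entry" into an O(log n) pop.

```go
// Illustrative only; none of these names exist in the patch.
package main

import (
	"container/heap"
	"time"
)

type entry struct {
	key    string
	expiry time.Time
}

type expiryHeap []entry

func (h expiryHeap) Len() int            { return len(h) }
func (h expiryHeap) Less(i, j int) bool  { return h[i].expiry.Before(h[j].expiry) }
func (h expiryHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *expiryHeap) Push(x interface{}) { *h = append(*h, x.(entry)) }
func (h *expiryHeap) Pop() interface{} {
	old := *h
	e := old[len(old)-1]
	*h = old[:len(old)-1]
	return e
}

func main() {
	h := &expiryHeap{}
	heap.Init(h)
	heap.Push(h, entry{"blob-a", time.Now().Add(3 * time.Hour)})
	heap.Push(h, entry{"blob-b", time.Now().Add(1 * time.Hour)})

	// The soonest expiry ("blob-b") comes off the heap first.
	next := heap.Pop(h).(entry)
	_ = next
}
```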
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go
new file mode 100644
index 000000000000..fb5479f011da
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go
@@ -0,0 +1,165 @@
+package scheduler
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+func TestSchedule(t *testing.T) {
+	timeUnit := time.Millisecond
+	remainingRepos := map[string]bool{
+		"testBlob1": true,
+		"testBlob2": true,
+		"ch00":      true,
+	}
+
+	s := New(context.Background(), inmemory.New(), "/ttl")
+	deleteFunc := func(repoName string) error {
+		if len(remainingRepos) == 0 {
+			t.Fatalf("Incorrect expiry count")
+		}
+		_, ok := remainingRepos[repoName]
+		if !ok {
+			t.Fatalf("Trying to remove nonexistent repo: %s", repoName)
+		}
+		fmt.Println("removing", repoName)
+		delete(remainingRepos, repoName)
+
+		return nil
+	}
+	s.onBlobExpire = deleteFunc
+	err := s.start()
+	if err != nil {
+		t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
+	}
+
+	s.add("testBlob1", 3*timeUnit, entryTypeBlob)
+	s.add("testBlob2", 1*timeUnit, entryTypeBlob)
+	s.add("ch00", 1*timeUnit, entryTypeBlob)
+
+	// Ensure all repos are deleted
+	<-time.After(50 * timeUnit)
+	if len(remainingRepos) != 0 {
+		t.Fatalf("Repositories remaining: %#v", remainingRepos)
+	}
+}
+
+func TestRestoreOld(t *testing.T) {
+	remainingRepos := map[string]bool{
+		"testBlob1": true,
+		"oldRepo":   true,
+	}
+
+	deleteFunc := func(repoName string) error {
+		if repoName == "testBlob1" && len(remainingRepos) == 2 {
+			t.Errorf("oldRepo should be removed first")
+		}
+		_, ok := remainingRepos[repoName]
+		if !ok {
+			t.Fatalf("Trying to remove nonexistent repo: %s", repoName)
+		}
+		delete(remainingRepos, repoName)
+		return nil
+	}
+
+	timeUnit := time.Millisecond
+	serialized, err := json.Marshal(&map[string]schedulerEntry{
+		"testBlob1": {
+			Expiry:    time.Now().Add(1 * timeUnit),
+			Key:       "testBlob1",
+			EntryType: 0,
+		},
+		"oldRepo": {
+			Expiry:    time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first
+			Key:       "oldRepo",
+			EntryType: 0,
+		},
+	})
+	if err != nil {
+		t.Fatalf("Error serializing test data: %s", err.Error())
+	}
+
+	ctx := context.Background()
+	pathToStateFile := "/ttl"
+	fs := inmemory.New()
+	err = fs.PutContent(ctx, pathToStateFile, serialized)
+	if err != nil {
+		t.Fatal("Unable to write serialized data to fs")
+	}
+	s := New(context.Background(), fs, "/ttl")
+	s.onBlobExpire = deleteFunc
+	err = s.start()
+	if err != nil {
+		t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
+	}
+
+	<-time.After(50 * timeUnit)
+	if len(remainingRepos) != 0 {
+		t.Fatalf("Repositories remaining: %#v", remainingRepos)
+	}
+}
+
+func TestStopRestore(t *testing.T) {
+	timeUnit := time.Millisecond
+	remainingRepos := map[string]bool{
+		"testBlob1": true,
+		"testBlob2": true,
+	}
+	deleteFunc := func(repoName string) error {
+		delete(remainingRepos, repoName)
+		return nil
+	}
+
+	fs := inmemory.New()
+	pathToStateFile := "/ttl"
+	s := New(context.Background(), fs, pathToStateFile)
+	s.onBlobExpire = deleteFunc
+
+	err := s.start()
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+	s.add("testBlob1", 300*timeUnit, entryTypeBlob)
+	s.add("testBlob2", 100*timeUnit, entryTypeBlob)
+
+	// Stop before all operations complete; state will be written to fs.
+	s.stop()
+	time.Sleep(10 * time.Millisecond)
+
+	// s2 will restore state from fs
+	s2 := New(context.Background(), fs, pathToStateFile)
+	s2.onBlobExpire = deleteFunc
+	err = s2.start()
+	if err != nil {
+		t.Fatalf("Error starting s2: %s", err.Error())
+	}
+
+	<-time.After(500 * timeUnit)
+	if len(remainingRepos) != 0 {
+		t.Fatalf("Repositories remaining: %#v", remainingRepos)
+	}
+}
+
+func TestDoubleStart(t *testing.T) {
+	s := New(context.Background(), inmemory.New(), "/ttl")
+	err := s.start()
+	if err != nil {
+		t.Fatalf("Unable to start scheduler")
+	}
+	err = s.start()
+	if err == nil {
+		t.Fatalf("Scheduler started twice without error")
+	}
+}
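`writeState` and `readState` above persist the entry map as a JSON object keyed by digest or repository name, which is exactly the shape `TestRestoreOld` seeds by hand. A small round-trip sketch of that on-disk format, with the `schedulerEntry` field names taken from the test fixtures (the JSON tags are an assumption):

```go
// Illustrative round-trip of the scheduler's persisted state; the struct
// here is a stand-in for the package's schedulerEntry.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type schedulerEntry struct {
	Key       string
	Expiry    time.Time
	EntryType int
}

func main() {
	state := map[string]schedulerEntry{
		"testBlob1": {Key: "testBlob1", Expiry: time.Now().Add(time.Hour), EntryType: 0},
	}

	// writeState marshals the whole entries map and stores it via PutContent...
	b, err := json.Marshal(state)
	if err != nil {
		panic(err)
	}

	// ...and readState unmarshals it back on start, so pending expiries
	// survive a restart (past-due entries fire immediately, per
	// nextExpiringEntry).
	var restored map[string]schedulerEntry
	if err := json.Unmarshal(b, &restored); err != nil {
		panic(err)
	}
	fmt.Println(restored["testBlob1"].Key)
}
```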
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go
new file mode 100644
index 000000000000..f067993ca94e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go
@@ -0,0 +1,876 @@
+package storage
+
+import (
+	"bytes"
+	"crypto/rand"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"io/ioutil"
+	mrand "math/rand"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+	"github.com/docker/distribution/testutil"
+)
+
+// TestSimpleBlobUpload covers the blob upload process, exercising common
+// error paths that might be seen during an upload.
+func TestSimpleBlobUpload(t *testing.T) {
+	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
+	if err != nil {
+		t.Fatalf("error creating random reader: %v", err)
+	}
+
+	dgst := digest.Digest(tarSumStr)
+
+	ctx := context.Background()
+	imageName := "foo/bar"
+	driver := inmemory.New()
+	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, false)
+	repository, err := registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs := repository.Blobs(ctx)
+
+	h := sha256.New()
+	rd := io.TeeReader(randomDataReader, h)
+
+	blobUpload, err := bs.Create(ctx)
+	if err != nil {
+		t.Fatalf("unexpected error starting layer upload: %s", err)
+	}
+
+	// Cancel the upload then restart it
+	if err := blobUpload.Cancel(ctx); err != nil {
+		t.Fatalf("unexpected error during upload cancellation: %v", err)
+	}
+
+	// Do a resume, get unknown upload
+	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
+	if err != distribution.ErrBlobUploadUnknown {
+		t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
+	}
+
+	// Restart!
+	blobUpload, err = bs.Create(ctx)
+	if err != nil {
+		t.Fatalf("unexpected error starting layer upload: %s", err)
+	}
+
+	// Get the size of our random tarfile
+	randomDataSize, err := seekerSize(randomDataReader)
+	if err != nil {
+		t.Fatalf("error getting seeker size of random data: %v", err)
+	}
+
+	nn, err := io.Copy(blobUpload, rd)
+	if err != nil {
+		t.Fatalf("unexpected error uploading layer data: %v", err)
+	}
+
+	if nn != randomDataSize {
+		t.Fatalf("layer data write incomplete")
+	}
+
+	offset, err := blobUpload.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		t.Fatalf("unexpected error seeking layer upload: %v", err)
+	}
+
+	if offset != nn {
+		t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn)
+	}
+	blobUpload.Close()
+
+	// Do a resume, for good fun
+	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
+	if err != nil {
+		t.Fatalf("unexpected error resuming upload: %v", err)
+	}
+
+	sha256Digest := digest.NewDigest("sha256", h)
+	desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
+	if err != nil {
+		t.Fatalf("unexpected error finishing layer upload: %v", err)
+	}
+
+	// After finishing an upload, it should no longer exist.
+	if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown {
+		t.Fatalf("expected layer upload to be unknown, got %v", err)
+	}
+
+	// Test for existence.
+	statDesc, err := bs.Stat(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
+	}
+
+	if statDesc != desc {
+		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
+	}
+
+	rc, err := bs.Open(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error opening blob for read: %v", err)
+	}
+	defer rc.Close()
+
+	h.Reset()
+	nn, err = io.Copy(h, rc)
+	if err != nil {
+		t.Fatalf("error reading layer: %v", err)
+	}
+
+	if nn != randomDataSize {
+		t.Fatalf("incorrect read length")
+	}
+
+	if digest.NewDigest("sha256", h) != sha256Digest {
+		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
+	}
+
+	checkBlobParentPath(t, ctx, driver, "", desc.Digest, true)
+	checkBlobParentPath(t, ctx, driver, imageName, desc.Digest, true)
+
+	// Delete a blob
+	err = bs.Delete(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("Unexpected error deleting blob: %v", err)
+	}
+
+	d, err := bs.Stat(ctx, desc.Digest)
+	if err == nil {
+		t.Fatalf("unexpected non-error stating deleted blob: %v", d)
+	}
+
+	checkBlobParentPath(t, ctx, driver, "", desc.Digest, true)
+	checkBlobParentPath(t, ctx, driver, imageName, desc.Digest, true)
+
+	switch err {
+	case distribution.ErrBlobUnknown:
+		break
+	default:
+		t.Errorf("Unexpected error type stat-ing deleted blob: %#v", err)
+	}
+
+	_, err = bs.Open(ctx, desc.Digest)
+	if err == nil {
+		t.Fatalf("unexpected success opening deleted blob for read")
+	}
+
+	switch err {
+	case distribution.ErrBlobUnknown:
+		break
+	default:
+		t.Errorf("Unexpected error type opening deleted blob: %#v", err)
+	}
+
+	// Re-upload the blob; seek the reader back to the start so we read the
+	// full payload rather than the empty remainder left by the first upload.
+	if _, err := randomDataReader.Seek(0, os.SEEK_SET); err != nil {
+		t.Fatalf("Error seeking blob reader: %s", err)
+	}
+	randomBlob, err := ioutil.ReadAll(randomDataReader)
+	if err != nil {
+		t.Fatalf("Error reading all of blob %s", err.Error())
+	}
+	expectedDigest, err := digest.FromBytes(randomBlob)
+	if err != nil {
+		t.Fatalf("Error getting digest from bytes: %s", err)
+	}
+	simpleUpload(t, bs, randomBlob, expectedDigest)
+
+	d, err = bs.Stat(ctx, expectedDigest)
+	if err != nil {
+		t.Errorf("unexpected error stat-ing blob")
+	}
+	if d.Digest != expectedDigest {
+		t.Errorf("Mismatching digest with restored blob")
+	}
+
+	_, err = bs.Open(ctx, expectedDigest)
+	if err != nil {
+		t.Errorf("Unexpected error opening blob")
+	}
+
+	// Reuse state to test delete with a delete-disabled registry
+	registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false, false)
+	repository, err = registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs = repository.Blobs(ctx)
+	err = bs.Delete(ctx, desc.Digest)
+	if err == nil {
+		t.Errorf("Unexpected success deleting while disabled")
+	}
+}
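Distilled from the test above, the happy-path sequence against the blob store API is roughly the following. Error handling is trimmed, the `NewRegistryWithDriver` flags mirror the test's call, and the `upload` helper is illustrative, not part of the patch.

```go
// Happy-path sketch of Create -> Write -> Commit, as exercised above.
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func upload(payload []byte) (distribution.Descriptor, error) {
	ctx := context.Background()
	reg := storage.NewRegistryWithDriver(ctx, inmemory.New(),
		memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, false)

	repo, err := reg.Repository(ctx, "foo/bar")
	if err != nil {
		return distribution.Descriptor{}, err
	}
	bs := repo.Blobs(ctx)

	// Create an upload, stream the payload, then commit against the
	// expected digest; Commit verifies the content before linking it.
	wr, err := bs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if _, err := wr.Write(payload); err != nil {
		return distribution.Descriptor{}, err
	}

	dgst, err := digest.FromBytes(payload)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	return wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
}
```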
+// TestSimpleBlobRead just creates a simple blob file and ensures that basic
+// open, read, seek, read works. More specific edge cases should be covered in
+// other tests.
+func TestSimpleBlobRead(t *testing.T) {
+	ctx := context.Background()
+	imageName := "foo/bar"
+	driver := inmemory.New()
+	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, false)
+	repository, err := registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs := repository.Blobs(ctx)
+
+	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
+	if err != nil {
+		t.Fatalf("error creating random data: %v", err)
+	}
+
+	dgst := digest.Digest(tarSumStr)
+
+	// Test for existence.
+	desc, err := bs.Stat(ctx, dgst)
+	if err != distribution.ErrBlobUnknown {
+		t.Fatalf("expected not found error when testing for existence: %v", err)
+	}
+
+	rc, err := bs.Open(ctx, dgst)
+	if err != distribution.ErrBlobUnknown {
+		t.Fatalf("expected not found error when opening non-existent blob: %v", err)
+	}
+
+	randomLayerSize, err := seekerSize(randomLayerReader)
+	if err != nil {
+		t.Fatalf("error getting seeker size for random layer: %v", err)
+	}
+
+	descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize}
+	t.Logf("desc: %v", descBefore)
+
+	desc, err = addBlob(ctx, bs, descBefore, randomLayerReader)
+	if err != nil {
+		t.Fatalf("error adding blob to blobservice: %v", err)
+	}
+
+	if desc.Size != randomLayerSize {
+		t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
+	}
+
+	rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
+	if err != nil {
+		t.Fatalf("error opening blob with %v: %v", dgst, err)
+	}
+	defer rc.Close()
+
+	// Now check the sha digest and ensure it's the same
+	h := sha256.New()
+	nn, err := io.Copy(h, rc)
+	if err != nil {
+		t.Fatalf("unexpected error copying to hash: %v", err)
+	}
+
+	if nn != randomLayerSize {
+		t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize)
+	}
+
+	sha256Digest := digest.NewDigest("sha256", h)
+	if sha256Digest != desc.Digest {
+		t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest)
+	}
+
+	// Now seek back the blob, read the whole thing and check against randomLayerData
+	offset, err := rc.Seek(0, os.SEEK_SET)
+	if err != nil {
+		t.Fatalf("error seeking blob: %v", err)
+	}
+
+	if offset != 0 {
+		t.Fatalf("seek failed: expected 0 offset, got %d", offset)
+	}
+
+	p, err := ioutil.ReadAll(rc)
+	if err != nil {
+		t.Fatalf("error reading all of blob: %v", err)
+	}
+
+	if len(p) != int(randomLayerSize) {
+		t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize)
+	}
+
+	// Reset the randomLayerReader and read back the buffer
+	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
+	if err != nil {
+		t.Fatalf("error resetting layer reader: %v", err)
+	}
+
+	randomLayerData, err := ioutil.ReadAll(randomLayerReader)
+	if err != nil {
+		t.Fatalf("random layer read failed: %v", err)
+	}
+
+	if !bytes.Equal(p, randomLayerData) {
+		t.Fatalf("layer data not equal")
+	}
+}
+
+// TestLayerUploadZeroLength uploads a zero-length blob and commits it.
+func TestLayerUploadZeroLength(t *testing.T) {
+	ctx := context.Background()
+	imageName := "foo/bar"
+	driver := inmemory.New()
+	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, false)
+	repository, err := registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs := repository.Blobs(ctx)
+
+	simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar)
+}
+
+// TestRemoveBlobParentsOnDelete verifies that the blob store deletes a
+// directory together with the blob's data or link when the
+// RemoveParentsOnDelete option is applied.
+func TestRemoveBlobParentsOnDelete(t *testing.T) {
+	ctx := context.Background()
+	imageName := "foo/bar"
+	driver := inmemory.New()
+	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, true)
+	repository, err := registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs := repository.Blobs(ctx)
+
+	checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, false)
+	checkBlobParentPath(t, ctx, driver, imageName, digest.DigestSha256EmptyTar, false)
+
+	simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar)
+
+	checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, true)
+	checkBlobParentPath(t, ctx, driver, imageName, digest.DigestSha256EmptyTar, true)
+
+	// Delete a layer link
+	err = bs.Delete(ctx, digest.DigestSha256EmptyTar)
+	if err != nil {
+		t.Fatalf("Unexpected error deleting blob: %v", err)
+	}
+
+	checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, true)
+	checkBlobParentPath(t, ctx, driver, imageName, digest.DigestSha256EmptyTar, false)
+
+	bd, err := RegistryBlobDeleter(registry)
+	if err != nil {
+		t.Fatalf("failed to obtain blob deleter: %v", err)
+	}
+	if err := bd.Delete(ctx, digest.DigestSha256EmptyTar); err != nil {
+		t.Fatalf("unexpected error deleting blob data: %v", err)
+	}
+
+	checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, false)
+	checkBlobParentPath(t, ctx, driver, imageName, digest.DigestSha256EmptyTar, false)
+}
+
+// TestBlobEnumeration checks whether enumeration of repository and registry's
+// blobs returns proper results.
+func TestBlobEnumeration(t *testing.T) {
+	var err error
+	ctx := context.Background()
+	imageNames := []string{"foo/bar", "baz/gas"}
+	driver := inmemory.New()
+	reg := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, false)
+	// holds repository objects corresponding to imageNames
+	repositories := make([]distribution.Repository, len(imageNames))
+	// holds the blob store of each repository
+	blobStores := make([]distribution.BlobStore, len(imageNames))
+	for i, name := range imageNames {
+		repositories[i], err = reg.Repository(ctx, name)
+		if err != nil {
+			t.Fatalf("unexpected error getting repo: %v", err)
+		}
+		blobStores[i] = repositories[i].Blobs(ctx)
+	}
+	be, err := RegistryBlobEnumerator(reg)
+	if err != nil {
+		t.Fatalf("unexpected error getting blob enumerator: %v", err)
+	}
+
+	// doEnumeration calls the Enumerate method on all repositories and on the
+	// registry's blob store. Additional arguments represent expected digests
+	// for each repository defined.
+ doEnumeration := func(expectRegistryBlobs []digest.Digest, expectDigests ...[]digest.Digest) { + expBlobSets := make([]map[digest.Digest]struct{}, len(imageNames)) + // each number is a counter of tarsum digests for corresponding repository + tarsumDgstCounts := make([]int, len(imageNames)) + totalBlobSet := make(map[digest.Digest]struct{}) + tarsumTotalDgstCount := 0 + for i, dgsts := range expectDigests { + expBlobSets[i] = make(map[digest.Digest]struct{}) + for _, dgst := range dgsts { + expBlobSets[i][dgst] = struct{}{} + if strings.HasPrefix(dgst.String(), "tarsum") { + tarsumDgstCounts[i]++ + } + } + } + for _, d := range expectRegistryBlobs { + if strings.HasPrefix(d.String(), "tarsum") { + tarsumTotalDgstCount++ + } else { + totalBlobSet[d] = struct{}{} + } + } + + unexpected := []digest.Digest{} + blobTarsumDigests := make(map[digest.Digest]struct{}) + + buf := make([]digest.Digest, len(totalBlobSet)+tarsumTotalDgstCount+1) + + for i, bs := range blobStores { + n, err := bs.Enumerate(ctx, buf, "") + if err != io.EOF { + t.Fatalf("expected io.EOF when enumerating blobs of repository %s, not: %v", imageNames[i], err) + } + dgsts := buf[:n] + // linked blob store stores 2 links per tarsum blob + if len(dgsts) != len(expBlobSets[i])+tarsumDgstCounts[i] { + t.Errorf("got unexpected number of blobs in repository %s (%d != %d)", imageNames[i], len(dgsts), len(expBlobSets[i])+tarsumDgstCounts[i]) + } + for _, d := range dgsts { + if _, exists := expBlobSets[i][d]; !exists { + unexpected = append(unexpected, d) + blobTarsumDigests[d] = struct{}{} + } + delete(expBlobSets[i], d) + } + if len(unexpected) != tarsumDgstCounts[i] { + for _, d := range dgsts { + t.Errorf("received unexpected blob digest %s in repository %s", d, imageNames[i]) + } + } + for d := range expBlobSets[i] { + t.Errorf("expected digest %s not received for repository %s", d, imageNames[i]) + } + unexpected = unexpected[:0] + } + + n, err := be.Enumerate(ctx, buf, "") + if err != io.EOF { + t.Fatalf("expected io.EOF when enumerating registry blobs, not: %v", err) + } + dgsts := buf[:n] + if len(dgsts) != len(totalBlobSet)+tarsumTotalDgstCount { + t.Errorf("got unexpected number of blobs in registry (%d != %d)", len(dgsts), len(totalBlobSet)+tarsumTotalDgstCount) + } + for _, d := range dgsts { + if _, exists := totalBlobSet[d]; !exists { + unexpected = append(unexpected, d) + } + delete(totalBlobSet, d) + } + for _, d := range unexpected { + if _, exists := blobTarsumDigests[d]; !exists || len(unexpected) != tarsumTotalDgstCount { + t.Errorf("received unexpected blob digest %s", d) + } + } + for d := range totalBlobSet { + t.Errorf("expected digest %s not received", d) + } + } + + doEnumeration( + []digest.Digest{}, + []digest.Digest{}, + []digest.Digest{}, + ) + + t.Logf("uploading an empty tarball to repository %s", imageNames[0]) + simpleUpload(t, blobStores[0], []byte{}, digest.DigestSha256EmptyTar) + + doEnumeration( + []digest.Digest{digest.DigestSha256EmptyTar}, + []digest.Digest{digest.DigestSha256EmptyTar}, + []digest.Digest{}, + ) + + t.Logf("uploading a random tarball to repository %s", imageNames[1]) + tarballDgst := uploadRandomTarball(t, ctx, blobStores[1]) + + doEnumeration( + []digest.Digest{digest.DigestSha256EmptyTar, tarballDgst}, + []digest.Digest{digest.DigestSha256EmptyTar}, + []digest.Digest{tarballDgst}, + ) + + t.Logf("uploading a random layer to %s repository", imageNames[0]) + layerDgst := uploadRandomLayer(t, ctx, blobStores[0]) + + doEnumeration( + 
[]digest.Digest{digest.DigestSha256EmptyTar, layerDgst, tarballDgst}, + []digest.Digest{digest.DigestSha256EmptyTar, layerDgst}, + []digest.Digest{tarballDgst}, + ) + + // delete is performed without parent directory being deleted + t.Logf("deleting empty layer data from registry") + bd, err := RegistryBlobDeleter(reg) + if err != nil { + t.Fatalf("failed to obtain blob deleter: %v", err) + } + err = bd.Delete(ctx, digest.DigestSha256EmptyTar) + if err != nil { + t.Fatalf("unexpected error while deleting registry blob: %v", err) + } + checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, true) + checkBlobParentPath(t, ctx, driver, imageNames[0], digest.DigestSha256EmptyTar, true) + checkBlobParentPath(t, ctx, driver, imageNames[1], digest.DigestSha256EmptyTar, false) + + // check that deletion had no effect on digests enumerated + doEnumeration( + []digest.Digest{digest.DigestSha256EmptyTar, layerDgst, tarballDgst}, + []digest.Digest{digest.DigestSha256EmptyTar, layerDgst}, + []digest.Digest{tarballDgst}, + ) + + // set RemoveParentsOnDelete and delete the layer again + if r, ok := reg.(*registry); ok { + r.blobStore.removeParentsOnDelete = true + } else { + t.Fatalf("failed to cast registry") + } + + repo, err := reg.Repository(ctx, imageNames[0]) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repo.Blobs(ctx) + bd, err = RegistryBlobDeleter(reg) + if err != nil { + t.Fatalf("failed to obtain blob deleter: %v", err) + } + + t.Logf("deleting empty layer link directory from %s repository", imageNames[0]) + err = bs.Delete(ctx, digest.DigestSha256EmptyTar) + if err != nil { + t.Fatalf("unexpected error while deleting empty layer link: %v", err) + } + checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, true) + checkBlobParentPath(t, ctx, driver, imageNames[0], digest.DigestSha256EmptyTar, false) + checkBlobParentPath(t, ctx, driver, imageNames[1], digest.DigestSha256EmptyTar, false) + + // verify that blob data is still in registry's store + doEnumeration( + []digest.Digest{digest.DigestSha256EmptyTar, layerDgst, tarballDgst}, + []digest.Digest{layerDgst}, + []digest.Digest{tarballDgst}, + ) + + t.Logf("deleting empty layer directory from registry") + err = bd.Delete(ctx, digest.DigestSha256EmptyTar) + if err != nil { + t.Fatalf("unexpected error while deleting registry blob: %v", err) + } + + doEnumeration( + []digest.Digest{layerDgst, tarballDgst}, + []digest.Digest{layerDgst}, + []digest.Digest{tarballDgst}, + ) + + checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, false) +} + +// TestBlobBatchEnumeration checks whether enumeration of repository and registry's +// blobs returns proper results when enumerating store iteratively with small +// buffer. 
+func TestBlobBatchEnumeration(t *testing.T) {
+	const numDigests = 10
+	ctx := context.Background()
+	imageName := "foo/bar"
+	driver := inmemory.New()
+	reg := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, true)
+	repo, err := reg.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs := repo.Blobs(ctx)
+	be, err := RegistryBlobEnumerator(reg)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	digests := make([]digest.Digest, numDigests)
+	for i := 0; i < numDigests; i++ {
+		var dgst digest.Digest
+		if i%2 == 0 {
+			dgst = uploadRandomLayer(t, ctx, bs)
+			t.Logf("uploaded a new layer with digest %s", dgst.String())
+		} else {
+			dgst = uploadRandomTarball(t, ctx, bs)
+			t.Logf("uploaded a new tarsum layer with digest %s", dgst.String())
+		}
+		digests[i] = dgst
+	}
+	tarsumDigests := numDigests / 2
+	layerLinkDigests, err := EnumerateAllBlobs(bs, ctx)
+	if err != nil {
+		t.Fatalf("unexpected error enumerating layer links: %v", err)
+	}
+	// two links are written for one tarsum digest in the blob store
+	if len(layerLinkDigests) != numDigests+tarsumDigests {
+		t.Fatalf("unexpected number of digests (%d != %d)", len(layerLinkDigests), numDigests+tarsumDigests)
+	}
+	// registry doesn't contain tarsum digests
+	blobDigests := make([]digest.Digest, 0, len(digests))
+	for _, d := range layerLinkDigests {
+		if !strings.HasPrefix(d.String(), tarsumPrefix) {
+			blobDigests = append(blobDigests, d)
+		}
+	}
+
+	sort.Sort(byDigestString(blobDigests))
+	sort.Sort(byDigestString(layerLinkDigests))
+
+	doEnum := func(be distribution.BlobEnumerator, batchSize int, expectedDigests []digest.Digest) {
+		t.Logf("enumerating %T with a buffer of size %d", be, batchSize)
+		last := ""
+		buf := make([]digest.Digest, batchSize)
+		for i := 0; i < (len(digests)/batchSize)+1; i++ {
+			n, err := be.Enumerate(ctx, buf, last)
+			expectedN := batchSize
+			if i >= len(expectedDigests)/batchSize {
+				// last batch or past last batch
+				expectedN = len(expectedDigests) % batchSize
+				if err != io.EOF {
+					t.Fatalf("batch #%d: expected io.EOF while enumerating blobs of %T, not: %v", i+1, be, err)
+				}
+			} else {
+				if err != nil {
+					t.Fatalf("batch #%d: got unexpected error while enumerating blobs of %T: %v", i+1, be, err)
+				}
+			}
+			if n != expectedN {
+				t.Fatalf("batch #%d: got unexpected number of digests while enumerating %T (%d != %d)", i+1, be, n, expectedN)
+			}
+			for j := 0; j < expectedN; j++ {
+				if expectedDigests[i*batchSize+j] != buf[j] {
+					t.Errorf("batch #%d: got unexpected digest on index %d while enumerating %T: %s != %s", i+1, j, be, buf[j].String(), expectedDigests[i*batchSize+j].String())
+				}
+			}
+			if n > 0 {
+				last = buf[n-1].String()
+			}
+		}
+	}
+
+	// enumerate linked blob store
+	doEnum(bs, 4, layerLinkDigests)
+	doEnum(bs, 1, layerLinkDigests)
+	doEnum(bs, len(layerLinkDigests), layerLinkDigests)
+
+	// enumerate registry's blob store
+	doEnum(be, 4, blobDigests)
+	doEnum(be, 1, blobDigests)
+	doEnum(be, len(blobDigests), blobDigests)
+
+	// enumerate with empty buffer
+	for _, enumerator := range []distribution.BlobEnumerator{bs, be} {
+		buf := []digest.Digest{}
+		n, err := enumerator.Enumerate(ctx, buf, "")
+		if err != nil {
+			t.Errorf("unexpected error while enumerating %T with empty buffer: %v", enumerator, err)
+		}
+		if n != 0 {
+			t.Errorf("expected 0 items written to empty buffer by %T, not %d", enumerator, n)
+		}
+	}
+}
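The batching contract the test drives — a caller-supplied buffer, `last` as the resume marker, and `io.EOF` accompanying the final page — lends itself to a small consumer loop. A sketch, with the `enumerateAll` helper being illustrative rather than part of the patch:

```go
// Sketch of paging through a distribution.BlobEnumerator.
package example

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

func enumerateAll(ctx context.Context, be distribution.BlobEnumerator) ([]digest.Digest, error) {
	var all []digest.Digest
	buf := make([]digest.Digest, 16) // page size; any n > 0 works
	last := ""                       // empty string starts from the beginning
	for {
		n, err := be.Enumerate(ctx, buf, last)
		all = append(all, buf[:n]...)
		if err == io.EOF {
			// io.EOF marks the final (possibly partial) page.
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		if n > 0 {
			last = buf[n-1].String() // resume after the last digest seen
		}
	}
}
```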
+func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) {
+	ctx := context.Background()
+	wr, err := bs.Create(ctx)
+	if err != nil {
+		t.Fatalf("unexpected error starting upload: %v", err)
+	}
+
+	nn, err := io.Copy(wr, bytes.NewReader(blob))
+	if err != nil {
+		t.Fatalf("error copying into blob writer: %v", err)
+	}
+
+	if nn != int64(len(blob)) {
+		t.Fatalf("unexpected number of bytes copied: %v != %v", nn, len(blob))
+	}
+
+	dgst, err := digest.FromReader(bytes.NewReader(blob))
+	if err != nil {
+		t.Fatalf("error getting digest: %v", err)
+	}
+
+	if dgst != expectedDigest {
+		t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
+	}
+
+	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
+	if err != nil {
+		t.Fatalf("unexpected error committing write: %v", err)
+	}
+
+	if desc.Digest != dgst {
+		t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst)
+	}
+}
+
+// seekerSize seeks to the end of seeker, checks the size and returns it to
+// the original state, returning the size. The state of the seeker should be
+// treated as unknown if an error is returned.
+func seekerSize(seeker io.ReadSeeker) (int64, error) {
+	current, err := seeker.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return 0, err
+	}
+
+	end, err := seeker.Seek(0, os.SEEK_END)
+	if err != nil {
+		return 0, err
+	}
+
+	resumed, err := seeker.Seek(current, os.SEEK_SET)
+	if err != nil {
+		return 0, err
+	}
+
+	if resumed != current {
+		return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
+	}
+
+	return end, nil
+}
+
+// addBlob simply consumes the reader and inserts into the blob service,
+// returning a descriptor on success.
+func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) {
+	wr, err := bs.Create(ctx)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer wr.Cancel(ctx)
+
+	if nn, err := io.Copy(wr, rd); err != nil {
+		return distribution.Descriptor{}, err
+	} else if nn != desc.Size {
+		return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size)
+	}
+
+	return wr.Commit(ctx, desc)
+}
+
+func createRandomData() (io.ReadSeeker, int64, error) {
+	fileSize := mrand.Int63n(1<<20) + 1<<20
+
+	randomData := make([]byte, fileSize)
+	// Fill up the buffer with some random data.
+	n, err := rand.Read(randomData)
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to fill buffer with random data: %v", err)
+	}
+	if n != len(randomData) {
+		return nil, 0, fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData))
+	}
+
+	return bytes.NewReader(randomData), fileSize, nil
+}
+
+func uploadRandomLayer(t *testing.T, ctx context.Context, bi distribution.BlobIngester) digest.Digest {
+	dr, size, err := createRandomData()
+	if err != nil {
+		t.Fatalf("failed to create random file: %v", err)
+	}
+
+	h := sha256.New()
+	rd := io.TeeReader(dr, h)
+	blobUpload, err := bi.Create(ctx)
+	if err != nil {
+		t.Fatalf("unexpected error starting layer upload: %s", err)
+	}
+	nn, err := io.Copy(blobUpload, rd)
+	if err != nil {
+		t.Fatalf("unexpected error uploading layer data: %v", err)
+	}
+	if nn != size {
+		t.Fatalf("layer data write incomplete")
+	}
+	dgst := digest.NewDigest("sha256", h)
+	_, err = blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
+	if err != nil {
+		t.Fatalf("unexpected error finishing layer upload: %v", err)
+	}
+	return dgst
+}
+
+func uploadRandomTarball(t *testing.T, ctx context.Context, bi distribution.BlobIngester) digest.Digest {
+	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
+	if err != nil {
+		t.Fatalf("error creating random reader: %v", err)
+	}
+	dgst := digest.Digest(tarSumStr)
+
+	randomLayerSize, err := seekerSize(randomDataReader)
+	if err != nil {
+		t.Fatalf("error getting seeker size for random layer: %v", err)
+	}
+
+	_, err = addBlob(ctx, bi, distribution.Descriptor{
+		Digest:    dgst,
+		MediaType: "application/octet-stream",
+		Size:      randomLayerSize,
+	}, randomDataReader)
+	if err != nil {
+		t.Fatalf("failed to add blob: %v", err)
+	}
+	return dgst
+}
+
+// checkBlobParentPath asserts that the directory containing a blob's link or
+// data does (not) exist. If repoName is given, the link path in the _layers
+// directory of that repository is checked; otherwise the registry's blob
+// store is checked.
+func checkBlobParentPath(t *testing.T, ctx context.Context, driver *inmemory.Driver, repoName string, dgst digest.Digest, expectExistent bool) {
+	var (
+		blobPath string
+		err      error
+	)
+
+	if repoName != "" {
+		blobPath, err = defaultPathMapper.path(layerLinkPathSpec{name: repoName, digest: dgst})
+		if err != nil {
+			t.Fatalf("failed to get layer link path for repo=%s, digest=%s: %v", repoName, dgst.String(), err)
+		}
+		blobPath = path.Dir(blobPath)
+	} else {
+		blobPath, err = defaultPathMapper.path(blobPathSpec{digest: dgst})
+		if err != nil {
+			t.Fatalf("failed to get blob path for digest %s: %v", dgst.String(), err)
+		}
+	}
+
+	parentExists, err := exists(ctx, driver, blobPath)
+	if err != nil {
+		t.Fatalf("failed to check whether path %s exists: %v", blobPath, err)
+	}
+	if expectExistent && !parentExists {
+		t.Errorf("expected blob path %s to exist", blobPath)
+	} else if !expectExistent && parentExists {
+		t.Errorf("expected blob path %s not to exist", blobPath)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go
new file mode 100644
index 000000000000..fad0a77a1bec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobcachemetrics.go
@@ -0,0 +1,60 @@
+package storage
+
+import (
+	"expvar"
+	"sync/atomic"
+
+	"github.com/docker/distribution/registry/storage/cache"
+)
+
+type blobStatCollector struct {
+	metrics cache.Metrics
+}
+
+func (bsc *blobStatCollector) Hit() {
+	atomic.AddUint64(&bsc.metrics.Requests, 1)
+	atomic.AddUint64(&bsc.metrics.Hits, 1)
+}
+
+func (bsc *blobStatCollector) Miss() {
+	atomic.AddUint64(&bsc.metrics.Requests, 1)
+	atomic.AddUint64(&bsc.metrics.Misses, 1)
+}
+
+func (bsc *blobStatCollector) Metrics() cache.Metrics {
+	return bsc.metrics
+}
+
+// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor
+// cache requests. Note this is kept globally and made available via expvar.
+// For more detailed metrics, it's recommended to instrument a particular
+// cache implementation.
+var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{}
+
+func init() {
+	registry := expvar.Get("registry")
+	if registry == nil {
+		registry = expvar.NewMap("registry")
+	}
+
+	cache := registry.(*expvar.Map).Get("cache")
+	if cache == nil {
+		cache = &expvar.Map{}
+		cache.(*expvar.Map).Init()
+		registry.(*expvar.Map).Set("cache", cache)
+	}
+
+	storage := cache.(*expvar.Map).Get("storage")
+	if storage == nil {
+		storage = &expvar.Map{}
+		storage.(*expvar.Map).Init()
+		cache.(*expvar.Map).Set("storage", storage)
+	}
+
+	storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
+		// no need for synchronous access: the increments are atomic and
+		// during reading, we don't care if the data is up to date. The
+		// numbers will always *eventually* be reported correctly.
+		return blobStatterCacheMetrics
+	}))
+}
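Because the counters are published through the standard `expvar` package, they can be inspected without any extra plumbing once the process exposes `/debug/vars`. A hedged sketch of reading the nested map back from inside the process:

```go
// Dumps the registry.cache.storage.blobdescriptor structure populated by
// the init function above; output shape depends on what has been registered.
package main

import (
	"expvar"
	"fmt"
)

func main() {
	if v := expvar.Get("registry"); v != nil {
		// String() renders the expvar map (and nested maps) as JSON.
		fmt.Println(v.String())
	}
}
```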
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go
new file mode 100644
index 000000000000..24aeba690550
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go
@@ -0,0 +1,79 @@
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// TODO(stevvooe): This should be configurable in the future.
+const blobCacheControlMaxAge = 365 * 24 * time.Hour
+
+// blobServer simply serves blobs from a driver instance using a path function
+// to identify paths and a descriptor service to fill in metadata.
+type blobServer struct {
+	driver   driver.StorageDriver
+	statter  distribution.BlobStatter
+	pathFn   func(dgst digest.Digest) (string, error)
+	redirect bool // allows disabling URLFor redirects
+}
+
+func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	desc, err := bs.statter.Stat(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	path, err := bs.pathFn(desc.Digest)
+	if err != nil {
+		return err
+	}
+
+	redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
+
+	switch err {
+	case nil:
+		if bs.redirect {
+			// Redirect to storage URL.
+			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
+			return err
+		}
+
+		fallthrough
+	case driver.ErrUnsupportedMethod:
+		// Fallback to serving the content directly.
+		br, err := newFileReader(ctx, bs.driver, path, desc.Size)
+		if err != nil {
+			return err
+		}
+		defer br.Close()
+
+		w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
+		w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))
+
+		if w.Header().Get("Docker-Content-Digest") == "" {
+			w.Header().Set("Docker-Content-Digest", desc.Digest.String())
+		}
+
+		if w.Header().Get("Content-Type") == "" {
+			// Set the content type if not already set.
+			w.Header().Set("Content-Type", desc.MediaType)
+		}
+
+		if w.Header().Get("Content-Length") == "" {
+			// Set the content length if not already set.
+			w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
+		}
+
+		http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
+		return nil
+	}
+
+	// Some unexpected error.
+	return err
+}
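`ServeBlob` above either issues a temporary redirect to a backend URL (when the driver supports `URLFor` and redirects are enabled) or streams the content itself with `ETag`, `Cache-Control` and `Docker-Content-Digest` headers. A hypothetical wiring into an HTTP handler — the handler shape is illustrative, assuming a `distribution.BlobServer` obtained from a repository's blob store:

```go
// Illustrative handler only; nothing here is added by the patch.
package example

import (
	"net/http"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

func blobHandler(ctx context.Context, blobs distribution.BlobServer, dgst digest.Digest) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// ServeBlob writes the response (redirect or direct stream); we
		// only need to translate its errors into status codes.
		if err := blobs.ServeBlob(ctx, w, r, dgst); err != nil {
			switch err {
			case distribution.ErrBlobUnknown:
				http.Error(w, err.Error(), http.StatusNotFound)
			default:
				http.Error(w, err.Error(), http.StatusInternalServerError)
			}
		}
	}
}
```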
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go
index 2a0d449accaf..37da25bf3a46 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go
@@ -2,185 +2,340 @@ package storage
 
 import (
 	"fmt"
+	"io"
+	"path"
+	"regexp"
 	"strings"
 
 	"github.com/docker/distribution"
-	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"golang.org/x/net/context"
+	"github.com/docker/distribution/registry/storage/driver"
 )
 
-// TODO(stevvooe): Currently, the blobStore implementation used by the
-// manifest store. The layer store should be refactored to better leverage the
-// blobStore, reducing duplicated code.
+const (
+	tarsumPrefix = `tarsum`
+)
+
+var (
+	reTarsumPrefix = regexp.MustCompile(`^` + tarsumPrefix + `(?:/(\w+))?`)
+	reDigestPath   = regexp.MustCompile(fmt.Sprintf(`^([^/]+)/(?:\w{%d}/)?(\w+)$`, multilevelHexPrefixLength))
+)
 
-// blobStore implements a generalized blob store over a driver, supporting the
-// read side and link management. This object is intentionally a leaky
-// abstraction, providing utility methods that support creating and traversing
-// backend links.
+// blobStore implements the read side of the blob store interface over a
+// driver without enforcing per-repository membership. This object is
+// intentionally a leaky abstraction, providing utility methods that support
+// creating and traversing backend links.
 type blobStore struct {
-	driver storagedriver.StorageDriver
-	pm     *pathMapper
-	ctx    context.Context
+	driver        driver.StorageDriver
+	pm            *pathMapper
+	statter       distribution.BlobStatter
+	deleteEnabled bool
+	// Causes directory containing blob's data to be removed recursively upon
+	// Delete.
+	removeParentsOnDelete bool
 }
 
 var _ distribution.BlobService = &blobStore{}
+var _ distribution.BlobEnumerator = &blobStore{}
+var _ distribution.BlobDeleter = &blobStore{}
 
-func (bs *blobStore) Delete(dgst digest.Digest) error {
-	found, err := bs.exists(dgst)
+// Get implements the BlobReadService.Get call.
+func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	bp, err := bs.path(dgst)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	if !found {
-		// TODO if the blob doesn't exist, should this be an error?
-		return nil
+	p, err := bs.driver.GetContent(ctx, bp)
+	if err != nil {
+		switch err.(type) {
+		case driver.PathNotFoundError:
+			return nil, distribution.ErrBlobUnknown
+		}
+
+		return nil, err
 	}
 
-	path, err := bs.path(dgst)
+	return p, err
+}
 
+func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	desc, err := bs.statter.Stat(ctx, dgst)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	path = strings.TrimSuffix(path, "/data")
+	path, err := bs.path(desc.Digest)
+	if err != nil {
+		return nil, err
+	}
 
-	return bs.driver.Delete(path)
+	return newFileReader(ctx, bs.driver, path, desc.Size)
 }
 
-// exists reports whether or not the path exists. If the driver returns error
-// other than storagedriver.PathNotFound, an error may be returned.
-func (bs *blobStore) exists(dgst digest.Digest) (bool, error) {
-	path, err := bs.path(dgst)
-
+// Put stores the content p in the blob store, calculating the digest. If the
+// content is already present, only the digest will be returned. This should
+// only be used for small objects, such as manifests. This is implemented as a
+// convenience for other Put implementations.
+func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	dgst, err := digest.FromBytes(p)
 	if err != nil {
-		return false, err
+		return distribution.Descriptor{}, err
 	}
 
-	ok, err := exists(bs.driver, path)
-	if err != nil {
-		return false, err
+	desc, err := bs.statter.Stat(ctx, dgst)
+	if err == nil {
+		// content already present
+		return desc, nil
+	} else if err != distribution.ErrBlobUnknown {
+		// real error, return it
+		return distribution.Descriptor{}, err
 	}
 
-	return ok, nil
-}
-
-// get retrieves the blob by digest, returning it a byte slice. This should
-// only be used for small objects.
-func (bs *blobStore) get(dgst digest.Digest) ([]byte, error) {
 	bp, err := bs.path(dgst)
 	if err != nil {
-		return nil, err
+		return distribution.Descriptor{}, err
 	}
 
-	return bs.driver.GetContent(bp)
+	// TODO(stevvooe): Write out mediatype here, as well.
+
+	return distribution.Descriptor{
+		Size: int64(len(p)),
+
+		// NOTE(stevvooe): The central blob store firewalls media types from
+		// other users. The caller should look this up and override the value
+		// for the specific repository.
+		MediaType: "application/octet-stream",
+		Digest:    dgst,
+	}, bs.driver.PutContent(ctx, bp, p)
 }
 
-// link links the path to the provided digest by writing the digest into the
-// target file.
-func (bs *blobStore) link(path string, dgst digest.Digest) error {
-	if exists, err := bs.exists(dgst); err != nil {
-		return err
-	} else if !exists {
-		return fmt.Errorf("cannot link non-existent blob")
+func (bs *blobStore) Enumerate(ctx context.Context, digests []digest.Digest, last string) (n int, err error) {
+	var lastPathComponents []string
+
+	if last != "" {
+		lastPathComponents, err = digestPathComponents(digest.Digest(last), true)
+		if err != nil {
+			return 0, fmt.Errorf("invalid last digest %q", last)
+		}
 	}
 
-	// The contents of the "link" file are the exact string contents of the
-	// digest, which is specified in that package.
-	return bs.driver.PutContent(path, []byte(dgst))
-}
+	rootPath := path.Join(storagePathRoot, storagePathVersion, "blobs")
+
+	pastLastOffset := func(pthSepCount int, pth string, lastPathComponents []string) bool {
+		maxComponents := pthSepCount + 1
+		if maxComponents >= len(lastPathComponents) {
+			return pth > path.Join(lastPathComponents...)
+		}
+		return pth >= path.Join(lastPathComponents[:maxComponents]...)
+	}
+
+	err = WalkSorted(ctx, bs.driver, rootPath, func(fi driver.FileInfo) error {
+		if !fi.IsDir() {
+			// ignore files
+			return nil
+		}
+
+		// trim / prefix
+		pth := strings.TrimPrefix(strings.TrimPrefix(fi.Path(), rootPath), "/")
+		sepCount := strings.Count(pth, "/")
+
+		if !pastLastOffset(sepCount, pth, lastPathComponents) {
+			// we haven't reached the 'last' offset yet
+			return ErrSkipDir
+		}
+
+		if sepCount < 2 {
+			// continue walking
+			return nil
+		}
+
+		alg := ""
+		tarsumParts := reTarsumPrefix.FindStringSubmatch(pth)
+		isTarsum := len(tarsumParts) > 0
+		if sepCount > 4 || (!isTarsum && sepCount > 2) {
+			// path too long
+			return ErrSkipDir
+		}
+
+		if len(tarsumParts) > 0 {
+			alg = tarsumPrefix + "."
+ tarsumParts[1] + "+" + // trim "tarsum//" prefix from path + pth = strings.TrimPrefix(pth[len(tarsumParts[0]):], "/") + } + + digestParts := reDigestPath.FindStringSubmatch(pth) + if len(digestParts) > 0 { + alg += digestParts[1] + dgstHex := digestParts[2] + dgst := digest.NewDigestFromHex(alg, dgstHex) + // append only valid digests + if err := dgst.Validate(); err == nil { + if n >= len(digests) { + return ErrStopWalking + } + digests[n] = dgst + n++ + } else { + context.GetLogger(ctx).Warnf("skipping invalid digest %q: %v", dgst.String(), err) + } + return ErrSkipDir + } + + return nil + }) -// linked reads the link at path and returns the content. -func (bs *blobStore) linked(path string) ([]byte, error) { - linked, err := bs.readlink(path) if err != nil { - return nil, err + switch err.(type) { + case driver.PathNotFoundError: + err = io.EOF + default: + if err == ErrStopWalking { + err = nil + } + } + } else if n < len(digests) { + err = io.EOF } + return +} - return bs.get(linked) +func (bs *blobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported } -// readlink returns the linked digest at path. -func (bs *blobStore) readlink(path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(path) - if err != nil { - return "", err +func (bs *blobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (bs *blobStore) Delete(ctx context.Context, dgst digest.Digest) error { + var ( + blobPath string + err error + ) + if !bs.deleteEnabled { + return distribution.ErrUnsupported } - linked, err := digest.ParseDigest(string(content)) + if bs.removeParentsOnDelete { + blobPath, err = bs.pm.path(blobPathSpec{digest: dgst}) + } else { + blobPath, err = bs.pm.path(blobDataPathSpec{digest: dgst}) + } if err != nil { - return "", err + return err } - if exists, err := bs.exists(linked); err != nil { - return "", err - } else if !exists { - return "", fmt.Errorf("link %q invalid: blob %s does not exist", path, linked) - } + context.GetLogger(ctx).Infof("Deleting blob path: %s", blobPath) + return bs.driver.Delete(ctx, blobPath) +} - return linked, nil +func (bs *blobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) } -// resolve reads the digest link at path and returns the blob store link. -func (bs *blobStore) resolve(path string) (string, error) { - dgst, err := bs.readlink(path) +// path returns the canonical path for the blob identified by digest. The blob +// may or may not exist. +func (bs *blobStore) path(dgst digest.Digest) (string, error) { + bp, err := bs.pm.path(blobDataPathSpec{ + digest: dgst, + }) + if err != nil { return "", err } - return bs.path(dgst) + return bp, nil } -// put stores the content p in the blob store, calculating the digest. If the -// content is already present, only the digest will be returned. This should -// only be used for small objects, such as manifests. -func (bs *blobStore) put(p []byte) (digest.Digest, error) { - dgst, err := digest.FromBytes(p) +// link links the path to the provided digest by writing the digest into the +// target file. Caller must ensure that the blob actually exists. +func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { + // The contents of the "link" file are the exact string contents of the + // digest, which is specified in that package. 
+	return bs.driver.PutContent(ctx, path, []byte(dgst))
+}
 
-// readlink returns the linked digest at path.
-func (bs *blobStore) readlink(path string) (digest.Digest, error) {
-	content, err := bs.driver.GetContent(path)
-	if err != nil {
-		return "", err
-	}
-
-	linked, err := digest.ParseDigest(string(content))
-	if err != nil {
-		return "", err
-	}
-
-	if exists, err := bs.exists(linked); err != nil {
-		return "", err
-	} else if !exists {
-		return "", fmt.Errorf("link %q invalid: blob %s does not exist", path, linked)
-	}
-
-	return linked, nil
-}
+// readlink returns the linked digest at path.
+func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) {
+	content, err := bs.driver.GetContent(ctx, path)
+	if err != nil {
+		return "", err
+	}
+
+	linked, err := digest.ParseDigest(string(content))
+	if err != nil {
+		return "", err
+	}
+
+	return linked, nil
+}
 
-// resolve reads the digest link at path and returns the blob store link.
-func (bs *blobStore) resolve(path string) (string, error) {
-	dgst, err := bs.readlink(path)
+// resolve reads the digest link at path and returns the blob store path.
+func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
+	dgst, err := bs.readlink(ctx, path)
 	if err != nil {
 		return "", err
 	}
 
 	return bs.path(dgst)
 }
 
-// put stores the content p in the blob store, calculating the digest. If the
-// content is already present, only the digest will be returned. This should
-// only be used for small objects, such as manifests.
-func (bs *blobStore) put(p []byte) (digest.Digest, error) {
-	dgst, err := digest.FromBytes(p)
-	if err != nil {
-		ctxu.GetLogger(bs.ctx).Errorf("error digesting content: %v, %s", err, string(p))
-		return "", err
-	}
-
-	bp, err := bs.path(dgst)
-	if err != nil {
-		return "", err
-	}
-
-	// If the content already exists, just return the digest.
-	if exists, err := bs.exists(dgst); err != nil {
-		return "", err
-	} else if exists {
-		return dgst, nil
-	}
-
-	return dgst, bs.driver.PutContent(bp, p)
-}
+type blobStatter struct {
+	driver driver.StorageDriver
+	pm     *pathMapper
+}
+
+var _ distribution.BlobDescriptorService = &blobStatter{}
 
-// exists provides a utility method to test whether or not
-func exists(driver storagedriver.StorageDriver, path string) (bool, error) {
-	if _, err := driver.Stat(path); err != nil {
-		switch err := err.(type) {
-		case storagedriver.PathNotFoundError:
-			return false, nil
-		default:
-			return false, err
-		}
-	}
-
-	return true, nil
-}
+// Stat implements BlobStatter.Stat by returning the descriptor for the blob
+// in the main blob store. If this method returns successfully, there is a
+// strong guarantee that the blob exists and is available.
+func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	path, err := bs.pm.path(blobDataPathSpec{
+		digest: dgst,
+	})
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	fi, err := bs.driver.Stat(ctx, path)
+	if err != nil {
+		switch err := err.(type) {
+		case driver.PathNotFoundError:
+			return distribution.Descriptor{}, distribution.ErrBlobUnknown
+		default:
+			return distribution.Descriptor{}, err
+		}
+	}
+
+	if fi.IsDir() {
+		// NOTE(stevvooe): This represents a corruption situation. Somehow, we
+		// calculated a blob path and then detected a directory. We log the
+		// error and then error on the side of not knowing about the blob.
+		context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
+		return distribution.Descriptor{}, distribution.ErrBlobUnknown
+	}
+
+	// TODO(stevvooe): Add method to resolve the mediatype. We can store and
+	// cache a "global" media type for the blob, even if a specific repo has a
+	// mediatype that overrides the main one.
+
+	return distribution.Descriptor{
+		Size: fi.Size(),
+
+		// NOTE(stevvooe): The central blob store firewalls media types from
+		// other users. The caller should look this up and override the value
+		// for the specific repository.
+		MediaType: "application/octet-stream",
+		Digest:    dgst,
+	}, nil
+}
+
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	return distribution.ErrUnsupported
+}
+
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	return distribution.ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go
new file mode 100644
index 000000000000..2142c37fd003
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go
@@ -0,0 +1,379 @@
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+var (
+	errResumableDigestNotAvailable = errors.New("resumable digest not available")
+)
+
+// blobWriter is used to control the various aspects of resumable
+// blob upload. It implements the BlobWriter interface.
+type blobWriter struct {
+	blobStore *linkedBlobStore
+
+	id        string
+	startedAt time.Time
+	digester  digest.Digester
+	written   int64 // track the contiguous write
+
+	// implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy
+	// the BlobWriter interface
+	bufferedFileWriter
+
+	resumableDigestEnabled bool
+}
+
+var _ distribution.BlobWriter = &blobWriter{}
+
+// ID returns the identifier for this upload.
+func (bw *blobWriter) ID() string {
+	return bw.id
+}
+
+func (bw *blobWriter) StartedAt() time.Time {
+	return bw.startedAt
+}
+
+// Commit marks the upload as completed, returning a valid descriptor. The
+// final size and digest are checked against the first descriptor provided.
+func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	context.GetLogger(ctx).Debug("(*blobWriter).Commit")
+
+	if err := bw.bufferedFileWriter.Close(); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	canonical, err := bw.validateBlob(ctx, desc)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.moveBlob(ctx, canonical); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.removeResources(ctx); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	return canonical, nil
+}
+
+// Cancel aborts the blob upload process, releasing any resources associated
+// with the writer and canceling the operation.
+func (bw *blobWriter) Cancel(ctx context.Context) error {
+	context.GetLogger(ctx).Debug("(*blobWriter).Cancel")
+	if err := bw.removeResources(ctx); err != nil {
+		return err
+	}
+
+	bw.Close()
+	return nil
+}
+
+func (bw *blobWriter) Write(p []byte) (int, error) {
+	// Ensure that the current write offset matches how many bytes have been
+	// written to the digester. If not, we need to update the digest state to
+	// match the current write position.
+	if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable {
+		return 0, err
+	}
+
+	n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p)
+	bw.written += int64(n)
+
+	return n, err
+}
+
+func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
+	// Ensure that the current write offset matches how many bytes have been
+	// written to the digester. If not, we need to update the digest state to
+	// match the current write position.
+	if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable {
+		return 0, err
+	}
+
+	nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash()))
+	bw.written += nn
+
+	return nn, err
+}
+
+func (bw *blobWriter) Close() error {
+	if bw.err != nil {
+		return bw.err
+	}
+
+	if err := bw.storeHashState(bw.blobStore.ctx); err != nil {
+		return err
+	}
+
+	return bw.bufferedFileWriter.Close()
+}
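`Write` and `ReadFrom` above both tee bytes into the digester as they stream to storage, so the digest is ready at commit time without re-reading the upload. The same pattern in isolation, using `digest.Canonical` as this package does:

```go
// Standalone illustration of digest-while-writing; not part of the patch.
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	digester := digest.Canonical.New() // sha256 in this codebase
	var dst bytes.Buffer

	// Write path: io.MultiWriter feeds destination and hash in one pass.
	w := io.MultiWriter(&dst, digester.Hash())
	if _, err := io.Copy(w, strings.NewReader("hello")); err != nil {
		panic(err)
	}

	// Prints the sha256 digest of "hello"; the read path uses
	// io.TeeReader(src, digester.Hash()) to the same effect.
	fmt.Println(digester.Digest())
}
```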
+ // because we've written the entire file in the lifecycle of the
+ // current instance.
+ if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
+ canonical = bw.digester.Digest()
+ verified = desc.Digest == canonical
+ }
+
+ // If the check based on size fails, we fall back to the slowest path.
+ // We may be able to make the size-based check a stronger
+ // guarantee, so this may be defensive.
+ if !verified {
+ digester := digest.Canonical.New()
+
+ digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ // Read the file from the backend driver and validate it.
+ fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ tr := io.TeeReader(fr, digester.Hash())
+
+ if _, err := io.Copy(digestVerifier, tr); err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ canonical = digester.Digest()
+ verified = digestVerifier.Verified()
+ }
+ }
+
+ if !verified {
+ context.GetLoggerWithFields(ctx,
+ map[string]interface{}{
+ "canonical": canonical,
+ "provided": desc.Digest,
+ }, "canonical", "provided").
+ Errorf("canonical digest does not match provided digest")
+ return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+ Digest: desc.Digest,
+ Reason: fmt.Errorf("content does not match digest"),
+ }
+ }
+
+ // update desc with canonical hash
+ desc.Digest = canonical
+
+ if desc.MediaType == "" {
+ desc.MediaType = "application/octet-stream"
+ }
+
+ return desc, nil
+}
+
+// moveBlob moves the data into its final, hash-qualified destination,
+// identified by dgst. The layer should be validated before commencing the
+// move.
+func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
+ blobPath, err := bw.blobStore.pm.path(blobDataPathSpec{
+ digest: desc.Digest,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ // Check for existence
+ if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
+ switch err := err.(type) {
+ case storagedriver.PathNotFoundError:
+ break // ensure that it doesn't exist.
+ default:
+ return err
+ }
+ } else {
+ // If the path exists, we can assume that the content has already
+ // been uploaded, since the blob storage is content-addressable.
+ // While it may be corrupted, detection of such corruption belongs
+ // elsewhere.
+ return nil
+ }
+
+ // If no data was received, we may not actually have a file on disk. Check
+ // the size here and write a zero-length file to blobPath if this is the
+ // case. For the most part, this should only ever happen with zero-length
+ // tars.
+ if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
+ switch err := err.(type) {
+ case storagedriver.PathNotFoundError:
+ // HACK(stevvooe): This is slightly dangerous: if we verify above,
+ // get a hash, then the underlying file is deleted, we risk moving
+ // a zero-length blob into a nonzero-length blob location. To
+ // prevent this horrid thing, we employ the hack of only allowing
+ // this to happen for the zero tarsum.
+ if desc.Digest == digest.DigestSha256EmptyTar {
+ return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
+ }
+
+ // We let this fail during the move below.
+ logrus.
+ WithField("upload.id", bw.ID()).
+ WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") + default: + return err // unrelated error + } + } + + // TODO(stevvooe): We should also write the mediatype when executing this move. + + return bw.blobStore.driver.Move(ctx, bw.path, blobPath) +} + +// removeResources should clean up all resources associated with the upload +// instance. An error will be returned if the clean up cannot proceed. If the +// resources are already not present, no error will be returned. +func (bw *blobWriter) removeResources(ctx context.Context) error { + dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + }) + + if err != nil { + return err + } + + // Resolve and delete the containing directory, which should include any + // upload related files. + dirPath := path.Dir(dataPath) + if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // already gone! + default: + // This should be uncommon enough such that returning an error + // should be okay. At this point, the upload should be mostly + // complete, but perhaps the backend became unaccessible. + context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 + try := 1 + for try <= 5 { + _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + switch err.(type) { + case storagedriver.PathNotFoundError: + context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + default: + return nil, err + } + } + + readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go new file mode 100644 index 000000000000..39166876f05e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go @@ -0,0 +1,17 @@ +// +build noresumabledigest + +package storage + +import ( + "github.com/docker/distribution/context" +) + +// resumeHashAt is a noop when resumable digest support is disabled. +func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { + return errResumableDigestNotAvailable +} + +// storeHashState is a noop when resumable digest support is disabled. 
+func (bw *blobWriter) storeHashState(ctx context.Context) error {
+ return errResumableDigestNotAvailable
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go
new file mode 100644
index 000000000000..a26ac2cce1d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go
@@ -0,0 +1,175 @@
+// +build !noresumabledigest
+
+package storage
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strconv"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/distribution/context"
+ storagedriver "github.com/docker/distribution/registry/storage/driver"
+ "github.com/stevvooe/resumable"
+
+ // register resumable hashes with import
+ _ "github.com/stevvooe/resumable/sha256"
+ _ "github.com/stevvooe/resumable/sha512"
+)
+
+// resumeDigestAt attempts to restore the state of the internal hash function
+// by loading the most recent saved hash state less than or equal to the given
+// offset. Any unhashed bytes remaining less than the given offset are hashed
+// from the content uploaded so far.
+func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
+ if !bw.resumableDigestEnabled {
+ return errResumableDigestNotAvailable
+ }
+
+ if offset < 0 {
+ return fmt.Errorf("cannot resume hash at negative offset: %d", offset)
+ }
+
+ h, ok := bw.digester.Hash().(resumable.Hash)
+ if !ok {
+ return errResumableDigestNotAvailable
+ }
+
+ if offset == int64(h.Len()) {
+ // State of digester is already at the requested offset.
+ return nil
+ }
+
+ // List hash states from storage backend.
+ var hashStateMatch hashStateEntry
+ hashStates, err := bw.getStoredHashStates(ctx)
+ if err != nil {
+ return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
+ }
+
+ // Find the highest stored hashState with offset less than or equal to
+ // the requested offset.
+ for _, hashState := range hashStates {
+ if hashState.offset == offset {
+ hashStateMatch = hashState
+ break // Found an exact offset match.
+ } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset {
+ // This offset is closer to the requested offset.
+ hashStateMatch = hashState
+ } else if hashState.offset > offset {
+ // Remove any stored hash state with offsets higher than this one
+ // as writes to this resumed hasher will make those invalid. This
+ // is probably okay to skip for now since we don't expect anyone to
+ // use the API in this way. For that reason, we don't treat an
+ // error here as a fatal error, but only log it.
+ if err := bw.driver.Delete(ctx, hashState.path); err != nil {
+ logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
+ }
+ }
+ }
+
+ if hashStateMatch.offset == 0 {
+ // No need to load any state, just reset the hasher.
+ h.Reset()
+ } else {
+ storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
+ if err != nil {
+ return err
+ }
+
+ if err = h.Restore(storedState); err != nil {
+ return err
+ }
+ }
+
+ // Mind the gap.
+ if gapLen := offset - int64(h.Len()); gapLen > 0 {
+ // Need to read content from the upload to catch up to the desired offset.
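+ //
+ // In isolation, the catch-up step below is just a seek-and-copy
+ // (sketch, for any io.ReadSeeker over the uploaded bytes):
+ //
+ //    content.Seek(int64(h.Len()), os.SEEK_SET) // skip already-hashed bytes
+ //    io.CopyN(h, content, gapLen)              // hash only the gap
+ //
+ // after which h.Len() == offset and hashing can continue in order.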
+ fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) + if err != nil { + return err + } + + if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { + return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) + } + + if _, err := io.CopyN(h, fr, gapLen); err != nil { + return err + } + } + + return nil +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + list: true, + }) + if err != nil { + return nil, err + } + + paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) + if err != nil { + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + uploadHashStatePath, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Name(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + offset: int64(h.Len()), + }) + if err != nil { + return err + } + + hashState, err := h.State() + if err != nil { + return err + } + + return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go index dcc79d8d53fd..10a3909197cb 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache.go @@ -1,112 +1,35 @@ // Package cache provides facilities to speed up access to the storage -// backend. Typically cache implementations deal with internal implementation -// details at the backend level, rather than generalized caches for -// distribution related interfaces. In other words, unless the cache is -// specific to the storage package, it belongs in another package. +// backend. package cache import ( "fmt" - "github.com/docker/distribution/digest" - "golang.org/x/net/context" + "github.com/docker/distribution" ) -// ErrNotFound is returned when a meta item is not found. -var ErrNotFound = fmt.Errorf("not found") +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService -// LayerMeta describes the backend location and length of layer data. 
-type LayerMeta struct { - Path string - Length int64 + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) } -// LayerInfoCache is a driver-aware cache of layer metadata. Basically, it -// provides a fast cache for checks against repository metadata, avoiding -// round trips to backend storage. Note that this is different from a pure -// layer cache, which would also provide access to backing data, as well. Such -// a cache should be implemented as a middleware, rather than integrated with -// the storage backend. -// -// Note that most implementations rely on the caller to do strict checks on on -// repo and dgst arguments, since these are mostly used behind existing -// implementations. -type LayerInfoCache interface { - // Contains returns true if the repository with name contains the layer. - Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) - - // Add includes the layer in the given repository cache. - Add(ctx context.Context, repo string, dgst digest.Digest) error - - Delete(ctx context.Context, repo string, dgst digest.Digest) error - - // Meta provides the location of the layer on the backend and its size. Membership of a - // repository should be tested before using the result, if required. - Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) - - // SetMeta sets the meta data for the given layer. - SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error -} - -// base implements common checks between cache implementations. Note that -// these are not full checks of input, since that should be done by the -// caller. -type base struct { - LayerInfoCache -} - -func (b *base) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { - if repo == "" { - return false, fmt.Errorf("cache: cannot check for empty repository name") - } - - if dgst == "" { - return false, fmt.Errorf("cache: cannot check for empty digests") - } - - return b.LayerInfoCache.Contains(ctx, repo, dgst) -} - -func (b *base) Add(ctx context.Context, repo string, dgst digest.Digest) error { - if repo == "" { - return fmt.Errorf("cache: cannot add empty repository name") +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. 
+func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { + return err } - if dgst == "" { - return fmt.Errorf("cache: cannot add empty digest") - } - - return b.LayerInfoCache.Add(ctx, repo, dgst) -} - -func (b *base) Delete(ctx context.Context, repo string, dgst digest.Digest) error { - if repo == "" { - return fmt.Errorf("cache: cannot delete empty repository name") - } - - if dgst == "" { - return fmt.Errorf("cache: cannot delete empty digest") - } - - return b.LayerInfoCache.Delete(ctx, repo, dgst) -} - -func (b *base) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - if dgst == "" { - return LayerMeta{}, fmt.Errorf("cache: cannot get meta for empty digest") - } - - return b.LayerInfoCache.Meta(ctx, dgst) -} - -func (b *base) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - if dgst == "" { - return fmt.Errorf("cache: cannot set meta for empty digest") + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) } - if meta.Path == "" { - return fmt.Errorf("cache: cannot set empty path for meta") + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) } - return b.LayerInfoCache.SetMeta(ctx, dgst, meta) + return nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache_test.go deleted file mode 100644 index 48cef955ec5f..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cache_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package cache - -import ( - "testing" - - "golang.org/x/net/context" -) - -// checkLayerInfoCache takes a cache implementation through a common set of -// operations. If adding new tests, please add them here so new -// implementations get the benefit. 
-func checkLayerInfoCache(t *testing.T, lic LayerInfoCache) { - ctx := context.Background() - - exists, err := lic.Contains(ctx, "", "fake:abc") - if err == nil { - t.Fatalf("expected error checking for cache item with empty repo") - } - - exists, err = lic.Contains(ctx, "foo/bar", "") - if err == nil { - t.Fatalf("expected error checking for cache item with empty digest") - } - - exists, err = lic.Contains(ctx, "foo/bar", "fake:abc") - if err != nil { - t.Fatalf("unexpected error checking for cache item: %v", err) - } - - if exists { - t.Fatalf("item should not exist") - } - - if err := lic.Add(ctx, "", "fake:abc"); err == nil { - t.Fatalf("expected error adding cache item with empty name") - } - - if err := lic.Add(ctx, "foo/bar", ""); err == nil { - t.Fatalf("expected error adding cache item with empty digest") - } - - if err := lic.Add(ctx, "foo/bar", "fake:abc"); err != nil { - t.Fatalf("unexpected error adding item: %v", err) - } - - exists, err = lic.Contains(ctx, "foo/bar", "fake:abc") - if err != nil { - t.Fatalf("unexpected error checking for cache item: %v", err) - } - - if !exists { - t.Fatalf("item should exist") - } - - _, err = lic.Meta(ctx, "") - if err == nil || err == ErrNotFound { - t.Fatalf("expected error getting meta for cache item with empty digest") - } - - _, err = lic.Meta(ctx, "fake:abc") - if err != ErrNotFound { - t.Fatalf("expected unknown layer error getting meta for cache item with empty digest") - } - - if err = lic.SetMeta(ctx, "", LayerMeta{}); err == nil { - t.Fatalf("expected error setting meta for cache item with empty digest") - } - - if err = lic.SetMeta(ctx, "foo/bar", LayerMeta{}); err == nil { - t.Fatalf("expected error setting meta for cache item with empty meta") - } - - expected := LayerMeta{Path: "/foo/bar", Length: 20} - if err := lic.SetMeta(ctx, "foo/bar", expected); err != nil { - t.Fatalf("unexpected error setting meta: %v", err) - } - - meta, err := lic.Meta(ctx, "foo/bar") - if err != nil { - t.Fatalf("unexpected error getting meta: %v", err) - } - - if meta != expected { - t.Fatalf("retrieved meta data did not match: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go new file mode 100644 index 000000000000..94ca8a90c76b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -0,0 +1,101 @@ +package cache + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. +type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobDescriptorService + tracker MetricsTracker +} + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. 
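+//
+// A minimal wiring sketch (provider and backendStatter are assumed to be
+// constructed elsewhere):
+//
+//    statter := cache.NewCachedBlobStatter(provider, backendStatter)
+//    desc, err := statter.Stat(ctx, dgst) // cache hit, or backend + backfill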
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
+ return &cachedBlobStatter{
+ cache: cache,
+ backend: backend,
+ }
+}
+
+// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
+// falls back to a backend. Hits and misses will be sent to the tracker.
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
+ return &cachedBlobStatter{
+ cache: cache,
+ backend: backend,
+ tracker: tracker,
+ }
+}
+
+func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ desc, err := cbds.cache.Stat(ctx, dgst)
+ if err != nil {
+ if err != distribution.ErrBlobUnknown {
+ context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
+ }
+
+ goto fallback
+ }
+
+ if cbds.tracker != nil {
+ cbds.tracker.Hit()
+ }
+ return desc, nil
+fallback:
+ if cbds.tracker != nil {
+ cbds.tracker.Miss()
+ }
+ desc, err = cbds.backend.Stat(ctx, dgst)
+ if err != nil {
+ return desc, err
+ }
+
+ if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+ context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+ }
+
+ return desc, err
+}
+
+func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+ err := cbds.cache.Clear(ctx, dgst)
+ if err != nil {
+ return err
+ }
+
+ err = cbds.backend.Clear(ctx, dgst)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+ context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory.go
deleted file mode 100644
index 63b0dcc0d789..000000000000
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package cache
-
-import (
- "github.com/docker/distribution/digest"
- "golang.org/x/net/context"
-)
-
-// inmemoryLayerInfoCache is a map-based implementation of LayerInfoCache.
-type inmemoryLayerInfoCache struct {
- membership map[string]map[digest.Digest]struct{}
- meta map[digest.Digest]LayerMeta
-}
-
-// NewInMemoryLayerInfoCache provides an implementation of LayerInfoCache that
-// stores results in memory.
-func NewInMemoryLayerInfoCache() LayerInfoCache {
- return &base{&inmemoryLayerInfoCache{
- membership: make(map[string]map[digest.Digest]struct{}),
- meta: make(map[digest.Digest]LayerMeta),
- }}
-}
-
-func (ilic *inmemoryLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) {
- members, ok := ilic.membership[repo]
- if !ok {
- return false, nil
- }
-
- _, ok = members[dgst]
- return ok, nil
-}
-
-// Add adds the layer to the redis repository blob set.
-func (ilic *inmemoryLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error {
- members, ok := ilic.membership[repo]
- if !ok {
- members = make(map[digest.Digest]struct{})
- ilic.membership[repo] = members
- }
-
- members[dgst] = struct{}{}
-
- return nil
-}
-
-func (ilic *inmemoryLayerInfoCache) Delete(ctx context.Context, repo string, dgst digest.Digest) error {
- members, ok := ilic.membership[repo]
- if !ok {
- return nil
- }
- delete(members, dgst)
- return nil
-}
-
-// Meta retrieves the layer meta data from the redis hash, returning
-// ErrUnknownLayer if not found.
-func (ilic *inmemoryLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) {
- meta, ok := ilic.meta[dgst]
- if !ok {
- return LayerMeta{}, ErrNotFound
- }
-
- return meta, nil
-}
-
-// SetMeta sets the meta data for the given digest using a redis hash. A hash
-// is used here since we may store unrelated fields about a layer in the
-// future.
-func (ilic *inmemoryLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error {
- ilic.meta[dgst] = meta
- return nil
-}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go
new file mode 100644
index 000000000000..120a6572d430
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go
@@ -0,0 +1,170 @@
+package memory
+
+import (
+ "sync"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/digest"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/storage/cache"
+)
+
+type inMemoryBlobDescriptorCacheProvider struct {
+ global *mapBlobDescriptorCache
+ repositories map[string]*mapBlobDescriptorCache
+ mu sync.RWMutex
+}
+
+// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
+// storing blob descriptor data.
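+//
+// A usage sketch (errors elided; ctx, dgst and desc assumed in scope):
+//
+//    provider := memory.NewInMemoryBlobDescriptorCacheProvider()
+//    scoped, _ := provider.RepositoryScoped("foo/bar")
+//    _ = scoped.SetDescriptor(ctx, dgst, desc)
+//    desc, _ = provider.Stat(ctx, dgst) // the global view sees it as well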
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if err := v2.ValidateRepositoryName(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if rsimbdcp.repository == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return rsimbdcp.repository.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + if rsimbdcp.repository == nil { + return distribution.ErrBlobUnknown + } + + return rsimbdcp.repository.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if rsimbdcp.repository == nil { + // allocate map since we are setting it now. + rsimbdcp.parent.mu.Lock() + var ok bool + // have to read back value since we may have allocated elsewhere. + rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + rsimbdcp.repository = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository + } + + rsimbdcp.parent.mu.Unlock() + } + + if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. 
+type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go new file mode 100644 index 000000000000..3bae7ccb3023 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go @@ -0,0 +1,13 @@ +package memory + +import ( + "testing" + + "github.com/docker/distribution/registry/storage/cache" +) + +// TestInMemoryBlobInfoCache checks the in memory implementation is working +// correctly. +func TestInMemoryBlobInfoCache(t *testing.T) { + cache.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory_test.go deleted file mode 100644 index 417e982e2b4b..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/memory_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package cache - -import "testing" - -// TestInMemoryLayerInfoCache checks the in memory implementation is working -// correctly. -func TestInMemoryLayerInfoCache(t *testing.T) { - checkLayerInfoCache(t, NewInMemoryLayerInfoCache()) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis.go deleted file mode 100644 index eba0a8af2849..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis.go +++ /dev/null @@ -1,103 +0,0 @@ -package cache - -import ( - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/garyburd/redigo/redis" - "golang.org/x/net/context" -) - -// redisLayerInfoCache provides an implementation of storage.LayerInfoCache -// based on redis. Layer info is stored in two parts. The first provide fast -// access to repository membership through a redis set for each repo. 
The -// second is a redis hash keyed by the digest of the layer, providing path and -// length information. Note that there is no implied relationship between -// these two caches. The layer may exist in one, both or none and the code -// must be written this way. -type redisLayerInfoCache struct { - pool *redis.Pool - - // TODO(stevvooe): We use a pool because we don't have great control over - // the cache lifecycle to manage connections. A new connection if fetched - // for each operation. Once we have better lifecycle management of the - // request objects, we can change this to a connection. -} - -// NewRedisLayerInfoCache returns a new redis-based LayerInfoCache using the -// provided redis connection pool. -func NewRedisLayerInfoCache(pool *redis.Pool) LayerInfoCache { - return &base{&redisLayerInfoCache{ - pool: pool, - }} -} - -// Contains does a membership check on the repository blob set in redis. This -// is used as an access check before looking up global path information. If -// false is returned, the caller should still check the backend to if it -// exists elsewhere. -func (rlic *redisLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { - conn := rlic.pool.Get() - defer conn.Close() - - ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Contains(%q, %q)", repo, dgst) - return redis.Bool(conn.Do("SISMEMBER", rlic.repositoryBlobSetKey(repo), dgst)) -} - -// Add adds the layer to the redis repository blob set. -func (rlic *redisLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { - conn := rlic.pool.Get() - defer conn.Close() - - ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Add(%q, %q)", repo, dgst) - _, err := conn.Do("SADD", rlic.repositoryBlobSetKey(repo), dgst) - return err -} - -func (rlic *redisLayerInfoCache) Delete(ctx context.Context, repo string, dgst digest.Digest) error { - //TODO - return nil -} - -// Meta retrieves the layer meta data from the redis hash, returning -// ErrUnknownLayer if not found. -func (rlic *redisLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - conn := rlic.pool.Get() - defer conn.Close() - - reply, err := redis.Values(conn.Do("HMGET", rlic.blobMetaHashKey(dgst), "path", "length")) - if err != nil { - return LayerMeta{}, err - } - - if len(reply) < 2 || reply[0] == nil || reply[1] == nil { - return LayerMeta{}, ErrNotFound - } - - var meta LayerMeta - if _, err := redis.Scan(reply, &meta.Path, &meta.Length); err != nil { - return LayerMeta{}, err - } - - return meta, nil -} - -// SetMeta sets the meta data for the given digest using a redis hash. A hash -// is used here since we may store unrelated fields about a layer in the -// future. -func (rlic *redisLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - conn := rlic.pool.Get() - defer conn.Close() - - _, err := conn.Do("HMSET", rlic.blobMetaHashKey(dgst), "path", meta.Path, "length", meta.Length) - return err -} - -// repositoryBlobSetKey returns the key for the blob set in the cache. -func (rlic *redisLayerInfoCache) repositoryBlobSetKey(repo string) string { - return "repository::" + repo + "::blobs" -} - -// blobMetaHashKey returns the cache key for immutable blob meta data. 
-func (rlic *redisLayerInfoCache) blobMetaHashKey(dgst digest.Digest) string {
- return "blobs::" + dgst.String()
-}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go
new file mode 100644
index 000000000000..36370bdd9733
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go
@@ -0,0 +1,268 @@
+package redis
+
+import (
+ "fmt"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/digest"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/storage/cache"
+ "github.com/garyburd/redigo/redis"
+)
+
+// redisBlobDescriptorService provides an implementation of
+// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in
+// two parts. The first provides fast access to repository membership through a
+// redis set for each repo. The second is a redis hash keyed by the digest of
+// the layer, providing size and mediatype information. There is also
+// a per-repository redis hash of the blob descriptor, allowing override of
+// data. This is currently used to override the mediatype on a per-repository
+// basis.
+//
+// Note that there is no implied relationship between these two caches. The
+// layer may exist in one, both or none and the code must be written this way.
+type redisBlobDescriptorService struct {
+ pool *redis.Pool
+
+ // TODO(stevvooe): We use a pool because we don't have great control over
+ // the cache lifecycle to manage connections. A new connection is fetched
+ // for each operation. Once we have better lifecycle management of the
+ // request objects, we can change this to a connection.
+}
+
+// NewRedisBlobDescriptorCacheProvider returns a new redis-based
+// BlobDescriptorCacheProvider using the provided redis connection pool.
+func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider {
+ return &redisBlobDescriptorService{
+ pool: pool,
+ }
+}
+
+// RepositoryScoped returns the scoped cache.
+func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
+ if err := v2.ValidateRepositoryName(repo); err != nil {
+ return nil, err
+ }
+
+ return &repositoryScopedRedisBlobDescriptorService{
+ repo: repo,
+ upstream: rbds,
+ }, nil
+}
+
+// Stat retrieves the descriptor data from the redis hash entry.
+func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ if err := dgst.Validate(); err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ conn := rbds.pool.Get()
+ defer conn.Close()
+
+ return rbds.stat(ctx, conn, dgst)
+}
+
+func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
+ if err := dgst.Validate(); err != nil {
+ return err
+ }
+
+ conn := rbds.pool.Get()
+ defer conn.Close()
+
+ // Not atomic in redis <= 2.3
+ reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype")
+ if err != nil {
+ return err
+ }
+
+ if reply == 0 {
+ return distribution.ErrBlobUnknown
+ }
+
+ return nil
+}
+
+// stat provides an internal stat call that takes a connection parameter. This
+// allows some internal management of the connection scope.
+func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { + reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + // NOTE(stevvooe): The "size" field used to be "length". We treat a + // missing "size" field here as an unknown blob, which causes a cache + // miss, effectively migrating the field. + if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + var desc distribution.Descriptor + if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { + return distribution.Descriptor{}, err + } + + return desc, nil +} + +// SetDescriptor sets the descriptor data for the given digest using a redis +// hash. A hash is used here since we may store unrelated fields about a layer +// in the future. +func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), + "digest", desc.Digest, + "size", desc.Size); err != nil { + return err + } + + // Only set mediatype if not already set. + if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), + "mediatype", desc.MediaType); err != nil { + return err + } + + return nil +} + +func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "blobs::" + dgst.String() +} + +type repositoryScopedRedisBlobDescriptorService struct { + repo string + upstream *redisBlobDescriptorService +} + +var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} + +// Stat ensures that the digest is a member of the specified repository and +// forwards the descriptor request to the global blob store. If the media type +// differs for the repository, we override it. +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return distribution.Descriptor{}, err + } + + if !member { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // We allow a per repository mediatype, let's look it up here. 
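+ // Conceptually the lookup below merges two hashes, using the key
+ // layout defined at the bottom of this file (sketch):
+ //
+ //    conn.Do("HMGET", "blobs::"+dgst.String(), "digest", "size", "mediatype")
+ //    conn.Do("HGET", "repository::"+repo+"::blobs::"+dgst.String(), "mediatype")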
+ mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype"))
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ if mediatype != "" {
+ upstream.MediaType = mediatype
+ }
+
+ return upstream, nil
+}
+
+// Clear removes the descriptor from the cache and forwards to the upstream descriptor store
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
+ if err := dgst.Validate(); err != nil {
+ return err
+ }
+
+ conn := rsrbds.upstream.pool.Get()
+ defer conn.Close()
+
+ // Check membership to repository first
+ member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
+ if err != nil {
+ return err
+ }
+
+ if !member {
+ return distribution.ErrBlobUnknown
+ }
+
+ return rsrbds.upstream.Clear(ctx, dgst)
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ if err := dgst.Validate(); err != nil {
+ return err
+ }
+
+ if err := cache.ValidateDescriptor(desc); err != nil {
+ return err
+ }
+
+ if dgst != desc.Digest {
+ if dgst.Algorithm() == desc.Digest.Algorithm() {
+ return fmt.Errorf("redis cache: digests for descriptors differ but algorithm does not: %q != %q", dgst, desc.Digest)
+ }
+ }
+
+ conn := rsrbds.upstream.pool.Get()
+ defer conn.Close()
+
+ return rsrbds.setDescriptor(ctx, conn, dgst, desc)
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
+ if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil {
+ return err
+ }
+
+ if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil {
+ return err
+ }
+
+ // Override repository mediatype.
+ if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil {
+ return err
+ }
+
+ // Also set the values for the primary descriptor, if they differ by
+ // algorithm (i.e. sha256 vs tarsum).
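+ //
+ // (Sketched with hypothetical digest values: after setting a
+ // descriptor under a tarsum key whose canonical digest is sha256,
+ //
+ //    _ = rsrbds.SetDescriptor(ctx, tarsumDgst, desc) // desc.Digest is sha256
+ //
+ // a later Stat by either tarsumDgst or desc.Digest resolves.)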
+ if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { + if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { + return err + } + } + + return nil +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "repository::" + rsrbds.repo + "::blobs::" + dgst.String() +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string { + return "repository::" + rsrbds.repo + "::blobs" +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go similarity index 83% rename from Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis_test.go rename to Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go index 7422a7ebb0ba..ed6944a17867 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go @@ -1,4 +1,4 @@ -package cache +package redis import ( "flag" @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/docker/distribution/registry/storage/cache" "github.com/garyburd/redigo/redis" ) @@ -17,7 +18,7 @@ func init() { // TestRedisLayerInfoCache exercises a live redis instance using the cache // implementation. -func TestRedisLayerInfoCache(t *testing.T) { +func TestRedisBlobDescriptorCacheProvider(t *testing.T) { if redisAddr == "" { // fallback to an environement variable redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR") @@ -46,5 +47,5 @@ func TestRedisLayerInfoCache(t *testing.T) { t.Fatalf("unexpected error flushing redis db: %v", err) } - checkLayerInfoCache(t, NewRedisLayerInfoCache(pool)) + cache.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/suite.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/suite.go new file mode 100644 index 000000000000..b5a2f64317a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/suite.go @@ -0,0 +1,178 @@ +package cache + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// CheckBlobDescriptorCache takes a cache implementation through a common set +// of operations. If adding new tests, please add them here so new +// implementations get the benefit. This should be used for unit tests. 
+func CheckBlobDescriptorCache(t *testing.T, provider BlobDescriptorCacheProvider) {
+ ctx := context.Background()
+
+ checkBlobDescriptorCacheEmptyRepository(t, ctx, provider)
+ checkBlobDescriptorCacheSetAndRead(t, ctx, provider)
+}
+
+func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) {
+ if _, err := provider.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown {
+ t.Fatalf("expected unknown blob error with empty store: %v", err)
+ }
+
+ cache, err := provider.RepositoryScoped("")
+ if err == nil {
+ t.Fatalf("expected an error when asking for invalid repo")
+ }
+
+ cache, err = provider.RepositoryScoped("foo/bar")
+ if err != nil {
+ t.Fatalf("unexpected error getting repository: %v", err)
+ }
+
+ if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
+ Digest: "sha384:abc",
+ Size: 10,
+ MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
+ t.Fatalf("expected error with invalid digest: %v", err)
+ }
+
+ if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{
+ Digest: "",
+ Size: 10,
+ MediaType: "application/octet-stream"}); err == nil {
+ t.Fatalf("expected error setting value on invalid descriptor")
+ }
+
+ if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat {
+ t.Fatalf("expected error checking for cache item with empty digest: %v", err)
+ }
+
+ if _, err := cache.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown {
+ t.Fatalf("expected unknown blob error with empty repo: %v", err)
+ }
+}
+
+func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) {
+ localDigest := digest.Digest("sha384:abc")
+ expected := distribution.Descriptor{
+ Digest: "sha256:abc",
+ Size: 10,
+ MediaType: "application/octet-stream"}
+
+ cache, err := provider.RepositoryScoped("foo/bar")
+ if err != nil {
+ t.Fatalf("unexpected error getting scoped cache: %v", err)
+ }
+
+ if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
+ t.Fatalf("error setting descriptor: %v", err)
+ }
+
+ desc, err := cache.Stat(ctx, localDigest)
+ if err != nil {
+ t.Fatalf("unexpected error statting localDigest: %v", err)
+ }
+
+ if expected != desc {
+ t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+ }
+
+ // also check that the descriptor is stable on re-read
+ desc, err = cache.Stat(ctx, localDigest)
+ if err != nil {
+ t.Fatalf("descriptor not returned on re-read: %v", err)
+ }
+
+ if expected != desc {
+ t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+ }
+
+ // ensure that global gets extra descriptor mapping
+ desc, err = provider.Stat(ctx, localDigest)
+ if err != nil {
+ t.Fatalf("unexpected error statting global cache: %v, %v", err, desc)
+ }
+
+ if desc != expected {
+ t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+ }
+
+ // get at it through canonical descriptor
+ desc, err = provider.Stat(ctx, expected.Digest)
+ if err != nil {
+ t.Fatalf("unexpected error checking global descriptor: %v", err)
+ }
+
+ if desc != expected {
+ t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+ }
+
+ // now, we set the repo local mediatype to something else and ensure it
+ // doesn't get changed in the provider cache.
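+ //
+ // Sketch of the expectation checked below: the scoped cache and the
+ // global cache may disagree on mediatype for the same digest:
+ //
+ //    cache.Stat(ctx, localDigest)    // MediaType: "application/json"
+ //    provider.Stat(ctx, localDigest) // MediaType: "application/octet-stream"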
+ expected.MediaType = "application/json" + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("unexpected error setting descriptor: %v", err) + } + + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting descriptor: %v", err) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } + + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting global descriptor: %v", err) + } + + expected.MediaType = "application/octet-stream" // expect original mediatype in global + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } +} + +func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:abc") + expected := distribution.Descriptor{ + Digest: "sha256:abc", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + err = cache.Clear(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error deleting descriptor") + } + + nonExistantDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + err = cache.Clear(ctx, nonExistantDigest) + if err == nil { + t.Fatalf("expected error deleting unknown descriptor") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go new file mode 100644 index 000000000000..470894b71eae --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go @@ -0,0 +1,65 @@ +package storage + +import ( + "errors" + "io" + "path" + "sort" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// Returns a list, or partial list, of repositories in the registry. +// Because it's a quite expensive operation, it should only be used when building up +// an initial set of repositories. +func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + var foundRepos []string + var errVal error + + if len(repos) == 0 { + return 0, errors.New("no space in slice") + } + + root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + if err != nil { + return 0, err + } + + // Walk each of the directories in our storage. Unfortunately since there's no + // guarantee that storage will return files in lexigraphical order, we have + // to store everything another slice, sort it and then copy it back to our + // passed in slice. 
+ + Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + + // lop the base path off + repoPath := filePath[len(root)+1:] + + _, file := path.Split(repoPath) + if file == "_layers" { + repoPath = strings.TrimSuffix(repoPath, "/_layers") + if repoPath > last { + foundRepos = append(foundRepos, repoPath) + } + return ErrSkipDir + } else if strings.HasPrefix(file, "_") { + return ErrSkipDir + } + + return nil + }) + + sort.Strings(foundRepos) + n = copy(repos, foundRepos) + + // Signal that we have no more entries by setting EOF + if len(foundRepos) <= len(repos) { + errVal = io.EOF + } + + return n, errVal + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog_test.go new file mode 100644 index 000000000000..60b853e77e72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog_test.go @@ -0,0 +1,122 @@ +package storage + +import ( + "io" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type setupEnv struct { + ctx context.Context + driver driver.StorageDriver + expected []string + registry distribution.Namespace +} + +func setupFS(t *testing.T) *setupEnv { + d := inmemory.New() + c := []byte("") + ctx := context.Background() + registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false, false) + rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{}) + + repos := []string{ + "/foo/a/_layers/1", + "/foo/b/_layers/2", + "/bar/c/_layers/3", + "/bar/d/_layers/4", + "/foo/d/in/_layers/5", + "/an/invalid/repo", + "/bar/d/_layers/ignored/dir/6", + } + + for _, repo := range repos { + if err := d.PutContent(ctx, rootpath+repo, c); err != nil { + t.Fatalf("Unable to put to inmemory fs") + } + } + + expected := []string{ + "bar/c", + "bar/d", + "foo/a", + "foo/b", + "foo/d/in", + } + + return &setupEnv{ + ctx: ctx, + driver: d, + expected: expected, + registry: registry, + } +} + +func TestCatalog(t *testing.T) { + env := setupFS(t) + + p := make([]string, 50) + + numFilled, err := env.registry.Repositories(env.ctx, p, "") + + if !testEq(p, env.expected, numFilled) { + t.Errorf("Expected catalog repos err") + } + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } +} + +func TestCatalogInParts(t *testing.T) { + env := setupFS(t) + + chunkLen := 2 + p := make([]string, chunkLen) + + numFilled, err := env.registry.Repositories(env.ctx, p, "") + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[0:chunkLen], numFilled) { + t.Errorf("Expected catalog first chunk err") + } + + lastRepo := p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { + t.Errorf("Expected catalog second chunk err") + } + + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } + 
+ if !testEq(p, env.expected[chunkLen*2:chunkLen*3-1], numFilled) { + t.Errorf("Expected catalog third chunk err") + } + +} + +func testEq(a, b []string, size int) bool { + for cnt := 0; cnt < size-1; cnt++ { + if a[cnt] != b[cnt] { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go index b985b7a95591..cbb959812dcb 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go @@ -11,11 +11,12 @@ import ( "strings" "time" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) const driverName = "azure" @@ -67,7 +68,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { realm, ok := parameters[paramRealm] if !ok || fmt.Sprint(realm) == "" { - realm = azure.DefaultBaseUrl + realm = azure.DefaultBaseURL } return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) @@ -75,7 +76,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // New constructs a new Driver with the given Azure Storage Account credentials func New(accountName, accountKey, container, realm string) (*Driver, error) { - api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultApiVersion, true) + api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) if err != nil { return nil, err } @@ -88,7 +89,7 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) { } d := &driver{ - client: *blobClient, + client: blobClient, container: container} return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil } @@ -99,7 +100,7 @@ func (d *driver) Name() string { } // GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { blob, err := d.client.GetBlob(d.container, path) if err != nil { if is404(err) { @@ -112,13 +113,22 @@ func (d *driver) GetContent(path string) ([]byte, error) { } // PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(path string, contents []byte) error { - return d.client.PutBlockBlob(d.container, path, ioutil.NopCloser(bytes.NewReader(contents))) +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { + return err + } + if err := d.client.CreateBlockBlob(d.container, path); err != nil { + return err + } + bs := newAzureBlockStorage(d.client) + bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) + _, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents)) + return err } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. 
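Stepping back to the catalog change for a moment: the tests above pin down the paging contract of the new `Repositories` call. It fills the supplied slice, reports how many names it wrote, and returns `io.EOF` once the lexical walk is exhausted, with `last` acting as an exclusive resume cursor. A minimal consumer sketch under just those assumptions (the helper name `allRepositories` is hypothetical):

```go
// allRepositories drains the catalog using the paging behaviour exercised by
// TestCatalogInParts above; only the Repositories signature is assumed.
func allRepositories(ctx context.Context, reg distribution.Namespace) ([]string, error) {
	var all []string
	page := make([]string, 100) // fixed-size window, reused across calls
	last := ""                  // empty cursor starts at the lexical beginning
	for {
		n, err := reg.Repositories(ctx, page, last)
		all = append(all, page[:n]...)
		if err == io.EOF {
			return all, nil // fewer entries remained than the window holds
		}
		if err != nil {
			return nil, err
		}
		last = page[n-1] // resume strictly after the last name received
	}
}
```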
-func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err } else if !ok { @@ -145,7 +155,7 @@ func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. -func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (int64, error) { +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { if blobExists, err := d.client.BlobExists(d.container, path); err != nil { return 0, err } else if !blobExists { @@ -166,7 +176,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (int64 // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { // Check if the path is a blob if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err @@ -215,7 +225,7 @@ func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. -func (d *driver) List(path string) ([]string, error) { +func (d *driver) List(ctx context.Context, path string) ([]string, error) { if path == "/" { path = "" } @@ -231,8 +241,8 @@ func (d *driver) List(path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. -func (d *driver) Move(sourcePath string, destPath string) error { - sourceBlobURL := d.client.GetBlobUrl(d.container, sourcePath) +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) if err != nil { if is404(err) { @@ -245,7 +255,7 @@ func (d *driver) Move(sourcePath string, destPath string) error { } // Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(path string) error { +func (d *driver) Delete(ctx context.Context, path string) error { ok, err := d.client.DeleteBlobIfExists(d.container, path) if err != nil { return err @@ -275,7 +285,7 @@ func (d *driver) Delete(path string) error { // URLFor returns a publicly accessible URL for the blob stored at given path // for specified duration by making use of Azure Storage Shared Access Signatures (SAS). // See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. 
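Every method on the Azure driver above now threads a `context.Context` through to the backend, and the same mechanical change repeats for each driver in this diff. Collected in one place, the new signatures imply the following shape for `storagedriver.StorageDriver`; this is a reconstruction for orientation, not the verbatim interface file (which this excerpt does not show):

```go
// Context-aware driver contract implied by the signature changes in this diff.
// FileInfo, PathRegexp and the error types all live in the storagedriver package.
type StorageDriver interface {
	Name() string
	GetContent(ctx context.Context, path string) ([]byte, error)
	PutContent(ctx context.Context, path string, content []byte) error
	ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
	WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error)
	Stat(ctx context.Context, path string) (FileInfo, error)
	List(ctx context.Context, path string) ([]string, error)
	Move(ctx context.Context, sourcePath string, destPath string) error
	Delete(ctx context.Context, path string) error
	URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error)
}
```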
-func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration expires, ok := options["expiry"] if ok { @@ -351,6 +361,6 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { } func is404(err error) bool { - e, ok := err.(azure.StorageServiceError) + e, ok := err.(azure.AzureStorageServiceError) return ok && e.StatusCode == http.StatusNotFound } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go index 4990ba19b284..4a0661b3e606 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go @@ -59,11 +59,5 @@ func init() { return "" } - testsuites.RegisterInProcessSuite(azureDriverConstructor, skipCheck) - // testsuites.RegisterIPCSuite(driverName, map[string]string{ - // paramAccountName: accountName, - // paramAccountKey: accountKey, - // paramContainer: container, - // paramRealm: realm, - // }, skipCheck) + testsuites.RegisterSuite(azureDriverConstructor, skipCheck) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go index 10b2bf216e06..1c1df899ce30 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) // azureBlockStorage is adaptor between azure.BlobStorageClient and diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go index c29b4742c4d9..7ce4719577ff 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go @@ -6,7 +6,7 @@ import ( "io" "io/ioutil" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) type StorageSimulator struct { @@ -122,12 +122,12 @@ func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.B var blockIDs []string for _, v := range blocks { - bl, ok := bb.blocks[v.Id] + bl, ok := bb.blocks[v.ID] if !ok { // check if block ID exists - return fmt.Errorf("Block id '%s' not found", v.Id) + return fmt.Errorf("Block id '%s' not found", v.ID) } bl.committed = true - blockIDs = append(blockIDs, v.Id) + blockIDs = append(blockIDs, v.ID) } // Mark all other blocks uncommitted diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go index f6bda6a86d7e..776c7cd593c6 100644 --- 
a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go @@ -7,7 +7,7 @@ import ( "sync" "time" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) type blockIDGenerator struct { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go index 6569e15d7372..aab70202a907 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go @@ -4,7 +4,7 @@ import ( "math" "testing" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) func Test_blockIdGenerator(t *testing.T) { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go index b570d5593a76..f18692d0b8d7 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go @@ -5,7 +5,7 @@ import ( "io" "io/ioutil" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) // blockStorage is the interface required from a block storage service @@ -75,7 +75,7 @@ func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chu // Use existing block list var existingBlocks []azure.Block for _, v := range blocks.CommittedBlocks { - existingBlocks = append(existingBlocks, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) + existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) } blockList = append(existingBlocks, blockList...) 
 }
@@ -111,7 +111,7 @@ func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.R
 		if err := r.bs.PutBlock(container, blob, blockID, data); err != nil {
 			return newBlocks, nn, err
 		}
-		newBlocks = append(newBlocks, azure.Block{Id: blockID, Status: azure.BlockStatusUncommitted})
+		newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted})
 	}
 	return newBlocks, nn, nil
 }
@@ -131,7 +131,7 @@ func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset in
 	for _, v := range bx.CommittedBlocks {
 		blkSize := int64(v.Size)
 		if o >= blkSize { // use existing block
-			left = append(left, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted})
+			left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
 			o -= blkSize
 			elapsed += blkSize
 		} else if o > 0 { // current block needs to be split
@@ -150,7 +150,7 @@ func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset in
 			if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
 				return left, err
 			}
-			left = append(left, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted})
+			left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted})
 			break
 		}
 	}
@@ -177,7 +177,7 @@ func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset i
 		)
 		if bs > re { // take the block as is
-			right = append(right, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted})
+			right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
 		} else if be > re { // current block needs to be split
 			part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1)
 			if err != nil {
@@ -192,7 +192,7 @@ func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset i
 			if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
 				return right, err
 			}
-			right = append(right, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted})
+			right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted})
 		}
 		elapsed += int64(v.Size)
 	}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go
index 2c7480dbf98d..32c2509e4a15 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go
@@ -9,7 +9,7 @@ import (
 	"strings"
 	"testing"
 
-	azure "github.com/MSOpenTech/azure-sdk-for-go/storage"
+	azure "github.com/Azure/azure-sdk-for-go/storage"
 )
 
 func TestRandomWriter_writeChunkToBlocks(t *testing.T) {
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go
index 8fa747dd6678..60af06b86c3a 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go
@@ -51,32 +51,32 @@ type Base struct {
 }
 
 // GetContent wraps GetContent of underlying storage driver.
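One more note on the Azure hunks above before the `base` package: the `Id` to `ID` renames track the SDK's move from `github.com/MSOpenTech` to `github.com/Azure` and its adoption of Go-style initialisms. The random writer's block bookkeeping, condensed into a hedged helper (`mergeBlocks` is hypothetical, and `azure.BlockResponse` as the committed-entry type is inferred from the `v.Name`/`v.Size` usage above):

```go
// mergeBlocks mirrors the bookkeeping above: committed blocks are reused by
// name, freshly uploaded data enters as uncommitted blocks, and a later
// PutBlockList commits the combined list in order.
func mergeBlocks(committed []azure.BlockResponse, fresh []azure.Block) []azure.Block {
	merged := make([]azure.Block, 0, len(committed)+len(fresh))
	for _, v := range committed {
		merged = append(merged, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
	}
	return append(merged, fresh...)
}
```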
-func (base *Base) GetContent(path string) ([]byte, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.GetContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.GetContent(path) + return base.StorageDriver.GetContent(ctx, path) } // PutContent wraps PutContent of underlying storage driver. -func (base *Base) PutContent(path string, content []byte) error { - _, done := context.WithTrace(context.Background()) +func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { + ctx, done := context.WithTrace(ctx) defer done("%s.PutContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.PutContent(path, content) + return base.StorageDriver.PutContent(ctx, path, content) } // ReadStream wraps ReadStream of underlying storage driver. -func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) if offset < 0 { @@ -87,12 +87,12 @@ func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { return nil, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.ReadStream(path, offset) + return base.StorageDriver.ReadStream(ctx, path, offset) } // WriteStream wraps WriteStream of underlying storage driver. -func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { + ctx, done := context.WithTrace(ctx) defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) if offset < 0 { @@ -103,36 +103,36 @@ func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn i return 0, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.WriteStream(path, offset, reader) + return base.StorageDriver.WriteStream(ctx, path, offset, reader) } // Stat wraps Stat of underlying storage driver. -func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.Stat(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.Stat(path) + return base.StorageDriver.Stat(ctx, path) } // List wraps List of underlying storage driver. 
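The change running through `base.go` deserves a comment: previously every wrapper opened its trace on a throwaway `context.Background()`, so request-scoped loggers and trace IDs died at the storage boundary; now the trace is derived from the caller's context, and that same derived context is handed to the wrapped driver. In isolation, the pattern looks like this (a sketch using only `WithTrace` and `GetLogger` from `github.com/docker/distribution/context`):

```go
// traced shows the derive-then-propagate pattern now used by every Base method.
func traced(ctx context.Context) {
	// Derive from the caller instead of context.Background(), so request-scoped
	// values travel down into whatever traced calls next.
	ctx, done := context.WithTrace(ctx)
	defer done("traced(...)") // reports the span, with duration, on return

	context.GetLogger(ctx).Debug("inside the traced span")
}
```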
-func (base *Base) List(path string) ([]string, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) List(ctx context.Context, path string) ([]string, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.List(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) && path != "/" { return nil, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.List(path) + return base.StorageDriver.List(ctx, path) } // Move wraps Move of underlying storage driver. -func (base *Base) Move(sourcePath string, destPath string) error { - _, done := context.WithTrace(context.Background()) +func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { + ctx, done := context.WithTrace(ctx) defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) if !storagedriver.PathRegexp.MatchString(sourcePath) { @@ -141,29 +141,29 @@ func (base *Base) Move(sourcePath string, destPath string) error { return storagedriver.InvalidPathError{Path: destPath} } - return base.StorageDriver.Move(sourcePath, destPath) + return base.StorageDriver.Move(ctx, sourcePath, destPath) } // Delete wraps Delete of underlying storage driver. -func (base *Base) Delete(path string) error { - _, done := context.WithTrace(context.Background()) +func (base *Base) Delete(ctx context.Context, path string) error { + ctx, done := context.WithTrace(ctx) defer done("%s.Delete(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.Delete(path) + return base.StorageDriver.Delete(ctx, path) } // URLFor wraps URLFor of underlying storage driver. -func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.URLFor(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return "", storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.URLFor(path, options) + return base.StorageDriver.URLFor(ctx, path, options) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go index 66d160f385c1..e84f0026bdcd 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go @@ -33,30 +33,14 @@ func Register(name string, factory StorageDriverFactory) { driverFactories[name] = factory } -// Create a new storagedriver.StorageDriver with the given name and parameters -// To run in-process, the StorageDriverFactory must first be registered with the given name -// If no in-process drivers are found with the given name, this attempts to create an IPC driver -// If no in-process or external drivers are found, an InvalidStorageDriverError is returned +// Create a new storagedriver.StorageDriver with the given name and +// parameters. To use a driver, the StorageDriverFactory must first be +// registered with the given name. 
If no drivers are found, an +// InvalidStorageDriverError is returned func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { driverFactory, ok := driverFactories[name] if !ok { return nil, InvalidStorageDriverError{name} - - // NOTE(stevvooe): We are disabling storagedriver ipc for now, as the - // server and client need to be updated for the changed API calls and - // there were some problems libchan hanging. We'll phase this - // functionality back in over the next few weeks. - - // No registered StorageDriverFactory found, try ipc - // driverClient, err := ipc.NewDriverClient(name, parameters) - // if err != nil { - // return nil, InvalidStorageDriverError{name} - // } - // err = driverClient.Start() - // if err != nil { - // return nil, err - // } - // return driverClient, nil } return driverFactory.Create(parameters) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go index 9ffe08887e98..d5d8708cba3f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go @@ -9,13 +9,14 @@ import ( "path" "time" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" ) const driverName = "filesystem" -const defaultRootDirectory = "/tmp/registry/storage" +const defaultRootDirectory = "/var/lib/registry" func init() { factory.Register(driverName, &filesystemDriverFactory{}) @@ -76,8 +77,8 @@ func (d *driver) Name() string { } // GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { - rc, err := d.ReadStream(path, 0) +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(ctx, path, 0) if err != nil { return nil, err } @@ -92,8 +93,8 @@ func (d *driver) GetContent(path string) ([]byte, error) { } // PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(subPath string, contents []byte) error { - if _, err := d.WriteStream(subPath, 0, bytes.NewReader(contents)); err != nil { +func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { + if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { return err } @@ -102,7 +103,7 @@ func (d *driver) PutContent(subPath string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) if err != nil { if os.IsNotExist(err) { @@ -126,7 +127,7 @@ func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.Reader at a location // designated by the given path. 
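Two practical consequences of the factory and filesystem hunks above: `factory.Create` now resolves only drivers that were compiled in and registered themselves via `init`, and the filesystem driver's default root moved from `/tmp/registry/storage` to `/var/lib/registry`. A runnable sketch (the `"rootdirectory"` parameter name also appears in the removed IPC test registration further down):

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/registry/storage/driver/factory"
	// Drivers register themselves in init; blank-import the ones you need.
	_ "github.com/docker/distribution/registry/storage/driver/filesystem"
)

func main() {
	// Without "rootdirectory" the driver now defaults to /var/lib/registry.
	d, err := factory.Create("filesystem", map[string]interface{}{
		"rootdirectory": "/tmp/registry-test",
	})
	if err != nil {
		log.Fatal(err) // unknown names yield InvalidStorageDriverError
	}
	fmt.Println(d.Name()) // "filesystem"
}
```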
-func (d *driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn int64, err error) { +func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { // TODO(stevvooe): This needs to be a requirement. // if !path.IsAbs(subPath) { // return fmt.Errorf("absolute path required: %q", subPath) @@ -162,7 +163,7 @@ func (d *driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. -func (d *driver) Stat(subPath string) (storagedriver.FileInfo, error) { +func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { fullPath := d.fullPath(subPath) fi, err := os.Stat(fullPath) @@ -182,7 +183,7 @@ func (d *driver) Stat(subPath string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. -func (d *driver) List(subPath string) ([]string, error) { +func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { if subPath[len(subPath)-1] != '/' { subPath += "/" } @@ -213,7 +214,7 @@ func (d *driver) List(subPath string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. -func (d *driver) Move(sourcePath string, destPath string) error { +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { source := d.fullPath(sourcePath) dest := d.fullPath(destPath) @@ -230,7 +231,7 @@ func (d *driver) Move(sourcePath string, destPath string) error { } // Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(subPath string) error { +func (d *driver) Delete(ctx context.Context, subPath string) error { fullPath := d.fullPath(subPath) _, err := os.Stat(fullPath) @@ -246,7 +247,7 @@ func (d *driver) Delete(subPath string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go index 8572de16e763..8b48b4312139 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go @@ -20,10 +20,7 @@ func init() { } defer os.Remove(root) - testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) { + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return New(root), nil }, testsuites.NeverSkip) - - // BUG(stevvooe): IPC is broken so we're disabling for now. Will revisit later. 
- // testsuites.RegisterIPCSuite(driverName, map[string]string{"rootdirectory": root}, testsuites.NeverSkip) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go index e0694de2e983..2d121e1cf682 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" @@ -69,11 +70,11 @@ func (d *driver) Name() string { } // GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { d.mutex.RLock() defer d.mutex.RUnlock() - rc, err := d.ReadStream(path, 0) + rc, err := d.ReadStream(ctx, path, 0) if err != nil { return nil, err } @@ -83,7 +84,7 @@ func (d *driver) GetContent(path string) ([]byte, error) { } // PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(p string, contents []byte) error { +func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { d.mutex.Lock() defer d.mutex.Unlock() @@ -102,7 +103,7 @@ func (d *driver) PutContent(p string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { d.mutex.RLock() defer d.mutex.RUnlock() @@ -126,7 +127,7 @@ func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. -func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { d.mutex.Lock() defer d.mutex.Unlock() @@ -167,7 +168,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (nn in } // Stat returns info about the provided path. -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { d.mutex.RLock() defer d.mutex.RUnlock() @@ -193,7 +194,7 @@ func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. -func (d *driver) List(path string) ([]string, error) { +func (d *driver) List(ctx context.Context, path string) ([]string, error) { d.mutex.RLock() defer d.mutex.RUnlock() @@ -223,7 +224,7 @@ func (d *driver) List(path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. 
-func (d *driver) Move(sourcePath string, destPath string) error { +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { d.mutex.Lock() defer d.mutex.Unlock() @@ -239,7 +240,7 @@ func (d *driver) Move(sourcePath string, destPath string) error { } // Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(path string) error { +func (d *driver) Delete(ctx context.Context, path string) error { d.mutex.Lock() defer d.mutex.Unlock() @@ -256,6 +257,6 @@ func (d *driver) Delete(path string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go index a02ff23e3283..dbc1916f958d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go @@ -5,7 +5,6 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" - "gopkg.in/check.v1" ) @@ -16,9 +15,5 @@ func init() { inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { return New(), nil } - testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip) - - // BUG(stevvooe): Disable flaky IPC tests for now when we can troubleshoot - // the problems with libchan. - // testsuites.RegisterIPCSuite(driverName, nil, testsuites.NeverSkip) + testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/client.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/client.go deleted file mode 100644 index daa823d7e902..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/client.go +++ /dev/null @@ -1,454 +0,0 @@ -// +build ignore - -package ipc - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "syscall" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" - "github.com/docker/libchan/spdy" -) - -// StorageDriverExecutablePrefix is the prefix which the IPC storage driver -// loader expects driver executables to begin with. For example, the s3 driver -// should be named "registry-storagedriver-s3". 
-const StorageDriverExecutablePrefix = "registry-storagedriver-" - -// StorageDriverClient is a storagedriver.StorageDriver implementation using a -// managed child process communicating over IPC using libchan with a unix domain -// socket -type StorageDriverClient struct { - subprocess *exec.Cmd - exitChan chan error - exitErr error - stopChan chan struct{} - socket *os.File - transport *spdy.Transport - sender libchan.Sender - version storagedriver.Version -} - -// NewDriverClient constructs a new out-of-process storage driver using the -// driver name and configuration parameters -// A user must call Start on this driver client before remote method calls can -// be made -// -// Looks for drivers in the following locations in order: -// - Storage drivers directory (to be determined, yet not implemented) -// - $GOPATH/bin -// - $PATH -func NewDriverClient(name string, parameters map[string]string) (*StorageDriverClient, error) { - paramsBytes, err := json.Marshal(parameters) - if err != nil { - return nil, err - } - - driverExecName := StorageDriverExecutablePrefix + name - driverPath, err := exec.LookPath(driverExecName) - if err != nil { - return nil, err - } - - command := exec.Command(driverPath, string(paramsBytes)) - - return &StorageDriverClient{ - subprocess: command, - }, nil -} - -// Start starts the designated child process storage driver and binds a socket -// to this process for IPC method calls -func (driver *StorageDriverClient) Start() error { - driver.exitErr = nil - driver.exitChan = make(chan error) - driver.stopChan = make(chan struct{}) - - fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0) - if err != nil { - return err - } - - childSocket := os.NewFile(uintptr(fileDescriptors[0]), "childSocket") - driver.socket = os.NewFile(uintptr(fileDescriptors[1]), "parentSocket") - - driver.subprocess.Stdout = os.Stdout - driver.subprocess.Stderr = os.Stderr - driver.subprocess.ExtraFiles = []*os.File{childSocket} - - if err = driver.subprocess.Start(); err != nil { - driver.Stop() - return err - } - - go driver.handleSubprocessExit() - - if err = childSocket.Close(); err != nil { - driver.Stop() - return err - } - - connection, err := net.FileConn(driver.socket) - if err != nil { - driver.Stop() - return err - } - driver.transport, err = spdy.NewClientTransport(connection) - if err != nil { - driver.Stop() - return err - } - driver.sender, err = driver.transport.NewSendChannel() - if err != nil { - driver.Stop() - return err - } - - // Check the driver's version to determine compatibility - receiver, remoteSender := libchan.Pipe() - err = driver.sender.Send(&Request{Type: "Version", ResponseChannel: remoteSender}) - if err != nil { - driver.Stop() - return err - } - - var response VersionResponse - err = receiver.Receive(&response) - if err != nil { - driver.Stop() - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - driver.version = response.Version - - if driver.version.Major() != storagedriver.CurrentVersion.Major() || driver.version.Minor() > storagedriver.CurrentVersion.Minor() { - return IncompatibleVersionError{driver.version} - } - - return nil -} - -// Stop stops the child process storage driver -// storagedriver.StorageDriver methods called after Stop will fail -func (driver *StorageDriverClient) Stop() error { - var closeSenderErr, closeTransportErr, closeSocketErr, killErr error - - if driver.sender != nil { - closeSenderErr = driver.sender.Close() - } - if driver.transport != nil { - 
closeTransportErr = driver.transport.Close() - } - if driver.socket != nil { - closeSocketErr = driver.socket.Close() - } - if driver.subprocess != nil { - killErr = driver.subprocess.Process.Kill() - } - if driver.stopChan != nil { - close(driver.stopChan) - } - - if closeSenderErr != nil { - return closeSenderErr - } else if closeTransportErr != nil { - return closeTransportErr - } else if closeSocketErr != nil { - return closeSocketErr - } - - return killErr -} - -// Implement the storagedriver.StorageDriver interface over IPC - -// GetContent retrieves the content stored at "path" as a []byte. -func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "GetContent", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ReadStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - defer response.Reader.Close() - contents, err := ioutil.ReadAll(response.Reader) - if err != nil { - return nil, err - } - return contents, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (driver *StorageDriverClient) PutContent(path string, contents []byte) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - - params := map[string]interface{}{"Path": path, "Reader": ioutil.NopCloser(bytes.NewReader(contents))} - err := driver.sender.Send(&Request{Type: "PutContent", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(WriteStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (driver *StorageDriverClient) ReadStream(path string, offset int64) (io.ReadCloser, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path, "Offset": offset} - err := driver.sender.Send(&Request{Type: "ReadStream", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ReadStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - return response.Reader, nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. 
-func (driver *StorageDriverClient) WriteStream(path string, offset, size int64, reader io.ReadCloser) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": reader} - err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(WriteStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// CurrentSize retrieves the curernt size in bytes of the object at the given -// path. -func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) { - if err := driver.exited(); err != nil { - return 0, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "CurrentSize", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return 0, err - } - - response := new(CurrentSizeResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return 0, err - } - - if response.Error != nil { - return 0, response.Error.Unwrap() - } - - return response.Position, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (driver *StorageDriverClient) List(path string) ([]string, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "List", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ListResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - return response.Keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (driver *StorageDriverClient) Move(sourcePath string, destPath string) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"SourcePath": sourcePath, "DestPath": destPath} - err := driver.sender.Send(&Request{Type: "Move", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(MoveResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
-func (driver *StorageDriverClient) Delete(path string) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "Delete", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(DeleteResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// handleSubprocessExit populates the exit channel until we have explicitly -// stopped the storage driver subprocess -// Requests can select on driver.exitChan and response receiving and not hang if -// the process exits -func (driver *StorageDriverClient) handleSubprocessExit() { - exitErr := driver.subprocess.Wait() - if exitErr == nil { - exitErr = fmt.Errorf("Storage driver subprocess already exited cleanly") - } else { - exitErr = fmt.Errorf("Storage driver subprocess exited with error: %s", exitErr) - } - - driver.exitErr = exitErr - - for { - select { - case driver.exitChan <- exitErr: - case <-driver.stopChan: - close(driver.exitChan) - return - } - } -} - -// receiveResponse populates the response value with the next result from the -// given receiver, or returns an error if receiving failed or the driver has -// stopped -func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, response interface{}) error { - receiveChan := make(chan error, 1) - go func(receiver libchan.Receiver, receiveChan chan<- error) { - receiveChan <- receiver.Receive(response) - }(receiver, receiveChan) - - var err error - var ok bool - select { - case err = <-receiveChan: - case err, ok = <-driver.exitChan: - if !ok { - err = driver.exitErr - } - } - - return err -} - -// exited returns an exit error if the driver has exited or nil otherwise -func (driver *StorageDriverClient) exited() error { - select { - case err, ok := <-driver.exitChan: - if !ok { - return driver.exitErr - } - return err - default: - return nil - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/ipc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/ipc.go deleted file mode 100644 index dabb834de17c..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/ipc.go +++ /dev/null @@ -1,148 +0,0 @@ -// +build ignore - -package ipc - -import ( - "fmt" - "io" - "reflect" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" -) - -// StorageDriver is the interface which IPC storage drivers must implement. As external storage -// drivers may be defined to use a different version of the storagedriver.StorageDriver interface, -// we use an additional version check to determine compatiblity. 
-type StorageDriver interface { - // Version returns the storagedriver.StorageDriver interface version which this storage driver - // implements, which is used to determine driver compatibility - Version() (storagedriver.Version, error) -} - -// IncompatibleVersionError is returned when a storage driver is using an incompatible version of -// the storagedriver.StorageDriver api -type IncompatibleVersionError struct { - version storagedriver.Version -} - -func (e IncompatibleVersionError) Error() string { - return fmt.Sprintf("Incompatible storage driver version: %s", e.version) -} - -// Request defines a remote method call request -// A return value struct is to be sent over the ResponseChannel -type Request struct { - Type string `codec:",omitempty"` - Parameters map[string]interface{} `codec:",omitempty"` - ResponseChannel libchan.Sender `codec:",omitempty"` -} - -// ResponseError is a serializable error type. -// The Type and Parameters may be used to reconstruct the same error on the -// client side, falling back to using the Type and Message if this cannot be -// done. -type ResponseError struct { - Type string `codec:",omitempty"` - Message string `codec:",omitempty"` - Parameters map[string]interface{} `codec:",omitempty"` -} - -// WrapError wraps an error in a serializable struct containing the error's type -// and message. -func WrapError(err error) *ResponseError { - if err == nil { - return nil - } - v := reflect.ValueOf(err) - re := ResponseError{ - Type: v.Type().String(), - Message: err.Error(), - } - - if v.Kind() == reflect.Struct { - re.Parameters = make(map[string]interface{}) - for i := 0; i < v.NumField(); i++ { - field := v.Type().Field(i) - re.Parameters[field.Name] = v.Field(i).Interface() - } - } - return &re -} - -// Unwrap returns the underlying error if it can be reconstructed, or the -// original ResponseError otherwise. 
-func (err *ResponseError) Unwrap() error { - var errVal reflect.Value - var zeroVal reflect.Value - - switch err.Type { - case "storagedriver.PathNotFoundError": - errVal = reflect.ValueOf(&storagedriver.PathNotFoundError{}) - case "storagedriver.InvalidOffsetError": - errVal = reflect.ValueOf(&storagedriver.InvalidOffsetError{}) - } - if errVal == zeroVal { - return err - } - - for k, v := range err.Parameters { - fieldVal := errVal.Elem().FieldByName(k) - if fieldVal == zeroVal { - return err - } - fieldVal.Set(reflect.ValueOf(v)) - } - - if unwrapped, ok := errVal.Elem().Interface().(error); ok { - return unwrapped - } - - return err - -} - -func (err *ResponseError) Error() string { - return fmt.Sprintf("%s: %s", err.Type, err.Message) -} - -// IPC method call response object definitions - -// VersionResponse is a response for a Version request -type VersionResponse struct { - Version storagedriver.Version `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// ReadStreamResponse is a response for a ReadStream request -type ReadStreamResponse struct { - Reader io.ReadCloser `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// WriteStreamResponse is a response for a WriteStream request -type WriteStreamResponse struct { - Error *ResponseError `codec:",omitempty"` -} - -// CurrentSizeResponse is a response for a CurrentSize request -type CurrentSizeResponse struct { - Position uint64 `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// ListResponse is a response for a List request -type ListResponse struct { - Keys []string `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// MoveResponse is a response for a Move request -type MoveResponse struct { - Error *ResponseError `codec:",omitempty"` -} - -// DeleteResponse is a response for a Delete request -type DeleteResponse struct { - Error *ResponseError `codec:",omitempty"` -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/server.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/server.go deleted file mode 100644 index 1752f12baeaf..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/ipc/server.go +++ /dev/null @@ -1,178 +0,0 @@ -// +build ignore - -package ipc - -import ( - "bytes" - "io" - "io/ioutil" - "net" - "os" - "reflect" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" - "github.com/docker/libchan/spdy" -) - -// StorageDriverServer runs a new IPC server handling requests for the given -// storagedriver.StorageDriver -// This explicitly uses file descriptor 3 for IPC communication, as storage drivers are spawned in -// client.go -// -// To create a new out-of-process driver, create a main package which calls StorageDriverServer with -// a storagedriver.StorageDriver -func StorageDriverServer(driver storagedriver.StorageDriver) error { - childSocket := os.NewFile(3, "childSocket") - defer childSocket.Close() - conn, err := net.FileConn(childSocket) - if err != nil { - panic(err) - } - defer conn.Close() - if transport, err := spdy.NewServerTransport(conn); err != nil { - panic(err) - } else { - for { - receiver, err := transport.WaitReceiveChannel() - if err == io.EOF { - return nil - } else if err != nil { - panic(err) - } - go receive(driver, receiver) - } - } -} - -// receive receives new storagedriver.StorageDriver method requests and creates a new 
goroutine to -// handle each request -// Requests are expected to be of type ipc.Request as the parameters are unknown until the request -// type is deserialized -func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) { - for { - var request Request - err := receiver.Receive(&request) - if err == io.EOF { - return - } else if err != nil { - panic(err) - } - go handleRequest(driver, request) - } -} - -// handleRequest handles storagedriver.StorageDriver method requests as defined in client.go -// Responds to requests using the Request.ResponseChannel -func handleRequest(driver storagedriver.StorageDriver, request Request) { - switch request.Type { - case "Version": - err := request.ResponseChannel.Send(&VersionResponse{Version: storagedriver.CurrentVersion}) - if err != nil { - panic(err) - } - case "GetContent": - path, _ := request.Parameters["Path"].(string) - content, err := driver.GetContent(path) - var response ReadStreamResponse - if err != nil { - response = ReadStreamResponse{Error: WrapError(err)} - } else { - response = ReadStreamResponse{Reader: ioutil.NopCloser(bytes.NewReader(content))} - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "PutContent": - path, _ := request.Parameters["Path"].(string) - reader, _ := request.Parameters["Reader"].(io.ReadCloser) - contents, err := ioutil.ReadAll(reader) - defer reader.Close() - if err == nil { - err = driver.PutContent(path, contents) - } - response := WriteStreamResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "ReadStream": - path, _ := request.Parameters["Path"].(string) - // Depending on serialization method, Offset may be converted to any int/uint type - offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() - reader, err := driver.ReadStream(path, offset) - var response ReadStreamResponse - if err != nil { - response = ReadStreamResponse{Error: WrapError(err)} - } else { - response = ReadStreamResponse{Reader: reader} - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "WriteStream": - path, _ := request.Parameters["Path"].(string) - // Depending on serialization method, Offset may be converted to any int/uint type - offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() - // Depending on serialization method, Size may be converted to any int/uint type - size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int() - reader, _ := request.Parameters["Reader"].(io.ReadCloser) - err := driver.WriteStream(path, offset, size, reader) - response := WriteStreamResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "CurrentSize": - path, _ := request.Parameters["Path"].(string) - position, err := driver.CurrentSize(path) - response := CurrentSizeResponse{ - Position: position, - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "List": - path, _ := request.Parameters["Path"].(string) - keys, err := driver.List(path) - response := ListResponse{ - Keys: keys, - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "Move": - sourcePath, _ := request.Parameters["SourcePath"].(string) - destPath, _ := request.Parameters["DestPath"].(string) - err 
:= driver.Move(sourcePath, destPath) - response := MoveResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "Delete": - path, _ := request.Parameters["Path"].(string) - err := driver.Delete(path) - response := DeleteResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - default: - panic(request) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go index aee068a5e629..31c00afc8df5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go @@ -98,12 +98,12 @@ type S3BucketKeyer interface { // Resolve returns an http.Handler which can serve the contents of the given // Layer, or an error if not supported by the storagedriver. -func (lh *cloudFrontStorageMiddleware) URLFor(path string, options map[string]interface{}) (string, error) { +func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { // TODO(endophage): currently only supports S3 keyer, ok := lh.StorageDriver.(S3BucketKeyer) if !ok { - context.GetLogger(context.Background()).Warn("the CloudFront middleware does not support this backend storage driver") - return lh.StorageDriver.URLFor(path, options) + context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") + return lh.StorageDriver.URLFor(ctx, path, options) } cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go new file mode 100644 index 000000000000..d1bc932f8266 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/doc.go @@ -0,0 +1,3 @@ +// Package oss implements the Aliyun OSS Storage driver backend. Support can be +// enabled by including the "include_oss" build tag. +package oss diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go new file mode 100644 index 000000000000..cec320262b23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go @@ -0,0 +1,813 @@ +// Package oss provides a storagedriver.StorageDriver implementation to +// store blobs in Aliyun OSS cloud storage. +// +// This package leverages the denverdino/aliyungo client library for interfacing with +// oss. 
+//
+// Because OSS is a key-value store, the Stat call does not support last
+// modification time for directories (directories are an abstraction for
+// key-value stores)
+//
+// +build include_oss
+
+package oss
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/distribution/context"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/denverdino/aliyungo/oss"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/base"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+)
+
+const driverName = "oss"
+
+// minChunkSize defines the minimum multipart upload chunk size.
+// The OSS API requires multipart upload chunks to be at least 5MB.
+const minChunkSize = 5 << 20
+
+const defaultChunkSize = 2 * minChunkSize
+
+// listMax is the largest number of objects you can request from OSS in a list call
+const listMax = 1000
+
+// DriverParameters encapsulates all of the driver parameters after all values have been set
+type DriverParameters struct {
+	AccessKeyID     string
+	AccessKeySecret string
+	Bucket          string
+	Region          oss.Region
+	Internal        bool
+	Encrypt         bool
+	Secure          bool
+	ChunkSize       int64
+	RootDirectory   string
+	Endpoint        string
+}
+
+func init() {
+	factory.Register(driverName, &ossDriverFactory{})
+}
+
+// ossDriverFactory implements the factory.StorageDriverFactory interface
+type ossDriverFactory struct{}
+
+func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
+type driver struct {
+	Client        *oss.Client
+	Bucket        *oss.Bucket
+	ChunkSize     int64
+	Encrypt       bool
+	RootDirectory string
+
+	pool  sync.Pool // pool []byte buffers used for WriteStream
+	zeros []byte    // shared, zero-valued buffer used for WriteStream
+}
+
+type baseEmbed struct {
+	base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS.
+// Objects are stored at absolute keys in the provided bucket.
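+//
+// An illustrative construction through the storage driver factory (the
+// parameter keys follow FromParameters below; the region and bucket values
+// here are placeholders, not defaults):
+//
+//	d, err := factory.Create("oss", map[string]interface{}{
+//	    "accesskeyid":     "...",
+//	    "accesskeysecret": "...",
+//	    "region":          "oss-cn-hangzhou",
+//	    "bucket":          "my-registry-bucket",
+//	})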
+type Driver struct {
+	baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - accesskeyid
+// - accesskeysecret
+// - region
+// - bucket
+// - encrypt
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+	// The access credentials are required; unlike the S3 driver there is no
+	// instance-credentials fallback here, so missing values are rejected below.
+	accessKey, ok := parameters["accesskeyid"]
+	if !ok {
+		return nil, fmt.Errorf("No accesskeyid parameter provided")
+	}
+	secretKey, ok := parameters["accesskeysecret"]
+	if !ok {
+		return nil, fmt.Errorf("No accesskeysecret parameter provided")
+	}
+
+	regionName, ok := parameters["region"]
+	if !ok || fmt.Sprint(regionName) == "" {
+		return nil, fmt.Errorf("No region parameter provided")
+	}
+
+	bucket, ok := parameters["bucket"]
+	if !ok || fmt.Sprint(bucket) == "" {
+		return nil, fmt.Errorf("No bucket parameter provided")
+	}
+
+	internalBool := false
+	internal, ok := parameters["internal"]
+	if ok {
+		internalBool, ok = internal.(bool)
+		if !ok {
+			return nil, fmt.Errorf("The internal parameter should be a boolean")
+		}
+	}
+
+	encryptBool := false
+	encrypt, ok := parameters["encrypt"]
+	if ok {
+		encryptBool, ok = encrypt.(bool)
+		if !ok {
+			return nil, fmt.Errorf("The encrypt parameter should be a boolean")
+		}
+	}
+
+	secureBool := true
+	secure, ok := parameters["secure"]
+	if ok {
+		secureBool, ok = secure.(bool)
+		if !ok {
+			return nil, fmt.Errorf("The secure parameter should be a boolean")
+		}
+	}
+
+	chunkSize := int64(defaultChunkSize)
+	chunkSizeParam, ok := parameters["chunksize"]
+	if ok {
+		switch v := chunkSizeParam.(type) {
+		case string:
+			vv, err := strconv.ParseInt(v, 0, 64)
+			if err != nil {
+				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+			}
+			chunkSize = vv
+		case int64:
+			chunkSize = v
+		case int, uint, int32, uint32, uint64:
+			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
+		default:
+			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+		}
+
+		if chunkSize < minChunkSize {
+			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
+		}
+	}
+
+	rootDirectory, ok := parameters["rootdirectory"]
+	if !ok {
+		rootDirectory = ""
+	}
+
+	endpoint, ok := parameters["endpoint"]
+	if !ok {
+		endpoint = ""
+	}
+
+	params := DriverParameters{
+		AccessKeyID:     fmt.Sprint(accessKey),
+		AccessKeySecret: fmt.Sprint(secretKey),
+		Bucket:          fmt.Sprint(bucket),
+		Region:          oss.Region(fmt.Sprint(regionName)),
+		ChunkSize:       chunkSize,
+		RootDirectory:   fmt.Sprint(rootDirectory),
+		Encrypt:         encryptBool,
+		Secure:          secureBool,
+		Internal:        internalBool,
+		Endpoint:        fmt.Sprint(endpoint),
+	}
+
+	return New(params)
+}
+
+// New constructs a new Driver with the given Aliyun OSS credentials, region,
+// encryption flag, and bucket name
+func New(params DriverParameters) (*Driver, error) {
+
+	client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
+	client.SetEndpoint(params.Endpoint)
+	bucket := client.Bucket(params.Bucket)
+
+	// Validate that the given credentials have at least read permissions in the
+	// given bucket scope.
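+	// Listing a single key is the cheapest call that exercises read access
+	// without depending on any particular object existing under the root.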
+	if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil {
+		return nil, err
+	}
+
+	// TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise
+	// if you initiated a new OSS client while another one is running on the same bucket.
+
+	d := &driver{
+		Client:        client,
+		Bucket:        bucket,
+		ChunkSize:     params.ChunkSize,
+		Encrypt:       params.Encrypt,
+		RootDirectory: params.RootDirectory,
+		zeros:         make([]byte, params.ChunkSize),
+	}
+
+	d.pool.New = func() interface{} {
+		return make([]byte, d.ChunkSize)
+	}
+
+	return &Driver{
+		baseEmbed: baseEmbed{
+			Base: base.Base{
+				StorageDriver: d,
+			},
+		},
+	}, nil
+}
+
+// Implement the storagedriver.StorageDriver interface
+
+func (d *driver) Name() string {
+	return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
+	content, err := d.Bucket.Get(d.ossPath(path))
+	if err != nil {
+		return nil, parseError(path, err)
+	}
+	return content, nil
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
+	return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions()))
+}
+
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	headers := make(http.Header)
+	headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")
+
+	resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers)
+	if err != nil {
+		return nil, parseError(path, err)
+	}
+
+	// Due to the Aliyun OSS API, a 200 status and the whole object are returned
+	// instead of an InvalidRange error when the range is invalid.
+	//
+	// The OSS server will always return http.StatusPartialContent if the range
+	// is acceptable.
+	if resp.StatusCode != http.StatusPartialContent {
+		resp.Body.Close()
+		return ioutil.NopCloser(bytes.NewReader(nil)), nil
+	}
+
+	return resp.Body, nil
+}
+
+// WriteStream stores the contents of the provided io.Reader at a
+// location designated by the given path. The driver will know it has
+// received the full contents when the reader returns io.EOF. The number
+// of successfully READ bytes will be returned, even if an error is
+// returned. May be used to resume writing a stream by providing a nonzero
+// offset. Offsets past the current size will write from the position
+// beyond the end of the file.
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
+	partNumber := 1
+	bytesRead := 0
+	var putErrChan chan error
+	parts := []oss.Part{}
+	var part oss.Part
+	done := make(chan struct{}) // stopgap to free up waiting goroutines
+
+	multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions())
+	if err != nil {
+		return 0, err
+	}
+
+	buf := d.getbuf()
+
+	// We never want to leave a dangling multipart upload, our only consistent state is
+	// when there is a whole object at path. This is in order to remain consistent with
+	// the stat call.
+	//
+	// Note that if the machine dies before executing the defer, we will be left with a dangling
+	// multipart upload, which will eventually be cleaned up, but we will lose all of the progress
+	// made prior to the machine crashing.
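+	//
+	// For illustration only (not part of the driver): a caller resuming an
+	// interrupted upload would pass the number of bytes already committed as
+	// the offset, e.g.
+	//
+	//	n, err := d.WriteStream(ctx, "/path/to/blob", committed, reader)
+	//
+	// where "committed" is assumed to come from a prior Stat call; WriteStream
+	// zero-fills any gap between the current size and the offset before
+	// appending new data.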
+	defer func() {
+		if putErrChan != nil {
+			if putErr := <-putErrChan; putErr != nil {
+				err = putErr
+			}
+		}
+
+		if len(parts) > 0 {
+			if multi == nil {
+				// Parts should be empty if the multi is not initialized
+				panic("Unreachable")
+			} else {
+				if multi.Complete(parts) != nil {
+					multi.Abort()
+				}
+			}
+		}
+
+		d.putbuf(buf) // needs to be here to pick up new buf value
+		close(done)   // free up any waiting goroutines
+	}()
+
+	// Fills from 0 to total from current
+	fromSmallCurrent := func(total int64) error {
+		current, err := d.ReadStream(ctx, path, 0)
+		if err != nil {
+			return err
+		}
+
+		bytesRead = 0
+		for int64(bytesRead) < total {
+			// The loop should very rarely enter a second iteration
+			nn, err := current.Read(buf[bytesRead:total])
+			bytesRead += nn
+			if err != nil {
+				if err != io.EOF {
+					return err
+				}
+
+				break
+			}
+
+		}
+		return nil
+	}
+
+	// Fills from parameter to chunkSize from reader
+	fromReader := func(from int64) error {
+		bytesRead = 0
+		for from+int64(bytesRead) < d.ChunkSize {
+			nn, err := reader.Read(buf[from+int64(bytesRead):])
+			totalRead += int64(nn)
+			bytesRead += nn
+
+			if err != nil {
+				if err != io.EOF {
+					return err
+				}
+
+				break
+			}
+		}
+
+		if putErrChan == nil {
+			putErrChan = make(chan error)
+		} else {
+			if putErr := <-putErrChan; putErr != nil {
+				putErrChan = nil
+				return putErr
+			}
+		}
+
+		go func(bytesRead int, from int64, buf []byte) {
+			defer d.putbuf(buf) // this buffer gets dropped after this call
+
+			// DRAGONS(stevvooe): There are few things one might want to know
+			// about this section. First, the putErrChan is expecting an error
+			// and a nil or just a nil to come through the channel. This is
+			// covered by the silly defer below. The other aspect is the OSS
+			// retry backoff to deal with RequestTimeout errors. Even though
+			// the underlying OSS library should handle it, it doesn't seem to
+			// be part of the shouldRetry function (see denverdino/aliyungo/oss).
+			defer func() {
+				select {
+				case putErrChan <- nil: // for some reason, we do this no matter what.
+				case <-done:
+					return // ensure we don't leak the goroutine
+				}
+			}()
+
+			if bytesRead <= 0 {
+				return
+			}
+
+			var err error
+			var part oss.Part
+
+		loop:
+			for retries := 0; retries < 5; retries++ {
+				part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]))
+				if err == nil {
+					break // success!
+				}
+
+				// NOTE(stevvooe): This retry code tries to only retry under
+				// conditions where the OSS package does not. We may add oss
+				// error codes to the below if we see others bubble up in the
+				// application. Right now, the most troubling is
+				// RequestTimeout, which seems to only be triggered when a tcp
+				// connection to OSS slows to a crawl. If the RequestTimeout
+				// ends up getting added to the OSS library and we don't see
+				// other errors, this retry loop can be removed.
+				switch err := err.(type) {
+				case *oss.Error:
+					switch err.Code {
+					case "RequestTimeout":
+						// allow retries on only this error.
+					default:
+						break loop
+					}
+				}
+
+				backoff := 100 * time.Millisecond * time.Duration(retries+1)
+				logrus.Errorf("error putting part, retrying after %v: %v", backoff.String(), err)
+				time.Sleep(backoff)
+			}
+
+			if err != nil {
+				logrus.Errorf("error putting part, aborting: %v", err)
+				select {
+				case putErrChan <- err:
+				case <-done:
+					return // don't leak the goroutine
+				}
+			}
+
+			// parts and partNumber are safe, because this function is the
+			// only one modifying them and we force it to be executed
+			// serially.
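+			// (Serial execution is enforced in fromReader above: the previous
+			// part's goroutine must complete its putErrChan handshake before
+			// the next part's goroutine is started.)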
+ parts = append(parts, part) + partNumber++ + }(bytesRead, from, buf) + + buf = d.getbuf() // use a new buffer for the next call + return nil + } + + if offset > 0 { + resp, err := d.Bucket.Head(d.ossPath(path), nil) + if err != nil { + if ossErr, ok := err.(*oss.Error); !ok || ossErr.Code != "NoSuchKey" { + return 0, err + } + } + + currentLength := int64(0) + if err == nil { + currentLength = resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, + d.Bucket.Path(d.ossPath(path))) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, part) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) + if err != nil { + return totalRead, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{}, + d.Bucket.Path(d.ossPath(path))) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if 
totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize {
+				return totalRead, nil
+			}
+		}
+
+		}
+	}
+
+	for {
+		if err = fromReader(0); err != nil {
+			return totalRead, err
+		}
+
+		if int64(bytesRead) < d.ChunkSize {
+			break
+		}
+	}
+
+	return totalRead, nil
+}
+
+// Stat retrieves the FileInfo for the given path, including the current size
+// in bytes and the creation time.
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
+	listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1)
+	if err != nil {
+		return nil, err
+	}
+
+	fi := storagedriver.FileInfoFields{
+		Path: path,
+	}
+
+	if len(listResponse.Contents) == 1 {
+		if listResponse.Contents[0].Key != d.ossPath(path) {
+			fi.IsDir = true
+		} else {
+			fi.IsDir = false
+			fi.Size = listResponse.Contents[0].Size
+
+			timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified)
+			if err != nil {
+				return nil, err
+			}
+			fi.ModTime = timestamp
+		}
+	} else if len(listResponse.CommonPrefixes) == 1 {
+		fi.IsDir = true
+	} else {
+		return nil, storagedriver.PathNotFoundError{Path: path}
+	}
+
+	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
+}
+
+// List returns a list of the objects that are direct descendants of the given path.
+func (d *driver) List(ctx context.Context, path string) ([]string, error) {
+	if path != "/" && path[len(path)-1] != '/' {
+		path = path + "/"
+	}
+
+	// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
+	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
+	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
+	prefix := ""
+	if d.ossPath("") == "" {
+		prefix = "/"
+	}
+
+	listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax)
+	if err != nil {
+		return nil, err
+	}
+
+	files := []string{}
+	directories := []string{}
+
+	for {
+		for _, key := range listResponse.Contents {
+			files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1))
+		}
+
+		for _, commonPrefix := range listResponse.CommonPrefixes {
+			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1))
+		}
+
+		if listResponse.IsTruncated {
+			listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			break
+		}
+	}
+
+	return append(files, directories...), nil
+}
+
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
+	logrus.Infof("Move from %s to %s", d.Bucket.Path("/"+d.ossPath(sourcePath)), d.ossPath(destPath))
+	/* This is terrible, but OSS doesn't have an actual move. */
+	_, err := d.Bucket.PutCopy(d.ossPath(destPath), getPermissions(),
+		oss.CopyOptions{
+		// Options:     d.getOptions(),
+		// ContentType: d.getContentType()
+		},
+		d.Bucket.Path(d.ossPath(sourcePath)))
+	if err != nil {
+		return parseError(sourcePath, err)
+	}
+
+	return d.Delete(ctx, sourcePath)
+}
+
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *driver) Delete(ctx context.Context, path string) error {
+	listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax)
+	if err != nil || len(listResponse.Contents) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	ossObjects := make([]oss.Object, listMax)
+
+	for len(listResponse.Contents) > 0 {
+		for index, key := range listResponse.Contents {
+			ossObjects[index].Key = key.Key
+		}
+
+		err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]})
+		if err != nil {
+			return err
+		}
+
+		listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	methodString := "GET"
+	method, ok := options["method"]
+	if ok {
+		methodString, ok = method.(string)
+		if !ok || (methodString != "GET" && methodString != "HEAD") {
+			return "", storagedriver.ErrUnsupportedMethod
+		}
+	}
+
+	expiresTime := time.Now().Add(20 * time.Minute)
+	logrus.Infof("expiresTime: %v", expiresTime)
+
+	expires, ok := options["expiry"]
+	if ok {
+		et, ok := expires.(time.Time)
+		if ok {
+			expiresTime = et
+		}
+	}
+	logrus.Infof("expiresTime: %v", expiresTime)
+	testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil)
+	logrus.Infof("testURL: %s", testURL)
+	return testURL, nil
+}
+
+func (d *driver) ossPath(path string) string {
+	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
+}
+
+// S3BucketKey returns the OSS bucket key for the given storage driver path.
+func (d *Driver) S3BucketKey(path string) string {
+	return d.StorageDriver.(*driver).ossPath(path)
+}
+
+func parseError(path string, err error) error {
+	if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	return err
+}
+
+func hasCode(err error, code string) bool {
+	ossErr, ok := err.(*oss.Error)
+	return ok && ossErr.Code == code
+}
+
+func (d *driver) getOptions() oss.Options {
+	return oss.Options{ServerSideEncryption: d.Encrypt}
+}
+
+func getPermissions() oss.ACL {
+	return oss.Private
+}
+
+func (d *driver) getContentType() string {
+	return "application/octet-stream"
+}
+
+// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
+func (d *driver) getbuf() []byte {
+	return d.pool.Get().([]byte)
+}
+
+func (d *driver) putbuf(p []byte) {
+	copy(p, d.zeros)
+	d.pool.Put(p)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go
new file mode 100644
index 000000000000..fbae5d9ca5cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go
@@ -0,0 +1,144 @@
+// +build include_oss
+
+package oss
+
+import (
+	"io/ioutil"
+
+	alioss "github.com/denverdino/aliyungo/oss"
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/testsuites"
+	//"log"
+	"os"
+	"strconv"
+	"testing"
+
+	"gopkg.in/check.v1"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) } + +var ossDriverConstructor func(rootDirectory string) (*Driver, error) + +var skipCheck func() string + +func init() { + accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") + secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") + bucket := os.Getenv("OSS_BUCKET") + region := os.Getenv("OSS_REGION") + internal := os.Getenv("OSS_INTERNAL") + encrypt := os.Getenv("OSS_ENCRYPT") + secure := os.Getenv("OSS_SECURE") + endpoint := os.Getenv("OSS_ENDPOINT") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + ossDriverConstructor = func(rootDirectory string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := false + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + internalBool := false + if internal != "" { + internalBool, err = strconv.ParseBool(internal) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + AccessKeyID: accessKey, + AccessKeySecret: secretKey, + Bucket: bucket, + Region: alioss.Region(region), + Internal: internalBool, + ChunkSize: minChunkSize, + RootDirectory: rootDirectory, + Encrypt: encryptBool, + Secure: secureBool, + Endpoint: endpoint, + } + + return New(parameters) + } + + // Skip OSS storage driver tests if environment variable parameters are not provided + skipCheck = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return ossDriverConstructor(root) + }, skipCheck) +} + +func TestEmptyRootList(t *testing.T) { + if skipCheck() != "" { + t.Skip(skipCheck()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := ossDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := ossDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := ossDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go new file mode 100644 index 000000000000..655c68a33db8 
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go
@@ -0,0 +1,3 @@
+// Package rados implements the rados storage driver backend. Support can be
+// enabled by including the "include_rados" build tag.
+package rados
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go
new file mode 100644
index 000000000000..b2e6590d7066
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go
@@ -0,0 +1,632 @@
+// +build include_rados
+
+package rados
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"path"
+	"strconv"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/base"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+	"github.com/docker/distribution/uuid"
+	"github.com/noahdesu/go-ceph/rados"
+)
+
+const driverName = "rados"
+
+// objectBlobPrefix is the prefix applied to all stored blob identifiers
+const objectBlobPrefix = "blob:"
+
+// defaultChunkSize is the stripe size for chunked objects (4MB)
+const defaultChunkSize = 4 << 20
+const defaultXattrTotalSizeName = "total-size"
+
+// defaultKeysFetched is the maximum number of keys fetched from omap at each read operation
+const defaultKeysFetched = 1
+
+// DriverParameters encapsulates all of the driver parameters after all values have been set
+type DriverParameters struct {
+	poolname  string
+	username  string
+	chunksize uint64
+}
+
+func init() {
+	factory.Register(driverName, &radosDriverFactory{})
+}
+
+// radosDriverFactory implements the factory.StorageDriverFactory interface
+type radosDriverFactory struct{}
+
+func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
+type driver struct {
+	Conn      *rados.Conn
+	Ioctx     *rados.IOContext
+	chunksize uint64
+}
+
+type baseEmbed struct {
+	base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS.
+// Objects are stored at absolute keys in the provided pool.
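+//
+// An illustrative construction through the storage driver factory (parameter
+// keys follow FromParameters below; the pool name is a placeholder):
+//
+//	d, err := factory.Create("rados", map[string]interface{}{
+//	    "poolname": "docker-registry",
+//	})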
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - poolname: the ceph pool name +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + + pool, ok := parameters["poolname"] + if !ok { + return nil, fmt.Errorf("No poolname parameter provided") + } + + username, ok := parameters["username"] + if !ok { + username = "" + } + + chunksize := uint64(defaultChunkSize) + chunksizeParam, ok := parameters["chunksize"] + if ok { + chunksize, ok = chunksizeParam.(uint64) + if !ok { + return nil, fmt.Errorf("The chunksize parameter should be a number") + } + } + + params := DriverParameters{ + fmt.Sprint(pool), + fmt.Sprint(username), + chunksize, + } + + return New(params) +} + +// New constructs a new Driver +func New(params DriverParameters) (*Driver, error) { + var conn *rados.Conn + var err error + + if params.username != "" { + log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) + conn, err = rados.NewConnWithUser(params.username) + } else { + log.Infof("Opening connection to pool %s", params.poolname) + conn, err = rados.NewConn() + } + + if err != nil { + return nil, err + } + + err = conn.ReadDefaultConfigFile() + if err != nil { + return nil, err + } + + err = conn.Connect() + if err != nil { + return nil, err + } + + log.Infof("Connected") + + ioctx, err := conn.OpenIOContext(params.poolname) + + log.Infof("Connected to pool %s", params.poolname) + + if err != nil { + return nil, err + } + + d := &driver{ + Ioctx: ioctx, + Conn: conn, + chunksize: params.chunksize, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { + return err + } + + return nil +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
+type readStreamReader struct {
+	driver *driver
+	oid    string
+	size   uint64
+	offset uint64
+}
+
+func (r *readStreamReader) Read(b []byte) (n int, err error) {
+	// Determine the part available to read
+	bufferOffset := uint64(0)
+	bufferSize := uint64(len(b))
+
+	// End of the object, read less than the buffer size
+	if bufferSize > r.size-r.offset {
+		bufferSize = r.size - r.offset
+	}
+
+	// Fill `b`
+	for bufferOffset < bufferSize {
+		// Get the offset in the object chunk
+		chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset)
+
+		// Determine the best size to read
+		bufferEndOffset := bufferSize
+		if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset {
+			bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset)
+		}
+
+		// Read the chunk
+		n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset)
+
+		if err != nil {
+			return int(bufferOffset), err
+		}
+
+		bufferOffset += uint64(n)
+		r.offset += uint64(n)
+	}
+
+	// EOF if the offset is at the end of the object
+	if r.offset == r.size {
+		return int(bufferOffset), io.EOF
+	}
+
+	return int(bufferOffset), nil
+}
+
+func (r *readStreamReader) Close() error {
+	return nil
+}
+
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	// get oid from filename
+	oid, err := d.getOid(path)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// get object stat
+	stat, err := d.Stat(ctx, path)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if offset > stat.Size() {
+		return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
+	}
+
+	return &readStreamReader{
+		driver: d,
+		oid:    oid,
+		size:   uint64(stat.Size()),
+		offset: uint64(offset),
+	}, nil
+}
+
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
+	buf := make([]byte, d.chunksize)
+	totalRead = 0
+
+	oid, err := d.getOid(path)
+	if err != nil {
+		switch err.(type) {
+		// Trying to write new object, generate new blob identifier for it
+		case storagedriver.PathNotFoundError:
+			oid = d.generateOid()
+			err = d.putOid(path, oid)
+			if err != nil {
+				return 0, err
+			}
+		default:
+			return 0, err
+		}
+	} else {
+		// Check total object size only for existing ones
+		totalSize, err := d.getXattrTotalSize(ctx, oid)
+		if err != nil {
+			return 0, err
+		}
+
+		// If offset is after the current object size, fill the gap with zeros
+		for totalSize < uint64(offset) {
+			sizeToWrite := d.chunksize
+			// Cap the write at the remaining gap (offset - totalSize); the
+			// reversed subtraction would underflow the unsigned arithmetic
+			if uint64(offset)-totalSize < sizeToWrite {
+				sizeToWrite = uint64(offset) - totalSize
+			}
+
+			chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize))
+			err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset))
+			if err != nil {
+				return totalRead, err
+			}
+
+			totalSize += sizeToWrite
+		}
+	}
+
+	// Writer
+	for {
+		// Align to chunk size
+		sizeRead := uint64(0)
+		sizeToRead := uint64(offset+totalRead) % d.chunksize
+		if sizeToRead == 0 {
+			sizeToRead = d.chunksize
+		}
+
+		// Read from `reader`
+		for sizeRead < sizeToRead {
+			nn, err := reader.Read(buf[sizeRead:sizeToRead])
+			sizeRead += uint64(nn)
+
+			if err != nil {
+				if err != io.EOF {
+					return totalRead, err
+				}
+
+				break
+			}
+		}
+
+		// End of file and nothing was read
+		if sizeRead == 0 {
+			break
+		}
+
+		// Write chunk object
+		chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead))
+		err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset))
+
+		if err != nil {
+			return totalRead, err
+		}
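+		// Chunks land in separate RADOS objects named "<oid>-<n>", where
+		// n = offset / chunksize; see getChunkNameFromOffset below.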
+ + // Update total object size as xattr in the first chunk of the object + err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) + if err != nil { + return totalRead, err + } + + totalRead += int64(sizeRead) + + // End of file + if sizeRead < sizeToRead { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // get oid from filename + oid, err := d.getOid(path) + + if err != nil { + return nil, err + } + + // the path is a virtual directory? + if oid == "" { + return storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: 0, + IsDir: true, + }, + }, nil + } + + // stat first chunk + stat, err := d.Ioctx.Stat(oid + "-0") + + if err != nil { + return nil, err + } + + // get total size of chunked object + totalSize, err := d.getXattrTotalSize(ctx, oid) + + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(totalSize), + ModTime: stat.ModTime, + }, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { + files, err := d.listDirectoryOid(dirPath) + + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(files)) + for k := range files { + if k != dirPath { + keys = append(keys, path.Join(dirPath, k)) + } + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + // Get oid + oid, err := d.getOid(sourcePath) + + if err != nil { + return err + } + + // Move reference + err = d.putOid(destPath, oid) + + if err != nil { + return err + } + + // Delete old reference + err = d.deleteOid(sourcePath) + + if err != nil { + return err + } + + return nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, objectPath string) error { + // Get oid + oid, err := d.getOid(objectPath) + + if err != nil { + return err + } + + // Deleting virtual directory + if oid == "" { + objects, err := d.listDirectoryOid(objectPath) + if err != nil { + return err + } + + for object := range objects { + err = d.Delete(ctx, path.Join(objectPath, object)) + if err != nil { + return err + } + } + } else { + // Delete object chunks + totalSize, err := d.getXattrTotalSize(ctx, oid) + + if err != nil { + return err + } + + for offset := uint64(0); offset < totalSize; offset += d.chunksize { + chunkName, _ := d.getChunkNameFromOffset(oid, offset) + + err = d.Ioctx.Delete(chunkName) + if err != nil { + return err + } + } + + // Delete reference + err = d.deleteOid(objectPath) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	return "", storagedriver.ErrUnsupportedMethod
+}
+
+// Generate a blob identifier
+func (d *driver) generateOid() string {
+	return objectBlobPrefix + uuid.Generate().String()
+}
+
+// Reference an object and its hierarchy
+func (d *driver) putOid(objectPath string, oid string) error {
+	directory := path.Dir(objectPath)
+	base := path.Base(objectPath)
+	createParentReference := true
+
+	// After creating this reference, skip the parents referencing since the
+	// hierarchy already exists
+	if oid == "" {
+		firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1)
+		if (err == nil) && (len(firstReference) > 0) {
+			createParentReference = false
+		}
+	}
+
+	oids := map[string][]byte{
+		base: []byte(oid),
+	}
+
+	// Reference object
+	err := d.Ioctx.SetOmap(directory, oids)
+	if err != nil {
+		return err
+	}
+
+	// Ensure parent virtual directories
+	if createParentReference {
+		return d.putOid(directory, "")
+	}
+
+	return nil
+}
+
+// Get the object identifier from an object name
+func (d *driver) getOid(objectPath string) (string, error) {
+	directory := path.Dir(objectPath)
+	base := path.Base(objectPath)
+
+	files, err := d.Ioctx.GetOmapValues(directory, "", base, 1)
+
+	if (err != nil) || (files[base] == nil) {
+		return "", storagedriver.PathNotFoundError{Path: objectPath}
+	}
+
+	return string(files[base]), nil
+}
+
+// List the objects of a virtual directory
+func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) {
+	return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched)
+}
+
+// Remove a file from the files hierarchy
+func (d *driver) deleteOid(objectPath string) error {
+	// Remove object reference
+	directory := path.Dir(objectPath)
+	base := path.Base(objectPath)
+	err := d.Ioctx.RmOmapKeys(directory, []string{base})
+
+	if err != nil {
+		return err
+	}
+
+	// Remove virtual directory if empty (no more references)
+	firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1)
+
+	if err != nil {
+		return err
+	}
+
+	if len(firstReference) == 0 {
+		// Delete omap
+		err := d.Ioctx.Delete(directory)
+
+		if err != nil {
+			return err
+		}
+
+		// Remove reference on parent omaps
+		if directory != "" {
+			return d.deleteOid(directory)
+		}
+	}
+
+	return nil
+}
+
+// Takes an offset in a chunked object and returns the chunk name and a new
+// offset within this chunk object
+func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) {
+	chunkID := offset / d.chunksize
+	chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10)
+	chunkedOffset := offset % d.chunksize
+	return chunkedOid, chunkedOffset
+}
+
+// Set the total size of a chunked object `oid`
+func (d *driver) setXattrTotalSize(oid string, size uint64) error {
+	// Convert uint64 `size` to []byte
+	xattr := make([]byte, binary.MaxVarintLen64)
+	binary.LittleEndian.PutUint64(xattr, size)
+
+	// Save the total size as a xattr in the first chunk
+	return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr)
+}
+
+// Get the total size of the chunked object `oid` stored as xattr
+func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) {
+	// Fetch xattr as []byte
+	xattr := make([]byte, binary.MaxVarintLen64)
+	xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr)
+
+	if err != nil {
+		return 0, err
+	}
+
+	if xattrLength != len(xattr) {
+		context.GetLogger(ctx).Errorf("object %s xattr
length mismatch: %d != %d", oid, xattrLength, len(xattr)) + return 0, storagedriver.PathNotFoundError{Path: oid} + } + + // Convert []byte as uint64 + totalSize := binary.LittleEndian.Uint64(xattr) + + return totalSize, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go new file mode 100644 index 000000000000..ce367fb56684 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go @@ -0,0 +1,40 @@ +// +build include_rados + +package rados + +import ( + "os" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +func init() { + poolname := os.Getenv("RADOS_POOL") + username := os.Getenv("RADOS_USER") + + driverConstructor := func() (storagedriver.StorageDriver, error) { + parameters := DriverParameters{ + poolname, + username, + defaultChunkSize, + } + + return New(parameters) + } + + skipCheck := func() string { + if poolname == "" { + return "RADOS_POOL must be set to run Rado tests" + } + return "" + } + + testsuites.RegisterSuite(driverConstructor, skipCheck) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go index 57871b5d6990..552c221d02d3 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go @@ -29,6 +29,8 @@ import ( "github.com/AdRoll/goamz/aws" "github.com/AdRoll/goamz/s3" "github.com/Sirupsen/logrus" + + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" @@ -199,7 +201,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { func New(params DriverParameters) (*Driver, error) { auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) } if !params.Secure { @@ -267,7 +269,7 @@ func (d *driver) Name() string { } // GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { content, err := d.Bucket.Get(d.s3Path(path)) if err != nil { return nil, parseError(path, err) @@ -276,13 +278,13 @@ func (d *driver) GetContent(path string) ([]byte, error) { } // PutContent stores the []byte content at a location designated by "path". 
-func (d *driver) PutContent(path string, contents []byte) error { +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(http.Header) headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") @@ -304,7 +306,7 @@ func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // returned. May be used to resume writing a stream by providing a nonzero // offset. Offsets past the current size will write from the position // beyond the end of the file. -func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (totalRead int64, err error) { +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { partNumber := 1 bytesRead := 0 var putErrChan chan error @@ -350,7 +352,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total // Fills from 0 to total from current fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(path, 0) + current, err := d.ReadStream(ctx, path, 0) if err != nil { return err } @@ -638,7 +640,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) if err != nil { return nil, err @@ -671,7 +673,7 @@ func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { } // List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(path string) ([]string, error) { +func (d *driver) List(ctx context.Context, path string) ([]string, error) { if path != "/" && path[len(path)-1] != '/' { path = path + "/" } @@ -716,7 +718,7 @@ func (d *driver) List(path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. -func (d *driver) Move(sourcePath string, destPath string) error { +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { /* This is terrible, but aws doesn't have an actual move. */ _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) @@ -724,11 +726,11 @@ func (d *driver) Move(sourcePath string, destPath string) error { return parseError(sourcePath, err) } - return d.Delete(sourcePath) + return d.Delete(ctx, sourcePath) } // Delete recursively deletes all objects stored at "path" and its subpaths. 
-func (d *driver) Delete(path string) error { +func (d *driver) Delete(ctx context.Context, path string) error { listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax) if err != nil || len(listResponse.Contents) == 0 { return storagedriver.PathNotFoundError{Path: path} @@ -757,7 +759,7 @@ func (d *driver) Delete(path string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { methodString := "GET" method, ok := options["method"] if ok { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go index 69543bcb6bdd..70172a6de087 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/AdRoll/goamz/aws" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" @@ -16,7 +17,8 @@ import ( // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } -type S3DriverConstructor func(rootDirectory string) (*Driver, error) +var s3DriverConstructor func(rootDirectory string) (*Driver, error) +var skipS3 func() string func init() { accessKey := os.Getenv("AWS_ACCESS_KEY") @@ -32,7 +34,7 @@ func init() { } defer os.Remove(root) - s3DriverConstructor := func(rootDirectory string) (*Driver, error) { + s3DriverConstructor = func(rootDirectory string) (*Driver, error) { encryptBool := false if encrypt != "" { encryptBool, err = strconv.ParseBool(encrypt) @@ -73,78 +75,64 @@ func init() { } // Skip S3 storage driver tests if environment variable parameters are not provided - skipCheck := func() string { + skipS3 = func() string { if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" } return "" } - driverConstructor := func() (storagedriver.StorageDriver, error) { + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return s3DriverConstructor(root) - } - - testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) - - // s3Constructor := func() (*Driver, error) { - // return s3DriverConstructor(aws.GetRegion(region)) - // } - - RegisterS3DriverSuite(s3DriverConstructor, skipCheck) - - // testsuites.RegisterIPCSuite(driverName, map[string]string{ - // "accesskey": accessKey, - // "secretkey": secretKey, - // "region": region.Name, - // "bucket": bucket, - // "encrypt": encrypt, - // }, skipCheck) - // } -} - -func RegisterS3DriverSuite(s3DriverConstructor S3DriverConstructor, skipCheck testsuites.SkipCheck) { - check.Suite(&S3DriverSuite{ - Constructor: s3DriverConstructor, - SkipCheck: skipCheck, - }) -} - -type S3DriverSuite struct { - Constructor S3DriverConstructor - testsuites.SkipCheck + }, skipS3) } -func (suite *S3DriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - 
c.Skip(reason) +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) } -} -func (suite *S3DriverSuite) TestEmptyRootList(c *check.C) { validRoot, err := ioutil.TempDir("", "driver-") - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } defer os.Remove(validRoot) - rootedDriver, err := suite.Constructor(validRoot) - c.Assert(err, check.IsNil) - emptyRootDriver, err := suite.Constructor("") - c.Assert(err, check.IsNil) - slashRootDriver, err := suite.Constructor("/") - c.Assert(err, check.IsNil) + rootedDriver, err := s3DriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } filename := "/test" contents := []byte("contents") - err = rootedDriver.PutContent(filename, contents) - c.Assert(err, check.IsNil) - defer rootedDriver.Delete(filename) + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) - keys, err := emptyRootDriver.List("/") + keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } - keys, err = slashRootDriver.List("/") + keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go index cda1c37d8f1c..bade099f744c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go @@ -7,6 +7,8 @@ import ( "regexp" "strconv" "strings" + + "github.com/docker/distribution/context" ) // Version is a string representing the storage driver version, of the form @@ -42,45 +44,45 @@ type StorageDriver interface { // GetContent retrieves the content stored at "path" as a []byte. // This should primarily be used for small objects. - GetContent(path string) ([]byte, error) + GetContent(ctx context.Context, path string) ([]byte, error) // PutContent stores the []byte content at a location designated by "path". // This should primarily be used for small objects. - PutContent(path string, content []byte) error + PutContent(ctx context.Context, path string, content []byte) error // ReadStream retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. 
-	ReadStream(path string, offset int64) (io.ReadCloser, error)
+	ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
 
 	// WriteStream stores the contents of the provided io.ReadCloser at a
 	// location designated by the given path.
 	// May be used to resume writing a stream by providing a nonzero offset.
 	// The offset must be no larger than the CurrentSize for this path.
-	WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error)
+	WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error)
 
 	// Stat retrieves the FileInfo for the given path, including the current
 	// size in bytes and the creation time.
-	Stat(path string) (FileInfo, error)
+	Stat(ctx context.Context, path string) (FileInfo, error)
 
 	// List returns a list of the objects that are direct descendants of the
 	//given path.
-	List(path string) ([]string, error)
+	List(ctx context.Context, path string) ([]string, error)
 
 	// Move moves an object stored at sourcePath to destPath, removing the
 	// original object.
 	// Note: This may be no more efficient than a copy followed by a delete for
 	// many implementations.
-	Move(sourcePath string, destPath string) error
+	Move(ctx context.Context, sourcePath string, destPath string) error
 
 	// Delete recursively deletes all objects stored at "path" and its subpaths.
-	Delete(path string) error
+	Delete(ctx context.Context, path string) error
 
 	// URLFor returns a URL which may be used to retrieve the content stored at
 	// the given path, possibly using the given options.
 	// May return an ErrUnsupportedMethod in certain StorageDriver
 	// implementations.
-	URLFor(path string, options map[string]interface{}) (string, error)
+	URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error)
 }
 
 // PathRegexp is the regular expression which each file path must match. A
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go
new file mode 100644
index 000000000000..0921ccc03edd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go
@@ -0,0 +1,657 @@
+// Package swift provides a storagedriver.StorageDriver implementation to
+// store blobs in Openstack Swift object storage.
+//
+// This package leverages the ncw/swift client library for interfacing with
+// Swift.
+//
+// It supports both TempAuth authentication and Keystone authentication
+// (up to version 3).
+//
+// Since Swift has no concept of directories (directories are an abstraction),
+// empty objects are created with the MIME type application/vnd.swift.directory.
+//
+// As Swift has a limit on the size of a single uploaded object (by default
+// this is 5GB), the driver makes use of the Swift Large Object Support
+// (http://docs.openstack.org/developer/swift/overview_large_objects.html).
+// Only one container is used for both manifests and data objects. Manifests
+// are stored in the 'files' pseudo directory, data objects are stored under
+// 'segments'.
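+//
+// An illustrative configuration through the storage driver factory (the
+// required keys follow FromParameters below; all values are placeholders):
+//
+//	d, err := factory.Create("swift", map[string]interface{}{
+//	    "username":  "registry",
+//	    "password":  "secret",
+//	    "authurl":   "https://keystone.example.com/v2.0",
+//	    "container": "registry",
+//	})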
+package swift
+
+import (
+	"bytes"
+	"crypto/rand"
+	"crypto/sha1"
+	"crypto/tls"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	gopath "path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/ncw/swift"
+
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/base"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+	"github.com/docker/distribution/version"
+)
+
+const driverName = "swift"
+
+// defaultChunkSize defines the default size of a segment
+const defaultChunkSize = 20 * 1024 * 1024
+
+// minChunkSize defines the minimum size of a segment
+const minChunkSize = 1 << 20
+
+// Parameters encapsulates all of the driver parameters after all values have been set
+type Parameters struct {
+	Username           string
+	Password           string
+	AuthURL            string
+	Tenant             string
+	TenantID           string
+	Domain             string
+	DomainID           string
+	Region             string
+	Container          string
+	Prefix             string
+	InsecureSkipVerify bool
+	ChunkSize          int
+}
+
+type swiftInfo map[string]interface{}
+
+func init() {
+	factory.Register(driverName, &swiftDriverFactory{})
+}
+
+// swiftDriverFactory implements the factory.StorageDriverFactory interface
+type swiftDriverFactory struct{}
+
+func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
+type driver struct {
+	Conn              swift.Connection
+	Container         string
+	Prefix            string
+	BulkDeleteSupport bool
+	ChunkSize         int
+}
+
+type baseEmbed struct {
+	base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift.
+// Objects are stored at absolute keys in the provided container.
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - username +// - password +// - authurl +// - container +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params := Parameters{ + ChunkSize: defaultChunkSize, + InsecureSkipVerify: false, + } + + if err := mapstructure.Decode(parameters, ¶ms); err != nil { + return nil, err + } + + if params.Username == "" { + return nil, fmt.Errorf("No username parameter provided") + } + + if params.Password == "" { + return nil, fmt.Errorf("No password parameter provided") + } + + if params.AuthURL == "" { + return nil, fmt.Errorf("No authurl parameter provided") + } + + if params.Container == "" { + return nil, fmt.Errorf("No container parameter provided") + } + + if params.ChunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) + } + + return New(params) +} + +// New constructs a new Driver with the given Openstack Swift credentials and container name +func New(params Parameters) (*Driver, error) { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, + } + + ct := swift.Connection{ + UserName: params.Username, + ApiKey: params.Password, + AuthUrl: params.AuthURL, + Region: params.Region, + UserAgent: "distribution/" + version.Version, + Tenant: params.Tenant, + TenantId: params.TenantID, + Domain: params.Domain, + DomainId: params.DomainID, + Transport: transport, + ConnectTimeout: 60 * time.Second, + Timeout: 15 * 60 * time.Second, + } + err := ct.Authenticate() + if err != nil { + return nil, fmt.Errorf("Swift authentication failed: %s", err) + } + + if err := ct.ContainerCreate(params.Container, nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) + } + + d := &driver{ + Conn: ct, + Container: params.Container, + Prefix: params.Prefix, + BulkDeleteSupport: detectBulkDelete(params.AuthURL), + ChunkSize: params.ChunkSize, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
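FromParameters above is the entry point used by the storage factory: mapstructure decodes a flat parameter map (lower-cased keys, as they appear in registry configuration) into Parameters, the defaults are pre-seeded, and the four required fields are validated before New dials Swift. A hedged sketch of that calling convention, with hypothetical values:

package main

import (
	"log"

	swiftdriver "github.com/docker/distribution/registry/storage/driver/swift"
)

func main() {
	// Only username, password, authurl and container are required;
	// chunksize, when supplied, must be at least 1MB (minChunkSize).
	// All values here are hypothetical.
	d, err := swiftdriver.FromParameters(map[string]interface{}{
		"username":  "demo",
		"password":  "secret",
		"authurl":   "http://localhost:8080/auth/v1.0",
		"container": "registry",
		"chunksize": 32 * 1024 * 1024,
	})
	if err != nil {
		log.Fatal(err) // fails here unless a real Swift endpoint answers
	}
	_ = d
}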
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	headers := make(swift.Headers)
+	headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-"
+
+	file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers)
+	if err == swift.ObjectNotFound {
+		return nil, storagedriver.PathNotFoundError{Path: path}
+	}
+	if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable {
+		return ioutil.NopCloser(bytes.NewReader(nil)), nil
+	}
+	return file, err
+}
+
+// WriteStream stores the contents of the provided io.Reader at a
+// location designated by the given path. The driver will know it has
+// received the full contents when the reader returns io.EOF. The number
+// of successfully READ bytes will be returned, even if an error is
+// returned. May be used to resume writing a stream by providing a nonzero
+// offset. Offsets past the current size will write from the position
+// beyond the end of the file.
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) {
+	var (
+		segments      []swift.Object
+		multi         io.Reader
+		paddingReader io.Reader
+		currentLength int64
+		cursor        int64
+		segmentPath   string
+	)
+
+	partNumber := 1
+	chunkSize := int64(d.ChunkSize)
+	zeroBuf := make([]byte, d.ChunkSize)
+
+	getSegment := func() string {
+		return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
+	}
+
+	max := func(a int64, b int64) int64 {
+		if a > b {
+			return a
+		}
+		return b
+	}
+
+	createManifest := true
+	info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path))
+	if err == nil {
+		manifest, ok := headers["X-Object-Manifest"]
+		if !ok {
+			if segmentPath, err = d.swiftSegmentPath(path); err != nil {
+				return 0, err
+			}
+			if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil {
+				return 0, err
+			}
+			segments = append(segments, info)
+		} else {
+			_, segmentPath = parseManifest(manifest)
+			if segments, err = d.getAllSegments(segmentPath); err != nil {
+				return 0, err
+			}
+			createManifest = false
+		}
+		currentLength = info.Bytes
+	} else if err == swift.ObjectNotFound {
+		if segmentPath, err = d.swiftSegmentPath(path); err != nil {
+			return 0, err
+		}
+	} else {
+		return 0, err
+	}
+
+	if createManifest {
+		if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil {
+			return 0, err
+		}
+	}
+
+	// First, we skip the existing segments that are not modified by this call
+	for i := range segments {
+		if offset < cursor+segments[i].Bytes {
+			break
+		}
+		cursor += segments[i].Bytes
+		partNumber++
+	}
+
+	// We reached the end of the file but we haven't reached 'offset' yet
+	// Therefore we add blocks of zeros
+	if offset >= currentLength {
+		for offset-currentLength >= chunkSize {
+			// Insert a block of zeros
+			_, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil)
+			if err != nil {
+				if err == swift.ObjectNotFound {
+					return 0, storagedriver.PathNotFoundError{Path: getSegment()}
+				}
+				return 0, err
+			}
+			currentLength += chunkSize
+			partNumber++
+		}
+
+		cursor = currentLength
+		paddingReader = bytes.NewReader(zeroBuf)
+	} else if offset-cursor > 0 {
+		// Offset is inside the current segment: we need to read the
+		// data from the beginning of the segment to offset
+		file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil)
+		if err != nil {
+			if err == swift.ObjectNotFound {
+				return 0,
storagedriver.PathNotFoundError{Path: getSegment()}
+			}
+			return 0, err
+		}
+		defer file.Close()
+		paddingReader = file
+	}
+
+	readers := []io.Reader{}
+	if paddingReader != nil {
+		readers = append(readers, io.LimitReader(paddingReader, offset-cursor))
+	}
+	readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor)))
+	multi = io.MultiReader(readers...)
+
+	writeSegment := func(segment string) (finished bool, bytesRead int64, err error) {
+		currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil)
+		if err != nil {
+			if err == swift.ObjectNotFound {
+				return false, bytesRead, storagedriver.PathNotFoundError{Path: segment}
+			}
+			return false, bytesRead, err
+		}
+
+		n, err := io.Copy(currentSegment, multi)
+		if err != nil {
+			return false, bytesRead, err
+		}
+
+		if n > 0 {
+			defer currentSegment.Close()
+			bytesRead += n - max(0, offset-cursor)
+		}
+
+		if n < chunkSize {
+			// We wrote all the data
+			if cursor+n < currentLength {
+				// Copy the end of the chunk
+				headers := make(swift.Headers)
+				headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10)
+				file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers)
+				if err != nil {
+					if err == swift.ObjectNotFound {
+						return false, bytesRead, storagedriver.PathNotFoundError{Path: path}
+					}
+					return false, bytesRead, err
+				}
+
+				_, copyErr := io.Copy(currentSegment, file)
+
+				if err := file.Close(); err != nil {
+					if err == swift.ObjectNotFound {
+						return false, bytesRead, storagedriver.PathNotFoundError{Path: path}
+					}
+					return false, bytesRead, err
+				}
+
+				if copyErr != nil {
+					return false, bytesRead, copyErr
+				}
+			}
+
+			return true, bytesRead, nil
+		}
+
+		multi = io.LimitReader(reader, chunkSize)
+		cursor += chunkSize
+		partNumber++
+
+		return false, bytesRead, nil
+	}
+
+	finished := false
+	read := int64(0)
+	bytesRead := int64(0)
+	for !finished {
+		finished, read, err = writeSegment(getSegment())
+		bytesRead += read
+		if err != nil {
+			return bytesRead, err
+		}
+	}
+
+	return bytesRead, nil
+}
+
+// Stat retrieves the FileInfo for the given path, including the current size
+// in bytes and the creation time.
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
+	swiftPath := d.swiftPath(path)
+	opts := &swift.ObjectsOpts{
+		Prefix:    swiftPath,
+		Delimiter: '/',
+	}
+
+	objects, err := d.Conn.ObjectsAll(d.Container, opts)
+	if err != nil {
+		if err == swift.ContainerNotFound {
+			return nil, storagedriver.PathNotFoundError{Path: path}
+		}
+		return nil, err
+	}
+
+	fi := storagedriver.FileInfoFields{
+		Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")),
+	}
+
+	for _, obj := range objects {
+		if obj.PseudoDirectory && obj.Name == swiftPath+"/" {
+			fi.IsDir = true
+			return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
+		} else if obj.Name == swiftPath {
+			// On Swift 1.12, the 'bytes' field is always 0
+			// so we need to do a second HEAD request
+			info, _, err := d.Conn.Object(d.Container, swiftPath)
+			if err != nil {
+				if err == swift.ObjectNotFound {
+					return nil, storagedriver.PathNotFoundError{Path: path}
+				}
+				return nil, err
+			}
+			fi.IsDir = false
+			fi.Size = info.Bytes
+			fi.ModTime = info.LastModified
+			return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
+		}
+	}
+
+	return nil, storagedriver.PathNotFoundError{Path: path}
+}
+
+// List returns a list of the objects that are direct descendants of the given path.
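One detail of the WriteStream implementation above is worth calling out: getSegment pads the part number to sixteen digits, so the lexicographic order in which Swift lists (and a manifest concatenates) segment objects is exactly the numeric upload order. A tiny, self-contained illustration (the segment path value is hypothetical):

package main

import "fmt"

func main() {
	segmentPath := "segments/3ab/54c0ffee" // shape produced by swiftSegmentPath; value hypothetical
	getSegment := func(partNumber int) string {
		return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
	}
	fmt.Println(getSegment(1))  // segments/3ab/54c0ffee/0000000000000001
	fmt.Println(getSegment(12)) // segments/3ab/54c0ffee/0000000000000012
	// The fixed width keeps lexicographic order equal to numeric order,
	// which is what X-Object-Manifest concatenation relies on.
}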
+func (d *driver) List(ctx context.Context, path string) ([]string, error) {
+	var files []string
+
+	prefix := d.swiftPath(path)
+	if prefix != "" {
+		prefix += "/"
+	}
+
+	opts := &swift.ObjectsOpts{
+		Prefix:    prefix,
+		Delimiter: '/',
+	}
+
+	objects, err := d.Conn.ObjectsAll(d.Container, opts)
+	for _, obj := range objects {
+		files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/")))
+	}
+
+	if err == swift.ContainerNotFound {
+		return files, storagedriver.PathNotFoundError{Path: path}
+	}
+	return files, err
+}
+
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
+	_, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath))
+	if err == nil {
+		if manifest, ok := headers["X-Object-Manifest"]; ok {
+			if err = d.createManifest(destPath, manifest); err != nil {
+				return err
+			}
+			err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath))
+		} else {
+			err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath))
+		}
+	}
+	if err == swift.ObjectNotFound {
+		return storagedriver.PathNotFoundError{Path: sourcePath}
+	}
+	return err
+}
+
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *driver) Delete(ctx context.Context, path string) error {
+	opts := swift.ObjectsOpts{
+		Prefix: d.swiftPath(path) + "/",
+	}
+
+	objects, err := d.Conn.ObjectsAll(d.Container, &opts)
+	if err != nil {
+		if err == swift.ContainerNotFound {
+			return storagedriver.PathNotFoundError{Path: path}
+		}
+		return err
+	}
+
+	if d.BulkDeleteSupport {
+		filenames := make([]string, len(objects))
+		for i, obj := range objects {
+			filenames[i] = obj.Name
+		}
+		if _, err := d.Conn.BulkDelete(d.Container, filenames); err != swift.Forbidden {
+			if err == swift.ContainerNotFound {
+				return storagedriver.PathNotFoundError{Path: path}
+			}
+			return err
+		}
+	}
+
+	for _, obj := range objects {
+		if obj.PseudoDirectory {
+			continue
+		}
+		if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil {
+			manifest, ok := headers["X-Object-Manifest"]
+			if ok {
+				segContainer, prefix := parseManifest(manifest)
+				segments, err := d.getAllSegments(prefix)
+				if err != nil {
+					return err
+				}
+
+				for _, s := range segments {
+					if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil {
+						if err == swift.ObjectNotFound {
+							return storagedriver.PathNotFoundError{Path: s.Name}
+						}
+						return err
+					}
+				}
+			}
+		} else {
+			if err == swift.ObjectNotFound {
+				return storagedriver.PathNotFoundError{Path: obj.Name}
+			}
+			return err
+		}
+
+		if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil {
+			if err == swift.ObjectNotFound {
+				return storagedriver.PathNotFoundError{Path: obj.Name}
+			}
+			return err
+		}
+	}
+
+	_, _, err = d.Conn.Object(d.Container, d.swiftPath(path))
+	if err == nil {
+		if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil {
+			if err == swift.ObjectNotFound {
+				return storagedriver.PathNotFoundError{Path: path}
+			}
+			return err
+		}
+	} else if err == swift.ObjectNotFound {
+		if len(objects) == 0 {
+			return storagedriver.PathNotFoundError{Path: path}
+		}
+	} else {
+		return err
+	}
+	return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} + +func (d *driver) swiftPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") +} + +func (d *driver) swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return segments, err +} + +func (d *driver) createManifest(path string, segments string) error { + headers := make(swift.Headers) + headers["X-Object-Manifest"] = segments + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) + if err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + if err := manifest.Close(); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + return nil +} + +func detectBulkDelete(authURL string) (bulkDelete bool) { + resp, err := http.Get(gopath.Join(authURL, "..", "..") + "/info") + if err == nil { + defer resp.Body.Close() + decoder := json.NewDecoder(resp.Body) + var infos swiftInfo + if decoder.Decode(&infos) == nil { + _, bulkDelete = infos["bulk_delete"] + } + } + return +} + +func parseManifest(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go new file mode 100644 index 000000000000..6be2238a5cbc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go @@ -0,0 +1,135 @@ +package swift + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/ncw/swift/swifttest" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
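The small helpers above carry the driver's namespace conventions: swiftPath roots every registry path under "<prefix>/files", and parseManifest splits an X-Object-Manifest value back into container and segment prefix. Worked through inline (the prefix and manifest values are hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	prefix := "reg" // hypothetical driver Prefix
	swiftPath := func(path string) string {
		return strings.TrimLeft(strings.TrimRight(prefix+"/files"+path, "/"), "/")
	}
	fmt.Println(swiftPath("/a/b")) // reg/files/a/b
	fmt.Println(swiftPath("/"))    // reg/files

	// parseManifest splits "container/prefix" at the first slash.
	manifest := "demo/segments/3ab/54c0ffee"
	components := strings.SplitN(manifest, "/", 2)
	container, segPrefix := components[0], components[1]
	fmt.Println(container, segPrefix) // demo segments/3ab/54c0ffee
}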
+func Test(t *testing.T) { check.TestingT(t) }
+
+var swiftDriverConstructor func(prefix string) (*Driver, error)
+
+func init() {
+	var (
+		username           string
+		password           string
+		authURL            string
+		tenant             string
+		tenantID           string
+		domain             string
+		domainID           string
+		container          string
+		region             string
+		insecureSkipVerify bool
+		swiftServer        *swifttest.SwiftServer
+		err                error
+	)
+	username = os.Getenv("SWIFT_USERNAME")
+	password = os.Getenv("SWIFT_PASSWORD")
+	authURL = os.Getenv("SWIFT_AUTH_URL")
+	tenant = os.Getenv("SWIFT_TENANT_NAME")
+	tenantID = os.Getenv("SWIFT_TENANT_ID")
+	domain = os.Getenv("SWIFT_DOMAIN_NAME")
+	domainID = os.Getenv("SWIFT_DOMAIN_ID")
+	container = os.Getenv("SWIFT_CONTAINER_NAME")
+	region = os.Getenv("SWIFT_REGION_NAME")
+	insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY"))
+
+	if username == "" || password == "" || authURL == "" || container == "" {
+		if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil {
+			panic(err)
+		}
+		username = "swifttest"
+		password = "swifttest"
+		authURL = swiftServer.AuthURL
+		container = "test"
+	}
+
+	prefix, err := ioutil.TempDir("", "driver-")
+	if err != nil {
+		panic(err)
+	}
+	defer os.Remove(prefix)
+
+	swiftDriverConstructor = func(root string) (*Driver, error) {
+		parameters := Parameters{
+			username,
+			password,
+			authURL,
+			tenant,
+			tenantID,
+			domain,
+			domainID,
+			region,
+			container,
+			root,
+			insecureSkipVerify,
+			defaultChunkSize,
+		}
+
+		return New(parameters)
+	}
+
+	driverConstructor := func() (storagedriver.StorageDriver, error) {
+		return swiftDriverConstructor(prefix)
+	}
+
+	testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip)
+}
+
+func TestEmptyRootList(t *testing.T) {
+	validRoot, err := ioutil.TempDir("", "driver-")
+	if err != nil {
+		t.Fatalf("unexpected error creating temporary directory: %v", err)
+	}
+	defer os.Remove(validRoot)
+
+	rootedDriver, err := swiftDriverConstructor(validRoot)
+	if err != nil {
+		t.Fatalf("unexpected error creating rooted driver: %v", err)
+	}
+
+	emptyRootDriver, err := swiftDriverConstructor("")
+	if err != nil {
+		t.Fatalf("unexpected error creating empty root driver: %v", err)
+	}
+
+	slashRootDriver, err := swiftDriverConstructor("/")
+	if err != nil {
+		t.Fatalf("unexpected error creating slash root driver: %v", err)
+	}
+
+	filename := "/test"
+	contents := []byte("contents")
+	ctx := context.Background()
+	err = rootedDriver.PutContent(ctx, filename, contents)
+	if err != nil {
+		t.Fatalf("unexpected error creating content: %v", err)
+	}
+	defer rootedDriver.Delete(ctx, filename)
+
+	keys, err := emptyRootDriver.List(ctx, "/")
+	if err != nil {
+		t.Fatalf("unexpected error listing empty root content: %v", err)
+	}
+	for _, path := range keys {
+		if !storagedriver.PathRegexp.MatchString(path) {
+			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
+		}
+	}
+
+	keys, err = slashRootDriver.List(ctx, "/")
+	if err != nil {
+		t.Fatalf("unexpected error listing slash root content: %v", err)
+	}
+	for _, path := range keys {
+		if !storagedriver.PathRegexp.MatchString(path) {
+			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go
index 9f387a6270be..1772560b5317 100644
--- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go
+++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go
@@ -14,6 +14,7 @@
import ( "testing" "time" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "gopkg.in/check.v1" ) @@ -21,48 +22,16 @@ import ( // Test hooks up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } -// RegisterInProcessSuite registers an in-process storage driver test suite with +// RegisterSuite registers an in-process storage driver test suite with // the go test runner. -func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { +func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { check.Suite(&DriverSuite{ Constructor: driverConstructor, SkipCheck: skipCheck, + ctx: context.Background(), }) } -// RegisterIPCSuite registers a storage driver test suite which runs the named -// driver as a child process with the given parameters. -func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) { - panic("ipc testing is disabled for now") - - // NOTE(stevvooe): IPC testing is disabled for now. Uncomment the code - // block before and remove the panic when we phase it back in. - - // suite := &DriverSuite{ - // Constructor: func() (storagedriver.StorageDriver, error) { - // d, err := ipc.NewDriverClient(driverName, ipcParams) - // if err != nil { - // return nil, err - // } - // err = d.Start() - // if err != nil { - // return nil, err - // } - // return d, nil - // }, - // SkipCheck: skipCheck, - // } - // suite.Teardown = func() error { - // if suite.StorageDriver == nil { - // return nil - // } - - // driverClient := suite.StorageDriver.(*ipc.StorageDriverClient) - // return driverClient.Stop() - // } - // check.Suite(suite) -} - // SkipCheck is a function used to determine if a test suite should be skipped. // If a SkipCheck returns a non-empty skip reason, the suite is skipped with // the given reason. @@ -80,14 +49,14 @@ type DriverConstructor func() (storagedriver.StorageDriver, error) type DriverTeardown func() error // DriverSuite is a gocheck test suite designed to test a -// storagedriver.StorageDriver. -// The intended way to create a DriverSuite is with RegisterInProcessSuite or -// RegisterIPCSuite. +// storagedriver.StorageDriver. The intended way to create a DriverSuite is +// with RegisterSuite. type DriverSuite struct { Constructor DriverConstructor Teardown DriverTeardown SkipCheck storagedriver.StorageDriver + ctx context.Context } // SetUpSuite sets up the gocheck test suite. @@ -112,12 +81,20 @@ func (suite *DriverSuite) TearDownSuite(c *check.C) { // This causes the suite to abort if any files are left around in the storage // driver. func (suite *DriverSuite) TearDownTest(c *check.C) { - files, _ := suite.StorageDriver.List("/") + files, _ := suite.StorageDriver.List(suite.ctx, "/") if len(files) > 0 { c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) } } +// TestRootExists ensures that all storage drivers have a root path by default. +func (suite *DriverSuite) TestRootExists(c *check.C) { + _, err := suite.StorageDriver.List(suite.ctx, "/") + if err != nil { + c.Fatalf(`the root path "/" should always exist: %v`, err) + } +} + // TestValidPaths checks that various valid file paths are accepted by the // storage driver. 
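RegisterSuite and SkipCheck, shown above, are how each driver package opts into this shared suite: the constructor builds a fresh driver, and the skip check may veto the whole run with a reason, as the S3 and Swift tests do when credentials are missing. A sketch of a registration, assuming the in-memory driver and a hypothetical DRIVER_SUITE environment gate:

package driver_test

import (
	"os"
	"testing"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	"github.com/docker/distribution/registry/storage/driver/testsuites"

	"gopkg.in/check.v1"
)

// Hook gocheck into "go test", exactly as the driver packages do.
func Test(t *testing.T) { check.TestingT(t) }

func init() {
	constructor := func() (storagedriver.StorageDriver, error) {
		return inmemory.New(), nil
	}
	// Skip unless explicitly enabled; a real driver would check for
	// credentials here instead (DRIVER_SUITE is a hypothetical variable).
	skipCheck := func() string {
		if os.Getenv("DRIVER_SUITE") == "" {
			return "DRIVER_SUITE not set; skipping storage driver suite"
		}
		return ""
	}
	testsuites.RegisterSuite(constructor, skipCheck)
}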
func (suite *DriverSuite) TestValidPaths(c *check.C) { @@ -141,11 +118,11 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) { "/Abc/Cba"} for _, filename := range validFiles { - err := suite.StorageDriver.PutContent(filename, contents) - defer suite.StorageDriver.Delete(firstPart(filename)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) c.Assert(err, check.IsNil) - received, err := suite.StorageDriver.GetContent(filename) + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, contents) } @@ -164,12 +141,12 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) { "/abc_123/"} for _, filename := range invalidFiles { - err := suite.StorageDriver.PutContent(filename, contents) - defer suite.StorageDriver.Delete(firstPart(filename)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - _, err = suite.StorageDriver.GetContent(filename) + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) } @@ -225,7 +202,7 @@ func (suite *DriverSuite) TestTruncate(c *check.C) { // TestReadNonexistent tests reading content from an empty path. func (suite *DriverSuite) TestReadNonexistent(c *check.C) { filename := randomPath(32) - _, err := suite.StorageDriver.GetContent(filename) + _, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } @@ -277,18 +254,19 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { } filename := randomPath(32) - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) checksum := sha1.New() var fileSize int64 = 5 * 1024 * 1024 * 1024 contents := newRandReader(fileSize) - written, err := suite.StorageDriver.WriteStream(filename, 0, io.TeeReader(contents, checksum)) + written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum)) c.Assert(err, check.IsNil) c.Assert(written, check.Equals, fileSize) - reader, err := suite.StorageDriver.ReadStream(filename, 0) + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) + defer reader.Close() writtenChecksum := sha1.New() io.Copy(writtenChecksum, reader) @@ -300,7 +278,7 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { // reading with a given offset. 
func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))

 	chunkSize := int64(32)

@@ -308,10 +286,10 @@
 	contentsChunk2 := randomContents(chunkSize)
 	contentsChunk3 := randomContents(chunkSize)

-	err := suite.StorageDriver.PutContent(filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
 	c.Assert(err, check.IsNil)

-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()

@@ -320,7 +298,7 @@
 	c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))

-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()

@@ -329,7 +307,7 @@
 	c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...))

-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*2)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()

@@ -338,7 +316,7 @@
 	c.Assert(readContents, check.DeepEquals, contentsChunk3)

 	// Ensure we get an InvalidOffsetError for negative offsets.
-	reader, err = suite.StorageDriver.ReadStream(filename, -1)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1)
 	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
 	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
 	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)

@@ -346,7 +324,7 @@
 	// Read past the end of the content and make sure we get a reader that
 	// returns 0 bytes and io.EOF
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()

@@ -356,7 +334,7 @@
 	c.Assert(n, check.Equals, 0)

 	// Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
- reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3-1) + reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1) c.Assert(err, check.IsNil) defer reader.Close() @@ -389,7 +367,7 @@ func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { filename := randomPath(32) - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) contentsChunk1 := randomContents(chunkSize) contentsChunk2 := randomContents(chunkSize) @@ -399,39 +377,39 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) - nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contentsChunk1)) + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk1))) - fi, err := suite.StorageDriver.Stat(filename) + fi, err := suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) - nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(contentsChunk2)) + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - fi, err = suite.StorageDriver.Stat(filename) + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Size(), check.Equals, 2*chunkSize) // Test re-writing the last chunk - nn, err = suite.StorageDriver.WriteStream(filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - fi, err = suite.StorageDriver.Stat(filename) + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Size(), check.Equals, 2*chunkSize) - nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():]))) - received, err := suite.StorageDriver.GetContent(filename) + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, fullContents) @@ -443,16 +421,16 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) fullContents = append(fullContents, zeroChunk...) fullContents = append(fullContents, contentsChunk4...) 
- nn, err = suite.StorageDriver.WriteStream(filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, chunkSize) - fi, err = suite.StorageDriver.Stat(filename) + fi, err = suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Size(), check.Equals, int64(len(fullContents))) - received, err = suite.StorageDriver.GetContent(filename) + received, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(len(received), check.Equals, len(fullContents)) c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk) @@ -460,7 +438,7 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) c.Assert(received, check.DeepEquals, fullContents) // Ensure that negative offsets return correct error. - nn, err = suite.StorageDriver.WriteStream(filename, -1, bytes.NewReader(zeroChunk)) + nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) @@ -472,11 +450,11 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { filename := randomPath(32) - _, err := suite.StorageDriver.ReadStream(filename, 0) + _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - _, err = suite.StorageDriver.ReadStream(filename, 64) + _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } @@ -484,27 +462,27 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { // TestList checks the returned list of keys after populating a directory tree. 
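The tail of testContinueStreamAppend above pins down a subtle piece of the WriteStream contract: a write at an offset beyond the current size zero-fills the gap, which is exactly why the Swift driver inserts zero segments. Distilled into a sketch against the in-memory driver (the path and contents are hypothetical):

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	d := inmemory.New()

	// Write two bytes, then two more at offset 4; the suite requires the
	// gap at [2,4) to read back as zeros.
	if _, err := d.WriteStream(ctx, "/f", 0, bytes.NewReader([]byte("ab"))); err != nil {
		panic(err)
	}
	if _, err := d.WriteStream(ctx, "/f", 4, bytes.NewReader([]byte("cd"))); err != nil {
		panic(err)
	}

	content, err := d.GetContent(ctx, "/f")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", content) // "ab\x00\x00cd"
}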
func (suite *DriverSuite) TestList(c *check.C) { rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) - defer suite.StorageDriver.Delete(rootDirectory) + defer suite.StorageDriver.Delete(suite.ctx, rootDirectory) parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) childFiles := make([]string, 50) for i := 0; i < len(childFiles); i++ { childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) childFiles[i] = childFile - err := suite.StorageDriver.PutContent(childFile, randomContents(32)) + err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) c.Assert(err, check.IsNil) } sort.Strings(childFiles) - keys, err := suite.StorageDriver.List("/") + keys, err := suite.StorageDriver.List(suite.ctx, "/") c.Assert(err, check.IsNil) c.Assert(keys, check.DeepEquals, []string{rootDirectory}) - keys, err = suite.StorageDriver.List(rootDirectory) + keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) c.Assert(err, check.IsNil) c.Assert(keys, check.DeepEquals, []string{parentDirectory}) - keys, err = suite.StorageDriver.List(parentDirectory) + keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) c.Assert(err, check.IsNil) sort.Strings(keys) @@ -523,20 +501,20 @@ func (suite *DriverSuite) TestMove(c *check.C) { sourcePath := randomPath(32) destPath := randomPath(32) - defer suite.StorageDriver.Delete(firstPart(sourcePath)) - defer suite.StorageDriver.Delete(firstPart(destPath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - err := suite.StorageDriver.PutContent(sourcePath, contents) + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) c.Assert(err, check.IsNil) - err = suite.StorageDriver.Move(sourcePath, destPath) + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) c.Assert(err, check.IsNil) - received, err := suite.StorageDriver.GetContent(destPath) + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, contents) - _, err = suite.StorageDriver.GetContent(sourcePath) + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } @@ -549,23 +527,23 @@ func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { sourceContents := randomContents(32) destContents := randomContents(64) - defer suite.StorageDriver.Delete(firstPart(sourcePath)) - defer suite.StorageDriver.Delete(firstPart(destPath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - err := suite.StorageDriver.PutContent(sourcePath, sourceContents) + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) c.Assert(err, check.IsNil) - err = suite.StorageDriver.PutContent(destPath, destContents) + err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) c.Assert(err, check.IsNil) - err = suite.StorageDriver.Move(sourcePath, destPath) + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) c.Assert(err, check.IsNil) - received, err := suite.StorageDriver.GetContent(destPath) + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, sourceContents) - _, err = suite.StorageDriver.GetContent(sourcePath) + _, err = suite.StorageDriver.GetContent(suite.ctx, 
sourcePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } @@ -577,16 +555,16 @@ func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { sourcePath := randomPath(32) destPath := randomPath(32) - defer suite.StorageDriver.Delete(firstPart(destPath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - err := suite.StorageDriver.PutContent(destPath, contents) + err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) c.Assert(err, check.IsNil) - err = suite.StorageDriver.Move(sourcePath, destPath) + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - received, err := suite.StorageDriver.GetContent(destPath) + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, contents) } @@ -596,12 +574,12 @@ func (suite *DriverSuite) TestMoveInvalid(c *check.C) { contents := randomContents(32) // Create a regular file. - err := suite.StorageDriver.PutContent("/notadir", contents) + err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) c.Assert(err, check.IsNil) - defer suite.StorageDriver.Delete("/notadir") + defer suite.StorageDriver.Delete(suite.ctx, "/notadir") // Now try to move a non-existent file under it. - err = suite.StorageDriver.Move("/notadir/foo", "/notadir/bar") + err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") c.Assert(err, check.NotNil) // non-nil error } @@ -611,15 +589,15 @@ func (suite *DriverSuite) TestDelete(c *check.C) { filename := randomPath(32) contents := randomContents(32) - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - err := suite.StorageDriver.PutContent(filename, contents) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) - err = suite.StorageDriver.Delete(filename) + err = suite.StorageDriver.Delete(suite.ctx, filename) c.Assert(err, check.IsNil) - _, err = suite.StorageDriver.GetContent(filename) + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } @@ -630,12 +608,12 @@ func (suite *DriverSuite) TestURLFor(c *check.C) { filename := randomPath(32) contents := randomContents(32) - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - err := suite.StorageDriver.PutContent(filename, contents) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) - url, err := suite.StorageDriver.URLFor(filename, nil) + url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) if err == storagedriver.ErrUnsupportedMethod { return } @@ -649,7 +627,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) { c.Assert(err, check.IsNil) c.Assert(read, check.DeepEquals, contents) - url, err = suite.StorageDriver.URLFor(filename, map[string]interface{}{"method": "HEAD"}) + url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) if err == storagedriver.ErrUnsupportedMethod { return } @@ -663,7 +641,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) { // TestDeleteNonexistent checks that removing a nonexistent key fails. 
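TestURLFor above doubles as usage documentation: callers ask the driver for a direct URL and must tolerate ErrUnsupportedMethod by serving the blob themselves, which is what the Swift driver's URLFor forces. A sketch of that consumer pattern (the driver choice and path are hypothetical):

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	var d storagedriver.StorageDriver = inmemory.New()

	// Ask for a direct HEAD URL; drivers without direct access decline.
	url, err := d.URLFor(ctx, "/some/blob", map[string]interface{}{"method": "HEAD"})
	if err == storagedriver.ErrUnsupportedMethod {
		fmt.Println("driver cannot mint URLs; proxy the blob instead")
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println(url)
}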
func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { filename := randomPath(32) - err := suite.StorageDriver.Delete(filename) + err := suite.StorageDriver.Delete(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } @@ -676,42 +654,42 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) { filename3 := randomPath(32) contents := randomContents(32) - defer suite.StorageDriver.Delete(firstPart(dirname)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirname)) - err := suite.StorageDriver.PutContent(path.Join(dirname, filename1), contents) + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) c.Assert(err, check.IsNil) - err = suite.StorageDriver.PutContent(path.Join(dirname, filename2), contents) + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) c.Assert(err, check.IsNil) - err = suite.StorageDriver.PutContent(path.Join(dirname, filename3), contents) + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) c.Assert(err, check.IsNil) - err = suite.StorageDriver.Delete(path.Join(dirname, filename1)) + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) c.Assert(err, check.IsNil) - _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1)) + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2)) + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) c.Assert(err, check.IsNil) - _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3)) + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) c.Assert(err, check.IsNil) - err = suite.StorageDriver.Delete(dirname) + err = suite.StorageDriver.Delete(suite.ctx, dirname) c.Assert(err, check.IsNil) - _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1)) + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2)) + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3)) + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } @@ -723,24 +701,24 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { fileName := randomFilename(32) filePath := path.Join(dirPath, fileName) - defer suite.StorageDriver.Delete(firstPart(dirPath)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirPath)) // Call on non-existent file/dir, check error. 
- fi, err := suite.StorageDriver.Stat(dirPath) + fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(fi, check.IsNil) - fi, err = suite.StorageDriver.Stat(filePath) + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(fi, check.IsNil) - err = suite.StorageDriver.PutContent(filePath, content) + err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) c.Assert(err, check.IsNil) // Call on regular file, check results - fi, err = suite.StorageDriver.Stat(filePath) + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Path(), check.Equals, filePath) @@ -751,9 +729,9 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { // Sleep and modify the file time.Sleep(time.Second * 10) content = randomContents(4096) - err = suite.StorageDriver.PutContent(filePath, content) + err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) c.Assert(err, check.IsNil) - fi, err = suite.StorageDriver.Stat(filePath) + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency) @@ -768,7 +746,7 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { } // Call on directory (do not check ModTime as dirs don't need to support it) - fi, err = suite.StorageDriver.Stat(dirPath) + fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Path(), check.Equals, dirPath) @@ -784,15 +762,15 @@ func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { filename := randomPath(32) contents := randomContents(4096) - defer suite.StorageDriver.Delete(firstPart(filename)) - err := suite.StorageDriver.PutContent(filename, contents) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) contents = randomContents(2048) // upload a different, smaller file - err = suite.StorageDriver.PutContent(filename, contents) + err = suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) - readContents, err := suite.StorageDriver.GetContent(filename) + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contents) } @@ -810,9 +788,9 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { filename := randomPath(32) contents := randomContents(filesize) - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - err := suite.StorageDriver.PutContent(filename, contents) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) var wg sync.WaitGroup @@ -820,7 +798,7 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { readContents := func() { defer wg.Done() offset := rand.Int63n(int64(len(contents))) - reader, err := suite.StorageDriver.ReadStream(filename, offset) + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) c.Assert(err, check.IsNil) readContents, err := ioutil.ReadAll(reader) @@ -838,10 +816,6 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { 
// TestConcurrentFileStreams checks that multiple *os.File objects can be passed // in to WriteStream concurrently without hanging. func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { - // if _, isIPC := suite.StorageDriver.(*ipc.StorageDriverClient); isIPC { - // c.Skip("Need to fix out-of-process concurrency") - // } - numStreams := 32 if testing.Short() { @@ -872,7 +846,7 @@ func (suite *DriverSuite) TestEventualConsistency(c *check.C) { } filename := randomPath(32) - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) var offset int64 var misswrites int @@ -880,17 +854,17 @@ func (suite *DriverSuite) TestEventualConsistency(c *check.C) { for i := 0; i < 1024; i++ { contents := randomContents(chunkSize) - read, err := suite.StorageDriver.WriteStream(filename, offset, bytes.NewReader(contents)) + read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents)) c.Assert(err, check.IsNil) - fi, err := suite.StorageDriver.Stat(filename) + fi, err := suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) // We are most concerned with being able to read data as soon as Stat declares // it is uploaded. This is the strongest guarantee that some drivers (that guarantee // at best eventual consistency) absolutely need to provide. if fi.Size() == offset+chunkSize { - reader, err := suite.StorageDriver.ReadStream(filename, offset) + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) c.Assert(err, check.IsNil) readContents, err := ioutil.ReadAll(reader) @@ -937,15 +911,15 @@ func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { parentDir := randomPath(8) defer func() { c.StopTimer() - suite.StorageDriver.Delete(firstPart(parentDir)) + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) }() for i := 0; i < c.N; i++ { filename := path.Join(parentDir, randomPath(32)) - err := suite.StorageDriver.PutContent(filename, randomContents(size)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size)) c.Assert(err, check.IsNil) - _, err = suite.StorageDriver.GetContent(filename) + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) } } @@ -975,16 +949,16 @@ func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { parentDir := randomPath(8) defer func() { c.StopTimer() - suite.StorageDriver.Delete(firstPart(parentDir)) + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) }() for i := 0; i < c.N; i++ { filename := path.Join(parentDir, randomPath(32)) - written, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(randomContents(size))) + written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size))) c.Assert(err, check.IsNil) c.Assert(written, check.Equals, size) - rc, err := suite.StorageDriver.ReadStream(filename, 0) + rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) rc.Close() } @@ -1004,17 +978,17 @@ func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { parentDir := randomPath(8) defer func() { c.StopTimer() - suite.StorageDriver.Delete(firstPart(parentDir)) + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) }() for i := int64(0); i < numFiles; i++ { - err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil) + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, 
randomPath(32)), nil) c.Assert(err, check.IsNil) } c.ResetTimer() for i := 0; i < c.N; i++ { - files, err := suite.StorageDriver.List(parentDir) + files, err := suite.StorageDriver.List(suite.ctx, parentDir) c.Assert(err, check.IsNil) c.Assert(int64(len(files)), check.Equals, numFiles) } @@ -1033,17 +1007,17 @@ func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { for i := 0; i < c.N; i++ { parentDir := randomPath(8) - defer suite.StorageDriver.Delete(firstPart(parentDir)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) c.StopTimer() for j := int64(0); j < numFiles; j++ { - err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil) + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) c.Assert(err, check.IsNil) } c.StartTimer() // This is the operation we're benchmarking - err := suite.StorageDriver.Delete(firstPart(parentDir)) + err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) c.Assert(err, check.IsNil) } } @@ -1055,7 +1029,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { defer tf.Close() filename := randomPath(32) - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) contents := randomContents(size) @@ -1065,11 +1039,11 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { tf.Sync() tf.Seek(0, os.SEEK_SET) - nn, err := suite.StorageDriver.WriteStream(filename, 0, tf) + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, size) - reader, err := suite.StorageDriver.ReadStream(filename, 0) + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() @@ -1080,25 +1054,25 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { } func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - err := suite.StorageDriver.PutContent(filename, contents) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) - readContents, err := suite.StorageDriver.GetContent(filename) + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contents) } func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(firstPart(filename)) + defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contents)) + nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contents))) - reader, err := suite.StorageDriver.ReadStream(filename, 0) + reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go index 65d4347fafd1..b3a5f5203e37 100644 --- 
a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go @@ -7,8 +7,8 @@ import ( "io" "io/ioutil" "os" - "time" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -25,10 +25,11 @@ const fileReaderBufferSize = 4 << 20 type fileReader struct { driver storagedriver.StorageDriver + ctx context.Context + // identifying fields - path string - size int64 // size is the total size, must be set. - modtime time.Time // TODO(stevvooe): This is not needed anymore. + path string + size int64 // size is the total size, must be set. // mutable fields rc io.ReadCloser // remote read closer @@ -37,40 +38,17 @@ type fileReader struct { err error // terminal error, if set, reader is closed } -// newFileReader initializes a file reader for the remote file. The read takes -// on the offset and size at the time the reader is created. If the underlying -// file changes, one must create a new fileReader. -func newFileReader(driver storagedriver.StorageDriver, path string) (*fileReader, error) { - rd := &fileReader{ +// newFileReader initializes a file reader for the remote file. The reader +// takes on the size and path that must be determined externally with a stat +// call. The reader operates optimistically, assuming that the file is already +// there. +func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { + return &fileReader{ + ctx: ctx, driver: driver, path: path, - } - - // Grab the size of the layer file, ensuring existence. - if fi, err := driver.Stat(path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): We really don't care if the file is not - // actually present for the reader. If the caller needs to know - // whether or not the file exists, they should issue a stat call - // on the path. There is still no guarantee, since the file may be - // gone by the time the reader is created. The only correct - // behavior is to return a reader that immediately returns EOF. - default: - // Any other error we want propagated up the stack. - return nil, err - } - } else { - if fi.IsDir() { - return nil, fmt.Errorf("cannot read a directory") - } - - // Fill in file information - rd.size = fi.Size() - rd.modtime = fi.ModTime() - } - - return rd, nil + size: size, + }, nil } func (fr *fileReader) Read(p []byte) (n int, err error) { @@ -141,7 +119,7 @@ func (fr *fileReader) reader() (io.Reader, error) { } // If we don't have a reader, open one up. - rc, err := fr.driver.ReadStream(fr.path, fr.offset) + rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: @@ -158,11 +136,6 @@ func (fr *fileReader) reader() (io.Reader, error) { fr.rc = rc if fr.brd == nil { - // TODO(stevvooe): Set an optimal buffer size here. We'll have to - // understand the latency characteristics of the underlying network to - // set this correctly, so we may want to leave it to the driver. For - // out of process drivers, we'll have to optimize this buffer size for - // local communication. 
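// NOTE(editor): an illustrative sketch, not part of this diff, showing the
// calling pattern the reworked newFileReader implies: the size now comes from
// an up-front Stat, and the reader stays optimistic, touching the backend
// only on the first Read. openFileReader is a hypothetical helper.
package storage

import (
	"fmt"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

func openFileReader(ctx context.Context, d storagedriver.StorageDriver, path string) (*fileReader, error) {
	fi, err := d.Stat(ctx, path) // one round trip to learn the size
	if err != nil {
		return nil, err // includes PathNotFoundError; policy is the caller's
	}

	if fi.IsDir() {
		return nil, fmt.Errorf("cannot read a directory: %s", path)
	}

	// The size is frozen here; if the remote file changes afterwards, a new
	// fileReader must be created, as the constructor's comment warns.
	return newFileReader(ctx, d, path, fi.Size())
}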
fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) } else { fr.brd.Reset(fr.rc) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go index 8a07760374f5..774a864b7380 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader_test.go @@ -8,12 +8,13 @@ import ( "os" "testing" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver/inmemory" ) func TestSimpleRead(t *testing.T) { + ctx := context.Background() content := make([]byte, 1<<20) n, err := rand.Read(content) if err != nil { @@ -21,7 +22,7 @@ func TestSimpleRead(t *testing.T) { } if n != len(content) { - t.Fatalf("random read did't fill buffer") + t.Fatalf("random read didn't fill buffer") } dgst, err := digest.FromReader(bytes.NewReader(content)) @@ -32,11 +33,11 @@ func TestSimpleRead(t *testing.T) { driver := inmemory.New() path := "/random" - if err := driver.PutContent(path, content); err != nil { + if err := driver.PutContent(ctx, path, content); err != nil { t.Fatalf("error putting patterned content: %v", err) } - fr, err := newFileReader(driver, path) + fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("error allocating file reader: %v", err) } @@ -59,12 +60,13 @@ func TestFileReaderSeek(t *testing.T) { repititions := 1024 path := "/patterned" content := bytes.Repeat([]byte(pattern), repititions) + ctx := context.Background() - if err := driver.PutContent(path, content); err != nil { + if err := driver.PutContent(ctx, path, content); err != nil { t.Fatalf("error putting patterned content: %v", err) } - fr, err := newFileReader(driver, path) + fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("unexpected error creating file reader: %v", err) @@ -160,7 +162,7 @@ func TestFileReaderSeek(t *testing.T) { // read method, with an io.EOF error. func TestFileReaderNonExistentFile(t *testing.T) { driver := inmemory.New() - fr, err := newFileReader(driver, "/doesnotexist") + fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10) if err != nil { t.Fatalf("unexpected error initializing reader: %v", err) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go index 5f22142e1892..529fa6736535 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go @@ -7,6 +7,7 @@ import ( "io" "os" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -18,6 +19,8 @@ const ( type fileWriter struct { driver storagedriver.StorageDriver + ctx context.Context + // identifying fields path string @@ -36,7 +39,6 @@ type bufferedFileWriter struct { // filewriter should implement. type fileWriterInterface interface { io.WriteSeeker - io.WriterAt io.ReaderFrom io.Closer } @@ -45,13 +47,14 @@ var _ fileWriterInterface = &fileWriter{} // newFileWriter returns a prepared fileWriter for the driver and path. 
This // could be considered similar to an "open" call on a regular filesystem. -func newFileWriter(driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { +func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { fw := fileWriter{ driver: driver, path: path, + ctx: ctx, } - if fi, err := driver.Stat(path); err != nil { + if fi, err := driver.Stat(ctx, path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // ignore, offset is zero @@ -106,21 +109,31 @@ func (bfw *bufferedFileWriter) Flush() error { // Write writes the buffer p at the current write offset. func (fw *fileWriter) Write(p []byte) (n int, err error) { - nn, err := fw.readFromAt(bytes.NewReader(p), -1) - return int(nn), err -} - -// WriteAt writes p at the specified offset. The underlying offset does not -// change. -func (fw *fileWriter) WriteAt(p []byte, offset int64) (n int, err error) { - nn, err := fw.readFromAt(bytes.NewReader(p), offset) + nn, err := fw.ReadFrom(bytes.NewReader(p)) return int(nn), err } // ReadFrom reads reader r until io.EOF writing the contents at the current // offset. func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { - return fw.readFromAt(r, -1) + if fw.err != nil { + return 0, fw.err + } + + nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r) + + // We should forward the offset, whether or not there was an error. + // Basically, we keep the filewriter in sync with the reader's head. If an + // error is encountered, the whole thing should be retried but we proceed + // from an expected offset, even if the data didn't make it to the + // backend. + fw.offset += nn + + if fw.offset > fw.size { + fw.size = fw.offset + } + + return nn, err } // Seek moves the write position do the requested offest based on the whence @@ -165,34 +178,3 @@ func (fw *fileWriter) Close() error { return nil } - -// readFromAt writes to fw from r at the specified offset. If offset is less -// than zero, the value of fw.offset is used and updated after the operation. -func (fw *fileWriter) readFromAt(r io.Reader, offset int64) (n int64, err error) { - if fw.err != nil { - return 0, fw.err - } - - var updateOffset bool - if offset < 0 { - offset = fw.offset - updateOffset = true - } - - nn, err := fw.driver.WriteStream(fw.path, offset, r) - - if updateOffset { - // We should forward the offset, whether or not there was an error. - // Basically, we keep the filewriter in sync with the reader's head. If an - // error is encountered, the whole thing should be retried but we proceed - // from an expected offset, even if the data didn't make it to the - // backend. 
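// NOTE(editor): a hedged sketch, not part of this diff, of what the new
// ReadFrom contract buys callers: fw.offset advances by nn whether or not
// WriteStream succeeded, so a retry can re-seek the source to the writer's
// head and continue "from an expected offset", as the comment above puts it.
// retryReadFrom is hypothetical and assumes the writer began at offset zero,
// so source and writer offsets coincide.
package storage

import (
	"io"
	"os"
)

func retryReadFrom(fw *fileWriter, src io.ReadSeeker, attempts int) (err error) {
	if attempts < 1 {
		attempts = 1
	}
	for i := 0; i < attempts; i++ {
		// Re-position the source at the writer's current head before retrying.
		if _, err = src.Seek(fw.offset, os.SEEK_SET); err != nil {
			return err
		}
		if _, err = fw.ReadFrom(src); err == nil {
			return nil // remainder of the stream made it to the backend
		}
	}
	return err // give up after the final attempt
}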
- fw.offset += nn - - if fw.offset > fw.size { - fw.size = fw.offset - } - } - - return nn, err -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go index a8ea6241a5de..858b03272689 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go @@ -7,6 +7,7 @@ import ( "os" "testing" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -32,8 +33,9 @@ func TestSimpleWrite(t *testing.T) { driver := inmemory.New() path := "/random" + ctx := context.Background() - fw, err := newFileWriter(driver, path) + fw, err := newFileWriter(ctx, driver, path) if err != nil { t.Fatalf("unexpected error creating fileWriter: %v", err) } @@ -49,7 +51,7 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("unexpected write length: %d != %d", n, len(content)) } - fr, err := newFileReader(driver, path) + fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } @@ -76,23 +78,23 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("write did not advance offset: %d != %d", end, len(content)) } - // Double the content, but use the WriteAt method + // Double the content doubled := append(content, content...) doubledgst, err := digest.FromReader(bytes.NewReader(doubled)) if err != nil { t.Fatalf("unexpected error digesting doubled content: %v", err) } - n, err = fw.WriteAt(content, end) + nn, err := fw.ReadFrom(bytes.NewReader(content)) if err != nil { - t.Fatalf("unexpected error writing content at %d: %v", end, err) + t.Fatalf("unexpected error doubling content: %v", err) } - if n != len(content) { + if nn != int64(len(content)) { t.Fatalf("writeat was short: %d != %d", n, len(content)) } - fr, err = newFileReader(driver, path) + fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } @@ -109,32 +111,32 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("unable to verify write data") } - // Check that WriteAt didn't update the offset. + // Check that Write updated the offset. end, err = fw.Seek(0, os.SEEK_END) if err != nil { t.Fatalf("unexpected error seeking: %v", err) } - if end != int64(len(content)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(content)) + if end != int64(len(doubled)) { + t.Fatalf("write did not advance offset: %d != %d", end, len(doubled)) } // Now, we copy from one path to another, running the data through the // fileReader to fileWriter, rather than the driver.Move command to ensure // everything is working correctly. 
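// NOTE(editor): a compact sketch, not part of this diff, of the copy pattern
// the test above exercises: stream one path into another through fileReader
// and fileWriter with io.Copy instead of the driver's Move. copyPath is a
// hypothetical helper; the size comes from a prior Stat, per the new
// newFileReader contract.
package storage

import (
	"io"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

func copyPath(ctx context.Context, d storagedriver.StorageDriver, src, dst string) (int64, error) {
	fi, err := d.Stat(ctx, src)
	if err != nil {
		return 0, err
	}

	fr, err := newFileReader(ctx, d, src, fi.Size())
	if err != nil {
		return 0, err
	}
	defer fr.Close()

	fw, err := newFileWriter(ctx, d, dst)
	if err != nil {
		return 0, err
	}
	defer fw.Close()

	// io.Copy delegates to fw's ReadFrom, handing the whole stream to
	// WriteStream in one call rather than buffer-sized Writes.
	nn, err := io.Copy(fw, fr)
	if err != nil {
		return nn, err
	}

	// Make sure buffered bytes reach the driver before reporting success.
	return nn, fw.Flush()
}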
- fr, err = newFileReader(driver, path) + fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } defer fr.Close() - fw, err = newFileWriter(driver, "/copied") + fw, err = newFileWriter(ctx, driver, "/copied") if err != nil { t.Fatalf("unexpected error creating fileWriter: %v", err) } defer fw.Close() - nn, err := io.Copy(fw, fr) + nn, err = io.Copy(fw, fr) if err != nil { t.Fatalf("unexpected error copying data: %v", err) } @@ -143,7 +145,7 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) } - fr, err = newFileReader(driver, "/copied") + fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } @@ -162,7 +164,8 @@ func TestSimpleWrite(t *testing.T) { } func TestBufferedFileWriter(t *testing.T) { - writer, err := newFileWriter(inmemory.New(), "/random") + ctx := context.Background() + writer, err := newFileWriter(ctx, inmemory.New(), "/random") if err != nil { t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) @@ -203,8 +206,8 @@ func BenchmarkFileWriter(b *testing.B) { driver: inmemory.New(), path: "/random", } - - if fi, err := fw.driver.Stat(fw.path); err != nil { + ctx := context.Background() + if fi, err := fw.driver.Stat(ctx, fw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // ignore, offset is zero @@ -236,8 +239,9 @@ func BenchmarkFileWriter(b *testing.B) { func BenchmarkBufferedFileWriter(b *testing.B) { b.StopTimer() // not sure how long setup above will take + ctx := context.Background() for i := 0; i < b.N; i++ { - bfw, err := newFileWriter(inmemory.New(), "/random") + bfw, err := newFileWriter(ctx, inmemory.New(), "/random") if err != nil { b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layer_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layer_test.go deleted file mode 100644 index f25018daa0ce..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layer_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "golang.org/x/net/context" -) - -// TestSimpleLayerUpload covers the layer upload process, exercising common -// error paths that might be seen during an upload. 
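// NOTE(editor): a condensed sketch, not part of this diff, of the upload
// lifecycle the deleted test below walks through, written against the old
// LayerService interface that this vendoring bump removes. uploadLayer is
// hypothetical.
package storage

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
)

func uploadLayer(ls distribution.LayerService, r io.Reader, dgst digest.Digest) (distribution.Layer, error) {
	upload, err := ls.Upload()
	if err != nil {
		return nil, err
	}

	if _, err := io.Copy(upload, r); err != nil {
		upload.Cancel() // best-effort cleanup of the partial upload
		return nil, err
	}

	// Finish validates size and checksum against dgst and links the blob.
	return upload.Finish(dgst)
}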
-func TestSimpleLayerUpload(t *testing.T) { - randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() - - if err != nil { - t.Fatalf("error creating random reader: %v", err) - } - - dgst := digest.Digest(tarSumStr) - - if err != nil { - t.Fatalf("error allocating upload store: %v", err) - } - - ctx := context.Background() - imageName := "foo/bar" - driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - ls := repository.Layers() - - h := sha256.New() - rd := io.TeeReader(randomDataReader, h) - - layerUpload, err := ls.Upload() - - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Cancel the upload then restart it - if err := layerUpload.Cancel(); err != nil { - t.Fatalf("unexpected error during upload cancellation: %v", err) - } - - // Do a resume, get unknown upload - layerUpload, err = ls.Resume(layerUpload.UUID()) - if err != distribution.ErrLayerUploadUnknown { - t.Fatalf("unexpected error resuming upload, should be unkown: %v", err) - } - - // Restart! - layerUpload, err = ls.Upload() - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - - // Get the size of our random tarfile - randomDataSize, err := seekerSize(randomDataReader) - if err != nil { - t.Fatalf("error getting seeker size of random data: %v", err) - } - - nn, err := io.Copy(layerUpload, rd) - if err != nil { - t.Fatalf("unexpected error uploading layer data: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("layer data write incomplete") - } - - offset, err := layerUpload.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("unexpected error seeking layer upload: %v", err) - } - - if offset != nn { - t.Fatalf("layerUpload not updated with correct offset: %v != %v", offset, nn) - } - layerUpload.Close() - - // Do a resume, for good fun - layerUpload, err = ls.Resume(layerUpload.UUID()) - if err != nil { - t.Fatalf("unexpected error resuming upload: %v", err) - } - - sha256Digest := digest.NewDigest("sha256", h) - layer, err := layerUpload.Finish(dgst) - - if err != nil { - t.Fatalf("unexpected error finishing layer upload: %v", err) - } - - // After finishing an upload, it should no longer exist. - if _, err := ls.Resume(layerUpload.UUID()); err != distribution.ErrLayerUploadUnknown { - t.Fatalf("expected layer upload to be unknown, got %v", err) - } - - // Test for existence. - exists, err := ls.Exists(layer.Digest()) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v", err) - } - - if !exists { - t.Fatalf("layer should now exist") - } - - h.Reset() - nn, err = io.Copy(h, layer) - if err != nil { - t.Fatalf("error reading layer: %v", err) - } - - if nn != randomDataSize { - t.Fatalf("incorrect read length") - } - - if digest.NewDigest("sha256", h) != sha256Digest { - t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) - } -} - -// TestSimpleLayerRead just creates a simple layer file and ensures that basic -// open, read, seek, read works. More specific edge cases should be covered in -// other tests. 
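// NOTE(editor): a sketch, not part of this diff, of the error convention the
// deleted tests rely on: "layer not found" is a typed error
// (distribution.ErrUnknownLayer), so callers type-switch instead of matching
// strings. layerExists is a hypothetical helper over the old interface.
package storage

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
)

func layerExists(ls distribution.LayerService, dgst digest.Digest) (bool, error) {
	_, err := ls.Fetch(dgst)
	switch err.(type) {
	case nil:
		return true, nil
	case distribution.ErrUnknownLayer:
		return false, nil // absent, but not a failure
	default:
		return false, err // infrastructure error; propagate
	}
}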
-func TestSimpleLayerRead(t *testing.T) { - ctx := context.Background() - imageName := "foo/bar" - driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - ls := repository.Layers() - - randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random data: %v", err) - } - - dgst := digest.Digest(tarSumStr) - - // Test for existence. - exists, err := ls.Exists(dgst) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v", err) - } - - if exists { - t.Fatalf("layer should not exist") - } - - // Try to get the layer and make sure we get a not found error - layer, err := ls.Fetch(dgst) - if err == nil { - t.Fatalf("error expected fetching unknown layer") - } - - switch err.(type) { - case distribution.ErrUnknownLayer: - err = nil - default: - t.Fatalf("unexpected error fetching non-existent layer: %v", err) - } - - randomLayerDigest, err := writeTestLayer(driver, defaultPathMapper, imageName, dgst, randomLayerReader) - if err != nil { - t.Fatalf("unexpected error writing test layer: %v", err) - } - - randomLayerSize, err := seekerSize(randomLayerReader) - if err != nil { - t.Fatalf("error getting seeker size for random layer: %v", err) - } - - layer, err = ls.Fetch(dgst) - if err != nil { - t.Fatal(err) - } - defer layer.Close() - - // Now check the sha digest and ensure its the same - h := sha256.New() - nn, err := io.Copy(h, layer) - if err != nil && err != io.EOF { - t.Fatalf("unexpected error copying to hash: %v", err) - } - - if nn != randomLayerSize { - t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize) - } - - sha256Digest := digest.NewDigest("sha256", h) - if sha256Digest != randomLayerDigest { - t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest) - } - - // Now seek back the layer, read the whole thing and check against randomLayerData - offset, err := layer.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error seeking layer: %v", err) - } - - if offset != 0 { - t.Fatalf("seek failed: expected 0 offset, got %d", offset) - } - - p, err := ioutil.ReadAll(layer) - if err != nil { - t.Fatalf("error reading all of layer: %v", err) - } - - if len(p) != int(randomLayerSize) { - t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize) - } - - // Reset the randomLayerReader and read back the buffer - _, err = randomLayerReader.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatalf("error resetting layer reader: %v", err) - } - - randomLayerData, err := ioutil.ReadAll(randomLayerReader) - if err != nil { - t.Fatalf("random layer read failed: %v", err) - } - - if !bytes.Equal(p, randomLayerData) { - t.Fatalf("layer data not equal") - } -} - -// TestLayerUploadZeroLength uploads zero-length -func TestLayerUploadZeroLength(t *testing.T) { - ctx := context.Background() - imageName := "foo/bar" - driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) - repository, err := registry.Repository(ctx, imageName) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - ls := repository.Layers() - - upload, err := ls.Upload() - if err != nil { - t.Fatalf("unexpected error starting upload: %v", err) - } - - io.Copy(upload, bytes.NewReader([]byte{})) - - dgst, err := 
digest.FromReader(bytes.NewReader([]byte{})) - if err != nil { - t.Fatalf("error getting zero digest: %v", err) - } - - if dgst != digest.DigestSha256EmptyTar { - // sanity check on zero digest - t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar) - } - - layer, err := upload.Finish(dgst) - if err != nil { - t.Fatalf("unexpected error finishing upload: %v", err) - } - - if layer.Digest() != dgst { - t.Fatalf("unexpected digest: %v != %v", layer.Digest(), dgst) - } -} - -// writeRandomLayer creates a random layer under name and tarSum using driver -// and pathMapper. An io.ReadSeeker with the data is returned, along with the -// sha256 hex digest. -func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum digest.Digest, sha256digest digest.Digest, err error) { - reader, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { - return nil, "", "", err - } - - tarSum = digest.Digest(tarSumStr) - - // Now, actually create the layer. - randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader)) - - if _, err := reader.Seek(0, os.SEEK_SET); err != nil { - return nil, "", "", err - } - - return reader, tarSum, randomLayerDigest, err -} - -// seekerSize seeks to the end of seeker, checks the size and returns it to -// the original state, returning the size. The state of the seeker should be -// treated as unknown if an error is returned. -func seekerSize(seeker io.ReadSeeker) (int64, error) { - current, err := seeker.Seek(0, os.SEEK_CUR) - if err != nil { - return 0, err - } - - end, err := seeker.Seek(0, os.SEEK_END) - if err != nil { - return 0, err - } - - resumed, err := seeker.Seek(current, os.SEEK_SET) - if err != nil { - return 0, err - } - - if resumed != current { - return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location") - } - - return end, nil -} - -// createTestLayer creates a simple test layer in the provided driver under -// tarsum dgst, returning the sha256 digest location. This is implemented -// piecemeal and should probably be replaced by the uploader when it's ready. 
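// NOTE(editor): a minimal sketch, not part of this diff, of the tee-and-hash
// idiom writeTestLayer below uses: read the payload once, producing both the
// stored bytes and their sha256 digest. digestWhileReading is hypothetical.
package storage

import (
	"crypto/sha256"
	"io"
	"io/ioutil"

	"github.com/docker/distribution/digest"
)

func digestWhileReading(r io.Reader) ([]byte, digest.Digest, error) {
	h := sha256.New()

	// Every byte read through the tee also feeds the hash.
	p, err := ioutil.ReadAll(io.TeeReader(r, h))
	if err != nil {
		return nil, "", err
	}

	return p, digest.NewDigest("sha256", h), nil
}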
-func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) { - h := sha256.New() - rd := io.TeeReader(content, h) - - p, err := ioutil.ReadAll(rd) - - if err != nil { - return "", nil - } - - blobDigestSHA := digest.NewDigest("sha256", h) - - blobPath, err := pathMapper.path(blobDataPathSpec{ - digest: dgst, - }) - - if err := driver.PutContent(blobPath, p); err != nil { - return "", err - } - - if err != nil { - return "", err - } - - layerLinkPath, err := pathMapper.path(layerLinkPathSpec{ - name: name, - digest: dgst, - }) - - if err != nil { - return "", err - } - - if err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil { - return "", nil - } - - return blobDigestSHA, err -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layercache.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layercache.go deleted file mode 100644 index 3d7949f4c1ca..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layercache.go +++ /dev/null @@ -1,210 +0,0 @@ -package storage - -import ( - "expvar" - "sync/atomic" - "time" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" -) - -// cachedLayerService implements the layer service with path-aware caching, -// using a LayerInfoCache interface. -type cachedLayerService struct { - distribution.LayerService // upstream layer service - repository distribution.Repository - ctx context.Context - driver driver.StorageDriver - *blobStore // global blob store - cache cache.LayerInfoCache -} - -// Exists checks for existence of the digest in the cache, immediately -// returning if it exists for the repository. If not, the upstream is checked. -// When a positive result is found, it is written into the cache. -func (lc *cachedLayerService) Exists(dgst digest.Digest) (bool, error) { - ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Exists(%q)", dgst) - now := time.Now() - defer func() { - // TODO(stevvooe): Replace this with a decent context-based metrics solution - ctxu.GetLoggerWithField(lc.ctx, "blob.exists.duration", time.Since(now)). - Infof("(*cachedLayerService).Exists(%q)", dgst) - }() - - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Requests, 1) - available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - if available { - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Hits, 1) - return true, nil - } - -fallback: - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Misses, 1) - exists, err := lc.LayerService.Exists(dgst) - if err != nil { - return exists, err - } - - if exists { - // we can only cache this if the existence is positive. - if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error adding %v@%v to cache: %v", lc.repository.Name(), dgst, err) - } - } - - return exists, err -} - -// Fetch checks for the availability of the layer in the repository via the -// cache. If present, the metadata is resolved and the layer is returned. 
If -// any operation fails, the layer is read directly from the upstream. The -// results are cached, if possible. -func (lc *cachedLayerService) Fetch(dgst digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Fetch(%q)", dgst) - now := time.Now() - defer func() { - ctxu.GetLoggerWithField(lc.ctx, "blob.fetch.duration", time.Since(now)). - Infof("(*layerInfoCache).Fetch(%q)", dgst) - }() - - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Requests, 1) - available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - if available { - // fast path: get the layer info and return - meta, err := lc.cache.Meta(lc.ctx, dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error fetching %v@%v from cache: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Hits, 1) - return newLayerReader(lc.driver, dgst, meta.Path, meta.Length) - } - - // NOTE(stevvooe): Unfortunately, the cache here only makes checks for - // existing layers faster. We'd have to provide more careful - // synchronization with the backend to make the missing case as fast. - -fallback: - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Misses, 1) - layer, err := lc.LayerService.Fetch(dgst) - if err != nil { - return nil, err - } - - // add the layer to the repository - if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx). - Errorf("error caching repository relationship for %v@%v: %v", lc.repository.Name(), dgst, err) - } - - // lookup layer path and add it to the cache, if it succeds. Note that we - // still return the layer even if we have trouble caching it. - if path, err := lc.resolveLayerPath(layer); err != nil { - ctxu.GetLogger(lc.ctx). - Errorf("error resolving path while caching %v@%v: %v", lc.repository.Name(), dgst, err) - } else { - // add the layer to the cache once we've resolved the path. - if err := lc.cache.SetMeta(lc.ctx, dgst, cache.LayerMeta{Path: path, Length: layer.Length()}); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error adding meta for %v@%v to cache: %v", lc.repository.Name(), dgst, err) - } - } - - return layer, err -} - -func (lc *cachedLayerService) Delete(dgst digest.Digest) error { - ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Delete(%q)", dgst) - if err := lc.cache.Delete(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error deleting layer link from cache; repo=%s, layer=%s: %v", lc.repository.Name(), dgst, err) - } - return lc.LayerService.Delete(dgst) -} - -// extractLayerInfo pulls the layerInfo from the layer, attempting to get the -// path information from either the concrete object or by resolving the -// primary blob store path. -func (lc *cachedLayerService) resolveLayerPath(layer distribution.Layer) (path string, err error) { - // try and resolve the type and driver, so we don't have to traverse links - switch v := layer.(type) { - case *layerReader: - // only set path if we have same driver instance. - if v.driver == lc.driver { - return v.path, nil - } - } - - ctxu.GetLogger(lc.ctx).Warnf("resolving layer path during cache lookup (%v@%v)", lc.repository.Name(), layer.Digest()) - // we have to do an expensive stat to resolve the layer location but no - // need to check the link, since we already have layer instance for this - // repository. 
- bp, err := lc.blobStore.path(layer.Digest()) - if err != nil { - return "", err - } - - return bp, nil -} - -// layerInfoCacheMetrics keeps track of cache metrics for layer info cache -// requests. Note this is kept globally and made available via expvar. For -// more detailed metrics, its recommend to instrument a particular cache -// implementation. -var layerInfoCacheMetrics struct { - // Exists tracks calls to the Exists caches. - Exists struct { - Requests uint64 - Hits uint64 - Misses uint64 - } - - // Fetch tracks calls to the fetch caches. - Fetch struct { - Requests uint64 - Hits uint64 - Misses uint64 - } -} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("layerinfo", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date. The - // numbers will always *eventually* be reported correctly. - return layerInfoCacheMetrics - })) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerreader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerreader.go deleted file mode 100644 index 40deba6a70e6..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerreader.go +++ /dev/null @@ -1,79 +0,0 @@ -package storage - -import ( - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// layerReader implements Layer and provides facilities for reading and -// seeking. -type layerReader struct { - fileReader - - digest digest.Digest -} - -// newLayerReader returns a new layerReader with the digest, path and length, -// eliding round trips to the storage backend. -func newLayerReader(driver driver.StorageDriver, dgst digest.Digest, path string, length int64) (*layerReader, error) { - fr := &fileReader{ - driver: driver, - path: path, - size: length, - } - - return &layerReader{ - fileReader: *fr, - digest: dgst, - }, nil -} - -var _ distribution.Layer = &layerReader{} - -func (lr *layerReader) Digest() digest.Digest { - return lr.digest -} - -func (lr *layerReader) Length() int64 { - return lr.size -} - -func (lr *layerReader) CreatedAt() time.Time { - return lr.modtime -} - -// Close the layer. Should be called when the resource is no longer needed. -func (lr *layerReader) Close() error { - return lr.closeWithErr(distribution.ErrLayerClosed) -} - -func (lr *layerReader) Handler(r *http.Request) (h http.Handler, err error) { - var handlerFunc http.HandlerFunc - - redirectURL, err := lr.fileReader.driver.URLFor(lr.path, map[string]interface{}{"method": r.Method}) - - switch err { - case nil: - handlerFunc = func(w http.ResponseWriter, r *http.Request) { - // Redirect to storage URL. - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) - } - case driver.ErrUnsupportedMethod: - handlerFunc = func(w http.ResponseWriter, r *http.Request) { - // Fallback to serving the content directly. 
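// NOTE(editor): a sketch, not part of this diff, of the Handler branch above:
// ask the driver for a pre-signed URL and redirect, falling back to proxying
// the bytes only when the driver cannot mint URLs. serveOrRedirect is
// hypothetical and uses the old, context-free URLFor signature seen here.
package storage

import (
	"net/http"

	"github.com/docker/distribution/registry/storage/driver"
)

func serveOrRedirect(d driver.StorageDriver, path string, fallback http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		redirectURL, err := d.URLFor(path, map[string]interface{}{"method": r.Method})
		switch err {
		case nil:
			// Offload the transfer to the backend, e.g. an S3 signed URL.
			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
		case driver.ErrUnsupportedMethod:
			// Driver cannot produce URLs; stream the content ourselves.
			fallback.ServeHTTP(w, r)
		default:
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
}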
- http.ServeContent(w, r, lr.digest.String(), lr.CreatedAt(), lr) - } - default: - // Some unexpected error. - return nil, err - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Docker-Content-Digest", lr.digest.String()) - handlerFunc.ServeHTTP(w, r) - }), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go deleted file mode 100644 index 6852244613b5..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerstore.go +++ /dev/null @@ -1,190 +0,0 @@ -package storage - -import ( - "strings" - "time" - - "code.google.com/p/go-uuid/uuid" - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -type layerStore struct { - repository *repository -} - -func (ls *layerStore) Exists(digest digest.Digest) (bool, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists") - - // Because this implementation just follows blob links, an existence check - // is pretty cheap by starting and closing a fetch. - _, err := ls.Fetch(digest) - - if err != nil { - switch err.(type) { - case distribution.ErrUnknownLayer: - return false, nil - } - - return false, err - } - - return true, nil -} - -func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Fetch") - bp, err := ls.path(dgst) - if err != nil { - return nil, err - } - - fr, err := newFileReader(ls.repository.driver, bp) - if err != nil { - return nil, err - } - - return &layerReader{ - fileReader: *fr, - digest: dgst, - }, nil -} - -func (ls *layerStore) Delete(dgst digest.Digest) error { - lp, err := ls.linkPath(dgst) - if err != nil { - return err - } - - lp = strings.TrimSuffix(lp, "/link") - - return ls.repository.driver.Delete(lp) -} - -// Upload begins a layer upload, returning a handle. If the layer upload -// is already in progress or the layer has already been uploaded, this -// will return an error. -func (ls *layerStore) Upload() (distribution.LayerUpload, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Upload") - - // NOTE(stevvooe): Consider the issues with allowing concurrent upload of - // the same two layers. Should it be disallowed? For now, we allow both - // parties to proceed and the the first one uploads the layer. - - uuid := uuid.New() - startedAt := time.Now().UTC() - - path, err := ls.repository.pm.path(uploadDataPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - // Write a startedat file for this upload - if err := ls.repository.driver.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - return nil, err - } - - return ls.newLayerUpload(uuid, path, startedAt) -} - -// Resume continues an in progress layer upload, returning the current -// state of the upload. 
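// NOTE(editor): a minimal sketch, not part of this diff, of the startedat
// bookkeeping shared by the deleted layerStore and the new linkedBlobStore:
// the upload's start time is persisted as RFC3339 text, and its absence is
// what distinguishes "unknown upload" from a real storage error. Helper names
// are hypothetical; the old, context-free driver signatures are used to match
// the deleted code.
package storage

import (
	"time"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

func writeStartedAt(d storagedriver.StorageDriver, path string, t time.Time) error {
	return d.PutContent(path, []byte(t.UTC().Format(time.RFC3339)))
}

func readStartedAt(d storagedriver.StorageDriver, path string) (time.Time, bool, error) {
	p, err := d.GetContent(path)
	if err != nil {
		if _, ok := err.(storagedriver.PathNotFoundError); ok {
			return time.Time{}, false, nil // no marker: upload unknown
		}
		return time.Time{}, false, err
	}

	t, err := time.Parse(time.RFC3339, string(p))
	return t, err == nil, err
}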
-func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) { - ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume") - startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtBytes, err := ls.repository.driver.GetContent(startedAtPath) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return nil, distribution.ErrLayerUploadUnknown - default: - return nil, err - } - } - - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return nil, err - } - - path, err := ls.repository.pm.path(uploadDataPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - return ls.newLayerUpload(uuid, path, startedAt) -} - -// newLayerUpload allocates a new upload controller with the given state. -func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (distribution.LayerUpload, error) { - fw, err := newFileWriter(ls.repository.driver, path) - if err != nil { - return nil, err - } - - lw := &layerWriter{ - layerStore: ls, - uuid: uuid, - startedAt: startedAt, - bufferedFileWriter: *fw, - } - - lw.setupResumableDigester() - - return lw, nil -} - -func (ls *layerStore) linkPath(dgst digest.Digest) (string, error) { - return ls.repository.registry.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst}) -} - -func (ls *layerStore) path(dgst digest.Digest) (string, error) { - // We must traverse this path through the link to enforce ownership. - layerLinkPath, err := ls.linkPath(dgst) - if err != nil { - return "", err - } - - blobPath, err := ls.repository.blobStore.resolve(layerLinkPath) - - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return "", distribution.ErrUnknownLayer{ - FSLayer: manifest.FSLayer{BlobSum: dgst}, - } - default: - return "", err - } - } - - return blobPath, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter.go deleted file mode 100644 index adf68ca93c02..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter.go +++ /dev/null @@ -1,477 +0,0 @@ -package storage - -import ( - "fmt" - "io" - "os" - "path" - "strconv" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -var _ distribution.LayerUpload = &layerWriter{} - -// layerWriter is used to control the various aspects of resumable -// layer upload. It implements the LayerUpload interface. -type layerWriter struct { - layerStore *layerStore - - uuid string - startedAt time.Time - resumableDigester digest.ResumableDigester - - // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy - // LayerUpload Interface - bufferedFileWriter -} - -var _ distribution.LayerUpload = &layerWriter{} - -// UUID returns the identifier for this upload. -func (lw *layerWriter) UUID() string { - return lw.uuid -} - -func (lw *layerWriter) StartedAt() time.Time { - return lw.startedAt -} - -// Finish marks the upload as completed, returning a valid handle to the -// uploaded layer. 
The final size and checksum are validated against the -// contents of the uploaded layer. The checksum should be provided in the -// format :. -func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish") - - if err := lw.bufferedFileWriter.Close(); err != nil { - return nil, err - } - - var ( - canonical digest.Digest - err error - ) - - // HACK(stevvooe): To deal with s3's lack of consistency, attempt to retry - // validation on failure. Three attempts are made, backing off - // retries*100ms each time. - for retries := 0; ; retries++ { - canonical, err = lw.validateLayer(dgst) - if err == nil { - break - } - - ctxu.GetLoggerWithField(lw.layerStore.repository.ctx, "retries", retries). - Errorf("error validating layer: %v", err) - - if retries < 3 { - time.Sleep(100 * time.Millisecond * time.Duration(retries+1)) - continue - } - - return nil, err - - } - - if err := lw.moveLayer(canonical); err != nil { - // TODO(stevvooe): Cleanup? - return nil, err - } - - // Link the layer blob into the repository. - if err := lw.linkLayer(canonical, dgst); err != nil { - return nil, err - } - - if err := lw.removeResources(); err != nil { - return nil, err - } - - return lw.layerStore.Fetch(canonical) -} - -// Cancel the layer upload process. -func (lw *layerWriter) Cancel() error { - ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Cancel") - if err := lw.removeResources(); err != nil { - return err - } - - lw.Close() - return nil -} - -func (lw *layerWriter) Write(p []byte) (int, error) { - if lw.resumableDigester == nil { - return lw.bufferedFileWriter.Write(p) - } - - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. - if err := lw.resumeHashAt(lw.offset); err != nil { - return 0, err - } - - return io.MultiWriter(&lw.bufferedFileWriter, lw.resumableDigester).Write(p) -} - -func (lw *layerWriter) ReadFrom(r io.Reader) (n int64, err error) { - if lw.resumableDigester == nil { - return lw.bufferedFileWriter.ReadFrom(r) - } - - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. - if err := lw.resumeHashAt(lw.offset); err != nil { - return 0, err - } - - return lw.bufferedFileWriter.ReadFrom(io.TeeReader(r, lw.resumableDigester)) -} - -func (lw *layerWriter) Close() error { - if lw.err != nil { - return lw.err - } - - if lw.resumableDigester != nil { - if err := lw.storeHashState(); err != nil { - return err - } - } - - return lw.bufferedFileWriter.Close() -} - -type hashStateEntry struct { - offset int64 - path string -} - -// getStoredHashStates returns a slice of hashStateEntries for this upload. -func (lw *layerWriter) getStoredHashStates() ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := lw.layerStore.repository.pm.path(uploadHashStatePathSpec{ - name: lw.layerStore.repository.Name(), - uuid: lw.uuid, - alg: lw.resumableDigester.Digest().Algorithm(), - list: true, - }) - if err != nil { - return nil, err - } - - paths, err := lw.driver.List(uploadHashStatePathPrefix) - if err != nil { - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - return nil, err - } - // Treat PathNotFoundError as no entries. 
- paths = nil - } - - hashStateEntries := make([]hashStateEntry, 0, len(paths)) - - for _, p := range paths { - pathSuffix := path.Base(p) - // The suffix should be the offset. - offset, err := strconv.ParseInt(pathSuffix, 0, 64) - if err != nil { - logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) - } - - hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) - } - - return hashStateEntries, nil -} - -// resumeHashAt attempts to restore the state of the internal hash function -// by loading the most recent saved hash state less than or equal to the given -// offset. Any unhashed bytes remaining less than the given offset are hashed -// from the content uploaded so far. -func (lw *layerWriter) resumeHashAt(offset int64) error { - if offset < 0 { - return fmt.Errorf("cannot resume hash at negative offset: %d", offset) - } - - if offset == int64(lw.resumableDigester.Len()) { - // State of digester is already at the requested offset. - return nil - } - - // List hash states from storage backend. - var hashStateMatch hashStateEntry - hashStates, err := lw.getStoredHashStates() - if err != nil { - return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) - } - - // Find the highest stored hashState with offset less than or equal to - // the requested offset. - for _, hashState := range hashStates { - if hashState.offset == offset { - hashStateMatch = hashState - break // Found an exact offset match. - } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { - // This offset is closer to the requested offset. - hashStateMatch = hashState - } else if hashState.offset > offset { - // Remove any stored hash state with offsets higher than this one - // as writes to this resumed hasher will make those invalid. This - // is probably okay to skip for now since we don't expect anyone to - // use the API in this way. For that reason, we don't treat an - // an error here as a fatal error, but only log it. - if err := lw.driver.Delete(hashState.path); err != nil { - logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) - } - } - } - - if hashStateMatch.offset == 0 { - // No need to load any state, just reset the hasher. - lw.resumableDigester.Reset() - } else { - storedState, err := lw.driver.GetContent(hashStateMatch.path) - if err != nil { - return err - } - - if err = lw.resumableDigester.Restore(storedState); err != nil { - return err - } - } - - // Mind the gap. - if gapLen := offset - int64(lw.resumableDigester.Len()); gapLen > 0 { - // Need to read content from the upload to catch up to the desired - // offset. 
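// NOTE(editor): a sketch, not part of this diff, of the gap-filling step
// below: when the restored hash state lags the requested offset, the
// already-uploaded bytes in between are replayed through the digester.
// catchUpHash is hypothetical; digester stands in for the resumable digester,
// which is an io.Writer with a running length.
package storage

import (
	"fmt"
	"io"
	"os"
)

func catchUpHash(digester io.Writer, hashed int64, upload io.ReadSeeker, offset int64) error {
	gap := offset - hashed
	if gap <= 0 {
		return nil // hash state is already at (or past) the offset
	}

	if _, err := upload.Seek(hashed, os.SEEK_SET); err != nil {
		return fmt.Errorf("unable to seek upload to %d: %v", hashed, err)
	}

	// Feed exactly the missing bytes; any more would desynchronize the hash
	// state from the write offset.
	_, err := io.CopyN(digester, upload, gap)
	return err
}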
- fr, err := newFileReader(lw.driver, lw.path) - if err != nil { - return err - } - - if _, err = fr.Seek(int64(lw.resumableDigester.Len()), os.SEEK_SET); err != nil { - return fmt.Errorf("unable to seek to layer reader offset %d: %s", lw.resumableDigester.Len(), err) - } - - if _, err := io.CopyN(lw.resumableDigester, fr, gapLen); err != nil { - return err - } - } - - return nil -} - -func (lw *layerWriter) storeHashState() error { - uploadHashStatePath, err := lw.layerStore.repository.pm.path(uploadHashStatePathSpec{ - name: lw.layerStore.repository.Name(), - uuid: lw.uuid, - alg: lw.resumableDigester.Digest().Algorithm(), - offset: int64(lw.resumableDigester.Len()), - }) - if err != nil { - return err - } - - hashState, err := lw.resumableDigester.State() - if err != nil { - return err - } - - return lw.driver.PutContent(uploadHashStatePath, hashState) -} - -// validateLayer checks the layer data against the digest, returning an error -// if it does not match. The canonical digest is returned. -func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) { - var ( - verified, fullHash bool - canonical digest.Digest - ) - - if lw.resumableDigester != nil { - // Restore the hasher state to the end of the upload. - if err := lw.resumeHashAt(lw.size); err != nil { - return "", err - } - - canonical = lw.resumableDigester.Digest() - - if canonical.Algorithm() == dgst.Algorithm() { - // Common case: client and server prefer the same canonical digest - // algorithm - currently SHA256. - verified = dgst == canonical - } else { - // The client wants to use a different digest algorithm. They'll just - // have to be patient and wait for us to download and re-hash the - // uploaded content using that digest algorithm. - fullHash = true - } - } else { - // Not using resumable digests, so we need to hash the entire layer. - fullHash = true - } - - if fullHash { - digester := digest.NewCanonicalDigester() - - digestVerifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return "", err - } - - // Read the file from the backend driver and validate it. - fr, err := newFileReader(lw.bufferedFileWriter.driver, lw.path) - if err != nil { - return "", err - } - - tr := io.TeeReader(fr, digester) - - if _, err = io.Copy(digestVerifier, tr); err != nil { - return "", err - } - - canonical = digester.Digest() - verified = digestVerifier.Verified() - } - - if !verified { - ctxu.GetLoggerWithField(lw.layerStore.repository.ctx, "canonical", dgst). - Errorf("canonical digest does match provided digest") - return "", distribution.ErrLayerInvalidDigest{ - Digest: dgst, - Reason: fmt.Errorf("content does not match digest"), - } - } - - return canonical, nil -} - -// moveLayer moves the data into its final, hash-qualified destination, -// identified by dgst. The layer should be validated before commencing the -// move. -func (lw *layerWriter) moveLayer(dgst digest.Digest) error { - blobPath, err := lw.layerStore.repository.pm.path(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return err - } - - // Check for existence - if _, err := lw.driver.Stat(blobPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // ensure that it doesn't exist. - default: - return err - } - } else { - // If the path exists, we can assume that the content has already - // been uploaded, since the blob storage is content-addressable. - // While it may be corrupted, detection of such corruption belongs - // elsewhere. 
- return nil - } - - // If no data was received, we may not actually have a file on disk. Check - // the size here and write a zero-length file to blobPath if this is the - // case. For the most part, this should only ever happen with zero-length - // tars. - if _, err := lw.driver.Stat(lw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // HACK(stevvooe): This is slightly dangerous: if we verify above, - // get a hash, then the underlying file is deleted, we risk moving - // a zero-length blob into a nonzero-length blob location. To - // prevent this horrid thing, we employ the hack of only allowing - // to this happen for the zero tarsum. - if dgst == digest.DigestSha256EmptyTar { - return lw.driver.PutContent(blobPath, []byte{}) - } - - // We let this fail during the move below. - logrus. - WithField("upload.uuid", lw.UUID()). - WithField("digest", dgst).Warnf("attempted to move zero-length content with non-zero digest") - default: - return err // unrelated error - } - } - - return lw.driver.Move(lw.path, blobPath) -} - -// linkLayer links a valid, written layer blob into the registry under the -// named repository for the upload controller. -func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Digest) error { - dgsts := append([]digest.Digest{canonical}, aliases...) - - // Don't make duplicate links. - seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) - - for _, dgst := range dgsts { - if _, seen := seenDigests[dgst]; seen { - continue - } - seenDigests[dgst] = struct{}{} - - layerLinkPath, err := lw.layerStore.repository.pm.path(layerLinkPathSpec{ - name: lw.layerStore.repository.Name(), - digest: dgst, - }) - - if err != nil { - return err - } - - if err := lw.layerStore.repository.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil { - return err - } - } - - return nil -} - -// removeResources should clean up all resources associated with the upload -// instance. An error will be returned if the clean up cannot proceed. If the -// resources are already not present, no error will be returned. -func (lw *layerWriter) removeResources() error { - dataPath, err := lw.layerStore.repository.pm.path(uploadDataPathSpec{ - name: lw.layerStore.repository.Name(), - uuid: lw.uuid, - }) - - if err != nil { - return err - } - - // Resolve and delete the containing directory, which should include any - // upload related files. - dirPath := path.Dir(dataPath) - - if err := lw.driver.Delete(dirPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // already gone! - default: - // This should be uncommon enough such that returning an error - // should be okay. At this point, the upload should be mostly - // complete, but perhaps the backend became unaccessible. 
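// NOTE(editor): a tiny sketch, not part of this diff, of the cleanup
// convention removeResources follows here: deletion tolerates an
// already-missing path, so cleanup stays idempotent. deleteIfPresent is
// hypothetical and uses the old, context-free Delete signature.
package storage

import (
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

func deleteIfPresent(d storagedriver.StorageDriver, path string) error {
	err := d.Delete(path)
	if _, ok := err.(storagedriver.PathNotFoundError); ok {
		return nil // already gone; nothing to clean up
	}
	return err
}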
- logrus.Errorf("unable to delete layer upload resources %q: %v", dirPath, err) - return err - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_nonresumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_nonresumable.go deleted file mode 100644 index d4350c6b843a..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_nonresumable.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build noresumabledigest - -package storage - -func (lw *layerWriter) setupResumableDigester() { -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_resumable.go deleted file mode 100644 index 7d8c63354cf2..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/layerwriter_resumable.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !noresumabledigest - -package storage - -import "github.com/docker/distribution/digest" - -func (lw *layerWriter) setupResumableDigester() { - lw.resumableDigester = digest.NewCanonicalResumableDigester() -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go new file mode 100644 index 000000000000..f19941de6ac9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -0,0 +1,503 @@ +package storage + +import ( + "fmt" + "io" + "net/http" + "path" + "sort" + "strings" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +type byDigestString []digest.Digest + +func (bd byDigestString) Len() int { return len(bd) } +func (bd byDigestString) Less(i, j int) bool { return bd[i].String() < bd[j].String() } +func (bd byDigestString) Swap(i, j int) { bd[i], bd[j] = bd[j], bd[i] } + +// linkPathFunc describes a function that can resolve a link based on the +// repository name and digest. +type linkPathFunc func(pm *pathMapper, name string, dgst digest.Digest) (string, error) + +// blobsRootPathFunc describes a function that can resolve a root directory of +// blob links based on the repository name. +type blobsRootPathFunc func(pm *pathMapper, name string) (string, error) + +// linkedBlobStore provides a full BlobService that namespaces the blobs to a +// given repository. Effectively, it manages the links in a given repository +// that grant access to the global blob store. +type linkedBlobStore struct { + *blobStore + blobServer distribution.BlobServer + blobAccessController distribution.BlobDescriptorService + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + deleteEnabled bool + resumableDigestEnabled bool + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed an the blob links folder should be merged. 
The first entry is + // treated as the "canonical" link location and will be used for writes. + linkPathFns []linkPathFunc + + // blobsRootPathFns functions the same way for blob root directories as + // linkPathFns for blob links. + blobsRootPathFns []blobsRootPathFunc +} + +var _ distribution.BlobStore = &linkedBlobStore{} + +func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return lbs.blobAccessController.Stat(ctx, dgst) +} + +func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Get(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Open(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return err + } + + if canonical.MediaType != "" { + // Set the repository local content type. + w.Header().Set("Content-Type", canonical.MediaType) + } + + return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) +} + +func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst, err := digest.FromBytes(p) + if err != nil { + return distribution.Descriptor{}, err + } + // Place the data in the blob store first. + desc, err := lbs.blobStore.Put(ctx, mediaType, p) + if err != nil { + context.GetLogger(ctx).Errorf("error putting into main store: %v", err) + return distribution.Descriptor{}, err + } + + if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype if incoming differs from what is + // returned by Put above. Note that we should allow updates for a given + // repository. + + return desc, lbs.linkBlob(ctx, desc) +} + +// Writer begins a blob write session, returning a handle. 
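
An aside on the read path just added: Stat is deliberately routed through the repository-scoped blobAccessController before any data access, so digests that are not linked into the repository fail early, and only the resolved canonical digest touches the global store. A minimal caller-side sketch, assuming only the distribution interfaces shown in this diff and a `blobs` value obtained from repository.Blobs(ctx) (fetchBlob and its error text are illustrative, not part of the vendored code):

```go
package example

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// fetchBlob sketches the access-check pattern above: Stat resolves the
// repository-scoped link (failing for unlinked digests), and Get then reads
// from the global store by the canonical digest.
func fetchBlob(ctx context.Context, blobs distribution.BlobStore, dgst digest.Digest) ([]byte, error) {
	desc, err := blobs.Stat(ctx, dgst) // repository-scoped access check
	if err != nil {
		return nil, fmt.Errorf("blob %s not available in this repository: %v", dgst, err)
	}
	return blobs.Get(ctx, desc.Digest) // fetch by canonical digest
}
```
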
+func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Create") + + uuid := uuid.Generate().String() + startedAt := time.Now().UTC() + + path, err := lbs.blobStore.pm.path(uploadDataPathSpec{ + name: lbs.repository.Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{ + name: lbs.repository.Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, uuid, path, startedAt) +} + +func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + + startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{ + name: lbs.repository.Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := lbs.pm.path(uploadDataPathSpec{ + name: lbs.repository.Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, id, path, startedAt) +} + +func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Delete") + if !lbs.deleteEnabled { + return distribution.ErrUnsupported + } + + return lbs.blobAccessController.Clear(ctx, dgst) +} + +// newBlobUpload allocates a new upload controller with the given state. +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { + fw, err := newFileWriter(ctx, lbs.driver, path) + if err != nil { + return nil, err + } + + bw := &blobWriter{ + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.New(), + bufferedFileWriter: *fw, + resumableDigestEnabled: lbs.resumableDigestEnabled, + } + + return bw, nil +} + +// linkBlob links a valid, written blob into the registry under the named +// repository for the upload controller. +func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { + dgsts := append([]digest.Digest{canonical.Digest}, aliases...) + + // TODO(stevvooe): Need to write out mediatype for only canonical hash + // since we don't care about the aliases. They are generally unused except + // for tarsum but those versions don't care about mediatype. + + // Don't make duplicate links. 
+ seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) + + // only use the first link + linkPathFn := lbs.linkPathFns[0] + + for _, dgst := range dgsts { + if _, seen := seenDigests[dgst]; seen { + continue + } + seenDigests[dgst] = struct{}{} + + blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return err + } + + if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { + return err + } + } + + return nil +} + +func (lbs *linkedBlobStore) Enumerate(ctx context.Context, digests []digest.Digest, last string) (n int, err error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Enumerate") + bufs := make([][]digest.Digest, len(lbs.blobsRootPathFns)) + + for i := range lbs.blobsRootPathFns { + found, err := lbs.enumerateBlobsRootPathFn(ctx, i, last) + if err != nil { + return 0, err + } + bufs[i] = found + n += len(found) + } + + digestsFound := make([]digest.Digest, n) + copied := 0 + for _, buf := range bufs { + copied += copy(digestsFound[copied:], buf) + } + digestsFound = digestsFound[:copied] + sort.Sort(byDigestString(digestsFound)) + n = copy(digests, digestsFound) + + if n < len(digests) { + err = io.EOF + } + + return +} + +// enumerateBlobsRootPathFn returns an array of blobs found at particular blobs +// root returned by path function identified by index. Blobs are walked in lexicographical +// order. All the blobs found before the 'last' offset will be skipped. +func (lbs *linkedBlobStore) enumerateBlobsRootPathFn(ctx context.Context, index int, last string) (results []digest.Digest, err error) { + var lastPathComponents []string + + if last != "" { + lastPathComponents, err = digestPathComponents(digest.Digest(last), false) + if err != nil { + return nil, fmt.Errorf("invalid last digest %q", last) + } + } + + if index < 0 || index >= len(lbs.blobsRootPathFns) { + return nil, fmt.Errorf("No blobs root path function at index %d", index) + } + + rootPath, err := lbs.blobsRootPathFns[index](lbs.pm, lbs.repository.Name()) + if err != nil { + return nil, err + } + + pastLastOffset := func(pthSepCount int, pth string, lastPathComponents []string) bool { + maxComponents := pthSepCount + 1 + if maxComponents >= len(lastPathComponents) { + return pth > path.Join(lastPathComponents...) + } + return pth >= path.Join(lastPathComponents[:maxComponents]...) + } + + err = WalkSorted(ctx, lbs.driver, rootPath, func(fi driver.FileInfo) error { + if !fi.IsDir() { + // ignore files + return nil + } + + // trim / prefix + pth := strings.TrimPrefix(strings.TrimPrefix(fi.Path(), rootPath), "/") + sepCount := strings.Count(pth, "/") + + if !pastLastOffset(sepCount, pth, lastPathComponents) { + // we haven't reached the 'last' offset yet + return ErrSkipDir + } + + if sepCount == 0 { + // continue walking + return nil + } + + alg := "" + tarsumParts := reTarsumPrefix.FindStringSubmatch(pth) + isTarsum := len(tarsumParts) > 0 + if sepCount > 3 || (!isTarsum && sepCount > 1) { + // too many path components + return ErrSkipDir + } + + if len(tarsumParts) > 0 { + alg = tarsumPrefix + "." 
+ tarsumParts[1] + "+" + // trim "tarsum//" prefix from path + pth = strings.TrimPrefix(pth[len(tarsumParts[0]):], "/") + } + + digestParts := reDigestPath.FindStringSubmatch(pth) + if len(digestParts) > 0 { + alg += digestParts[1] + dgstHex := digestParts[2] + dgst := digest.NewDigestFromHex(alg, dgstHex) + // append only valid digests + if err := dgst.Validate(); err == nil { + results = append(results, dgst) + } else { + context.GetLogger(ctx).Warnf("skipping invalid digest %q: %v", dgst.String(), err) + } + return ErrSkipDir + } + + return nil + }) + + if err != nil { + switch err.(type) { + case driver.PathNotFoundError: + // ignore + err = nil + default: + if err == ErrStopWalking { + err = nil + } + return nil, err + } + } + + return results, nil +} + +type linkedBlobStatter struct { + *blobStore + repository distribution.Repository + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed an the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. + linkPathFns []linkPathFunc + + // Causes directory containing blob's data to be removed recursively upon + // Clear. + removeParentsOnDelete bool +} + +var _ distribution.BlobDescriptorService = &linkedBlobStatter{} + +func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + var ( + resolveErr error + target digest.Digest + ) + + // try the many link path functions until we get success or an error that + // is not PathNotFoundError. + for _, linkPathFn := range lbs.linkPathFns { + var err error + target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) + + if err == nil { + resolveErr = nil + break // success! + } + + switch err := err.(type) { + case driver.PathNotFoundError: + resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error + default: + return distribution.Descriptor{}, err + } + } + + if resolveErr != nil { + return distribution.Descriptor{}, resolveErr + } + + if target != dgst { + // Track when we are doing cross-digest domain lookups. ie, tarsum to sha256. + context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) + } + + // TODO(stevvooe): Look up repository local mediatype and replace that on + // the returned descriptor. + + return lbs.blobStore.statter.Stat(ctx, target) +} + +func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { + resolveErr := distribution.ErrBlobUnknown + + // clear any possible existence of a link described in linkPathFns + for _, linkPathFn := range lbs.linkPathFns { + blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return err + } + + pth := blobLinkPath + if lbs.removeParentsOnDelete { + pth = path.Dir(blobLinkPath) + } + err = lbs.blobStore.driver.Delete(ctx, pth) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + continue // just ignore this error and continue + default: + return err + } + } + resolveErr = nil + } + + return resolveErr +} + +// resolveTargetWithFunc allows us to read a link to a resource with different +// linkPathFuncs to let us try a few different paths before returning not +// found. 
+func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { + blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return "", err + } + + return lbs.blobStore.readlink(ctx, blobLinkPath) +} + +func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + // The canonical descriptor for a blob is set at the commit phase of upload + return nil +} + +// blobLinkPath provides the path to the blob link, also known as layers. +func blobLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { + return pm.path(layerLinkPathSpec{name: name, digest: dgst}) +} + +// blobsRootPath provides the path to the root of blob links, also known as +// layers. +func blobsRootPath(pm *pathMapper, name string) (string, error) { + return pm.path(layersPathSpec{name: name}) +} + +// manifestRevisionLinkPath provides the path to the manifest revision link. +func manifestRevisionLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { + return pm.path(manifestRevisionLinkPathSpec{name: name, revision: dgst}) +} + +// manifestRevisionsPath provides the path to the manifest revisions directory. +func manifestRevisionsPath(pm *pathMapper, name string) (string, error) { + return pm.path(manifestRevisionsPathSpec{name: name}) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go index 0a554ad3ad47..7c987e792684 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go @@ -4,89 +4,140 @@ import ( "fmt" "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" - "golang.org/x/net/context" ) type manifestStore struct { - repository *repository - - revisionStore *revisionStore - tagStore *tagStore + repository *repository + revisionStore *revisionStore + tagStore *tagStore + ctx context.Context + skipDependencyVerification bool + enumerateAllDigests bool } var _ distribution.ManifestService = &manifestStore{} -func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Exists") - return ms.revisionStore.exists(dgst) +func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") + + _, err := ms.revisionStore.blobStore.Stat(ms.ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return false, nil + } + + return false, err + } + + return true, nil } -func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Get") - return ms.revisionStore.get(dgst) +func (ms *manifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") + return ms.revisionStore.get(ms.ctx, dgst) } -func (ms *manifestStore) Put(ctx context.Context, manifest *manifest.SignedManifest) error { - 
ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Put") +// SkipLayerVerification allows a manifest to be Put before its +// layers are on the filesystem +func SkipLayerVerification(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifeststore") +} - // TODO(stevvooe): Add check here to see if the revision is already - // present in the repository. If it is, we should merge the signatures, do - // a shallow verify (or a full one, doesn't matter) and return an error - // indicating what happened. +// EnumerateAllDigests causes the Enumerate method to include all the digests found +// without checking whether they exist and belong to manifest revisions or not. +func EnumerateAllDigests(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifestStore); ok { + ms.enumerateAllDigests = true + return nil + } + return fmt.Errorf("enumerate all digests only valid for manifeststore") +} - // Verify the manifest. - if err := ms.verifyManifest(manifest); err != nil { +func (ms *manifestStore) Put(manifest *manifest.SignedManifest) error { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") + + if err := ms.verifyManifest(ms.ctx, manifest); err != nil { return err } // Store the revision of the manifest - revision, err := ms.revisionStore.put(manifest) + revision, err := ms.revisionStore.put(ms.ctx, manifest) if err != nil { return err } // Now, tag the manifest - return ms.tagStore.tag(manifest.Tag, revision) + return ms.tagStore.tag(manifest.Tag, revision.Digest) } // Delete removes the revision of the specified manifest. -func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Delete") - return ms.revisionStore.delete(dgst) +func (ms *manifestStore) Delete(dgst digest.Digest) error { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") + return ms.revisionStore.delete(ms.ctx, dgst) } -func (ms *manifestStore) Tags(ctx context.Context) ([]string, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Tags") +func (ms *manifestStore) Tags() ([]string, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Tags") return ms.tagStore.tags() } -func (ms *manifestStore) ExistsByTag(ctx context.Context, tag string) (bool, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).ExistsByTag") +func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).ExistsByTag") return ms.tagStore.exists(tag) } -func (ms *manifestStore) GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).GetByTag") +func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + for _, option := range options { + err := option(ms) + if err != nil { + return nil, err + } + } + + context.GetLogger(ms.ctx).Debug("(*manifestStore).GetByTag") dgst, err := ms.tagStore.resolve(tag) if err != nil { return nil, err } - return ms.revisionStore.get(dgst) + return ms.revisionStore.get(ms.ctx, dgst) +} + +// Enumerate returns an array of digests of all manifest revisions in the repository. +// Returned digests may not be resolvable to actual data.
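
SkipLayerVerification and EnumerateAllDigests above are plain functions matching the distribution.ManifestServiceOption shape, so callers can pass them wherever options are accepted, either per call (GetByTag) or when obtaining the service. A hedged usage sketch, assuming `repo` came from Repository(ctx, name) on a storage-backed registry (putUnverified is an illustrative name, not part of the vendored code):

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/registry/storage"
)

// putUnverified stores a signed manifest without requiring its layer blobs
// to be linked first: SkipLayerVerification is applied as an option when the
// ManifestService is obtained.
func putUnverified(ctx context.Context, repo distribution.Repository, sm *manifest.SignedManifest) error {
	ms, err := repo.Manifests(ctx, storage.SkipLayerVerification)
	if err != nil {
		return err
	}
	return ms.Put(sm) // dependency (layer) verification is skipped
}
```
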
+func (ms *manifestStore) Enumerate() ([]digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Enumerate") + dgsts, err := ms.revisionStore.enumerate() + if err != nil { + return nil, err + } + if ms.enumerateAllDigests { + return dgsts, err + } + res := make([]digest.Digest, 0, len(dgsts)) + for _, dgst := range dgsts { + if _, err := ms.Get(dgst); err == nil { + res = append(res, dgst) + } + } + return res, nil } // verifyManifest ensures that the manifest content is valid from the // perspective of the registry. It ensures that the signature is valid for the // enclosed payload. As a policy, the registry only tries to store valid // content, leaving trust policies of that content up to consumers. -func (ms *manifestStore) verifyManifest(mnfst *manifest.SignedManifest) error { +func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *manifest.SignedManifest) error { var errs distribution.ErrManifestVerification if mnfst.Name != ms.repository.Name() { - // TODO(stevvooe): This needs to be an exported error errs = append(errs, fmt.Errorf("repository name does not match manifest name")) } @@ -103,19 +154,20 @@ func (ms *manifestStore) verifyManifest(mnfst *manifest.SignedManifest) error { } } - for _, fsLayer := range mnfst.FSLayers { - exists, err := ms.repository.Layers().Exists(fsLayer.BlobSum) - if err != nil { - errs = append(errs, err) - } + if !ms.skipDependencyVerification { + for _, fsLayer := range mnfst.FSLayers { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.BlobSum) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } - if !exists { - errs = append(errs, distribution.ErrUnknownLayer{FSLayer: fsLayer}) + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.BlobSum}) + } } } - if len(errs) != 0 { - // TODO(stevvooe): These need to be recoverable by a caller. 
return errs } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go index 1026c8aee050..2e5cea4f7b11 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -6,16 +6,15 @@ import ( "reflect" "testing" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" - "golang.org/x/net/context" ) type manifestStoreTestEnv struct { @@ -30,7 +29,7 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false, false) repo, err := registry.Repository(ctx, name) if err != nil { @@ -49,9 +48,13 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE func TestManifestStorage(t *testing.T) { env := newManifestStoreTestEnv(t, "foo/bar", "thetag") - ms := env.repository.Manifests() + ctx := context.Background() + ms, err := env.repository.Manifests(ctx) + if err != nil { + t.Fatal(err) + } - exists, err := ms.ExistsByTag(env.ctx, env.tag) + exists, err := ms.ExistsByTag(env.tag) if err != nil { t.Fatalf("unexpected error checking manifest existence: %v", err) } @@ -60,7 +63,14 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("manifest should not exist") } - if _, err := ms.GetByTag(env.ctx, env.tag); true { + dgsts, err := ms.Enumerate() + if err != nil { + t.Errorf("unexpected error enumerating manifest revisions: %v", err) + } else if len(dgsts) != 0 { + t.Errorf("expected exactly 0 manifests, not %d", len(dgsts)) + } + + if _, err := ms.GetByTag(env.tag); true { switch err.(type) { case distribution.ErrManifestUnknown: break @@ -98,39 +108,52 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error generating private key: %v", err) } - sm, err := manifest.Sign(&m, pk) - if err != nil { + sm, merr := manifest.Sign(&m, pk) + if merr != nil { t.Fatalf("error signing manifest: %v", err) } - err = ms.Put(env.ctx, sm) + err = ms.Put(sm) if err == nil { - t.Fatalf("expected errors putting manifest") + t.Fatalf("expected errors putting manifest with full verification") } - // TODO(stevvooe): We expect errors describing all of the missing layers. + switch err := err.(type) { + case distribution.ErrManifestVerification: + if len(err) != 2 { + t.Fatalf("expected 2 verification errors: %#v", err) + } + + for _, err := range err { + if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok { + t.Fatalf("unexpected error type: %v", err) + } + } + default: + t.Fatalf("unexpected error verifying manifest: %v", err) + } // Now, upload the layers that were missing! 
for dgst, rs := range testLayers { - upload, err := env.repository.Layers().Upload() + wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) if err != nil { t.Fatalf("unexpected error creating test upload: %v", err) } - if _, err := io.Copy(upload, rs); err != nil { + if _, err := io.Copy(wr, rs); err != nil { t.Fatalf("unexpected error copying to upload: %v", err) } - if _, err := upload.Finish(dgst); err != nil { + if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { t.Fatalf("unexpected error finishing upload: %v", err) } } - if err = ms.Put(env.ctx, sm); err != nil { + if err = ms.Put(sm); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } - exists, err = ms.ExistsByTag(env.ctx, env.tag) + exists, err = ms.ExistsByTag(env.tag) if err != nil { t.Fatalf("unexpected error checking manifest existence: %v", err) } @@ -139,7 +162,8 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("manifest should exist") } - fetchedManifest, err := ms.GetByTag(env.ctx, env.tag) + fetchedManifest, err := ms.GetByTag(env.tag) + if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } @@ -165,7 +189,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("error getting manifest digest: %v", err) } - exists, err = ms.Exists(env.ctx, dgst) + exists, err = ms.Exists(dgst) if err != nil { t.Fatalf("error checking manifest existence by digest: %v", err) } @@ -174,7 +198,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("manifest %s should exist", dgst) } - fetchedByDigest, err := ms.Get(env.ctx, dgst) + fetchedByDigest, err := ms.Get(dgst) if err != nil { t.Fatalf("unexpected error fetching manifest by digest: %v", err) } @@ -192,8 +216,45 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) } + // Enumerate only valid manifest revision digests + dgsts, err = ms.Enumerate() + if err != nil { + t.Errorf("unexpected error enumerating manifest revisions: %v", err) + } else if len(dgsts) != 1 { + t.Errorf("expected exactly 1 manifest, not %d", len(dgsts)) + } else if dgsts[0] != dgst { + t.Errorf("got unexpected digest manifest (%s != %s)", dgsts[0], dgst) + } + + // Enumerate all digests + if err := EnumerateAllDigests(ms); err != nil { + t.Fatalf("failed to configure enumeration of all digests: %v", err) + } + dgsts, err = ms.Enumerate() + if err != nil { + t.Errorf("unexpected error enumerating manifest revisions: %v", err) + } else { + // _layers contain 2 links per one tarsum blob + expCount := 1 + len(testLayers)*2 + if len(dgsts) != expCount { + t.Errorf("unexpected number of returned digests (%d != %d)", len(dgsts), expCount) + } + received := make(map[digest.Digest]struct{}) + for _, dgst := range dgsts { + received[dgst] = struct{}{} + } + if _, exists := received[dgst]; !exists { + t.Errorf("expected manifest revision %s to be returned", dgst.String()) + } + for dgst := range testLayers { + if _, exists := received[dgst]; !exists { + t.Errorf("expected layer blob %s to be returned", dgst.String()) + } + } + } + // Grabs the tags and check that this tagged manifest is present - tags, err := ms.Tags(env.ctx) + tags, err := ms.Tags() if err != nil { t.Fatalf("unexpected error fetching tags: %v", err) } @@ -231,11 +292,11 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) } - if err = ms.Put(env.ctx, sm2); err != nil { + if err = ms.Put(sm2); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } - 
fetched, err := ms.GetByTag(env.ctx, env.tag) + fetched, err := ms.GetByTag(env.tag) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } @@ -280,11 +341,102 @@ func TestManifestStorage(t *testing.T) { } } - // TODO(stevvooe): Currently, deletes are not supported due to some - // complexity around managing tag indexes. We'll add this support back in - // when the manifest format has settled. For now, we expect an error for - // all deletes. - if err := ms.Delete(env.ctx, dgst); err == nil { + // Test deleting manifests + err = ms.Delete(dgst) + if err != nil { t.Fatalf("unexpected error deleting manifest by digest: %v", err) } + + exists, err = ms.Exists(dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if exists { + t.Errorf("Deleted manifest should not exist") + } + + deletedManifest, err := ms.Get(dgst) + if err == nil { + t.Errorf("Unexpected success getting deleted manifest") + } + switch err.(type) { + case distribution.ErrManifestUnknownRevision: + break + default: + t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type()) + } + + if deletedManifest != nil { + t.Errorf("Deleted manifest get returned non-nil") + } + + // Re-upload should restore manifest to a good state + err = ms.Put(sm) + if err != nil { + t.Errorf("Error re-uploading deleted manifest") + } + + exists, err = ms.Exists(dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if !exists { + t.Errorf("Restored manifest should exist") + } + + deletedManifest, err = ms.Get(dgst) + if err != nil { + t.Errorf("Unexpected error getting manifest") + } + if deletedManifest == nil { + t.Errorf("Restored manifest get returned nil") + } + + r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false, false) + repo, err := r.Repository(ctx, env.name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + ms, err = repo.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + err = ms.Delete(dgst) + if err == nil { + t.Errorf("Unexpected success deleting while disabled") + } +} + +// TestLinkPathFuncs ensures that the link path functions' behavior is locked +// down and implemented as expected.
+func TestLinkPathFuncs(t *testing.T) { + for _, testcase := range []struct { + repo string + digest digest.Digest + linkPathFn linkPathFunc + expected string + }{ + { + repo: "foo/bar", + digest: "sha256:deadbeaf", + linkPathFn: blobLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf/link", + }, + { + repo: "foo/bar", + digest: "sha256:deadbeaf", + linkPathFn: manifestRevisionLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf/link", + }, + } { + p, err := testcase.linkPathFn(defaultPathMapper, testcase.repo, testcase.digest) + if err != nil { + t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err) + } + + if p != testcase.expected { + t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) + } + } + } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go index fe648f519db3..2ecc72081b1e 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go @@ -8,7 +8,21 @@ import ( "github.com/docker/distribution/digest" ) -const storagePathVersion = "v2" +const ( + storagePathVersion = "v2" + storagePathRoot = "/docker/registry/" // all driver paths have a prefix + + // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought + // storage path root would be configurable for all drivers through this + // package. In reality, we've found it simpler to do this on a per driver + // basis. + + layersDirectory = "_layers" + manifestsDirectory = "_manifests" + uploadsDirectory = "_uploads" + + multilevelHexPrefixLength = 2 +) // pathMapper maps paths based on "object names" and their ids. The "object // names" mapped by pathMapper are internal to the storage system. @@ -30,11 +44,11 @@ const storagePathVersion = "v2" // -> //link // -> _layers/ // -// -> _uploads/ +// -> _uploads/ // data // startedat // hashstates// -// -> blob/ +// -> blobs/ // // // The storage backend layout is broken up into a content-addressable blob @@ -47,7 +61,7 @@ const storagePathVersion = "v2" // is just a directory of layers which are "linked" into a repository. A layer // can only be accessed through a qualified repository name if it is linked in // the repository. Uploads of layers are managed in the uploads directory, -// which is keyed by upload uuid. When all data for an upload is received, the +// which is keyed by upload id. When all data for an upload is received, the // data is moved into the blob store and the upload directory is deleted. // Abandoned uploads can be garbage collected by reading the startedat file // and removing uploads that have been active for longer than a certain time.
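
The startedat lifecycle described above reduces to one rule: the file holds an RFC3339 timestamp, and an upload becomes eligible for garbage collection once that timestamp falls before the cutoff. A small sketch of that check, mirroring what PurgeUploads further down in this diff does (purgeable is an illustrative name, not part of the vendored code):

```go
package example

import (
	"fmt"
	"time"
)

// purgeable reports whether an upload's startedat contents mark it as older
// than the given cutoff, i.e. safe to garbage collect.
func purgeable(startedAtContents []byte, olderThan time.Time) (bool, error) {
	startedAt, err := time.Parse(time.RFC3339, string(startedAtContents))
	if err != nil {
		return false, fmt.Errorf("malformed startedat file: %v", err)
	}
	return startedAt.Before(olderThan), nil
}
```
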
@@ -66,6 +80,7 @@ const storagePathVersion = "v2" // // Manifests: // +// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ // manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// // manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link // manifestSignaturesPathSpec: /v2/repositories//_manifests/revisions///signatures/ @@ -80,20 +95,22 @@ const storagePathVersion = "v2" // manifestTagIndexEntryPathSpec: /v2/repositories//_manifests/tags//index/// // manifestTagIndexEntryLinkPathSpec: /v2/repositories//_manifests/tags//index///link // -// Layers: +// Blobs: // -// layerLinkPathSpec: /v2/repositories//_layers/tarsum////link +// layersPathSpec: /v2/repositories//_layers/ +// layerLinkPathSpec: /v2/repositories//_layers///link // // Uploads: // -// uploadDataPathSpec: /v2/repositories//_uploads//data -// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat -// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// +// uploadDataPathSpec: /v2/repositories//_uploads//data +// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat +// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// // // Blob Store: // // blobPathSpec: /v2/blobs/// // blobDataPathSpec: /v2/blobs////data +// blobMediaTypePathSpec: /v2/blobs////data // // For more information on the semantic meaning of each path and their // contents, please see the path spec documentation. @@ -127,13 +144,20 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { switch v := spec.(type) { + case manifestRevisionsPathSpec: + + return path.Join(append(repoPrefix, v.name, manifestsDirectory, "revisions")...), nil case manifestRevisionPathSpec: + revisionsPrefix, err := pm.path(manifestRevisionsPathSpec{name: v.name}) + if err != nil { + return "", err + } components, err := digestPathComponents(v.revision, false) if err != nil { return "", err } - return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil + return path.Join(append([]string{revisionsPrefix}, components...)...), nil case manifestRevisionLinkPathSpec: root, err := pm.path(manifestRevisionPathSpec{ name: v.name, @@ -172,7 +196,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil case manifestTagsPathSpec: - return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil + return path.Join(append(repoPrefix, v.name, manifestsDirectory, "tags")...), nil case manifestTagPathSpec: root, err := pm.path(manifestTagsPathSpec{ name: v.name, @@ -228,35 +252,54 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { } return path.Join(root, path.Join(components...)), nil + case layersPathSpec: + + return path.Join(append(repoPrefix, v.name, layersDirectory)...), nil case layerLinkPathSpec: + layersPrefix, err := pm.path(layersPathSpec{name: v.name}) + if err != nil { + return "", err + } components, err := digestPathComponents(v.digest, false) if err != nil { return "", err } + components = append(components, "link") - layerLinkPathComponents := append(repoPrefix, v.name, "_layers") + // TODO(stevvooe): Right now, all blobs are linked under "_layers". If + // we have future migrations, we may want to rename this to "_blobs". + // A migration strategy would simply leave existing items in place and + // write the new paths, commit a file then delete the old files. 
- return path.Join(path.Join(append(layerLinkPathComponents, components...)...), "link"), nil - case blobDataPathSpec: + return path.Join(append([]string{layersPrefix}, components...)...), nil + case blobPathSpec: components, err := digestPathComponents(v.digest, true) if err != nil { return "", err } - components = append(components, "data") blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + case blobDataPathSpec: + blobPathPrefix, err := pm.path(blobPathSpec{ + digest: v.digest, + }) + if err != nil { + return "", err + } + return path.Join(blobPathPrefix, "data"), nil case uploadDataPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "data")...), nil + return path.Join(append(repoPrefix, v.name, uploadsDirectory, v.id, "data")...), nil case uploadStartedAtPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "startedat")...), nil + return path.Join(append(repoPrefix, v.name, uploadsDirectory, v.id, "startedat")...), nil case uploadHashStatePathSpec: offset := fmt.Sprintf("%d", v.offset) if v.list { offset = "" // Limit to the prefix for listing offsets. } - return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "hashstates", v.alg, offset)...), nil + return path.Join(append(repoPrefix, v.name, uploadsDirectory, v.id, "hashstates", string(v.alg), offset)...), nil case repositoriesRootPathSpec: return path.Join(repoPrefix...), nil default: @@ -272,6 +315,14 @@ type pathSpec interface { pathSpec() } +// manifestRevisionsPathSpec describes the components of the directory path for +// a root of repository revisions. +type manifestRevisionsPathSpec struct { + name string +} + +func (manifestRevisionsPathSpec) pathSpec() {} + // manifestRevisionPathSpec describes the components of the directory path for // a manifest revision. type manifestRevisionPathSpec struct { @@ -367,8 +418,15 @@ type manifestTagIndexEntryLinkPathSpec struct { func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} -// layerLink specifies a path for a layer link, which is a file with a blob -// id. The layer link will contain a content addressable blob id reference +// layersPathSpec describes the root directory of repository layer links. +type layersPathSpec struct { + name string +} + +func (layersPathSpec) pathSpec() {} + +// blobLinkPathSpec specifies a path for a blob link, which is a file with a +// blob id. The blob link will contain a content addressable blob id reference // into the blob store. The format of the contents is as follows: // // : @@ -377,7 +435,7 @@ func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} // // sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 // -// This says indicates that there is a blob with the id/digest, calculated via +// This indicates that there is a blob with the id/digest, calculated via // sha256 that can be fetched from the blob store. type layerLinkPathSpec struct { name string @@ -396,12 +454,12 @@ var blobAlgorithmReplacer = strings.NewReplacer( ";", "/", ) -// // blobPathSpec contains the path for the registry global blob store. -// type blobPathSpec struct { -// digest digest.Digest -// } +// blobPathSpec contains the path for the registry global blob store. +type blobPathSpec struct { + digest digest.Digest +} -// func (blobPathSpec) pathSpec() {} +func (blobPathSpec) pathSpec() {} // blobDataPathSpec contains the path for the registry global blob store. For // now, this contains layer data, exclusively. 
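
To make the blob layout concrete: digestPathComponents splits off a prefix of the first two hex characters when multilevel is set (multilevelHexPrefixLength above), which is how blobDataPathSpec fans blobs out across directories. An illustrative re-statement, not the vendored implementation, using the sha256 example digest quoted earlier in this file:

```go
package example

import (
	"fmt"
	"strings"
)

// blobDataPath maps a digest like
// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
// to /docker/registry/v2/blobs/sha256/96/96443a84…/data, splitting the first
// two hex characters into an intermediate directory.
func blobDataPath(dgst string) (string, error) {
	parts := strings.SplitN(dgst, ":", 2)
	if len(parts) != 2 || len(parts[1]) < 2 {
		return "", fmt.Errorf("unexpected digest format: %q", dgst)
	}
	alg, hex := parts[0], parts[1]
	// storagePathRoot + storagePathVersion, then algorithm, 2-char prefix, full hex
	return fmt.Sprintf("/docker/registry/v2/blobs/%s/%s/%s/data", alg, hex[:2], hex), nil
}
```
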
@@ -415,7 +473,7 @@ func (blobDataPathSpec) pathSpec() {} // uploads. type uploadDataPathSpec struct { name string - uuid string + id string } func (uploadDataPathSpec) pathSpec() {} @@ -429,7 +487,7 @@ func (uploadDataPathSpec) pathSpec() {} // the client to enforce time out policies. type uploadStartedAtPathSpec struct { name string - uuid string + id string } func (uploadStartedAtPathSpec) pathSpec() {} @@ -437,11 +495,11 @@ func (uploadStartedAtPathSpec) pathSpec() {} // uploadHashStatePathSpec defines the path parameters for the file that stores // the hash function state of an upload at a specific byte offset. If `list` is // set, then the path mapper will generate a list prefix for all hash state -// offsets for the upload identified by the name, uuid, and alg. +// offsets for the upload identified by the name, id, and alg. type uploadHashStatePathSpec struct { name string - uuid string - alg string + id string + alg digest.Algorithm offset int64 list bool } @@ -473,14 +531,14 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) return nil, err } - algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm()) + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) hex := dgst.Hex() prefix := []string{algorithm} var suffix []string if multilevel { - suffix = append(suffix, hex[:2]) + suffix = append(suffix, hex[:multilevelHexPrefixLength]) } suffix = append(suffix, hex) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go index 7dff6e09352a..3d17b3779c61 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go @@ -111,14 +111,14 @@ func TestPathMapper(t *testing.T) { { spec: uploadDataPathSpec{ name: "foo/bar", - uuid: "asdf-asdf-asdf-adsf", + id: "asdf-asdf-asdf-adsf", }, expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", }, { spec: uploadStartedAtPathSpec{ name: "foo/bar", - uuid: "asdf-asdf-asdf-adsf", + id: "asdf-asdf-asdf-adsf", }, expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", }, diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go index 13c468dedfe3..c66f8881a426 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -5,9 +5,10 @@ import ( "strings" "time" - "code.google.com/p/go-uuid/uuid" log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" ) // uploadData stored the location of temporary files created during a layer upload @@ -28,9 +29,9 @@ func newUploadData() uploadData { // PurgeUploads deletes files from the upload directory // created before olderThan. 
The list of files deleted and errors // encountered are returned -func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { +func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) - uploadData, errors := getOutstandingUploads(driver) + uploadData, errors := getOutstandingUploads(ctx, driver) var deleted []string for _, uploadData := range uploadData { if uploadData.startedAt.Before(olderThan) { @@ -38,7 +39,7 @@ func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actua log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", uploadData.containingDir, uploadData.startedAt, olderThan) if actuallyDelete { - err = driver.Delete(uploadData.containingDir) + err = driver.Delete(ctx, uploadData.containingDir) } if err == nil { deleted = append(deleted, uploadData.containingDir) @@ -56,7 +57,7 @@ func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actua // which could be eligible for deletion. The only reliable way to // classify the age of a file is with the date stored in the startedAt // file, so gather files by UUID with a date from startedAt. -func getOutstandingUploads(driver storageDriver.StorageDriver) (map[string]uploadData, []error) { +func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { var errors []error uploads := make(map[string]uploadData, 0) @@ -65,7 +66,7 @@ func getOutstandingUploads(driver storageDriver.StorageDriver) (map[string]uploa if err != nil { return uploads, append(errors, err) } - err = Walk(driver, root, func(fileInfo storageDriver.FileInfo) error { + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) if file[0] == '_' { @@ -115,8 +116,8 @@ func getOutstandingUploads(driver storageDriver.StorageDriver) (map[string]uploa func uUIDFromPath(path string) (string, bool) { components := strings.Split(path, "/") for i := len(components) - 1; i >= 0; i-- { - if uuid := uuid.Parse(components[i]); uuid != nil { - return uuid.String(), i == len(components)-1 + if u, err := uuid.Parse(components[i]); err == nil { + return u.String(), i == len(components)-1 } } return "", false @@ -124,7 +125,8 @@ func uUIDFromPath(path string) (string, bool) { // readStartedAtFile reads the date from an upload's startedAtFile func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { - startedAtBytes, err := driver.GetContent(path) + // todo:(richardscothern) - pass in a context + startedAtBytes, err := driver.GetContent(context.Background(), path) if err != nil { return time.Now(), err } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go index 368e7c86da4c..18c98af8f8bc 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads_test.go @@ -6,36 +6,38 @@ import ( "testing" "time" - "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution/context" 
"github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/uuid" ) var pm = defaultPathMapper -func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) driver.StorageDriver { +func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { d := inmemory.New() + ctx := context.Background() for i := 0; i < numUploads; i++ { - addUploads(t, d, uuid.New(), repoName, startedAt) + addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt) } - return d + return d, ctx } -func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { - dataPath, err := pm.path(uploadDataPathSpec{name: repo, uuid: uploadID}) +func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { + dataPath, err := pm.path(uploadDataPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } - if err := d.PutContent(dataPath, []byte("")); err != nil { + if err := d.PutContent(ctx, dataPath, []byte("")); err != nil { t.Fatalf("Unable to write data file") } - startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, uuid: uploadID}) + startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } - if d.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { t.Fatalf("Unable to write startedAt file") } @@ -43,8 +45,8 @@ func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, sta func TestPurgeGather(t *testing.T) { uploadCount := 5 - fs := testUploadFS(t, uploadCount, "test-repo", time.Now()) - uploadData, errs := getOutstandingUploads(fs) + fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now()) + uploadData, errs := getOutstandingUploads(ctx, fs) if len(errs) != 0 { t.Errorf("Unexepected errors: %q", errs) } @@ -54,9 +56,9 @@ func TestPurgeGather(t *testing.T) { } func TestPurgeNone(t *testing.T) { - fs := testUploadFS(t, 10, "test-repo", time.Now()) + fs, ctx := testUploadFS(t, 10, "test-repo", time.Now()) oneHourAgo := time.Now().Add(-1 * time.Hour) - deleted, errs := PurgeUploads(fs, oneHourAgo, true) + deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true) if len(errs) != 0 { t.Error("Unexpected errors", errs) } @@ -68,13 +70,13 @@ func TestPurgeNone(t *testing.T) { func TestPurgeAll(t *testing.T) { uploadCount := 10 oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) + fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) // Ensure > 1 repos are purged - addUploads(t, fs, uuid.New(), "test-repo2", oneHourAgo) + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo) uploadCount++ - deleted, errs := PurgeUploads(fs, time.Now(), true) + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors:", errs) } @@ -88,15 +90,15 @@ func TestPurgeAll(t *testing.T) { func TestPurgeSome(t *testing.T) { oldUploadCount := 5 oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) + fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) newUploadCount := 4 for i := 0; i < 
newUploadCount; i++ { - addUploads(t, fs, uuid.New(), "test-repo", time.Now().Add(1*time.Hour)) + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour)) } - deleted, errs := PurgeUploads(fs, time.Now(), true) + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors:", errs) } @@ -109,11 +111,11 @@ func TestPurgeSome(t *testing.T) { func TestPurgeOnlyUploads(t *testing.T) { oldUploadCount := 5 oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) + fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) // Create a directory tree outside _uploads and ensure // these files aren't deleted. - dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", uuid: uuid.New()}) + dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) if err != nil { t.Fatalf(err.Error()) } @@ -123,11 +125,11 @@ func TestPurgeOnlyUploads(t *testing.T) { } nonUploadFile := path.Join(nonUploadPath, "file") - if err = fs.PutContent(nonUploadFile, []byte("")); err != nil { + if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil { t.Fatalf("Unable to write data file") } - deleted, errs := PurgeUploads(fs, time.Now(), true) + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors", errs) } @@ -140,13 +142,14 @@ func TestPurgeOnlyUploads(t *testing.T) { func TestPurgeMissingStartedAt(t *testing.T) { oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, 1, "test-repo", oneHourAgo) - err := Walk(fs, "/", func(fileInfo driver.FileInfo) error { + fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) + + err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) if file == "startedat" { - if err := fs.Delete(filePath); err != nil { + if err := fs.Delete(ctx, filePath); err != nil { t.Fatalf("Unable to delete startedat file: %s", filePath) } } @@ -155,7 +158,7 @@ func TestPurgeMissingStartedAt(t *testing.T) { if err != nil { t.Fatalf("Unexpected error during Walk: %s ", err.Error()) } - deleted, errs := PurgeUploads(fs, time.Now(), true) + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) > 0 { t.Errorf("Unexpected errors") } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go index 919fd7b70525..09822bdac9d0 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go @@ -1,38 +1,62 @@ package storage import ( + "fmt" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" ) // registry is the top-level implementation of Registry for use in the storage // package. All instances should descend from this object. type registry struct { - driver storagedriver.StorageDriver - pm *pathMapper - blobStore *blobStore - layerInfoCache cache.LayerInfoCache + blobStore *blobStore + blobServer distribution.BlobServer + statter distribution.BlobStatter // global statter service. 
+ blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool } // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is -// cheap to allocate. -func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Namespace { - bs := &blobStore{ +// cheap to allocate. If redirect is true, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. +// +// TODO(stevvooe): This function signature is getting very out of hand. Move to +// functional options for instance configuration. +func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider, deleteEnabled bool, redirect bool, isCache bool, removeParentsOnDelete bool) distribution.Namespace { + // create global statter, with cache. + var statter distribution.BlobDescriptorService = &blobStatter{ driver: driver, pm: defaultPathMapper, } + if blobDescriptorCacheProvider != nil { + statter = cache.NewCachedBlobStatter(blobDescriptorCacheProvider, statter) + } + + bs := &blobStore{ + driver: driver, + pm: defaultPathMapper, + statter: statter, + deleteEnabled: deleteEnabled, + removeParentsOnDelete: removeParentsOnDelete, + } + return ®istry{ - driver: driver, blobStore: bs, - - // TODO(sday): This should be configurable. - pm: defaultPathMapper, - layerInfoCache: layerInfoCache, + blobServer: &blobServer{ + driver: driver, + statter: statter, + pathFn: bs.path, + redirect: redirect, + }, + blobDescriptorCacheProvider: blobDescriptorCacheProvider, + deleteEnabled: deleteEnabled, + resumableDigestEnabled: !isCache, } } @@ -46,25 +70,59 @@ func (reg *registry) Scope() distribution.Scope { // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) { - if err := v2.ValidateRespositoryName(name); err != nil { + if err := v2.ValidateRepositoryName(name); err != nil { return nil, distribution.ErrRepositoryNameInvalid{ Name: name, Reason: err, } } + var descriptorCache distribution.BlobDescriptorService + if reg.blobDescriptorCacheProvider != nil { + var err error + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(name) + if err != nil { + return nil, err + } + } + return &repository{ - ctx: ctx, - registry: reg, - name: name, + ctx: ctx, + registry: reg, + name: name, + descriptorCache: descriptorCache, }, nil } +// Blobs returns an instance of the BlobServer for registry's blob access. +func (reg *registry) Blobs() distribution.BlobService { + return reg.blobStore +} + +// RegistryBlobEnumerator returns an instance of BlobEnumerator for given registry object. +func RegistryBlobEnumerator(ns distribution.Namespace) (distribution.BlobEnumerator, error) { + reg, ok := ns.(*registry) + if !ok { + return nil, fmt.Errorf("cannot instantiate BlobEnumerator with given namespace object (%T)", ns) + } + return reg.blobStore, nil +} + +// RegistryBlobDeleter returns an instance of BlobDeleter for given registry object. 
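
For reference, the updated constructor threads four positional booleans, which is exactly the signature bloat the TODO about functional options is reacting to. A construction sketch matching the calls in the tests above, assuming the vendored packages are importable under the github.com paths used throughout this diff (newTestRegistry is an illustrative name):

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

// newTestRegistry builds an in-memory registry the way the tests in this
// diff do; the trailing booleans map positionally to the parameters named in
// the signature above.
func newTestRegistry(ctx context.Context) distribution.Namespace {
	return storage.NewRegistryWithDriver(
		ctx,
		inmemory.New(),
		memory.NewInMemoryBlobDescriptorCacheProvider(),
		true,  // deleteEnabled
		true,  // redirect
		false, // isCache
		false, // removeParentsOnDelete
	)
}
```
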
+func RegistryBlobDeleter(ns distribution.Namespace) (distribution.BlobDeleter, error) { + reg, ok := ns.(*registry) + if !ok { + return nil, fmt.Errorf("cannot instantiate BlobDeleter with given namespace object (%T)", ns) + } + return reg.blobStore, nil +} + // repository provides name-scoped access to various services. type repository struct { *registry - ctx context.Context - name string + ctx context.Context + name string + descriptorCache distribution.BlobDescriptorService } // Name returns the name of the repository. @@ -75,53 +133,94 @@ func (repo *repository) Name() string { // Manifests returns an instance of ManifestService. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. -func (repo *repository) Manifests() distribution.ManifestService { - return &manifestStore{ +func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifestLinkPathFns := []linkPathFunc{ + // NOTE(stevvooe): Need to search through multiple locations since + // 2.1.0 unintentionally linked into _layers. + manifestRevisionLinkPath, + blobLinkPath, + } + manifestRootPathFns := []blobsRootPathFunc{ + manifestRevisionsPath, + blobsRootPath, + } + + ms := &manifestStore{ + ctx: ctx, repository: repo, revisionStore: &revisionStore{ + ctx: ctx, repository: repo, + blobStore: &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + removeParentsOnDelete: repo.registry.blobStore.removeParentsOnDelete, + }, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPathFns: manifestLinkPathFns, + blobsRootPathFns: manifestRootPathFns, + }, }, tagStore: &tagStore{ + ctx: ctx, repository: repo, + blobStore: repo.registry.blobStore, }, } + + // Apply options + for _, option := range options { + err := option(ms) + if err != nil { + return nil, err + } + } + + return ms, nil } -// Layers returns an instance of the LayerService. Instantiation is cheap and +// Blobs returns an instance of the BlobStore. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. -func (repo *repository) Layers() distribution.LayerService { - ls := &layerStore{ - repository: repo, +func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: []linkPathFunc{blobLinkPath}, + removeParentsOnDelete: repo.registry.blobStore.removeParentsOnDelete, } - if repo.registry.layerInfoCache != nil { - // TODO(stevvooe): This is not the best place to setup a cache. We would - // really like to decouple the cache from the backend but also have the - // manifeset service use the layer service cache. For now, we can simply - // integrate the cache directly. The main issue is that we have layer - // access and layer data coupled in a single object. Work is already under - // way to decouple this. 
- - return &cachedLayerService{ - LayerService: ls, - repository: repo, - ctx: repo.ctx, - driver: repo.driver, - blobStore: repo.blobStore, - cache: repo.registry.layerInfoCache, - } + if repo.descriptorCache != nil { + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) } - return ls + return &linkedBlobStore{ + blobStore: repo.blobStore, + blobServer: repo.blobServer, + blobAccessController: statter, + repository: repo, + ctx: ctx, + + // TODO(stevvooe): linkPath limits this blob store to only layers. + // This instance cannot be used for manifest checks. + linkPathFns: []linkPathFunc{blobLinkPath}, + blobsRootPathFns: []blobsRootPathFunc{blobsRootPath}, + deleteEnabled: repo.registry.deleteEnabled, + } } func (repo *repository) Signatures() distribution.SignatureService { return &signatureStore{ repository: repo, + blobStore: repo.blobStore, + ctx: repo.ctx, } } - -func (reg *registry) Blobs() distribution.BlobService { - return reg.blobStore -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go index ac6053602530..17675dba7616 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go @@ -3,8 +3,8 @@ package storage import ( "encoding/json" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" @@ -12,47 +12,43 @@ import ( // revisionStore supports storing and managing manifest revisions. type revisionStore struct { - *repository + repository *repository + blobStore *linkedBlobStore + ctx context.Context } -// exists returns true if the revision is available in the named repository. -func (rs *revisionStore) exists(revision digest.Digest) (bool, error) { - revpath, err := rs.pm.path(manifestRevisionPathSpec{ - name: rs.Name(), - revision: revision, - }) - +// get retrieves the manifest, keyed by revision digest. +func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*manifest.SignedManifest, error) { + // Ensure that this revision is available in this repository. + _, err := rs.blobStore.Stat(ctx, revision) if err != nil { - return false, err - } + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: rs.repository.Name(), + Revision: revision, + } + } - exists, err := exists(rs.driver, revpath) - if err != nil { - return false, err + return nil, err } - return exists, nil -} + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. -// get retrieves the manifest, keyed by revision digest. -func (rs *revisionStore) get(revision digest.Digest) (*manifest.SignedManifest, error) { - // Ensure that this revision is available in this repository. 
- if exists, err := rs.exists(revision); err != nil { - return nil, err - } else if !exists { - return nil, distribution.ErrUnknownManifestRevision{ - Name: rs.Name(), - Revision: revision, + content, err := rs.blobStore.Get(ctx, revision) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: rs.repository.Name(), + Revision: revision, + } } - } - content, err := rs.blobStore.get(revision) - if err != nil { return nil, err } // Fetch the signatures for the manifest - signatures, err := rs.Signatures().Get(revision) + signatures, err := rs.repository.Signatures().Get(revision) if err != nil { return nil, err } @@ -78,69 +74,43 @@ func (rs *revisionStore) get(revision digest.Digest) (*manifest.SignedManifest, // put stores the manifest in the repository, if not already present. Any // updated signatures will be stored, as well. -func (rs *revisionStore) put(sm *manifest.SignedManifest) (digest.Digest, error) { +func (rs *revisionStore) put(ctx context.Context, sm *manifest.SignedManifest) (distribution.Descriptor, error) { // Resolve the payload in the manifest. payload, err := sm.Payload() if err != nil { - return "", err + return distribution.Descriptor{}, err } // Digest and store the manifest payload in the blob store. - revision, err := rs.blobStore.put(payload) + revision, err := rs.blobStore.Put(ctx, manifest.ManifestMediaType, payload) if err != nil { - logrus.Errorf("error putting payload into blobstore: %v", err) - return "", err + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return distribution.Descriptor{}, err } // Link the revision into the repository. - if err := rs.link(revision); err != nil { - return "", err + if err := rs.blobStore.linkBlob(ctx, revision); err != nil { + return distribution.Descriptor{}, err } // Grab each json signature and store them. signatures, err := sm.Signatures() if err != nil { - return "", err + return distribution.Descriptor{}, err } - if err := rs.Signatures().Put(revision, signatures...); err != nil { - return "", err + if err := rs.repository.Signatures().Put(revision.Digest, signatures...); err != nil { + return distribution.Descriptor{}, err } return revision, nil } -// link links the revision into the repository. -func (rs *revisionStore) link(revision digest.Digest) error { - revisionPath, err := rs.pm.path(manifestRevisionLinkPathSpec{ - name: rs.Name(), - revision: revision, - }) - - if err != nil { - return err - } - - if exists, err := exists(rs.driver, revisionPath); err != nil { - return err - } else if exists { - // Revision has already been linked! - return nil - } - - return rs.blobStore.link(revisionPath, revision) +// enumerate returns an array of digests of all found manifest revisions. +func (rs *revisionStore) enumerate() ([]digest.Digest, error) { + return EnumerateAllBlobs(rs.blobStore, rs.ctx) } -// delete removes the specified manifest revision from storage. 
-func (rs *revisionStore) delete(revision digest.Digest) error { - revisionPath, err := rs.pm.path(manifestRevisionPathSpec{ - name: rs.Name(), - revision: revision, - }) - - if err != nil { - return err - } - - return rs.driver.Delete(revisionPath) +func (rs *revisionStore) delete(ctx context.Context, revision digest.Digest) error { + return rs.blobStore.Delete(ctx, revision) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go index 7094b69e274f..9ed70d240941 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go @@ -10,14 +10,24 @@ import ( ) type signatureStore struct { - *repository + repository *repository + blobStore *blobStore + ctx context.Context +} + +func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobStore) *signatureStore { + return &signatureStore{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + } } var _ distribution.SignatureService = &signatureStore{} func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { - signaturesPath, err := s.pm.path(manifestSignaturesPathSpec{ - name: s.Name(), + signaturesPath, err := s.blobStore.pm.path(manifestSignaturesPathSpec{ + name: s.repository.Name(), revision: dgst, }) @@ -30,7 +40,7 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { // can be eliminated by implementing listAll on drivers. signaturesPath = path.Join(signaturesPath, "sha256") - signaturePaths, err := s.driver.List(signaturesPath) + signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath) if err != nil { return nil, err } @@ -43,27 +53,32 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { } ch := make(chan result) + bs := s.linkedBlobStore(s.ctx, dgst) for i, sigPath := range signaturePaths { - // Append the link portion - sigPath = path.Join(sigPath, "link") + sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) + if err != nil { + context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath) + continue + } wg.Add(1) - go func(idx int, sigPath string) { + go func(idx int, sigdgst digest.Digest) { defer wg.Done() context.GetLogger(s.ctx). - Debugf("fetching signature from %q", sigPath) + Debugf("fetching signature %q", sigdgst) r := result{index: idx} - if p, err := s.blobStore.linked(sigPath); err != nil { + + if p, err := bs.Get(s.ctx, sigdgst); err != nil { context.GetLogger(s.ctx). - Errorf("error fetching signature from %q: %v", sigPath, err) + Errorf("error fetching signature %q: %v", sigdgst, err) r.err = err } else { r.signature = p } ch <- r - }(i, sigPath) + }(i, sigdgst) } done := make(chan struct{}) go func() { @@ -90,26 +105,56 @@ loop: return signatures, err } +// Enumerate returns an array of digests of manifest signatures. 
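// Hypothetical internal sketch (not in the upstream diff; signatureStore is
// unexported, so this could only live inside package storage): combining the
// Enumerate method defined just below with Delete, further down, to prune
// all stored signatures of one manifest revision. The helper name is invented.
func pruneSignatures(s *signatureStore, revision digest.Digest) error {
	sigs, err := s.Enumerate(revision) // digests of the stored signature blobs
	if err != nil {
		return err
	}
	for _, sigDgst := range sigs {
		if err := s.Delete(revision, sigDgst); err != nil {
			return err
		}
	}
	return nil
}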
+func (s *signatureStore) Enumerate(manifestReference digest.Digest) ([]digest.Digest, error) { + return EnumerateAllBlobs(s.linkedBlobStore(s.ctx, manifestReference), s.ctx) +} + func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { + bs := s.linkedBlobStore(s.ctx, dgst) for _, signature := range signatures { - signatureDigest, err := s.blobStore.put(signature) - if err != nil { + if _, err := bs.Put(s.ctx, "application/json", signature); err != nil { return err } + } + return nil +} - signaturePath, err := s.pm.path(manifestSignatureLinkPathSpec{ - name: s.Name(), - revision: dgst, - signature: signatureDigest, - }) +// Delete removes a signature's link of given manifest revision identified by +// digest. +func (s *signatureStore) Delete(revision, dgst digest.Digest) error { + return s.linkedBlobStore(s.ctx, revision).Delete(s.ctx, dgst) +} - if err != nil { - return err - } +// linkedBlobStore returns the namedBlobStore of the signatures for the +// manifest with the given digest. Effectively, each signature link path +// layout is a unique linked blob store. +func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore { + linkpath := func(pm *pathMapper, name string, dgst digest.Digest) (string, error) { + return pm.path(manifestSignatureLinkPathSpec{ + name: name, + revision: revision, + signature: dgst, + }) + } + linkRootPath := func(pm *pathMapper, name string) (string, error) { + return pm.path(manifestSignaturesPathSpec{ + name: name, + revision: revision, + }) + } - if err := s.blobStore.link(signaturePath, signatureDigest); err != nil { - return err - } + return &linkedBlobStore{ + ctx: ctx, + repository: s.repository, + blobStore: s.blobStore, + blobAccessController: &linkedBlobStatter{ + blobStore: s.blobStore, + repository: s.repository, + linkPathFns: []linkPathFunc{linkpath}, + removeParentsOnDelete: true, + }, + linkPathFns: []linkPathFunc{linkpath}, + blobsRootPathFns: []blobsRootPathFunc{linkRootPath}, } - return nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go index 616df9526e10..a7ca3301a542 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go @@ -4,30 +4,33 @@ import ( "path" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" ) // tagStore provides methods to manage manifest tags in a backend storage driver. type tagStore struct { - *repository + repository *repository + blobStore *blobStore + ctx context.Context } // tags lists the manifest tags for the specified repository. 
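// Hypothetical internal sketch (not in the upstream diff; tagStore is
// unexported, so this could only live inside package storage): combining the
// tags and resolve methods defined just below to map every tag in a
// repository to its manifest digest. The helper name is invented.
func resolveAllTags(ts *tagStore) (map[string]digest.Digest, error) {
	tags, err := ts.tags()
	if err != nil {
		return nil, err
	}
	resolved := make(map[string]digest.Digest, len(tags))
	for _, tag := range tags {
		dgst, err := ts.resolve(tag)
		if err != nil {
			return nil, err
		}
		resolved[tag] = dgst
	}
	return resolved, nil
}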
func (ts *tagStore) tags() ([]string, error) { - p, err := ts.pm.path(manifestTagPathSpec{ - name: ts.name, + p, err := ts.blobStore.pm.path(manifestTagPathSpec{ + name: ts.repository.Name(), }) if err != nil { return nil, err } var tags []string - entries, err := ts.driver.List(p) + entries, err := ts.blobStore.driver.List(ts.ctx, p) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return nil, distribution.ErrRepositoryUnknown{Name: ts.name} + return nil, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()} default: return nil, err } @@ -44,15 +47,15 @@ func (ts *tagStore) tags() ([]string, error) { // exists returns true if the specified manifest tag exists in the repository. func (ts *tagStore) exists(tag string) (bool, error) { - tagPath, err := ts.pm.path(manifestTagCurrentPathSpec{ - name: ts.Name(), + tagPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{ + name: ts.repository.Name(), tag: tag, }) if err != nil { return false, err } - exists, err := exists(ts.driver, tagPath) + exists, err := exists(ts.ctx, ts.blobStore.driver, tagPath) if err != nil { return false, err } @@ -63,18 +66,8 @@ func (ts *tagStore) exists(tag string) (bool, error) { // tag tags the digest with the given tag, updating the the store to point at // the current tag. The digest must point to a manifest. func (ts *tagStore) tag(tag string, revision digest.Digest) error { - indexEntryPath, err := ts.pm.path(manifestTagIndexEntryLinkPathSpec{ - name: ts.Name(), - tag: tag, - revision: revision, - }) - - if err != nil { - return err - } - - currentPath, err := ts.pm.path(manifestTagCurrentPathSpec{ - name: ts.Name(), + currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{ + name: ts.repository.Name(), tag: tag, }) @@ -82,77 +75,68 @@ func (ts *tagStore) tag(tag string, revision digest.Digest) error { return err } + nbs := ts.linkedBlobStore(ts.ctx, tag) // Link into the index - if err := ts.blobStore.link(indexEntryPath, revision); err != nil { + if err := nbs.linkBlob(ts.ctx, distribution.Descriptor{Digest: revision}); err != nil { return err } // Overwrite the current link - return ts.blobStore.link(currentPath, revision) + return ts.blobStore.link(ts.ctx, currentPath, revision) } // resolve the current revision for name and tag. func (ts *tagStore) resolve(tag string) (digest.Digest, error) { - currentPath, err := ts.pm.path(manifestTagCurrentPathSpec{ - name: ts.Name(), + currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{ + name: ts.repository.Name(), tag: tag, }) - if err != nil { return "", err } - if exists, err := exists(ts.driver, currentPath); err != nil { - return "", err - } else if !exists { - return "", distribution.ErrManifestUnknown{Name: ts.Name(), Tag: tag} - } - - revision, err := ts.blobStore.readlink(currentPath) + revision, err := ts.blobStore.readlink(ts.ctx, currentPath) if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return "", distribution.ErrManifestUnknown{Name: ts.repository.Name(), Tag: tag} + } + return "", err } return revision, nil } -// revisions returns all revisions with the specified name and tag. -func (ts *tagStore) revisions(tag string) ([]digest.Digest, error) { - manifestTagIndexPath, err := ts.pm.path(manifestTagIndexPathSpec{ - name: ts.Name(), - tag: tag, - }) - - if err != nil { - return nil, err - } - - // TODO(stevvooe): Need to append digest alg to get listing of revisions. 
- manifestTagIndexPath = path.Join(manifestTagIndexPath, "sha256") - - entries, err := ts.driver.List(manifestTagIndexPath) - if err != nil { - return nil, err - } - - var revisions []digest.Digest - for _, entry := range entries { - revisions = append(revisions, digest.NewDigestFromHex("sha256", path.Base(entry))) - } - - return revisions, nil -} - // delete removes the tag from repository, including the history of all // revisions that have the specified tag. func (ts *tagStore) delete(tag string) error { - tagPath, err := ts.pm.path(manifestTagPathSpec{ - name: ts.Name(), + tagPath, err := ts.blobStore.pm.path(manifestTagPathSpec{ + name: ts.repository.Name(), tag: tag, }) if err != nil { return err } - return ts.driver.Delete(tagPath) + return ts.blobStore.driver.Delete(ts.ctx, tagPath) +} + +// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one +// to index manifest blobs by tag name. While the tag store doesn't map +// precisely to the linked blob store, using this ensures the links are +// managed via the same code path. +func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { + return &linkedBlobStore{ + blobStore: ts.blobStore, + repository: ts.repository, + ctx: ctx, + linkPathFns: []linkPathFunc{func(pm *pathMapper, name string, dgst digest.Digest) (string, error) { + return pm.path(manifestTagIndexEntryLinkPathSpec{ + name: name, + tag: tag, + revision: dgst, + }) + }}, + } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go new file mode 100644 index 000000000000..773d7ba0be67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/util.go @@ -0,0 +1,21 @@ +package storage + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// Exists provides a utility method to test whether or not a path exists in +// the given driver. +func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { + if _, err := drv.Stat(ctx, path); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return false, nil + default: + return false, err + } + } + + return true, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go new file mode 100644 index 000000000000..46b8096b3f55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go @@ -0,0 +1,67 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// vacuum contains functions for cleaning up repositories and blobs +// These functions will only reliably work on strongly consistent +// storage systems. 
+// https://en.wikipedia.org/wiki/Consistency_model + +// NewVacuum creates a new Vacuum +func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { + return Vacuum{ + ctx: ctx, + driver: driver, + pm: defaultPathMapper, + } +} + +// Vacuum removes content from the filesystem +type Vacuum struct { + pm *pathMapper + driver driver.StorageDriver + ctx context.Context +} + +// RemoveBlob removes a blob from the filesystem +func (v Vacuum) RemoveBlob(dgst string) error { + d, err := digest.ParseDigest(dgst) + if err != nil { + return err + } + + blobPath, err := v.pm.path(blobDataPathSpec{digest: d}) + if err != nil { + return err + } + context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + err = v.driver.Delete(v.ctx, blobPath) + if err != nil { + return err + } + + return nil +} + +// RemoveRepository removes a repository directory from the +// filesystem +func (v Vacuum) RemoveRepository(repoName string) error { + rootForRepository, err := v.pm.path(repositoriesRootPathSpec{}) + if err != nil { + return err + } + repoDir := path.Join(rootForRepository, repoName) + context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) + err = v.driver.Delete(v.ctx, repoDir) + if err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go index 7b958d879ce3..94c2ce19b631 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go @@ -3,15 +3,29 @@ package storage import ( "errors" "fmt" + "io" + "sort" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" storageDriver "github.com/docker/distribution/registry/storage/driver" ) +const ( + enumBufLength = 2048 +) + // SkipDir is used as a return value from onFileFunc to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. var ErrSkipDir = errors.New("skip this directory") +// ErrStopWalking is used as a return value from onFileFunc to indicate that +// Walk should terminate without an error. If used, Walk will terminate with +// this error. 
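// A minimal usage sketch for the vacuum added above (not part of the
// upstream diff). The inmemory driver and the digest/repository values are
// assumptions; per the comment on the vacuum package, this is only reliable
// on strongly consistent backends.
package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	v := storage.NewVacuum(ctx, inmemory.New())

	// Remove a single blob by its digest string (sha256 of "" here).
	if err := v.RemoveBlob("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); err != nil {
		// e.g. a PathNotFoundError if the blob was never written
		fmt.Println("remove blob:", err)
	}

	// Remove a whole repository directory.
	if err := v.RemoveRepository("foo/bar"); err != nil {
		fmt.Println("remove repository:", err)
	}
}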
+var ErrStopWalking = errors.New("stop walking") + // WalkFn is called once per file by Walk // If the returned error is ErrSkipDir and fileInfo refers // to a directory, the directory will not be entered and Walk @@ -20,24 +34,61 @@ type WalkFn func(fileInfo storageDriver.FileInfo) error // Walk traverses a filesystem defined within driver, starting // from the given path, calling f on each file -func Walk(driver storageDriver.StorageDriver, from string, f WalkFn) error { - children, err := driver.List(from) +func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) if err != nil { return err } for _, child := range children { - fileInfo, err := driver.Stat(child) + fileInfo, err := driver.Stat(ctx, child) if err != nil { return err } - err = f(fileInfo) - skipDir := (err == ErrSkipDir) - if err != nil && !skipDir { + switch f(fileInfo) { + case ErrSkipDir: + continue + case ErrStopWalking: + return ErrStopWalking + case nil: + if fileInfo.IsDir() { + if err := Walk(ctx, driver, child, f); err == ErrStopWalking { + return err + } + } + default: return err } + } + return nil +} - if fileInfo.IsDir() && !skipDir { - Walk(driver, child, f) +// WalkSorted traverses a filesystem defined within driver, starting +// from the given path, calling f on each file. Directories are walked +// in lexicographical order. +func WalkSorted(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) + if err != nil { + return err + } + sort.Strings(children) + for _, child := range children { + fileInfo, err := driver.Stat(ctx, child) + if err != nil { + return err + } + switch f(fileInfo) { + case ErrSkipDir: + continue + case ErrStopWalking: + return ErrStopWalking + case nil: + if fileInfo.IsDir() { + if err := WalkSorted(ctx, driver, child, f); err == ErrStopWalking { + return err + } + } + default: + return err } } return nil @@ -48,3 +99,42 @@ func Walk(driver storageDriver.StorageDriver, from string, f WalkFn) error { func pushError(errors []error, path string, err error) []error { return append(errors, fmt.Errorf("%s: %s", path, err)) } + +// EnumerateAllBlobs is a utility function that returns all the blob digests +// found in given blob store. It should be used with care because of memory and +// time complexity. +func EnumerateAllBlobs(be distribution.BlobEnumerator, ctx context.Context) ([]digest.Digest, error) { + // prepare an array of equally-sized buffers to collect all the digests + // TODO: set a limit to bufs size? 
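// A sketch of early termination with ErrStopWalking (not part of the
// upstream diff). The inmemory driver and the file layout are assumptions;
// note that Walk itself returns ErrStopWalking when the callback uses it.
package main

import (
	"fmt"
	"path"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	d := inmemory.New()
	if err := d.PutContent(ctx, "/repo/_uploads/1234/startedat", []byte("")); err != nil {
		panic(err)
	}

	var found string
	err := storage.Walk(ctx, d, "/", func(fi driver.FileInfo) error {
		if !fi.IsDir() && path.Base(fi.Path()) == "startedat" {
			found = fi.Path()
			return storage.ErrStopWalking // stop the traversal here
		}
		return nil
	})
	if err == storage.ErrStopWalking {
		fmt.Println("found:", found)
	}
}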
+ bufs := make([][]digest.Digest, 0, 1) + total := 0 + last := "" + + for { + buf := make([]digest.Digest, enumBufLength) + n, err := be.Enumerate(ctx, buf, last) + total += n + if n > 0 { + bufs = append(bufs, buf) + last = buf[n-1].String() + } + if err != nil { + if err != io.EOF { + return nil, err + } + break + } + } + + // join all buffers + res := make([]digest.Digest, total) + for i := range bufs { + if i < len(bufs)-1 { + copy(res[enumBufLength*i:], bufs[i]) + } else { + copy(res[enumBufLength*i:], bufs[i][:total-enumBufLength*i]) + } + } + + return res, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go index 22b91b35627b..40b8547cf214 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go @@ -4,17 +4,19 @@ import ( "fmt" "testing" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" ) -func testFS(t *testing.T) (driver.StorageDriver, map[string]string) { +func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { d := inmemory.New() c := []byte("") - if err := d.PutContent("/a/b/c/d", c); err != nil { + ctx := context.Background() + if err := d.PutContent(ctx, "/a/b/c/d", c); err != nil { t.Fatalf("Unable to put to inmemory fs") } - if err := d.PutContent("/a/b/c/e", c); err != nil { + if err := d.PutContent(ctx, "/a/b/c/e", c); err != nil { t.Fatalf("Unable to put to inmemory fs") } @@ -26,20 +28,20 @@ func testFS(t *testing.T) (driver.StorageDriver, map[string]string) { "/a/b/c/e": "file", } - return d, expected + return d, expected, ctx } func TestWalkErrors(t *testing.T) { - d, expected := testFS(t) + d, expected, ctx := testFS(t) fileCount := len(expected) - err := Walk(d, "", func(fileInfo driver.FileInfo) error { + err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error { return nil }) if err == nil { t.Error("Expected invalid root err") } - err = Walk(d, "/", func(fileInfo driver.FileInfo) error { + err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { // error on the 2nd file if fileInfo.Path() == "/a/b" { return fmt.Errorf("Early termination") @@ -54,7 +56,7 @@ func TestWalkErrors(t *testing.T) { t.Error(err.Error()) } - err = Walk(d, "/nonexistant", func(fileInfo driver.FileInfo) error { + err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error { return nil }) if err == nil { @@ -64,8 +66,8 @@ func TestWalkErrors(t *testing.T) { } func TestWalk(t *testing.T) { - d, expected := testFS(t) - err := Walk(d, "/", func(fileInfo driver.FileInfo) error { + d, expected, ctx := testFS(t) + err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() filetype, ok := expected[filePath] if !ok { @@ -93,8 +95,8 @@ func TestWalk(t *testing.T) { } func TestWalkSkipDir(t *testing.T) { - d, expected := testFS(t) - err := Walk(d, "/", func(fileInfo driver.FileInfo) error { + d, expected, ctx := testFS(t) + err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() if filePath == "/a/b" { // skip processing /a/b/c and /a/b/c/d diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go 
b/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go index fa118cd1b11b..00cd8a6ac291 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go @@ -6,6 +6,7 @@ import ( "io" "io/ioutil" "net/http" + "net/url" "sort" "strings" ) @@ -20,8 +21,6 @@ type RequestResponseMapping struct { Response Response } -// TODO(bbland): add support for request headers - // Request is a simplified http.Request object type Request struct { // Method is the http method of the request, for example GET @@ -35,23 +34,43 @@ type Request struct { // Body is the byte contents of the http request Body []byte + + // Headers are the header for this request + Headers http.Header } func (r Request) String() string { queryString := "" if len(r.QueryParams) > 0 { - queryString = "?" keys := make([]string, 0, len(r.QueryParams)) + queryParts := make([]string, 0, len(r.QueryParams)) for k := range r.QueryParams { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { - queryString += strings.Join(r.QueryParams[k], "&") + "&" + for _, val := range r.QueryParams[k] { + queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) + } } - queryString = queryString[:len(queryString)-1] + queryString = "?" + strings.Join(queryParts, "&") } - return fmt.Sprintf("%s %s%s\n%s", r.Method, r.Route, queryString, r.Body) + var headers []string + if len(r.Headers) > 0 { + var headerKeys []string + for k := range r.Headers { + headerKeys = append(headerKeys, k) + } + sort.Strings(headerKeys) + + for _, k := range headerKeys { + for _, val := range r.Headers[k] { + headers = append(headers, fmt.Sprintf("%s:%s", k, val)) + } + } + + } + return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) } // Response is a simplified http.Response object @@ -98,6 +117,14 @@ func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { Route: r.URL.Path, QueryParams: r.URL.Query(), Body: requestBody, + Headers: make(map[string][]string), + } + + // Add headers of interest here + for k, v := range r.Header { + if k == "If-None-Match" { + request.Headers[k] = v + } } responses, ok := app.responseMap[request.String()] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go new file mode 100644 index 000000000000..d433ccaf512d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go @@ -0,0 +1,126 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. +package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. 
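// Usage sketch for the uuid package (not part of the upstream diff). On the
// retry arithmetic in Generate below: with a 10ms step and retries 0..9,
// the total sleep is 10ms * (0+1+...+9) = 450ms, matching the stated bound.
package main

import (
	"fmt"

	"github.com/docker/distribution/uuid"
)

func main() {
	u := uuid.Generate() // version 4, read from crypto/rand with bounded retry
	fmt.Println(u.String())

	parsed, err := uuid.Parse(u.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == u) // true: String/Parse round-trips all 16 bytes
}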
+type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. +func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. + p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. 
+ return true + } + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go new file mode 100644 index 000000000000..09c3a7bb4def --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go @@ -0,0 +1,48 @@ +package uuid + +import ( + "testing" +) + +const iterations = 1000 + +func TestUUID4Generation(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + if u[6]&0xf0 != 0x40 { + t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) + } + + if u[8]&0xc0 != 0x80 { + t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) + } + } +} + +func TestParseAndEquality(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + parsed, err := Parse(u.String()) + if err != nil { + t.Fatalf("error parsing uuid %v: %v", u, err) + } + + if parsed != u { + t.Fatalf("parsing round trip failed: %v != %v", parsed, u) + } + } + + for _, c := range []string{ + "bad", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format + " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space + "20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space + "00000000-0000-0000-0000-x00000000000", // out of range character + } { + if _, err := Parse(c); err == nil { + t.Fatalf("parsing %q should have failed", c) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go index 3a542f9b6fe7..450d15c28f5f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go @@ -8,4 +8,4 @@ var Package = "github.com/docker/distribution" // the latest release tag by hand, always suffixed by "+unknown". During // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. -var Version = "v2.0.0+unknown" +var Version = "v2.1.0+unknown" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh b/Godeps/_workspace/src/github.com/docker/distribution/version/version.sh old mode 100644 new mode 100755 diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/.gitignore b/Godeps/_workspace/src/github.com/stevvooe/resumable/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/LICENSE b/Godeps/_workspace/src/github.com/stevvooe/resumable/LICENSE new file mode 100644 index 000000000000..2815cc36c9d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/README.md b/Godeps/_workspace/src/github.com/stevvooe/resumable/README.md new file mode 100644 index 000000000000..d2d3fb89ee3c --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/README.md @@ -0,0 +1,6 @@ +# go-crypto +A Subset of the Go `crypto` Package with a Resumable Hash Interface + +### Documentation + +GoDocs: http://godoc.org/github.com/stevvooe/resumable diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/resumable.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/resumable.go new file mode 100644 index 000000000000..af4488f1246c --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/resumable.go @@ -0,0 +1,43 @@ +// Package resumable registers resumable versions of hash functions. Resumable +// varieties of hash functions are available via the standard crypto package. +// Support can be checked by type assertion against the resumable.Hash +// interface. +// +// While one can use these sub-packages directly, it makes more sense to +// register them using side-effect imports: +// +// import _ "github.com/stevvooe/resumable/sha256" +// +// This will make the resumable hashes available to the application through +// the standard crypto package. For example, if a new sha256 is required, one +// should use the following: +// +// h := crypto.SHA256.New() +// +// Such a features allows one to control the inclusion of resumable hash +// support in a single file. Applications that require the resumable hash +// implementation can type switch to detect support, while other parts of the +// application can be completely oblivious to the presence of the alternative +// hash functions. +// +// Also note that the implementations available in this package are completely +// untouched from their Go counterparts in the standard library. Only an extra +// file is added to each package to implement the extra resumable hash +// functions. 
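// A usage sketch for the pattern described in the package doc below (not
// part of the upstream diff): the side-effect import re-registers SHA-256,
// so crypto.SHA256.New() yields a hash that can be snapshotted and resumed.
// The input strings are illustrative.
package main

import (
	"crypto"
	"fmt"

	"github.com/stevvooe/resumable"
	_ "github.com/stevvooe/resumable/sha256" // registers resumable SHA-224/256
)

func main() {
	h := crypto.SHA256.New()
	h.Write([]byte("hello, "))

	rh, ok := h.(resumable.Hash)
	if !ok {
		fmt.Println("resumable support not compiled in")
		return
	}

	state, err := rh.State() // snapshot mid-stream
	if err != nil {
		panic(err)
	}

	h2 := crypto.SHA256.New()
	if err := h2.(resumable.Hash).Restore(state); err != nil {
		panic(err)
	}
	h2.Write([]byte("world"))
	fmt.Printf("%x\n", h2.Sum(nil)) // same digest as sha256("hello, world")
}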
+package resumable + +import "hash" + +// Hash is the common interface implemented by all resumable hash functions. +type Hash interface { + hash.Hash + + // Len returns the number of bytes written to the Hash so far. + Len() int64 + + // State returns a snapshot of the state of the Hash. + State() ([]byte, error) + + // Restore resets the Hash to the given state. + Restore(state []byte) error +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go new file mode 100644 index 000000000000..426d78adc3ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go @@ -0,0 +1,53 @@ +package sha256 + +import ( + "bytes" + "encoding/gob" + + // import to ensure that our init function runs after the standard package + _ "crypto/sha256" +) + +// Len returns the number of bytes which have been written to the digest. +func (d *digest) Len() int64 { + return int64(d.len) +} + +// State returns a snapshot of the state of the digest. +func (d *digest) State() ([]byte, error) { + var buf bytes.Buffer + encoder := gob.NewEncoder(&buf) + + // We encode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + d.h, d.x, d.nx, d.len, d.is224, + } + + for _, val := range vals { + if err := encoder.Encode(val); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +// Restore resets the digest to the given state. +func (d *digest) Restore(state []byte) error { + decoder := gob.NewDecoder(bytes.NewReader(state)) + + // We decode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + &d.h, &d.x, &d.nx, &d.len, &d.is224, + } + + for _, val := range vals { + if err := decoder.Decode(val); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256.go new file mode 100644 index 000000000000..d84cebf2ff2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256.go @@ -0,0 +1,193 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined +// in FIPS 180-4. +package sha256 + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.SHA224, New224) + crypto.RegisterHash(crypto.SHA256, New) +} + +// The size of a SHA256 checksum in bytes. +const Size = 32 + +// The size of a SHA224 checksum in bytes. +const Size224 = 28 + +// The blocksize of SHA256 and SHA224 in bytes. +const BlockSize = 64 + +const ( + chunk = 64 + init0 = 0x6A09E667 + init1 = 0xBB67AE85 + init2 = 0x3C6EF372 + init3 = 0xA54FF53A + init4 = 0x510E527F + init5 = 0x9B05688C + init6 = 0x1F83D9AB + init7 = 0x5BE0CD19 + init0_224 = 0xC1059ED8 + init1_224 = 0x367CD507 + init2_224 = 0x3070DD17 + init3_224 = 0xF70E5939 + init4_224 = 0xFFC00B31 + init5_224 = 0x68581511 + init6_224 = 0x64F98FA7 + init7_224 = 0xBEFA4FA4 +) + +// digest represents the partial evaluation of a checksum. 
+type digest struct { + h [8]uint32 + x [chunk]byte + nx int + len uint64 + is224 bool // mark if this digest is SHA-224 +} + +func (d *digest) Reset() { + if !d.is224 { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + } else { + d.h[0] = init0_224 + d.h[1] = init1_224 + d.h[2] = init2_224 + d.h[3] = init3_224 + d.h[4] = init4_224 + d.h[5] = init5_224 + d.h[6] = init6_224 + d.h[7] = init7_224 + } + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the SHA256 checksum. +func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +// New224 returns a new hash.Hash computing the SHA224 checksum. +func New224() hash.Hash { + d := new(digest) + d.is224 = true + d.Reset() + return d +} + +func (d *digest) Size() int { + if !d.is224 { + return Size + } + return Size224 +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := *d0 + hash := d.checkSum() + if d.is224 { + return append(in, hash[:Size224]...) + } + return append(in, hash[:]...) +} + +func (d *digest) checkSum() [Size]byte { + len := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (56 - 8*i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + h := d.h[:] + if d.is224 { + h = d.h[:7] + } + + var digest [Size]byte + for i, s := range h { + digest[i*4] = byte(s >> 24) + digest[i*4+1] = byte(s >> 16) + digest[i*4+2] = byte(s >> 8) + digest[i*4+3] = byte(s) + } + + return digest +} + +// Sum256 returns the SHA256 checksum of the data. +func Sum256(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} + +// Sum224 returns the SHA224 checksum of the data. +func Sum224(data []byte) (sum224 [Size224]byte) { + var d digest + d.is224 = true + d.Reset() + d.Write(data) + sum := d.checkSum() + copy(sum224[:], sum[:Size224]) + return +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256_test.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256_test.go new file mode 100644 index 000000000000..1d883d390595 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256_test.go @@ -0,0 +1,176 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// SHA256 hash algorithm. See FIPS 180-2. 
+ +package sha256 + +import ( + "fmt" + "io" + "testing" +) + +type sha256Test struct { + out string + in string +} + +var golden = []sha256Test{ + {"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", ""}, + {"ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb", "a"}, + {"fb8e20fc2e4c3f248c60c39bd652f3c1347298bb977b8b4d5903b85055620603", "ab"}, + {"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc"}, + {"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", "abcd"}, + {"36bbe50ed96841d10443bcb670d6554f0a34b761be67ec9c4a8ad2c0c44ca42c", "abcde"}, + {"bef57ec7f53a6d40beb640a780a639c83bc29ac8a9816f1fc6c5c6dcd93c4721", "abcdef"}, + {"7d1a54127b222502f5b79b5fb0803061152a44f92b37e23c6527baf665d4da9a", "abcdefg"}, + {"9c56cc51b374c3ba189210d5b6d4bf57790d351c96c47c02190ecf1e430635ab", "abcdefgh"}, + {"19cc02f26df43cc571bc9ed7b0c4d29224a3ec229529221725ef76d021c8326f", "abcdefghi"}, + {"72399361da6a7754fec986dca5b7cbaf1c810a28ded4abaf56b2106d06cb78b0", "abcdefghij"}, + {"a144061c271f152da4d151034508fed1c138b8c976339de229c3bb6d4bbb4fce", "Discard medicine more than two years old."}, + {"6dae5caa713a10ad04b46028bf6dad68837c581616a1589a265a11288d4bb5c4", "He who has a shady past knows that nice guys finish last."}, + {"ae7a702a9509039ddbf29f0765e70d0001177914b86459284dab8b348c2dce3f", "I wouldn't marry him with a ten foot pole."}, + {"6748450b01c568586715291dfa3ee018da07d36bb7ea6f180c1af6270215c64f", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"14b82014ad2b11f661b5ae6a99b75105c2ffac278cd071cd6c05832793635774", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"7102cfd76e2e324889eece5d6c41921b1e142a4ac5a2692be78803097f6a48d8", "Nepal premier won't resign."}, + {"23b1018cd81db1d67983c5f7417c44da9deb582459e378d7a068552ea649dc9f", "For every action there is an equal and opposite government program."}, + {"8001f190dfb527261c4cfcab70c98e8097a7a1922129bc4096950e57c7999a5a", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"8c87deb65505c3993eb24b7a150c4155e82eee6960cf0c3a8114ff736d69cad5", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"bfb0a67a19cdec3646498b2e0f751bddc41bba4b7f30081b0b932aad214d16d7", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"7f9a0b9bf56332e19f5a0ec1ad9c1425a153da1c624868fda44561d6b74daf36", "size: a.out: bad magic"}, + {"b13f81b8aad9e3666879af19886140904f7f429ef083286195982a7588858cfc", "The major problem is with sendmail. -Mark Horton"}, + {"b26c38d61519e894480c70c8374ea35aa0ad05b2ae3d6674eec5f52a69305ed4", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"049d5e26d4f10222cd841a119e38bd8d2e0d1129728688449575d4ff42b842c1", "If the enemy is within range, then so are you."}, + {"0e116838e3cc1c1a14cd045397e29b4d087aa11b0853fc69ec82e90330d60949", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"4f7d8eb5bcf11de2a56b971021a444aa4eafd6ecd0f307b5109e4e776cd0fe46", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"61c0cc4c4bd8406d5120b3fb4ebc31ce87667c162f29468b3c779675a85aebce", "C is as portable as Stonehedge!!"}, + {"1fb2eb3688093c4a3f80cd87a5547e2ce940a4f923243a79a2a1e242220693ac", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. 
Huxley"}, + {"395585ce30617b62c80b93e8208ce866d4edc811a177fdb4b82d3911d8696423", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"4f9b189a13d030838269dce846b16a1ce9ce81fe63e65de2f636863336a98fe6", "How can you write a big system without C++? -Paul Glick"}, +} + +var golden224 = []sha256Test{ + {"d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", ""}, + {"abd37534c7d9a2efb9465de931cd7055ffdb8879563ae98078d6d6d5", "a"}, + {"db3cda86d4429a1d39c148989566b38f7bda0156296bd364ba2f878b", "ab"}, + {"23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", "abc"}, + {"a76654d8e3550e9a2d67a0eeb6c67b220e5885eddd3fde135806e601", "abcd"}, + {"bdd03d560993e675516ba5a50638b6531ac2ac3d5847c61916cfced6", "abcde"}, + {"7043631cb415556a275a4ebecb802c74ee9f6153908e1792a90b6a98", "abcdef"}, + {"d1884e711701ad81abe0c77a3b0ea12e19ba9af64077286c72fc602d", "abcdefg"}, + {"17eb7d40f0356f8598e89eafad5f6c759b1f822975d9c9b737c8a517", "abcdefgh"}, + {"aeb35915346c584db820d2de7af3929ffafef9222a9bcb26516c7334", "abcdefghi"}, + {"d35e1e5af29ddb0d7e154357df4ad9842afee527c689ee547f753188", "abcdefghij"}, + {"19297f1cef7ddc8a7e947f5c5a341e10f7245045e425db67043988d7", "Discard medicine more than two years old."}, + {"0f10c2eb436251f777fbbd125e260d36aecf180411726c7c885f599a", "He who has a shady past knows that nice guys finish last."}, + {"4d1842104919f314cad8a3cd20b3cba7e8ed3e7abed62b57441358f6", "I wouldn't marry him with a ten foot pole."}, + {"a8ba85c6fe0c48fbffc72bbb2f03fcdbc87ae2dc7a56804d1590fb3b", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"5543fbab26e67e8885b1a852d567d1cb8b9bfe42e0899584c50449a9", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"65ca107390f5da9efa05d28e57b221657edc7e43a9a18fb15b053ddb", "Nepal premier won't resign."}, + {"84953962be366305a9cc9b5cd16ed019edc37ac96c0deb3e12cca116", "For every action there is an equal and opposite government program."}, + {"35a189ce987151dfd00b3577583cc6a74b9869eecf894459cb52038d", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"2fc333713983edfd4ef2c0da6fb6d6415afb94987c91e4069eb063e6", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"cbe32d38d577a1b355960a4bc3c659c2dc4670859a19777a875842c4", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"a2dc118ce959e027576413a7b440c875cdc8d40df9141d6ef78a57e1", "size: a.out: bad magic"}, + {"d10787e24052bcff26dc484787a54ed819e4e4511c54890ee977bf81", "The major problem is with sendmail. -Mark Horton"}, + {"62efcf16ab8a893acdf2f348aaf06b63039ff1bf55508c830532c9fb", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"3e9b7e4613c59f58665104c5fa86c272db5d3a2ff30df5bb194a5c99", "If the enemy is within range, then so are you."}, + {"5999c208b8bdf6d471bb7c359ac5b829e73a8211dff686143a4e7f18", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"3b2d67ff54eabc4ef737b14edf87c64280ef582bcdf2a6d56908b405", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"d0733595d20e4d3d6b5c565a445814d1bbb2fd08b9a3b8ffb97930c6", "C is as portable as Stonehedge!!"}, + {"43fb8aeed8a833175c9295c1165415f98c866ef08a4922959d673507", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. 
Huxley"}, + {"ec18e66e93afc4fb1604bc2baedbfd20b44c43d76e65c0996d7851c6", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"86ed2eaa9c75ba98396e5c9fb2f679ecf0ea2ed1e0ee9ceecb4a9332", "How can you write a big system without C++? -Paul Glick"}, +} + +func TestGolden(t *testing.T) { + for i := 0; i < len(golden); i++ { + g := golden[i] + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", g.in, s, g.out) + } + c := New() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha256[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } + for i := 0; i < len(golden224); i++ { + g := golden224[i] + s := fmt.Sprintf("%x", Sum224([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum224 function: sha224(%s) = %s want %s", g.in, s, g.out) + } + c := New224() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha224[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } +} + +func TestSize(t *testing.T) { + c := New() + if got := c.Size(); got != Size { + t.Errorf("Size = %d; want %d", got, Size) + } + c = New224() + if got := c.Size(); got != Size224 { + t.Errorf("New224.Size = %d; want %d", got, Size224) + } +} + +func TestBlockSize(t *testing.T) { + c := New() + if got := c.BlockSize(); got != BlockSize { + t.Errorf("BlockSize = %d want %d", got, BlockSize) + } +} + +var bench = New() +var buf = make([]byte, 8192) + +func benchmarkSize(b *testing.B, size int) { + b.SetBytes(int64(size)) + sum := make([]byte, bench.Size()) + for i := 0; i < b.N; i++ { + bench.Reset() + bench.Write(buf[:size]) + bench.Sum(sum[:0]) + } +} + +func BenchmarkHash8Bytes(b *testing.B) { + benchmarkSize(b, 8) +} + +func BenchmarkHash1K(b *testing.B) { + benchmarkSize(b, 1024) +} + +func BenchmarkHash8K(b *testing.B) { + benchmarkSize(b, 8192) +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block.go new file mode 100644 index 000000000000..ca5efd156a9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block.go @@ -0,0 +1,128 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !386,!amd64 + +// SHA256 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. 
+ +package sha256 + +var _K = []uint32{ + 0x428a2f98, + 0x71374491, + 0xb5c0fbcf, + 0xe9b5dba5, + 0x3956c25b, + 0x59f111f1, + 0x923f82a4, + 0xab1c5ed5, + 0xd807aa98, + 0x12835b01, + 0x243185be, + 0x550c7dc3, + 0x72be5d74, + 0x80deb1fe, + 0x9bdc06a7, + 0xc19bf174, + 0xe49b69c1, + 0xefbe4786, + 0x0fc19dc6, + 0x240ca1cc, + 0x2de92c6f, + 0x4a7484aa, + 0x5cb0a9dc, + 0x76f988da, + 0x983e5152, + 0xa831c66d, + 0xb00327c8, + 0xbf597fc7, + 0xc6e00bf3, + 0xd5a79147, + 0x06ca6351, + 0x14292967, + 0x27b70a85, + 0x2e1b2138, + 0x4d2c6dfc, + 0x53380d13, + 0x650a7354, + 0x766a0abb, + 0x81c2c92e, + 0x92722c85, + 0xa2bfe8a1, + 0xa81a664b, + 0xc24b8b70, + 0xc76c51a3, + 0xd192e819, + 0xd6990624, + 0xf40e3585, + 0x106aa070, + 0x19a4c116, + 0x1e376c08, + 0x2748774c, + 0x34b0bcb5, + 0x391c0cb3, + 0x4ed8aa4a, + 0x5b9cca4f, + 0x682e6ff3, + 0x748f82ee, + 0x78a5636f, + 0x84c87814, + 0x8cc70208, + 0x90befffa, + 0xa4506ceb, + 0xbef9a3f7, + 0xc67178f2, +} + +func block(dig *digest, p []byte) { + var w [64]uint32 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + // Can interlace the computation of w with the + // rounds below if needed for speed. + for i := 0; i < 16; i++ { + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + } + for i := 16; i < 64; i++ { + v1 := w[i-2] + t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) + v2 := w[i-15] + t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 64; i++ { + t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_386.s b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_386.s new file mode 100644 index 000000000000..73ae2bf300eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_386.s @@ -0,0 +1,283 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// SHA256 block routine. See sha256block.go for Go equivalent. 
+// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 63 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVL (index*4)(SI), AX; \ + BSWAPL AX; \ + MOVL AX, (index*4)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) +// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) +#define MSGSCHEDULE1(index) \ + MOVL ((index-2)*4)(BP), AX; \ + MOVL AX, CX; \ + RORL $17, AX; \ + MOVL CX, DX; \ + RORL $19, CX; \ + SHRL $10, DX; \ + MOVL ((index-15)*4)(BP), BX; \ + XORL CX, AX; \ + MOVL BX, CX; \ + XORL DX, AX; \ + RORL $7, BX; \ + MOVL CX, DX; \ + SHRL $3, DX; \ + RORL $18, CX; \ + ADDL ((index-7)*4)(BP), AX; \ + XORL CX, BX; \ + XORL DX, BX; \ + ADDL ((index-16)*4)(BP), BX; \ + ADDL BX, AX; \ + MOVL AX, ((index)*4)(BP) + +// Calculate T1 in AX - uses AX, BX, CX and DX registers. +// Wt is passed in AX. +// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA256T1(const, e, f, g, h) \ + MOVL (h*4)(DI), BX; \ + ADDL AX, BX; \ + MOVL (e*4)(DI), AX; \ + ADDL $const, BX; \ + MOVL (e*4)(DI), CX; \ + RORL $6, AX; \ + MOVL (e*4)(DI), DX; \ + RORL $11, CX; \ + XORL CX, AX; \ + MOVL (e*4)(DI), CX; \ + RORL $25, DX; \ + ANDL (f*4)(DI), CX; \ + XORL AX, DX; \ + MOVL (e*4)(DI), AX; \ + NOTL AX; \ + ADDL DX, BX; \ + ANDL (g*4)(DI), AX; \ + XORL CX, AX; \ + ADDL BX, AX + +// Calculate T2 in BX - uses AX, BX, CX and DX registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA256T2(a, b, c) \ + MOVL (a*4)(DI), AX; \ + MOVL (c*4)(DI), BX; \ + RORL $2, AX; \ + MOVL (a*4)(DI), DX; \ + ANDL (b*4)(DI), BX; \ + RORL $13, DX; \ + MOVL (a*4)(DI), CX; \ + ANDL (c*4)(DI), CX; \ + XORL DX, AX; \ + XORL CX, BX; \ + MOVL (a*4)(DI), DX; \ + MOVL (b*4)(DI), CX; \ + RORL $22, DX; \ + ANDL (a*4)(DI), CX; \ + XORL CX, BX; \ + XORL DX, AX; \ + ADDL AX, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. +// The values for e and a are stored in d and h, ready for rotation. 
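+//
+// Note that rather than physically rotating the eight working variables
+// after each round, the round macros below are invoked with their
+// a..h slot arguments permuted by one position per round, so the
+// rotation costs no instructions.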
+#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA256T1(const, e, f, g, h); \ + MOVL AX, 292(SP); \ + SHA256T2(a, b, c); \ + MOVL 292(SP), AX; \ + ADDL AX, BX; \ + ADDL AX, (d*4)(DI); \ + MOVL BX, (h*4)(DI) + +#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$296-12 + MOVL p_base+4(FP), SI + MOVL p_len+8(FP), DX + SHRL $6, DX + SHLL $6, DX + + LEAL (SI)(DX*1), DI + MOVL DI, 288(SP) + CMPL SI, DI + JEQ end + + LEAL 256(SP), DI // variables + + MOVL dig+0(FP), BP + MOVL (0*4)(BP), AX // a = H0 + MOVL AX, (0*4)(DI) + MOVL (1*4)(BP), BX // b = H1 + MOVL BX, (1*4)(DI) + MOVL (2*4)(BP), CX // c = H2 + MOVL CX, (2*4)(DI) + MOVL (3*4)(BP), DX // d = H3 + MOVL DX, (3*4)(DI) + MOVL (4*4)(BP), AX // e = H4 + MOVL AX, (4*4)(DI) + MOVL (5*4)(BP), BX // f = H5 + MOVL BX, (5*4)(DI) + MOVL (6*4)(BP), CX // g = H6 + MOVL CX, (6*4)(DI) + MOVL (7*4)(BP), DX // h = H7 + MOVL DX, (7*4)(DI) + +loop: + MOVL SP, BP // message schedule + + SHA256ROUND0(0, 0x428a2f98, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND0(1, 0x71374491, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND0(2, 0xb5c0fbcf, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND0(3, 0xe9b5dba5, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND0(4, 0x3956c25b, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND0(5, 0x59f111f1, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND0(6, 0x923f82a4, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND0(7, 0xab1c5ed5, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND0(8, 0xd807aa98, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND0(9, 0x12835b01, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND0(10, 0x243185be, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND0(11, 0x550c7dc3, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND0(12, 0x72be5d74, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND0(13, 0x80deb1fe, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND0(14, 0x9bdc06a7, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND0(15, 0xc19bf174, 1, 2, 3, 4, 5, 6, 7, 0) + + SHA256ROUND1(16, 0xe49b69c1, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(17, 0xefbe4786, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(18, 0x0fc19dc6, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(19, 0x240ca1cc, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(20, 0x2de92c6f, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(21, 0x4a7484aa, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(22, 0x5cb0a9dc, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(23, 0x76f988da, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(24, 0x983e5152, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(25, 0xa831c66d, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(26, 0xb00327c8, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(27, 0xbf597fc7, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(28, 0xc6e00bf3, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(29, 0xd5a79147, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(30, 0x06ca6351, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(31, 0x14292967, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(32, 0x27b70a85, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(33, 0x2e1b2138, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(34, 0x4d2c6dfc, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(35, 0x53380d13, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(36, 0x650a7354, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(37, 0x766a0abb, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(38, 0x81c2c92e, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(39, 0x92722c85, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(40, 0xa2bfe8a1, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(41, 0xa81a664b, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(42, 0xc24b8b70, 6, 7, 0, 1, 2, 3, 4, 5) + 
SHA256ROUND1(43, 0xc76c51a3, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(44, 0xd192e819, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(45, 0xd6990624, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(46, 0xf40e3585, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(47, 0x106aa070, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(48, 0x19a4c116, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(49, 0x1e376c08, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(50, 0x2748774c, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(51, 0x34b0bcb5, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(52, 0x391c0cb3, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(53, 0x4ed8aa4a, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(54, 0x5b9cca4f, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(55, 0x682e6ff3, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(56, 0x748f82ee, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(57, 0x78a5636f, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(58, 0x84c87814, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(59, 0x8cc70208, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(60, 0x90befffa, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(61, 0xa4506ceb, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(62, 0xbef9a3f7, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(63, 0xc67178f2, 1, 2, 3, 4, 5, 6, 7, 0) + + MOVL dig+0(FP), BP + MOVL (0*4)(BP), AX // H0 = a + H0 + ADDL (0*4)(DI), AX + MOVL AX, (0*4)(DI) + MOVL AX, (0*4)(BP) + MOVL (1*4)(BP), BX // H1 = b + H1 + ADDL (1*4)(DI), BX + MOVL BX, (1*4)(DI) + MOVL BX, (1*4)(BP) + MOVL (2*4)(BP), CX // H2 = c + H2 + ADDL (2*4)(DI), CX + MOVL CX, (2*4)(DI) + MOVL CX, (2*4)(BP) + MOVL (3*4)(BP), DX // H3 = d + H3 + ADDL (3*4)(DI), DX + MOVL DX, (3*4)(DI) + MOVL DX, (3*4)(BP) + MOVL (4*4)(BP), AX // H4 = e + H4 + ADDL (4*4)(DI), AX + MOVL AX, (4*4)(DI) + MOVL AX, (4*4)(BP) + MOVL (5*4)(BP), BX // H5 = f + H5 + ADDL (5*4)(DI), BX + MOVL BX, (5*4)(DI) + MOVL BX, (5*4)(BP) + MOVL (6*4)(BP), CX // H6 = g + H6 + ADDL (6*4)(DI), CX + MOVL CX, (6*4)(DI) + MOVL CX, (6*4)(BP) + MOVL (7*4)(BP), DX // H7 = h + H7 + ADDL (7*4)(DI), DX + MOVL DX, (7*4)(DI) + MOVL DX, (7*4)(BP) + + ADDL $64, SI + CMPL SI, 288(SP) + JB loop + +end: + RET diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_amd64.s b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_amd64.s new file mode 100644 index 000000000000..868eaed48965 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_amd64.s @@ -0,0 +1,256 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// SHA256 block routine. See sha256block.go for Go equivalent. 
+// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 63 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVL (index*4)(SI), AX; \ + BSWAPL AX; \ + MOVL AX, (index*4)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) +// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) +#define MSGSCHEDULE1(index) \ + MOVL ((index-2)*4)(BP), AX; \ + MOVL AX, CX; \ + RORL $17, AX; \ + MOVL CX, DX; \ + RORL $19, CX; \ + SHRL $10, DX; \ + MOVL ((index-15)*4)(BP), BX; \ + XORL CX, AX; \ + MOVL BX, CX; \ + XORL DX, AX; \ + RORL $7, BX; \ + MOVL CX, DX; \ + SHRL $3, DX; \ + RORL $18, CX; \ + ADDL ((index-7)*4)(BP), AX; \ + XORL CX, BX; \ + XORL DX, BX; \ + ADDL ((index-16)*4)(BP), BX; \ + ADDL BX, AX; \ + MOVL AX, ((index)*4)(BP) + +// Calculate T1 in AX - uses AX, CX and DX registers. +// h is also used as an accumulator. Wt is passed in AX. +// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA256T1(const, e, f, g, h) \ + ADDL AX, h; \ + MOVL e, AX; \ + ADDL $const, h; \ + MOVL e, CX; \ + RORL $6, AX; \ + MOVL e, DX; \ + RORL $11, CX; \ + XORL CX, AX; \ + MOVL e, CX; \ + RORL $25, DX; \ + ANDL f, CX; \ + XORL AX, DX; \ + MOVL e, AX; \ + NOTL AX; \ + ADDL DX, h; \ + ANDL g, AX; \ + XORL CX, AX; \ + ADDL h, AX + +// Calculate T2 in BX - uses BX, CX, DX and DI registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA256T2(a, b, c) \ + MOVL a, DI; \ + MOVL c, BX; \ + RORL $2, DI; \ + MOVL a, DX; \ + ANDL b, BX; \ + RORL $13, DX; \ + MOVL a, CX; \ + ANDL c, CX; \ + XORL DX, DI; \ + XORL CX, BX; \ + MOVL a, DX; \ + MOVL b, CX; \ + RORL $22, DX; \ + ANDL a, CX; \ + XORL CX, BX; \ + XORL DX, DI; \ + ADDL DI, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. +// The values for e and a are stored in d and h, ready for rotation. 
+#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA256T1(const, e, f, g, h); \ + SHA256T2(a, b, c); \ + MOVL BX, h; \ + ADDL AX, d; \ + ADDL AX, h + +#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$264-32 + MOVQ p_base+8(FP), SI + MOVQ p_len+16(FP), DX + SHRQ $6, DX + SHLQ $6, DX + + LEAQ (SI)(DX*1), DI + MOVQ DI, 256(SP) + CMPQ SI, DI + JEQ end + + MOVQ dig+0(FP), BP + MOVL (0*4)(BP), R8 // a = H0 + MOVL (1*4)(BP), R9 // b = H1 + MOVL (2*4)(BP), R10 // c = H2 + MOVL (3*4)(BP), R11 // d = H3 + MOVL (4*4)(BP), R12 // e = H4 + MOVL (5*4)(BP), R13 // f = H5 + MOVL (6*4)(BP), R14 // g = H6 + MOVL (7*4)(BP), R15 // h = H7 + +loop: + MOVQ SP, BP // message schedule + + SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8) + + SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(33, 0x2e1b2138, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10) + 
SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8) + + MOVQ dig+0(FP), BP + ADDL (0*4)(BP), R8 // H0 = a + H0 + MOVL R8, (0*4)(BP) + ADDL (1*4)(BP), R9 // H1 = b + H1 + MOVL R9, (1*4)(BP) + ADDL (2*4)(BP), R10 // H2 = c + H2 + MOVL R10, (2*4)(BP) + ADDL (3*4)(BP), R11 // H3 = d + H3 + MOVL R11, (3*4)(BP) + ADDL (4*4)(BP), R12 // H4 = e + H4 + MOVL R12, (4*4)(BP) + ADDL (5*4)(BP), R13 // H5 = f + H5 + MOVL R13, (5*4)(BP) + ADDL (6*4)(BP), R14 // H6 = g + H6 + MOVL R14, (6*4)(BP) + ADDL (7*4)(BP), R15 // H7 = h + H7 + MOVL R15, (7*4)(BP) + + ADDQ $64, SI + CMPQ SI, 256(SP) + JB loop + +end: + RET diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_decl.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_decl.go new file mode 100644 index 000000000000..a50c9787108d --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_decl.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 + +package sha256 + +//go:noescape + +func block(dig *digest, p []byte) diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256resume_test.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256resume_test.go new file mode 100644 index 000000000000..2ddbda4356e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256resume_test.go @@ -0,0 +1,74 @@ +package sha256 + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/sha256" // To register the stdlib sha224 and sha256 algs. 
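+	// Import ordering matters: the resumable implementation re-registers
+	// crypto.SHA224 and crypto.SHA256 from its init (mirroring the blank
+	// import pattern in the sha512 package's resume.go), and
+	// TestResumableRegistered below checks that crypto.SHA256.New()
+	// really does return the resumable version.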
+	"hash"
+	"io"
+	"testing"
+
+	"github.com/stevvooe/resumable"
+)
+
+func compareResumableHash(t *testing.T, newResumable func() hash.Hash, newStdlib func() hash.Hash) {
+	// Read 3 kilobytes of random data into a buffer.
+	buf := make([]byte, 3*1024)
+	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+		t.Fatalf("unable to load random data: %s", err)
+	}
+
+	// Use two Hash objects to consume prefixes of the data. One will be
+	// snapshotted and resumed with each additional byte, then both will write
+	// that byte. The digests should be equal after each byte is digested.
+	resumableHasher := newResumable().(resumable.Hash)
+	stdlibHasher := newStdlib()
+
+	// First, assert that the initial digests are the same.
+	if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) {
+		t.Fatalf("initial digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil))
+	}
+
+	multiWriter := io.MultiWriter(resumableHasher, stdlibHasher)
+
+	for i := 1; i <= len(buf); i++ {
+
+		// Write the next byte.
+		multiWriter.Write(buf[i-1 : i])
+
+		if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) {
+			t.Fatalf("digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil))
+		}
+
+		// Snapshot, reset, and restore the chunk hasher.
+		hashState, err := resumableHasher.State()
+		if err != nil {
+			t.Fatalf("unable to get state of hash function: %s", err)
+		}
+		resumableHasher.Reset()
+		if err := resumableHasher.Restore(hashState); err != nil {
+			t.Fatalf("unable to restore state of hash function: %s", err)
+		}
+	}
+}
+
+func TestResumable(t *testing.T) {
+	compareResumableHash(t, New224, sha256.New224)
+	compareResumableHash(t, New, sha256.New)
+}
+
+func TestResumableRegistered(t *testing.T) {
+	for _, hf := range []crypto.Hash{crypto.SHA224, crypto.SHA256} {
+		// Make sure that the hash gets the resumable version from the global
+		// registry in the crypto library.
+		h := hf.New()
+
+		if rh, ok := h.(resumable.Hash); !ok {
+			t.Fatalf("non-resumable hash function registered: %#v %#v", rh, hf)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go
new file mode 100644
index 000000000000..55b433e79bdf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go
@@ -0,0 +1,53 @@
+package sha512
+
+import (
+	"bytes"
+	"encoding/gob"
+
+	// import to ensure that our init function runs after the standard package
+	_ "crypto/sha512"
+)
+
+// Len returns the number of bytes which have been written to the digest.
+func (d *digest) Len() int64 {
+	return int64(d.len)
+}
+
+// State returns a snapshot of the state of the digest.
+func (d *digest) State() ([]byte, error) {
+	var buf bytes.Buffer
+	encoder := gob.NewEncoder(&buf)
+
+	// We encode this way so that we do not have
+	// to export these fields of the digest struct.
+	vals := []interface{}{
+		d.h, d.x, d.nx, d.len, d.is384,
+	}
+
+	for _, val := range vals {
+		if err := encoder.Encode(val); err != nil {
+			return nil, err
+		}
+	}
+
+	return buf.Bytes(), nil
+}
+
+// Restore resets the digest to the given state.
+func (d *digest) Restore(state []byte) error {
+	decoder := gob.NewDecoder(bytes.NewReader(state))
+
+	// We decode this way so that we do not have
+	// to export these fields of the digest struct.
+ vals := []interface{}{ + &d.h, &d.x, &d.nx, &d.len, &d.is384, + } + + for _, val := range vals { + if err := decoder.Decode(val); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512.go new file mode 100644 index 000000000000..bca7a91e22ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512.go @@ -0,0 +1,198 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha512 implements the SHA384 and SHA512 hash algorithms as defined +// in FIPS 180-2. +package sha512 + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.SHA384, New384) + crypto.RegisterHash(crypto.SHA512, New) +} + +// The size of a SHA512 checksum in bytes. +const Size = 64 + +// The size of a SHA384 checksum in bytes. +const Size384 = 48 + +// The blocksize of SHA512 and SHA384 in bytes. +const BlockSize = 128 + +const ( + chunk = 128 + init0 = 0x6a09e667f3bcc908 + init1 = 0xbb67ae8584caa73b + init2 = 0x3c6ef372fe94f82b + init3 = 0xa54ff53a5f1d36f1 + init4 = 0x510e527fade682d1 + init5 = 0x9b05688c2b3e6c1f + init6 = 0x1f83d9abfb41bd6b + init7 = 0x5be0cd19137e2179 + init0_384 = 0xcbbb9d5dc1059ed8 + init1_384 = 0x629a292a367cd507 + init2_384 = 0x9159015a3070dd17 + init3_384 = 0x152fecd8f70e5939 + init4_384 = 0x67332667ffc00b31 + init5_384 = 0x8eb44a8768581511 + init6_384 = 0xdb0c2e0d64f98fa7 + init7_384 = 0x47b5481dbefa4fa4 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + h [8]uint64 + x [chunk]byte + nx int + len uint64 + is384 bool // mark if this digest is SHA-384 +} + +func (d *digest) Reset() { + if !d.is384 { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + } else { + d.h[0] = init0_384 + d.h[1] = init1_384 + d.h[2] = init2_384 + d.h[3] = init3_384 + d.h[4] = init4_384 + d.h[5] = init5_384 + d.h[6] = init6_384 + d.h[7] = init7_384 + } + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the SHA512 checksum. +func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +// New384 returns a new hash.Hash computing the SHA384 checksum. +func New384() hash.Hash { + d := new(digest) + d.is384 = true + d.Reset() + return d +} + +func (d *digest) Size() int { + if !d.is384 { + return Size + } + return Size384 +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := new(digest) + *d = *d0 + hash := d.checkSum() + if d.is384 { + return append(in, hash[:Size384]...) + } + return append(in, hash[:]...) +} + +func (d *digest) checkSum() [Size]byte { + // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128. 
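+	// Stopping at 112 bytes mod 128 leaves exactly 16 bytes for the
+	// 128-bit big-endian message length written below, so the padded
+	// message always ends on a 128-byte block boundary, as FIPS 180-4
+	// requires.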
+ len := d.len + var tmp [128]byte + tmp[0] = 0x80 + if len%128 < 112 { + d.Write(tmp[0 : 112-len%128]) + } else { + d.Write(tmp[0 : 128+112-len%128]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 16; i++ { + tmp[i] = byte(len >> (120 - 8*i)) + } + d.Write(tmp[0:16]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + h := d.h[:] + if d.is384 { + h = d.h[:6] + } + + var digest [Size]byte + for i, s := range h { + digest[i*8] = byte(s >> 56) + digest[i*8+1] = byte(s >> 48) + digest[i*8+2] = byte(s >> 40) + digest[i*8+3] = byte(s >> 32) + digest[i*8+4] = byte(s >> 24) + digest[i*8+5] = byte(s >> 16) + digest[i*8+6] = byte(s >> 8) + digest[i*8+7] = byte(s) + } + + return digest +} + +// Sum512 returns the SHA512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} + +// Sum384 returns the SHA384 checksum of the data. +func Sum384(data []byte) (sum384 [Size384]byte) { + var d digest + d.is384 = true + d.Reset() + d.Write(data) + sum := d.checkSum() + copy(sum384[:], sum[:Size384]) + return +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512_test.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512_test.go new file mode 100644 index 000000000000..541860f701b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512_test.go @@ -0,0 +1,176 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// SHA512 hash algorithm. See FIPS 180-2. + +package sha512 + +import ( + "fmt" + "io" + "testing" +) + +type sha512Test struct { + out string + in string +} + +var golden = []sha512Test{ + {"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", ""}, + {"1f40fc92da241694750979ee6cf582f2d5d7d28e18335de05abc54d0560e0f5302860c652bf08d560252aa5e74210546f369fbbbce8c12cfc7957b2652fe9a75", "a"}, + {"2d408a0717ec188158278a796c689044361dc6fdde28d6f04973b80896e1823975cdbf12eb63f9e0591328ee235d80e9b5bf1aa6a44f4617ff3caf6400eb172d", "ab"}, + {"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f", "abc"}, + {"d8022f2060ad6efd297ab73dcc5355c9b214054b0d1776a136a669d26a7d3b14f73aa0d0ebff19ee333368f0164b6419a96da49e3e481753e7e96b716bdccb6f", "abcd"}, + {"878ae65a92e86cac011a570d4c30a7eaec442b85ce8eca0c2952b5e3cc0628c2e79d889ad4d5c7c626986d452dd86374b6ffaa7cd8b67665bef2289a5c70b0a1", "abcde"}, + {"e32ef19623e8ed9d267f657a81944b3d07adbb768518068e88435745564e8d4150a0a703be2a7d88b61e3d390c2bb97e2d4c311fdc69d6b1267f05f59aa920e7", "abcdef"}, + {"d716a4188569b68ab1b6dfac178e570114cdf0ea3a1cc0e31486c3e41241bc6a76424e8c37ab26f096fc85ef9886c8cb634187f4fddff645fb099f1ff54c6b8c", "abcdefg"}, + {"a3a8c81bc97c2560010d7389bc88aac974a104e0e2381220c6e084c4dccd1d2d17d4f86db31c2a851dc80e6681d74733c55dcd03dd96f6062cdda12a291ae6ce", "abcdefgh"}, + {"f22d51d25292ca1d0f68f69aedc7897019308cc9db46efb75a03dd494fc7f126c010e8ade6a00a0c1a5f1b75d81e0ed5a93ce98dc9b833db7839247b1d9c24fe", "abcdefghi"}, + {"ef6b97321f34b1fea2169a7db9e1960b471aa13302a988087357c520be957ca119c3ba68e6b4982c019ec89de3865ccf6a3cda1fe11e59f98d99f1502c8b9745", "abcdefghij"}, + {"2210d99af9c8bdecda1b4beff822136753d8342505ddce37f1314e2cdbb488c6016bdaa9bd2ffa513dd5de2e4b50f031393d8ab61f773b0e0130d7381e0f8a1d", "Discard medicine more than two years 
old."}, + {"a687a8985b4d8d0a24f115fe272255c6afaf3909225838546159c1ed685c211a203796ae8ecc4c81a5b6315919b3a64f10713da07e341fcdbb08541bf03066ce", "He who has a shady past knows that nice guys finish last."}, + {"8ddb0392e818b7d585ab22769a50df660d9f6d559cca3afc5691b8ca91b8451374e42bcdabd64589ed7c91d85f626596228a5c8572677eb98bc6b624befb7af8", "I wouldn't marry him with a ten foot pole."}, + {"26ed8f6ca7f8d44b6a8a54ae39640fa8ad5c673f70ee9ce074ba4ef0d483eea00bab2f61d8695d6b34df9c6c48ae36246362200ed820448bdc03a720366a87c6", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"e5a14bf044be69615aade89afcf1ab0389d5fc302a884d403579d1386a2400c089b0dbb387ed0f463f9ee342f8244d5a38cfbc0e819da9529fbff78368c9a982", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"420a1faa48919e14651bed45725abe0f7a58e0f099424c4e5a49194946e38b46c1f8034b18ef169b2e31050d1648e0b982386595f7df47da4b6fd18e55333015", "Nepal premier won't resign."}, + {"d926a863beadb20134db07683535c72007b0e695045876254f341ddcccde132a908c5af57baa6a6a9c63e6649bba0c213dc05fadcf9abccea09f23dcfb637fbe", "For every action there is an equal and opposite government program."}, + {"9a98dd9bb67d0da7bf83da5313dff4fd60a4bac0094f1b05633690ffa7f6d61de9a1d4f8617937d560833a9aaa9ccafe3fd24db418d0e728833545cadd3ad92d", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"d7fde2d2351efade52f4211d3746a0780a26eec3df9b2ed575368a8a1c09ec452402293a8ea4eceb5a4f60064ea29b13cdd86918cd7a4faf366160b009804107", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"b0f35ffa2697359c33a56f5c0cf715c7aeed96da9905ca2698acadb08fbc9e669bf566b6bd5d61a3e86dc22999bcc9f2224e33d1d4f32a228cf9d0349e2db518", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"3d2e5f91778c9e66f7e061293aaa8a8fc742dd3b2e4f483772464b1144189b49273e610e5cccd7a81a19ca1fa70f16b10f1a100a4d8c1372336be8484c64b311", "size: a.out: bad magic"}, + {"b2f68ff58ac015efb1c94c908b0d8c2bf06f491e4de8e6302c49016f7f8a33eac3e959856c7fddbc464de618701338a4b46f76dbfaf9a1e5262b5f40639771c7", "The major problem is with sendmail. -Mark Horton"}, + {"d8c92db5fdf52cf8215e4df3b4909d29203ff4d00e9ad0b64a6a4e04dec5e74f62e7c35c7fb881bd5de95442123df8f57a489b0ae616bd326f84d10021121c57", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"19a9f8dc0a233e464e8566ad3ca9b91e459a7b8c4780985b015776e1bf239a19bc233d0556343e2b0a9bc220900b4ebf4f8bdf89ff8efeaf79602d6849e6f72e", "If the enemy is within range, then so are you."}, + {"00b4c41f307bde87301cdc5b5ab1ae9a592e8ecbb2021dd7bc4b34e2ace60741cc362560bec566ba35178595a91932b8d5357e2c9cec92d393b0fa7831852476", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"91eccc3d5375fd026e4d6787874b1dce201cecd8a27dbded5065728cb2d09c58a3d467bb1faf353bf7ba567e005245d5321b55bc344f7c07b91cb6f26c959be7", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"fabbbe22180f1f137cfdc9556d2570e775d1ae02a597ded43a72a40f9b485d500043b7be128fb9fcd982b83159a0d99aa855a9e7cc4240c00dc01a9bdf8218d7", "C is as portable as Stonehedge!!"}, + {"2ecdec235c1fa4fc2a154d8fba1dddb8a72a1ad73838b51d792331d143f8b96a9f6fcb0f34d7caa351fe6d88771c4f105040e0392f06e0621689d33b2f3ba92e", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. 
Huxley"}, + {"7ad681f6f96f82f7abfa7ecc0334e8fa16d3dc1cdc45b60b7af43fe4075d2357c0c1d60e98350f1afb1f2fe7a4d7cd2ad55b88e458e06b73c40b437331f5dab4", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"833f9248ab4a3b9e5131f745fda1ffd2dd435b30e965957e78291c7ab73605fd1912b0794e5c233ab0a12d205a39778d19b83515d6a47003f19cdee51d98c7e0", "How can you write a big system without C++? -Paul Glick"}, +} + +var golden384 = []sha512Test{ + {"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b", ""}, + {"54a59b9f22b0b80880d8427e548b7c23abd873486e1f035dce9cd697e85175033caa88e6d57bc35efae0b5afd3145f31", "a"}, + {"c7be03ba5bcaa384727076db0018e99248e1a6e8bd1b9ef58a9ec9dd4eeebb3f48b836201221175befa74ddc3d35afdd", "ab"}, + {"cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7", "abc"}, + {"1165b3406ff0b52a3d24721f785462ca2276c9f454a116c2b2ba20171a7905ea5a026682eb659c4d5f115c363aa3c79b", "abcd"}, + {"4c525cbeac729eaf4b4665815bc5db0c84fe6300068a727cf74e2813521565abc0ec57a37ee4d8be89d097c0d2ad52f0", "abcde"}, + {"c6a4c65b227e7387b9c3e839d44869c4cfca3ef583dea64117859b808c1e3d8ae689e1e314eeef52a6ffe22681aa11f5", "abcdef"}, + {"9f11fc131123f844c1226f429b6a0a6af0525d9f40f056c7fc16cdf1b06bda08e302554417a59fa7dcf6247421959d22", "abcdefg"}, + {"9000cd7cada59d1d2eb82912f7f24e5e69cc5517f68283b005fa27c285b61e05edf1ad1a8a9bded6fd29eb87d75ad806", "abcdefgh"}, + {"ef54915b60cf062b8dd0c29ae3cad69abe6310de63ac081f46ef019c5c90897caefd79b796cfa81139788a260ded52df", "abcdefghi"}, + {"a12070030a02d86b0ddacd0d3a5b598344513d0a051e7355053e556a0055489c1555399b03342845c4adde2dc44ff66c", "abcdefghij"}, + {"86f58ec2d74d1b7f8eb0c2ff0967316699639e8d4eb129de54bdf34c96cdbabe200d052149f2dd787f43571ba74670d4", "Discard medicine more than two years old."}, + {"ae4a2b639ca9bfa04b1855d5a05fe7f230994f790891c6979103e2605f660c4c1262a48142dcbeb57a1914ba5f7c3fa7", "He who has a shady past knows that nice guys finish last."}, + {"40ae213df6436eca952aa6841886fcdb82908ef1576a99c8f49bb9dd5023169f7c53035abdda0b54c302f4974e2105e7", "I wouldn't marry him with a ten foot pole."}, + {"e7cf8b873c9bc950f06259aa54309f349cefa72c00d597aebf903e6519a50011dfe355afff064a10701c705693848df9", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"c3d4f0f4047181c7d39d34703365f7bf70207183caf2c2f6145f04da895ef69124d9cdeb635da636c3a474e61024e29b", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"a097aab567e167d5cf93676ed73252a69f9687cb3179bb2d27c9878119e94bf7b7c4b58dc90582edfaf66e11388ed714", "Nepal premier won't resign."}, + {"5026ca45c41fc64712eb65065da92f6467541c78f8966d3fe2c8e3fb769a3ec14215f819654b47bd64f7f0eac17184f3", "For every action there is an equal and opposite government program."}, + {"ac1cc0f5ac8d5f5514a7b738ac322b7fb52a161b449c3672e9b6a6ad1a5e4b26b001cf3bad24c56598676ca17d4b445a", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"722d10c5de371ec0c8c4b5247ac8a5f1d240d68c73f8da13d8b25f0166d6f309bf9561979a111a0049405771d201941a", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"dc2d3ea18bfa10549c63bf2b75b39b5167a80c12aff0e05443168ea87ff149fb0eda5e0bd234eb5d48c7d02ffc5807f1", "It's a tiny change to the code and not completely disgusting. 
- Bob Manchek"}, + {"1d67c969e2a945ae5346d2139760261504d4ba164c522443afe19ef3e29b152a4c52445489cfc9d7215e5a450e8e1e4e", "size: a.out: bad magic"}, + {"5ff8e075e465646e7b73ef36d812c6e9f7d60fa6ea0e533e5569b4f73cde53cdd2cc787f33540af57cca3fe467d32fe0", "The major problem is with sendmail. -Mark Horton"}, + {"5bd0a997a67c9ae1979a894eb0cde403dde003c9b6f2c03cf21925c42ff4e1176e6df1ca005381612ef18457b9b7ec3b", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"1eee6da33e7e54fc5be52ae23b94b16ba4d2a947ae4505c6a3edfc7401151ea5205ac01b669b56f27d8ef7f175ed7762", "If the enemy is within range, then so are you."}, + {"76b06e9dea66bfbb1a96029426dc0dfd7830bd297eb447ff5358d94a87cd00c88b59df2493fef56ecbb5231073892ea9", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"12acaf21452cff586143e3f5db0bfdf7802c057e1adf2a619031c4e1b0ccc4208cf6cef8fe722bbaa2fb46a30d9135d8", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"0fc23d7f4183efd186f0bc4fc5db867e026e2146b06cb3d52f4bdbd57d1740122caa853b41868b197b2ac759db39df88", "C is as portable as Stonehedge!!"}, + {"bc805578a7f85d34a86a32976e1c34fe65cf815186fbef76f46ef99cda10723f971f3f1464d488243f5e29db7488598d", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, + {"b23918399a12ebf4431559eec3813eaf7412e875fd7464f16d581e473330842d2e96c6be49a7ce3f9bb0b8bc0fcbe0fe", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"1764b700eb1ead52a2fc33cc28975c2180f1b8faa5038d94cffa8d78154aab16e91dd787e7b0303948ebed62561542c8", "How can you write a big system without C++? -Paul Glick"}, +} + +func TestGolden(t *testing.T) { + for i := 0; i < len(golden); i++ { + g := golden[i] + s := fmt.Sprintf("%x", Sum512([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum512 function: sha512(%s) = %s want %s", g.in, s, g.out) + } + c := New() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha512[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } + for i := 0; i < len(golden384); i++ { + g := golden384[i] + s := fmt.Sprintf("%x", Sum384([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum384 function: sha384(%s) = %s want %s", g.in, s, g.out) + } + c := New384() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha384[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } +} + +func TestSize(t *testing.T) { + c := New() + if got := c.Size(); got != Size { + t.Errorf("Size = %d; want %d", got, Size) + } + c = New384() + if got := c.Size(); got != Size384 { + t.Errorf("New384.Size = %d; want %d", got, Size384) + } +} + +func TestBlockSize(t *testing.T) { + c := New() + if got := c.BlockSize(); got != BlockSize { + t.Errorf("BlockSize = %d; want %d", got, BlockSize) + } +} + +var bench = New() +var buf = make([]byte, 8192) + +func benchmarkSize(b *testing.B, size int) { + b.SetBytes(int64(size)) + sum := make([]byte, bench.Size()) + for i := 0; i < b.N; i++ { + bench.Reset() + bench.Write(buf[:size]) + bench.Sum(sum[:0]) + } +} + +func BenchmarkHash8Bytes(b *testing.B) { + benchmarkSize(b, 8) +} 
+ +func BenchmarkHash1K(b *testing.B) { + benchmarkSize(b, 1024) +} + +func BenchmarkHash8K(b *testing.B) { + benchmarkSize(b, 8192) +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block.go new file mode 100644 index 000000000000..648ae8f7e1f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block.go @@ -0,0 +1,144 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 + +// SHA512 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package sha512 + +var _K = []uint64{ + 0x428a2f98d728ae22, + 0x7137449123ef65cd, + 0xb5c0fbcfec4d3b2f, + 0xe9b5dba58189dbbc, + 0x3956c25bf348b538, + 0x59f111f1b605d019, + 0x923f82a4af194f9b, + 0xab1c5ed5da6d8118, + 0xd807aa98a3030242, + 0x12835b0145706fbe, + 0x243185be4ee4b28c, + 0x550c7dc3d5ffb4e2, + 0x72be5d74f27b896f, + 0x80deb1fe3b1696b1, + 0x9bdc06a725c71235, + 0xc19bf174cf692694, + 0xe49b69c19ef14ad2, + 0xefbe4786384f25e3, + 0x0fc19dc68b8cd5b5, + 0x240ca1cc77ac9c65, + 0x2de92c6f592b0275, + 0x4a7484aa6ea6e483, + 0x5cb0a9dcbd41fbd4, + 0x76f988da831153b5, + 0x983e5152ee66dfab, + 0xa831c66d2db43210, + 0xb00327c898fb213f, + 0xbf597fc7beef0ee4, + 0xc6e00bf33da88fc2, + 0xd5a79147930aa725, + 0x06ca6351e003826f, + 0x142929670a0e6e70, + 0x27b70a8546d22ffc, + 0x2e1b21385c26c926, + 0x4d2c6dfc5ac42aed, + 0x53380d139d95b3df, + 0x650a73548baf63de, + 0x766a0abb3c77b2a8, + 0x81c2c92e47edaee6, + 0x92722c851482353b, + 0xa2bfe8a14cf10364, + 0xa81a664bbc423001, + 0xc24b8b70d0f89791, + 0xc76c51a30654be30, + 0xd192e819d6ef5218, + 0xd69906245565a910, + 0xf40e35855771202a, + 0x106aa07032bbd1b8, + 0x19a4c116b8d2d0c8, + 0x1e376c085141ab53, + 0x2748774cdf8eeb99, + 0x34b0bcb5e19b48a8, + 0x391c0cb3c5c95a63, + 0x4ed8aa4ae3418acb, + 0x5b9cca4f7763e373, + 0x682e6ff3d6b2b8a3, + 0x748f82ee5defb2fc, + 0x78a5636f43172f60, + 0x84c87814a1f0ab72, + 0x8cc702081a6439ec, + 0x90befffa23631e28, + 0xa4506cebde82bde9, + 0xbef9a3f7b2c67915, + 0xc67178f2e372532b, + 0xca273eceea26619c, + 0xd186b8c721c0c207, + 0xeada7dd6cde0eb1e, + 0xf57d4f7fee6ed178, + 0x06f067aa72176fba, + 0x0a637dc5a2c898a6, + 0x113f9804bef90dae, + 0x1b710b35131c471b, + 0x28db77f523047d84, + 0x32caab7b40c72493, + 0x3c9ebe0a15c9bebc, + 0x431d67c49c100d4c, + 0x4cc5d4becb3e42b6, + 0x597f299cfc657e2a, + 0x5fcb6fab3ad6faec, + 0x6c44198c4a475817, +} + +func block(dig *digest, p []byte) { + var w [80]uint64 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + for i := 0; i < 16; i++ { + j := i * 8 + w[i] = uint64(p[j])<<56 | uint64(p[j+1])<<48 | uint64(p[j+2])<<40 | uint64(p[j+3])<<32 | + uint64(p[j+4])<<24 | uint64(p[j+5])<<16 | uint64(p[j+6])<<8 | uint64(p[j+7]) + } + for i := 16; i < 80; i++ { + v1 := w[i-2] + t1 := (v1>>19 | v1<<(64-19)) ^ (v1>>61 | v1<<(64-61)) ^ (v1 >> 6) + v2 := w[i-15] + t2 := (v2>>1 | v2<<(64-1)) ^ (v2>>8 | v2<<(64-8)) ^ (v2 >> 7) + + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 80; i++ { + t1 := h + ((e>>14 | e<<(64-14)) ^ (e>>18 | e<<(64-18)) ^ (e>>41 | e<<(64-41))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>28 | a<<(64-28)) ^ (a>>34 | a<<(64-34)) ^ (a>>39 | a<<(64-39))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g 
= f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_amd64.s b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_amd64.s new file mode 100644 index 000000000000..2e10233de1d0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_amd64.s @@ -0,0 +1,273 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// SHA512 block routine. See sha512block.go for Go equivalent. +// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 79 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVQ (index*8)(SI), AX; \ + BSWAPQ AX; \ + MOVQ AX, (index*8)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 +// SIGMA0(x) = ROTR(1,x) XOR ROTR(8,x) XOR SHR(7,x) +// SIGMA1(x) = ROTR(19,x) XOR ROTR(61,x) XOR SHR(6,x) +#define MSGSCHEDULE1(index) \ + MOVQ ((index-2)*8)(BP), AX; \ + MOVQ AX, CX; \ + RORQ $19, AX; \ + MOVQ CX, DX; \ + RORQ $61, CX; \ + SHRQ $6, DX; \ + MOVQ ((index-15)*8)(BP), BX; \ + XORQ CX, AX; \ + MOVQ BX, CX; \ + XORQ DX, AX; \ + RORQ $1, BX; \ + MOVQ CX, DX; \ + SHRQ $7, DX; \ + RORQ $8, CX; \ + ADDQ ((index-7)*8)(BP), AX; \ + XORQ CX, BX; \ + XORQ DX, BX; \ + ADDQ ((index-16)*8)(BP), BX; \ + ADDQ BX, AX; \ + MOVQ AX, ((index)*8)(BP) + +// Calculate T1 in AX - uses AX, CX and DX registers. +// h is also used as an accumulator. Wt is passed in AX. +// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(14,x) XOR ROTR(18,x) XOR ROTR(41,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA512T1(const, e, f, g, h) \ + MOVQ $const, DX; \ + ADDQ AX, h; \ + MOVQ e, AX; \ + ADDQ DX, h; \ + MOVQ e, CX; \ + RORQ $14, AX; \ + MOVQ e, DX; \ + RORQ $18, CX; \ + XORQ CX, AX; \ + MOVQ e, CX; \ + RORQ $41, DX; \ + ANDQ f, CX; \ + XORQ AX, DX; \ + MOVQ e, AX; \ + NOTQ AX; \ + ADDQ DX, h; \ + ANDQ g, AX; \ + XORQ CX, AX; \ + ADDQ h, AX + +// Calculate T2 in BX - uses BX, CX, DX and DI registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(28,x) XOR ROTR(34,x) XOR ROTR(39,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA512T2(a, b, c) \ + MOVQ a, DI; \ + MOVQ c, BX; \ + RORQ $28, DI; \ + MOVQ a, DX; \ + ANDQ b, BX; \ + RORQ $34, DX; \ + MOVQ a, CX; \ + ANDQ c, CX; \ + XORQ DX, DI; \ + XORQ CX, BX; \ + MOVQ a, DX; \ + MOVQ b, CX; \ + RORQ $39, DX; \ + ANDQ a, CX; \ + XORQ CX, BX; \ + XORQ DX, DI; \ + ADDQ DI, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. 
+// The values for e and a are stored in d and h, ready for rotation. +#define SHA512ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA512T1(const, e, f, g, h); \ + SHA512T2(a, b, c); \ + MOVQ BX, h; \ + ADDQ AX, d; \ + ADDQ AX, h + +#define SHA512ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA512ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA512ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA512ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$648-32 + MOVQ p_base+8(FP), SI + MOVQ p_len+16(FP), DX + SHRQ $7, DX + SHLQ $7, DX + + LEAQ (SI)(DX*1), DI + MOVQ DI, 640(SP) + CMPQ SI, DI + JEQ end + + MOVQ dig+0(FP), BP + MOVQ (0*8)(BP), R8 // a = H0 + MOVQ (1*8)(BP), R9 // b = H1 + MOVQ (2*8)(BP), R10 // c = H2 + MOVQ (3*8)(BP), R11 // d = H3 + MOVQ (4*8)(BP), R12 // e = H4 + MOVQ (5*8)(BP), R13 // f = H5 + MOVQ (6*8)(BP), R14 // g = H6 + MOVQ (7*8)(BP), R15 // h = H7 + +loop: + MOVQ SP, BP // message schedule + + SHA512ROUND0(0, 0x428a2f98d728ae22, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND0(1, 0x7137449123ef65cd, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND0(2, 0xb5c0fbcfec4d3b2f, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND0(3, 0xe9b5dba58189dbbc, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND0(4, 0x3956c25bf348b538, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND0(5, 0x59f111f1b605d019, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND0(6, 0x923f82a4af194f9b, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND0(7, 0xab1c5ed5da6d8118, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND0(8, 0xd807aa98a3030242, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND0(9, 0x12835b0145706fbe, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND0(10, 0x243185be4ee4b28c, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND0(11, 0x550c7dc3d5ffb4e2, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND0(12, 0x72be5d74f27b896f, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND0(13, 0x80deb1fe3b1696b1, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND0(14, 0x9bdc06a725c71235, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND0(15, 0xc19bf174cf692694, R9, R10, R11, R12, R13, R14, R15, R8) + + SHA512ROUND1(16, 0xe49b69c19ef14ad2, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(17, 0xefbe4786384f25e3, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(18, 0x0fc19dc68b8cd5b5, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(19, 0x240ca1cc77ac9c65, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(20, 0x2de92c6f592b0275, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(21, 0x4a7484aa6ea6e483, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(22, 0x5cb0a9dcbd41fbd4, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(23, 0x76f988da831153b5, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(24, 0x983e5152ee66dfab, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(25, 0xa831c66d2db43210, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(26, 0xb00327c898fb213f, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(27, 0xbf597fc7beef0ee4, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(28, 0xc6e00bf33da88fc2, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(29, 0xd5a79147930aa725, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(30, 0x06ca6351e003826f, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(31, 0x142929670a0e6e70, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(32, 0x27b70a8546d22ffc, R8, R9, R10, R11, R12, R13, R14, R15) + 
SHA512ROUND1(33, 0x2e1b21385c26c926, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(34, 0x4d2c6dfc5ac42aed, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(35, 0x53380d139d95b3df, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(36, 0x650a73548baf63de, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(37, 0x766a0abb3c77b2a8, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(38, 0x81c2c92e47edaee6, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(39, 0x92722c851482353b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(40, 0xa2bfe8a14cf10364, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(41, 0xa81a664bbc423001, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(42, 0xc24b8b70d0f89791, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(43, 0xc76c51a30654be30, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(44, 0xd192e819d6ef5218, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(45, 0xd69906245565a910, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(46, 0xf40e35855771202a, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(47, 0x106aa07032bbd1b8, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(48, 0x19a4c116b8d2d0c8, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(49, 0x1e376c085141ab53, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(50, 0x2748774cdf8eeb99, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(51, 0x34b0bcb5e19b48a8, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(52, 0x391c0cb3c5c95a63, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(53, 0x4ed8aa4ae3418acb, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(54, 0x5b9cca4f7763e373, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(55, 0x682e6ff3d6b2b8a3, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(56, 0x748f82ee5defb2fc, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(57, 0x78a5636f43172f60, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(58, 0x84c87814a1f0ab72, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(59, 0x8cc702081a6439ec, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(60, 0x90befffa23631e28, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(61, 0xa4506cebde82bde9, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(62, 0xbef9a3f7b2c67915, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(63, 0xc67178f2e372532b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(64, 0xca273eceea26619c, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(65, 0xd186b8c721c0c207, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(66, 0xeada7dd6cde0eb1e, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(67, 0xf57d4f7fee6ed178, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(68, 0x06f067aa72176fba, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(69, 0x0a637dc5a2c898a6, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(70, 0x113f9804bef90dae, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(71, 0x1b710b35131c471b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(72, 0x28db77f523047d84, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(73, 0x32caab7b40c72493, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(74, 0x3c9ebe0a15c9bebc, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(75, 0x431d67c49c100d4c, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(76, 0x4cc5d4becb3e42b6, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(77, 0x597f299cfc657e2a, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(78, 0x5fcb6fab3ad6faec, R10, R11, R12, R13, R14, R15, R8, R9) + 
SHA512ROUND1(79, 0x6c44198c4a475817, R9, R10, R11, R12, R13, R14, R15, R8)
+
+	MOVQ	dig+0(FP), BP
+	ADDQ	(0*8)(BP), R8	// H0 = a + H0
+	MOVQ	R8, (0*8)(BP)
+	ADDQ	(1*8)(BP), R9	// H1 = b + H1
+	MOVQ	R9, (1*8)(BP)
+	ADDQ	(2*8)(BP), R10	// H2 = c + H2
+	MOVQ	R10, (2*8)(BP)
+	ADDQ	(3*8)(BP), R11	// H3 = d + H3
+	MOVQ	R11, (3*8)(BP)
+	ADDQ	(4*8)(BP), R12	// H4 = e + H4
+	MOVQ	R12, (4*8)(BP)
+	ADDQ	(5*8)(BP), R13	// H5 = f + H5
+	MOVQ	R13, (5*8)(BP)
+	ADDQ	(6*8)(BP), R14	// H6 = g + H6
+	MOVQ	R14, (6*8)(BP)
+	ADDQ	(7*8)(BP), R15	// H7 = h + H7
+	MOVQ	R15, (7*8)(BP)
+
+	ADDQ	$128, SI
+	CMPQ	SI, 640(SP)
+	JB	loop
+
+end:
+	RET
diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_decl.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_decl.go
new file mode 100644
index 000000000000..bef99de2e461
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_decl.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64
+
+package sha512
+
+//go:noescape
+
+func block(dig *digest, p []byte)
diff --git a/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512resume_test.go b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512resume_test.go
new file mode 100644
index 000000000000..3066c2aed624
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512resume_test.go
@@ -0,0 +1,74 @@
+package sha512
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/sha512" // To register the stdlib sha384 and sha512 algs.
+	"hash"
+	"io"
+	"testing"
+
+	"github.com/stevvooe/resumable"
+)
+
+func compareResumableHash(t *testing.T, newResumable func() hash.Hash, newStdlib func() hash.Hash) {
+	// Read 3 kilobytes of random data into a buffer.
+	buf := make([]byte, 3*1024)
+	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+		t.Fatalf("unable to load random data: %s", err)
+	}
+
+	// Use two Hash objects to consume prefixes of the data. One will be
+	// snapshotted and resumed with each additional byte, then both will write
+	// that byte. The digests should be equal after each byte is digested.
+	resumableHasher := newResumable().(resumable.Hash)
+	stdlibHasher := newStdlib()
+
+	// First, assert that the initial digests are the same.
+	if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) {
+		t.Fatalf("initial digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil))
+	}
+
+	multiWriter := io.MultiWriter(resumableHasher, stdlibHasher)
+
+	for i := 1; i <= len(buf); i++ {
+
+		// Write the next byte.
+		multiWriter.Write(buf[i-1 : i])
+
+		if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) {
+			t.Fatalf("digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil))
+		}
+
+		// Snapshot, reset, and restore the chunk hasher.
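+		// State gob-encodes the digest's unexported fields (h, x, nx,
+		// len, is384) and Restore decodes them back, so after the
+		// Reset/Restore below the hasher must continue to produce the
+		// same digests as the uninterrupted stdlib hasher.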
+ hashState, err := resumableHasher.State() + if err != nil { + t.Fatalf("unable to get state of hash function: %s", err) + } + resumableHasher.Reset() + if err := resumableHasher.Restore(hashState); err != nil { + t.Fatalf("unable to restore state of hash function: %s", err) + } + } +} + +func TestResumable(t *testing.T) { + compareResumableHash(t, New384, sha512.New384) + compareResumableHash(t, New, sha512.New) +} + +func TestResumableRegistered(t *testing.T) { + + for _, hf := range []crypto.Hash{crypto.SHA384, crypto.SHA512} { + // make sure that the hash gets the resumable version from the global + // registry in the crypto library. + h := hf.New() + + if rh, ok := h.(resumable.Hash); !ok { + t.Fatalf("non-resumable hash function registered: %#v %#v", rh, hf) + } + + } + +} diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index 1aa90bc62b60..cdead9e72632 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -1707,6 +1707,17 @@ func deepCopy_api_Image(in imageapi.Image, out *imageapi.Image, c *conversion.Cl } out.DockerImageMetadataVersion = in.DockerImageMetadataVersion out.DockerImageManifest = in.DockerImageManifest + if in.Finalizers != nil { + out.Finalizers = make([]pkgapi.FinalizerName, len(in.Finalizers)) + for i := range in.Finalizers { + out.Finalizers[i] = in.Finalizers[i] + } + } else { + out.Finalizers = nil + } + if err := deepCopy_api_ImageStatus(in.Status, &out.Status, c); err != nil { + return err + } return nil } @@ -1734,6 +1745,11 @@ func deepCopy_api_ImageList(in imageapi.ImageList, out *imageapi.ImageList, c *c return nil } +func deepCopy_api_ImageStatus(in imageapi.ImageStatus, out *imageapi.ImageStatus, c *conversion.Cloner) error { + out.Phase = in.Phase + return nil +} + func deepCopy_api_ImageStream(in imageapi.ImageStream, out *imageapi.ImageStream, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { return err @@ -1754,6 +1770,44 @@ func deepCopy_api_ImageStream(in imageapi.ImageStream, out *imageapi.ImageStream return nil } +func deepCopy_api_ImageStreamDeletion(in imageapi.ImageStreamDeletion, out *imageapi.ImageStreamDeletion, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + } else { + out.TypeMeta = newVal.(pkgapi.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = newVal.(pkgapi.ObjectMeta) + } + return nil +} + +func deepCopy_api_ImageStreamDeletionList(in imageapi.ImageStreamDeletionList, out *imageapi.ImageStreamDeletionList, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + } else { + out.TypeMeta = newVal.(pkgapi.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ListMeta); err != nil { + return err + } else { + out.ListMeta = newVal.(pkgapi.ListMeta) + } + if in.Items != nil { + out.Items = make([]imageapi.ImageStreamDeletion, len(in.Items)) + for i := range in.Items { + if err := deepCopy_api_ImageStreamDeletion(in.Items[i], &out.Items[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + func deepCopy_api_ImageStreamImage(in imageapi.ImageStreamImage, out *imageapi.ImageStreamImage, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { return err @@ -1771,6 +1825,30 @@ func deepCopy_api_ImageStreamImage(in imageapi.ImageStreamImage, out *imageapi.I return nil } +func deepCopy_api_ImageStreamImageList(in
imageapi.ImageStreamImageList, out *imageapi.ImageStreamImageList, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + } else { + out.TypeMeta = newVal.(pkgapi.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ListMeta); err != nil { + return err + } else { + out.ListMeta = newVal.(pkgapi.ListMeta) + } + if in.Items != nil { + out.Items = make([]imageapi.ImageStreamImage, len(in.Items)) + for i := range in.Items { + if err := deepCopy_api_ImageStreamImage(in.Items[i], &out.Items[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + func deepCopy_api_ImageStreamList(in imageapi.ImageStreamList, out *imageapi.ImageStreamList, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { return err @@ -1828,6 +1906,14 @@ func deepCopy_api_ImageStreamSpec(in imageapi.ImageStreamSpec, out *imageapi.Ima } else { out.Tags = nil } + if in.Finalizers != nil { + out.Finalizers = make([]pkgapi.FinalizerName, len(in.Finalizers)) + for i := range in.Finalizers { + out.Finalizers[i] = in.Finalizers[i] + } + } else { + out.Finalizers = nil + } return nil } @@ -1845,6 +1931,7 @@ func deepCopy_api_ImageStreamStatus(in imageapi.ImageStreamStatus, out *imageapi } else { out.Tags = nil } + out.Phase = in.Phase return nil } @@ -2736,8 +2823,12 @@ func init() { deepCopy_api_DockerImage, deepCopy_api_Image, deepCopy_api_ImageList, + deepCopy_api_ImageStatus, deepCopy_api_ImageStream, + deepCopy_api_ImageStreamDeletion, + deepCopy_api_ImageStreamDeletionList, deepCopy_api_ImageStreamImage, + deepCopy_api_ImageStreamImageList, deepCopy_api_ImageStreamList, deepCopy_api_ImageStreamMapping, deepCopy_api_ImageStreamSpec, diff --git a/pkg/api/latest/latest.go b/pkg/api/latest/latest.go index b5858c1e274c..0c68ff8588a9 100644 --- a/pkg/api/latest/latest.go +++ b/pkg/api/latest/latest.go @@ -119,7 +119,8 @@ func init() { "Project": true, "ProjectRequest": true, - "Image": true, + "Image": true, + "ImageStreamDeletion": true, "User": true, "Identity": true, diff --git a/pkg/api/serialization_test.go b/pkg/api/serialization_test.go index 2a9507dfdcac..f13c9aa787b7 100644 --- a/pkg/api/serialization_test.go +++ b/pkg/api/serialization_test.go @@ -136,9 +136,16 @@ func fuzzInternalObject(t *testing.T, forVersion string, item runtime.Object, se j.DockerImageMetadata.Kind = "" j.DockerImageMetadataVersion = []string{"pre012", "1.0"}[c.Rand.Intn(2)] j.DockerImageReference = c.RandString() + if j.DeletionTimestamp == nil { + j.Status.Phase = image.ImageAvailable + j.Finalizers = append(j.Finalizers, osapi.FinalizerOrigin) + } else { + j.Status.Phase = image.ImagePurging + } }, func(j *image.ImageStreamMapping, c fuzz.Continue) { c.FuzzNoCustom(j) + c.Fuzz(&j.Image) j.DockerImageRepository = "" }, func(j *image.ImageStreamImage, c fuzz.Continue) { @@ -164,12 +171,24 @@ func fuzzInternalObject(t *testing.T, forVersion string, item runtime.Object, se j.DockerImageRepository = "" } }, + func(j *image.ImageStream, c fuzz.Continue) { + c.FuzzNoCustom(j) + if j.DeletionTimestamp == nil { + j.Status.Phase = image.ImageStreamAvailable + j.Spec.Finalizers = append(j.Spec.Finalizers, osapi.FinalizerOrigin) + } else { + j.Status.Phase = image.ImageStreamTerminating + } + }, func(j *image.ImageStreamTag, c fuzz.Continue) { c.Fuzz(&j.Image) // because we de-embedded Image from ImageStreamTag, in order to round trip // successfully, the ImageStreamTag's ObjectMeta must match the Image's. 
j.ObjectMeta = j.Image.ObjectMeta }, + func(j *image.ImageStreamDeletion, c fuzz.Continue) { + c.Fuzz(&j.ObjectMeta) + }, func(j *image.TagReference, c fuzz.Continue) { c.FuzzNoCustom(j) if j.From != nil { @@ -420,10 +439,13 @@ func TestTypes(t *testing.T) { } continue } + t.Logf("doing a round trip with osapi codec") fuzzInternalObject(t, "", item, seed) roundTrip(t, osapi.Codec, item) + t.Logf("doing a round trip with v1beta3 codec") fuzzInternalObject(t, "v1beta3", item, seed) roundTrip(t, v1beta3.Codec, item) + t.Logf("doing a round trip with v1 codec") fuzzInternalObject(t, "v1", item, seed) roundTrip(t, v1.Codec, item) } diff --git a/pkg/api/types.go b/pkg/api/types.go index 778f64ec17cd..1b8ab26e01cb 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1 +1,10 @@ package api + +import ( + kapi "k8s.io/kubernetes/pkg/api" +) + +const ( + // These are internal finalizer values to Origin + FinalizerOrigin kapi.FinalizerName = "openshift.io/origin" +) diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index 563260e09539..2b1abf833d14 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -1446,37 +1446,48 @@ func convert_api_ImageList_To_v1_ImageList(in *imageapi.ImageList, out *imageapi return nil } -func convert_api_ImageStream_To_v1_ImageStream(in *imageapi.ImageStream, out *imageapiv1.ImageStream, s conversion.Scope) error { +func convert_api_ImageStreamDeletionList_To_v1_ImageStreamDeletionList(in *imageapi.ImageStreamDeletionList, out *imageapiv1.ImageStreamDeletionList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*imageapi.ImageStream))(in) + defaulting.(func(*imageapi.ImageStreamDeletionList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := s.Convert(&in.Spec, &out.Spec, 0); err != nil { + if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } - if err := s.Convert(&in.Status, &out.Status, 0); err != nil { - return err + if in.Items != nil { + out.Items = make([]imageapiv1.ImageStreamDeletion, len(in.Items)) + for i := range in.Items { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + return err + } + } + } else { + out.Items = nil } return nil } -func convert_api_ImageStreamImage_To_v1_ImageStreamImage(in *imageapi.ImageStreamImage, out *imageapiv1.ImageStreamImage, s conversion.Scope) error { +func convert_api_ImageStreamImageList_To_v1_ImageStreamImageList(in *imageapi.ImageStreamImageList, out *imageapiv1.ImageStreamImageList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*imageapi.ImageStreamImage))(in) + defaulting.(func(*imageapi.ImageStreamImageList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } - if err := s.Convert(&in.Image, &out.Image, 0); err != nil { - return err + if in.Items != nil { + out.Items = make([]imageapiv1.ImageStreamImage, len(in.Items)) + for i := range in.Items { + if err := 
s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + return err + } + } + } else { + out.Items = nil } return nil } @@ -1494,7 +1505,7 @@ func convert_api_ImageStreamList_To_v1_ImageStreamList(in *imageapi.ImageStreamL if in.Items != nil { out.Items = make([]imageapiv1.ImageStream, len(in.Items)) for i := range in.Items { - if err := convert_api_ImageStream_To_v1_ImageStream(&in.Items[i], &out.Items[i], s); err != nil { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { return err } } @@ -1543,37 +1554,48 @@ func convert_v1_ImageList_To_api_ImageList(in *imageapiv1.ImageList, out *imagea return nil } -func convert_v1_ImageStream_To_api_ImageStream(in *imageapiv1.ImageStream, out *imageapi.ImageStream, s conversion.Scope) error { +func convert_v1_ImageStreamDeletionList_To_api_ImageStreamDeletionList(in *imageapiv1.ImageStreamDeletionList, out *imageapi.ImageStreamDeletionList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*imageapiv1.ImageStream))(in) + defaulting.(func(*imageapiv1.ImageStreamDeletionList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := s.Convert(&in.Spec, &out.Spec, 0); err != nil { + if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } - if err := s.Convert(&in.Status, &out.Status, 0); err != nil { - return err + if in.Items != nil { + out.Items = make([]imageapi.ImageStreamDeletion, len(in.Items)) + for i := range in.Items { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + return err + } + } + } else { + out.Items = nil } return nil } -func convert_v1_ImageStreamImage_To_api_ImageStreamImage(in *imageapiv1.ImageStreamImage, out *imageapi.ImageStreamImage, s conversion.Scope) error { +func convert_v1_ImageStreamImageList_To_api_ImageStreamImageList(in *imageapiv1.ImageStreamImageList, out *imageapi.ImageStreamImageList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*imageapiv1.ImageStreamImage))(in) + defaulting.(func(*imageapiv1.ImageStreamImageList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } - if err := s.Convert(&in.Image, &out.Image, 0); err != nil { - return err + if in.Items != nil { + out.Items = make([]imageapi.ImageStreamImage, len(in.Items)) + for i := range in.Items { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + return err + } + } + } else { + out.Items = nil } return nil } @@ -1591,7 +1613,7 @@ func convert_v1_ImageStreamList_To_api_ImageStreamList(in *imageapiv1.ImageStrea if in.Items != nil { out.Items = make([]imageapi.ImageStream, len(in.Items)) for i := range in.Items { - if err := convert_v1_ImageStream_To_api_ImageStream(&in.Items[i], &out.Items[i], s); err != nil { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { return err } } @@ -3352,10 +3374,10 @@ func init() { convert_api_Identity_To_v1_Identity, convert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger, convert_api_ImageList_To_v1_ImageList, - 
convert_api_ImageStreamImage_To_v1_ImageStreamImage, + convert_api_ImageStreamDeletionList_To_v1_ImageStreamDeletionList, + convert_api_ImageStreamImageList_To_v1_ImageStreamImageList, convert_api_ImageStreamList_To_v1_ImageStreamList, convert_api_ImageStreamTag_To_v1_ImageStreamTag, - convert_api_ImageStream_To_v1_ImageStream, convert_api_IsPersonalSubjectAccessReview_To_v1_IsPersonalSubjectAccessReview, convert_api_ListMeta_To_v1_ListMeta, convert_api_LocalObjectReference_To_v1_LocalObjectReference, @@ -3435,10 +3457,10 @@ func init() { convert_v1_Identity_To_api_Identity, convert_v1_ImageChangeTrigger_To_api_ImageChangeTrigger, convert_v1_ImageList_To_api_ImageList, - convert_v1_ImageStreamImage_To_api_ImageStreamImage, + convert_v1_ImageStreamDeletionList_To_api_ImageStreamDeletionList, + convert_v1_ImageStreamImageList_To_api_ImageStreamImageList, convert_v1_ImageStreamList_To_api_ImageStreamList, convert_v1_ImageStreamTag_To_api_ImageStreamTag, - convert_v1_ImageStream_To_api_ImageStream, convert_v1_IsPersonalSubjectAccessReview_To_api_IsPersonalSubjectAccessReview, convert_v1_ListMeta_To_api_ListMeta, convert_v1_LocalObjectReference_To_api_LocalObjectReference, diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index 38f79d1d72bc..67419e02b8c5 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -1620,6 +1620,17 @@ func deepCopy_v1_Image(in imageapiv1.Image, out *imageapiv1.Image, c *conversion } out.DockerImageMetadataVersion = in.DockerImageMetadataVersion out.DockerImageManifest = in.DockerImageManifest + if in.Finalizers != nil { + out.Finalizers = make([]pkgapiv1.FinalizerName, len(in.Finalizers)) + for i := range in.Finalizers { + out.Finalizers[i] = in.Finalizers[i] + } + } else { + out.Finalizers = nil + } + if err := deepCopy_v1_ImageStatus(in.Status, &out.Status, c); err != nil { + return err + } return nil } @@ -1647,6 +1658,11 @@ func deepCopy_v1_ImageList(in imageapiv1.ImageList, out *imageapiv1.ImageList, c return nil } +func deepCopy_v1_ImageStatus(in imageapiv1.ImageStatus, out *imageapiv1.ImageStatus, c *conversion.Cloner) error { + out.Phase = in.Phase + return nil +} + func deepCopy_v1_ImageStream(in imageapiv1.ImageStream, out *imageapiv1.ImageStream, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { return err @@ -1667,6 +1683,44 @@ func deepCopy_v1_ImageStream(in imageapiv1.ImageStream, out *imageapiv1.ImageStr return nil } +func deepCopy_v1_ImageStreamDeletion(in imageapiv1.ImageStreamDeletion, out *imageapiv1.ImageStreamDeletion, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + } else { + out.TypeMeta = newVal.(pkgapiv1.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = newVal.(pkgapiv1.ObjectMeta) + } + return nil +} + +func deepCopy_v1_ImageStreamDeletionList(in imageapiv1.ImageStreamDeletionList, out *imageapiv1.ImageStreamDeletionList, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + } else { + out.TypeMeta = newVal.(pkgapiv1.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ListMeta); err != nil { + return err + } else { + out.ListMeta = newVal.(pkgapiv1.ListMeta) + } + if in.Items != nil { + out.Items = make([]imageapiv1.ImageStreamDeletion, len(in.Items)) + for i := range in.Items { + if err := deepCopy_v1_ImageStreamDeletion(in.Items[i], &out.Items[i], c); err != nil { + return 
err + } + } + } else { + out.Items = nil + } + return nil +} + func deepCopy_v1_ImageStreamImage(in imageapiv1.ImageStreamImage, out *imageapiv1.ImageStreamImage, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { return err @@ -1684,6 +1738,30 @@ func deepCopy_v1_ImageStreamImage(in imageapiv1.ImageStreamImage, out *imageapiv return nil } +func deepCopy_v1_ImageStreamImageList(in imageapiv1.ImageStreamImageList, out *imageapiv1.ImageStreamImageList, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + } else { + out.TypeMeta = newVal.(pkgapiv1.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ListMeta); err != nil { + return err + } else { + out.ListMeta = newVal.(pkgapiv1.ListMeta) + } + if in.Items != nil { + out.Items = make([]imageapiv1.ImageStreamImage, len(in.Items)) + for i := range in.Items { + if err := deepCopy_v1_ImageStreamImage(in.Items[i], &out.Items[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + func deepCopy_v1_ImageStreamList(in imageapiv1.ImageStreamList, out *imageapiv1.ImageStreamList, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { return err @@ -1738,6 +1816,14 @@ func deepCopy_v1_ImageStreamSpec(in imageapiv1.ImageStreamSpec, out *imageapiv1. } else { out.Tags = nil } + if in.Finalizers != nil { + out.Finalizers = make([]pkgapiv1.FinalizerName, len(in.Finalizers)) + for i := range in.Finalizers { + out.Finalizers[i] = in.Finalizers[i] + } + } else { + out.Finalizers = nil + } return nil } @@ -1753,6 +1839,7 @@ func deepCopy_v1_ImageStreamStatus(in imageapiv1.ImageStreamStatus, out *imageap } else { out.Tags = nil } + out.Phase = in.Phase return nil } @@ -2647,8 +2734,12 @@ func init() { deepCopy_v1_RollingDeploymentStrategyParams, deepCopy_v1_Image, deepCopy_v1_ImageList, + deepCopy_v1_ImageStatus, deepCopy_v1_ImageStream, + deepCopy_v1_ImageStreamDeletion, + deepCopy_v1_ImageStreamDeletionList, deepCopy_v1_ImageStreamImage, + deepCopy_v1_ImageStreamImageList, deepCopy_v1_ImageStreamList, deepCopy_v1_ImageStreamMapping, deepCopy_v1_ImageStreamSpec, diff --git a/pkg/api/v1beta3/conversion_generated.go b/pkg/api/v1beta3/conversion_generated.go index 0e3b05364a57..bd65c456bf0f 100644 --- a/pkg/api/v1beta3/conversion_generated.go +++ b/pkg/api/v1beta3/conversion_generated.go @@ -1484,6 +1484,52 @@ func convert_api_ImageList_To_v1beta3_ImageList(in *imageapi.ImageList, out *ima return nil } +func convert_api_ImageStreamDeletionList_To_v1beta3_ImageStreamDeletionList(in *imageapi.ImageStreamDeletionList, out *imageapiv1beta3.ImageStreamDeletionList, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*imageapi.ImageStreamDeletionList))(in) + } + if err := convert_api_TypeMeta_To_v1beta3_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := convert_api_ListMeta_To_v1beta3_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + out.Items = make([]imageapiv1beta3.ImageStreamDeletion, len(in.Items)) + for i := range in.Items { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func convert_api_ImageStreamImageList_To_v1beta3_ImageStreamImageList(in *imageapi.ImageStreamImageList, out *imageapiv1beta3.ImageStreamImageList, s conversion.Scope) error { + if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*imageapi.ImageStreamImageList))(in) + } + if err := convert_api_TypeMeta_To_v1beta3_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := convert_api_ListMeta_To_v1beta3_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + out.Items = make([]imageapiv1beta3.ImageStreamImage, len(in.Items)) + for i := range in.Items { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + func convert_api_ImageStreamList_To_v1beta3_ImageStreamList(in *imageapi.ImageStreamList, out *imageapiv1beta3.ImageStreamList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*imageapi.ImageStreamList))(in) @@ -1530,6 +1576,52 @@ func convert_v1beta3_ImageList_To_api_ImageList(in *imageapiv1beta3.ImageList, o return nil } +func convert_v1beta3_ImageStreamDeletionList_To_api_ImageStreamDeletionList(in *imageapiv1beta3.ImageStreamDeletionList, out *imageapi.ImageStreamDeletionList, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*imageapiv1beta3.ImageStreamDeletionList))(in) + } + if err := convert_v1beta3_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := convert_v1beta3_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + out.Items = make([]imageapi.ImageStreamDeletion, len(in.Items)) + for i := range in.Items { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func convert_v1beta3_ImageStreamImageList_To_api_ImageStreamImageList(in *imageapiv1beta3.ImageStreamImageList, out *imageapi.ImageStreamImageList, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*imageapiv1beta3.ImageStreamImageList))(in) + } + if err := convert_v1beta3_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := convert_v1beta3_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + out.Items = make([]imageapi.ImageStreamImage, len(in.Items)) + for i := range in.Items { + if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + func convert_v1beta3_ImageStreamList_To_api_ImageStreamList(in *imageapiv1beta3.ImageStreamList, out *imageapi.ImageStreamList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*imageapiv1beta3.ImageStreamList))(in) @@ -3289,6 +3381,8 @@ func init() { convert_api_Identity_To_v1beta3_Identity, convert_api_ImageChangeTrigger_To_v1beta3_ImageChangeTrigger, convert_api_ImageList_To_v1beta3_ImageList, + convert_api_ImageStreamDeletionList_To_v1beta3_ImageStreamDeletionList, + convert_api_ImageStreamImageList_To_v1beta3_ImageStreamImageList, convert_api_ImageStreamList_To_v1beta3_ImageStreamList, convert_api_IsPersonalSubjectAccessReview_To_v1beta3_IsPersonalSubjectAccessReview, convert_api_ListMeta_To_v1beta3_ListMeta, @@ -3370,6 +3464,8 @@ func init() { convert_v1beta3_Identity_To_api_Identity, 
convert_v1beta3_ImageChangeTrigger_To_api_ImageChangeTrigger, convert_v1beta3_ImageList_To_api_ImageList, + convert_v1beta3_ImageStreamDeletionList_To_api_ImageStreamDeletionList, + convert_v1beta3_ImageStreamImageList_To_api_ImageStreamImageList, convert_v1beta3_ImageStreamList_To_api_ImageStreamList, convert_v1beta3_IsPersonalSubjectAccessReview_To_api_IsPersonalSubjectAccessReview, convert_v1beta3_ListMeta_To_api_ListMeta, diff --git a/pkg/api/v1beta3/deep_copy_generated.go b/pkg/api/v1beta3/deep_copy_generated.go index fec3053a1ce9..de99ad55df42 100644 --- a/pkg/api/v1beta3/deep_copy_generated.go +++ b/pkg/api/v1beta3/deep_copy_generated.go @@ -1628,6 +1628,17 @@ func deepCopy_v1beta3_Image(in imageapiv1beta3.Image, out *imageapiv1beta3.Image } out.DockerImageMetadataVersion = in.DockerImageMetadataVersion out.DockerImageManifest = in.DockerImageManifest + if in.Finalizers != nil { + out.Finalizers = make([]pkgapiv1beta3.FinalizerName, len(in.Finalizers)) + for i := range in.Finalizers { + out.Finalizers[i] = in.Finalizers[i] + } + } else { + out.Finalizers = nil + } + if err := deepCopy_v1beta3_ImageStatus(in.Status, &out.Status, c); err != nil { + return err + } return nil } @@ -1655,6 +1666,11 @@ func deepCopy_v1beta3_ImageList(in imageapiv1beta3.ImageList, out *imageapiv1bet return nil } +func deepCopy_v1beta3_ImageStatus(in imageapiv1beta3.ImageStatus, out *imageapiv1beta3.ImageStatus, c *conversion.Cloner) error { + out.Phase = in.Phase + return nil +} + func deepCopy_v1beta3_ImageStream(in imageapiv1beta3.ImageStream, out *imageapiv1beta3.ImageStream, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { return err @@ -1675,6 +1691,44 @@ func deepCopy_v1beta3_ImageStream(in imageapiv1beta3.ImageStream, out *imageapiv return nil } +func deepCopy_v1beta3_ImageStreamDeletion(in imageapiv1beta3.ImageStreamDeletion, out *imageapiv1beta3.ImageStreamDeletion, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + } else { + out.TypeMeta = newVal.(pkgapiv1beta3.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = newVal.(pkgapiv1beta3.ObjectMeta) + } + return nil +} + +func deepCopy_v1beta3_ImageStreamDeletionList(in imageapiv1beta3.ImageStreamDeletionList, out *imageapiv1beta3.ImageStreamDeletionList, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + } else { + out.TypeMeta = newVal.(pkgapiv1beta3.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ListMeta); err != nil { + return err + } else { + out.ListMeta = newVal.(pkgapiv1beta3.ListMeta) + } + if in.Items != nil { + out.Items = make([]imageapiv1beta3.ImageStreamDeletion, len(in.Items)) + for i := range in.Items { + if err := deepCopy_v1beta3_ImageStreamDeletion(in.Items[i], &out.Items[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + func deepCopy_v1beta3_ImageStreamImage(in imageapiv1beta3.ImageStreamImage, out *imageapiv1beta3.ImageStreamImage, c *conversion.Cloner) error { if err := deepCopy_v1beta3_Image(in.Image, &out.Image, c); err != nil { return err @@ -1683,6 +1737,30 @@ func deepCopy_v1beta3_ImageStreamImage(in imageapiv1beta3.ImageStreamImage, out return nil } +func deepCopy_v1beta3_ImageStreamImageList(in imageapiv1beta3.ImageStreamImageList, out *imageapiv1beta3.ImageStreamImageList, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { + return err + 
} else { + out.TypeMeta = newVal.(pkgapiv1beta3.TypeMeta) + } + if newVal, err := c.DeepCopy(in.ListMeta); err != nil { + return err + } else { + out.ListMeta = newVal.(pkgapiv1beta3.ListMeta) + } + if in.Items != nil { + out.Items = make([]imageapiv1beta3.ImageStreamImage, len(in.Items)) + for i := range in.Items { + if err := deepCopy_v1beta3_ImageStreamImage(in.Items[i], &out.Items[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + func deepCopy_v1beta3_ImageStreamList(in imageapiv1beta3.ImageStreamList, out *imageapiv1beta3.ImageStreamList, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.TypeMeta); err != nil { return err @@ -1737,6 +1815,14 @@ func deepCopy_v1beta3_ImageStreamSpec(in imageapiv1beta3.ImageStreamSpec, out *i } else { out.Tags = nil } + if in.Finalizers != nil { + out.Finalizers = make([]pkgapiv1beta3.FinalizerName, len(in.Finalizers)) + for i := range in.Finalizers { + out.Finalizers[i] = in.Finalizers[i] + } + } else { + out.Finalizers = nil + } return nil } @@ -1752,6 +1838,7 @@ func deepCopy_v1beta3_ImageStreamStatus(in imageapiv1beta3.ImageStreamStatus, ou } else { out.Tags = nil } + out.Phase = in.Phase return nil } @@ -2637,8 +2724,12 @@ func init() { deepCopy_v1beta3_RollingDeploymentStrategyParams, deepCopy_v1beta3_Image, deepCopy_v1beta3_ImageList, + deepCopy_v1beta3_ImageStatus, deepCopy_v1beta3_ImageStream, + deepCopy_v1beta3_ImageStreamDeletion, + deepCopy_v1beta3_ImageStreamDeletionList, deepCopy_v1beta3_ImageStreamImage, + deepCopy_v1beta3_ImageStreamImageList, deepCopy_v1beta3_ImageStreamList, deepCopy_v1beta3_ImageStreamMapping, deepCopy_v1beta3_ImageStreamSpec, diff --git a/pkg/api/validation/register.go b/pkg/api/validation/register.go index b3941c2b8870..c57526fc6ce3 100644 --- a/pkg/api/validation/register.go +++ b/pkg/api/validation/register.go @@ -50,6 +50,7 @@ func init() { Validator.Register(&imageapi.Image{}, imagevalidation.ValidateImage, imagevalidation.ValidateImageUpdate) Validator.Register(&imageapi.ImageStream{}, imagevalidation.ValidateImageStream, imagevalidation.ValidateImageStreamUpdate) Validator.Register(&imageapi.ImageStreamMapping{}, imagevalidation.ValidateImageStreamMapping, nil) + Validator.Register(&imageapi.ImageStreamDeletion{}, imagevalidation.ValidateImageStreamDeletion, nil) Validator.Register(&oauthapi.OAuthAccessToken{}, oauthvalidation.ValidateAccessToken, nil) Validator.Register(&oauthapi.OAuthAuthorizeToken{}, oauthvalidation.ValidateAuthorizeToken, nil) diff --git a/pkg/authorization/api/types.go b/pkg/authorization/api/types.go index 8871bf1f33f9..26c2d3dea59a 100644 --- a/pkg/authorization/api/types.go +++ b/pkg/authorization/api/types.go @@ -73,7 +73,7 @@ const ( var ( GroupsToResources = map[string][]string{ BuildGroupName: {"builds", "buildconfigs", "buildlogs", "buildconfigs/instantiate", "builds/log", "builds/clone", "buildconfigs/webhooks"}, - ImageGroupName: {"imagestreams", "imagestreammappings", "imagestreamtags", "imagestreamimages"}, + ImageGroupName: {"imagestreams", "imagestreams/finalize", "imagestreammappings", "imagestreamtags", "imagestreamimages"}, DeploymentGroupName: {"deployments", "deploymentconfigs", "generatedeploymentconfigs", "deploymentconfigrollbacks"}, SDNGroupName: {"clusternetworks", "hostsubnets", "netnamespaces"}, TemplateGroupName: {"templates", "templateconfigs", "processedtemplates"}, @@ -86,7 +86,7 @@ var ( PermissionGrantingGroupName: {"roles", "rolebindings", "resourceaccessreviews" /* cluster scoped*/, 
"subjectaccessreviews" /* cluster scoped*/, "localresourceaccessreviews", "localsubjectaccessreviews"}, OpenshiftExposedGroupName: {BuildGroupName, ImageGroupName, DeploymentGroupName, TemplateGroupName, "routes"}, OpenshiftAllGroupName: {OpenshiftExposedGroupName, UserGroupName, OAuthGroupName, PolicyOwnerGroupName, SDNGroupName, PermissionGrantingGroupName, OpenshiftStatusGroupName, "projects", - "clusterroles", "clusterrolebindings", "clusterpolicies", "clusterpolicybindings", "images" /* cluster scoped*/, "projectrequests"}, + "clusterroles", "clusterrolebindings", "clusterpolicies", "clusterpolicybindings", "images", "images/status", "images/finalize", "imagestreamdeletions" /* cluster scoped*/, "projectrequests"}, OpenshiftStatusGroupName: {"imagestreams/status", "routes/status"}, QuotaGroupName: {"limitranges", "resourcequotas", "resourcequotausages"}, diff --git a/pkg/client/client.go b/pkg/client/client.go index b352c0d930c4..70b9516a60ff 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -23,6 +23,7 @@ type Interface interface { ImageStreamMappingsNamespacer ImageStreamTagsNamespacer ImageStreamImagesNamespacer + ImageStreamDeletionsInterfacer DeploymentConfigsNamespacer RoutesNamespacer HostSubnetsInterface @@ -93,6 +94,11 @@ func (c *Client) ImageStreamImages(namespace string) ImageStreamImageInterface { return newImageStreamImages(c, namespace) } +// ImageStreamDeletions provides a REST client for ImageStreamDeletion +func (c *Client) ImageStreamDeletions() ImageStreamDeletionInterface { + return newImageStreamDeletions(c) +} + // DeploymentConfigs provides a REST client for DeploymentConfig func (c *Client) DeploymentConfigs(namespace string) DeploymentConfigInterface { return newDeploymentConfigs(c, namespace) diff --git a/pkg/client/images.go b/pkg/client/images.go index bc679de04140..a6c2ad6282c7 100644 --- a/pkg/client/images.go +++ b/pkg/client/images.go @@ -1,6 +1,7 @@ package client import ( + "fmt" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" @@ -18,6 +19,8 @@ type ImageInterface interface { Get(name string) (*imageapi.Image, error) Create(image *imageapi.Image) (*imageapi.Image, error) Delete(name string) error + UpdateStatus(image *imageapi.Image) (*imageapi.Image, error) + Finalize(item *imageapi.Image) (*imageapi.Image, error) } // images implements ImagesInterface. @@ -63,3 +66,21 @@ func (c *images) Delete(name string) (err error) { err = c.r.Delete().Resource("images").Name(name).Do().Error() return } + +// UpdateStatus updates the image's status. Returns the server's representation of the image, and an error, if it occurs. +func (c *images) UpdateStatus(image *imageapi.Image) (result *imageapi.Image, err error) { + result = &imageapi.Image{} + err = c.r.Put().Resource("images").Name(image.Name).SubResource("status").Body(image).Do().Into(result) + return +} + +// Finalize takes the representation of a image to update. Returns the server's representation of the image, and an error, if it occurs. 
+func (c *images) Finalize(image *imageapi.Image) (result *imageapi.Image, err error) { + result = &imageapi.Image{} + if len(image.ResourceVersion) == 0 { + err = fmt.Errorf("invalid update object, missing resource version: %v", image) + return + } + err = c.r.Put().Resource("images").Name(image.Name).SubResource("finalize").Body(image).Do().Into(result) + return +} diff --git a/pkg/client/imagestreamdeletions.go b/pkg/client/imagestreamdeletions.go new file mode 100644 index 000000000000..eafae425c885 --- /dev/null +++ b/pkg/client/imagestreamdeletions.go @@ -0,0 +1,65 @@ +package client + +import ( + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + + imageapi "github.com/openshift/origin/pkg/image/api" +) + +// ImageStreamDeletionsInterfacer has methods to work with ImageStreamDeletion resources +type ImageStreamDeletionsInterfacer interface { + ImageStreamDeletions() ImageStreamDeletionInterface +} + +// ImageStreamDeletionInterface exposes methods on ImageStreamDeletion resources. +type ImageStreamDeletionInterface interface { + List(label labels.Selector, field fields.Selector) (*imageapi.ImageStreamDeletionList, error) + Get(name string) (*imageapi.ImageStreamDeletion, error) + Create(stream *imageapi.ImageStreamDeletion) (*imageapi.ImageStreamDeletion, error) + Delete(name string) error +} + +// imageStreamDeletions implements ImageStreamDeletionsInterfacer interface +type imageStreamDeletions struct { + r *Client +} + +// newImageStreamDeletions returns an imageStreamDeletions +func newImageStreamDeletions(c *Client) *imageStreamDeletions { + return &imageStreamDeletions{r: c} +} + +// List returns a list of image stream deletions that match the label and field selectors. +func (c *imageStreamDeletions) List(label labels.Selector, field fields.Selector) (result *imageapi.ImageStreamDeletionList, err error) { + result = &imageapi.ImageStreamDeletionList{} + err = c.r.Get(). + Resource("imageStreamDeletions"). + LabelsSelectorParam(label). + FieldsSelectorParam(field). + Do(). + Into(result) + return +} + +// Get returns information about a particular image stream deletion and error +// if one occurs. +func (c *imageStreamDeletions) Get(name string) (result *imageapi.ImageStreamDeletion, err error) { + result = &imageapi.ImageStreamDeletion{} + err = c.r.Get().Resource("imageStreamDeletions").Name(name).Do().Into(result) + return +} + +// Create creates a new image stream deletion. Returns the server's +// representation of the image stream deletion and error if one occurs. +func (c *imageStreamDeletions) Create(stream *imageapi.ImageStreamDeletion) (result *imageapi.ImageStreamDeletion, err error) { + result = &imageapi.ImageStreamDeletion{} + err = c.r.Post().Resource("imageStreamDeletions").Body(stream).Do().Into(result) + return +} + +// Delete deletes an image stream deletion, returns error if one occurs. +func (c *imageStreamDeletions) Delete(name string) (err error) { + err = c.r.Delete().Resource("imageStreamDeletions").Name(name).Do().Error() + return +} diff --git a/pkg/client/imagestreamimages.go b/pkg/client/imagestreamimages.go index 0f68c7c023d5..862e621bad8e 100644 --- a/pkg/client/imagestreamimages.go +++ b/pkg/client/imagestreamimages.go @@ -3,6 +3,9 @@ package client import ( "fmt" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "github.com/openshift/origin/pkg/image/api" ) @@ -14,6 +17,7 @@ type ImageStreamImagesNamespacer interface { // ImageStreamImageInterface exposes methods on ImageStreamImage resources. 
type ImageStreamImageInterface interface { Get(name, id string) (*api.ImageStreamImage, error) + List(label labels.Selector, field fields.Selector) (*api.ImageStreamImageList, error) } // imageStreamImages implements ImageStreamImagesNamespacer interface @@ -36,3 +40,15 @@ func (c *imageStreamImages) Get(name, id string) (result *api.ImageStreamImage, err = c.r.Get().Namespace(c.ns).Resource("imageStreamImages").Name(fmt.Sprintf("%s@%s", name, id)).Do().Into(result) return } + +func (c *imageStreamImages) List(label labels.Selector, field fields.Selector) (result *api.ImageStreamImageList, err error) { + result = &api.ImageStreamImageList{} + err = c.r.Get(). + Namespace(c.ns). + Resource("imageStreamImages"). + LabelsSelectorParam(label). + FieldsSelectorParam(field). + Do(). + Into(result) + return +} diff --git a/pkg/client/imagestreams.go b/pkg/client/imagestreams.go index f6a45e55fe82..37bd8a73aaef 100644 --- a/pkg/client/imagestreams.go +++ b/pkg/client/imagestreams.go @@ -1,6 +1,8 @@ package client import ( + "fmt" + "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" @@ -22,6 +24,7 @@ type ImageStreamInterface interface { Delete(name string) error Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) UpdateStatus(stream *imageapi.ImageStream) (*imageapi.ImageStream, error) + Finalize(stream *imageapi.ImageStream) (*imageapi.ImageStream, error) } // ImageStreamNamespaceGetter exposes methods to get ImageStreams by Namespace @@ -108,3 +111,14 @@ func (c *imageStreams) UpdateStatus(stream *imageapi.ImageStream) (result *image err = c.r.Put().Namespace(c.ns).Resource("imageStreams").Name(stream.Name).SubResource("status").Body(stream).Do().Into(result) return } + +// Finalize takes the representation of an image stream to update. Returns the server's representation of the image stream, and an error, if it occurs. 
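The ResourceVersion guard in the implementation below matters because finalize is an optimistic-concurrency update: a stale ResourceVersion makes the server return a conflict rather than silently clobbering newer state. A sketch of the retry loop a caller might wrap around it (the helper and the retry bound are illustrative, not part of this patch; kerrors is assumed to be k8s.io/kubernetes/pkg/api/errors):

    // Re-fetch on conflict so the PUT carries a fresh ResourceVersion.
    func finalizeStream(c client.ImageStreamInterface, name string) error {
        var lastErr error
        for attempt := 0; attempt < 3; attempt++ {
            stream, err := c.Get(name)
            if err != nil {
                return err
            }
            stream.Spec.Finalizers = nil // all finalizers have been honored
            if _, lastErr = c.Finalize(stream); lastErr == nil || !kerrors.IsConflict(lastErr) {
                return lastErr
            }
        }
        return lastErr
    }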
+func (c *imageStreams) Finalize(imageStream *imageapi.ImageStream) (result *imageapi.ImageStream, err error) { + result = &imageapi.ImageStream{} + if len(imageStream.ResourceVersion) == 0 { + err = fmt.Errorf("invalid update object, missing resource version: %v", imageStream) + return + } + err = c.r.Put().Namespace(c.ns).Resource("imageStreams").Name(imageStream.Name).SubResource("finalize").Body(imageStream).Do().Into(result) + return +} diff --git a/pkg/client/testclient/fake.go b/pkg/client/testclient/fake.go index 44f634b98303..827c31488c74 100644 --- a/pkg/client/testclient/fake.go +++ b/pkg/client/testclient/fake.go @@ -160,6 +160,11 @@ func (c *Fake) ImageStreamImages(namespace string) client.ImageStreamImageInterf return &FakeImageStreamImages{Fake: c, Namespace: namespace} } +// ImageStreamDeletions provides a fake REST client for ImageStreamDeletions +func (c *Fake) ImageStreamDeletions() client.ImageStreamDeletionInterface { + return &FakeImageStreamDeletions{Fake: c} +} + // DeploymentConfigs provides a fake REST client for DeploymentConfigs func (c *Fake) DeploymentConfigs(namespace string) client.DeploymentConfigInterface { return &FakeDeploymentConfigs{Fake: c, Namespace: namespace} diff --git a/pkg/client/testclient/fake_images.go b/pkg/client/testclient/fake_images.go index f32890f1ac56..b8c3ea0b1949 100644 --- a/pkg/client/testclient/fake_images.go +++ b/pkg/client/testclient/fake_images.go @@ -49,3 +49,33 @@ func (c *FakeImages) Delete(name string) error { _, err := c.Fake.Invokes(ktestclient.NewRootDeleteAction("images", name), &imageapi.Image{}) return err } + +func (c *FakeImages) UpdateStatus(inObj *imageapi.Image) (result *imageapi.Image, err error) { + action := ktestclient.CreateActionImpl{} + action.Verb = "update" + action.Resource = "images" + action.Subresource = "status" + action.Object = inObj + + obj, err := c.Fake.Invokes(action, inObj) + if obj == nil { + return nil, err + } + + return obj.(*imageapi.Image), err +} + +func (c *FakeImages) Finalize(inObj *imageapi.Image) (*imageapi.Image, error) { + action := ktestclient.CreateActionImpl{} + action.Verb = "create" + action.Resource = "images" + action.Subresource = "finalize" + action.Object = inObj + + obj, err := c.Fake.Invokes(action, inObj) + if obj == nil { + return nil, err + } + + return obj.(*imageapi.Image), err +} diff --git a/pkg/client/testclient/fake_imagestreamdeletions.go b/pkg/client/testclient/fake_imagestreamdeletions.go new file mode 100644 index 000000000000..2e79c3038a18 --- /dev/null +++ b/pkg/client/testclient/fake_imagestreamdeletions.go @@ -0,0 +1,51 @@ +package testclient + +import ( + ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + + "github.com/openshift/origin/pkg/client" + imageapi "github.com/openshift/origin/pkg/image/api" +) + +// FakeImageStreamDeletions implements ImageStreamDeletionInterface. Meant to be +// embedded into a struct to get a default implementation. This makes faking +// out just the methods you want to test easier. 
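The var _ assertion a few lines below is the standard Go compile-time check that the fake still satisfies the client interface; if a method is later added to ImageStreamDeletionInterface without updating the fake, the package stops compiling. The idiom in isolation:

    type Pinger interface{ Ping() error }

    type fakePinger struct{}

    func (fakePinger) Ping() error { return nil }

    // Compilation fails here the moment fakePinger drifts from Pinger.
    var _ Pinger = fakePinger{}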
+type FakeImageStreamDeletions struct { + Fake *Fake +} + +var _ client.ImageStreamDeletionInterface = &FakeImageStreamDeletions{} + +func (c *FakeImageStreamDeletions) Get(name string) (*imageapi.ImageStreamDeletion, error) { + obj, err := c.Fake.Invokes(ktestclient.NewRootGetAction("imagestreamdeletions", name), &imageapi.ImageStreamDeletion{}) + if obj == nil { + return nil, err + } + + return obj.(*imageapi.ImageStreamDeletion), err +} + +func (c *FakeImageStreamDeletions) List(label labels.Selector, field fields.Selector) (*imageapi.ImageStreamDeletionList, error) { + obj, err := c.Fake.Invokes(ktestclient.NewRootListAction("imagestreamdeletions", label, field), &imageapi.ImageStreamDeletionList{}) + if obj == nil { + return nil, err + } + + return obj.(*imageapi.ImageStreamDeletionList), err +} + +func (c *FakeImageStreamDeletions) Create(inObj *imageapi.ImageStreamDeletion) (*imageapi.ImageStreamDeletion, error) { + obj, err := c.Fake.Invokes(ktestclient.NewRootCreateAction("imagestreamdeletions", inObj), inObj) + if obj == nil { + return nil, err + } + + return obj.(*imageapi.ImageStreamDeletion), err +} + +func (c *FakeImageStreamDeletions) Delete(name string) error { + _, err := c.Fake.Invokes(ktestclient.NewRootDeleteAction("imagestreamdeletions", name), &imageapi.ImageStreamDeletion{}) + return err +} diff --git a/pkg/client/testclient/fake_imagestreamimages.go b/pkg/client/testclient/fake_imagestreamimages.go index 9a5e42ee7fe4..a35c97bfe5dd 100644 --- a/pkg/client/testclient/fake_imagestreamimages.go +++ b/pkg/client/testclient/fake_imagestreamimages.go @@ -4,6 +4,8 @@ import ( "fmt" ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "github.com/openshift/origin/pkg/client" imageapi "github.com/openshift/origin/pkg/image/api" @@ -29,3 +31,12 @@ func (c *FakeImageStreamImages) Get(repo, imageID string) (*imageapi.ImageStream return obj.(*imageapi.ImageStreamImage), err } + +func (c *FakeImageStreamImages) List(label labels.Selector, field fields.Selector) (*imageapi.ImageStreamImageList, error) { + obj, err := c.Fake.Invokes(ktestclient.NewListAction("imagestreamimages", c.Namespace, label, field), &imageapi.ImageStreamImageList{}) + if obj == nil { + return nil, err + } + + return obj.(*imageapi.ImageStreamImageList), err +} diff --git a/pkg/client/testclient/fake_imagestreams.go b/pkg/client/testclient/fake_imagestreams.go index 76e6ea8f7e76..2eb557264225 100644 --- a/pkg/client/testclient/fake_imagestreams.go +++ b/pkg/client/testclient/fake_imagestreams.go @@ -79,3 +79,18 @@ func (c *FakeImageStreams) UpdateStatus(inObj *imageapi.ImageStream) (result *im return obj.(*imageapi.ImageStream), err } + +func (c *FakeImageStreams) Finalize(inObj *imageapi.ImageStream) (result *imageapi.ImageStream, err error) { + action := ktestclient.CreateActionImpl{} + action.Verb = "create" + action.Resource = "imagestreams" + action.Subresource = "finalize" + action.Object = inObj + + obj, err := c.Fake.Invokes(action, inObj) + if obj == nil { + return nil, err + } + + return obj.(*imageapi.ImageStream), err +} diff --git a/pkg/cmd/admin/prune/images.go b/pkg/cmd/admin/prune/images.go index b7caf5f9df75..eb8150a0aacc 100644 --- a/pkg/cmd/admin/prune/images.go +++ b/pkg/cmd/admin/prune/images.go @@ -1,12 +1,9 @@ package prune import ( - "crypto/x509" "errors" "fmt" "io" - "io/ioutil" - "net/http" "os" "strings" "text/tabwriter" @@ -14,7 +11,6 @@ import ( "github.com/spf13/cobra" kapi 
"k8s.io/kubernetes/pkg/api" - kclient "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/labels" @@ -41,9 +37,6 @@ type PruneImagesOptions struct { Confirm bool KeepYoungerThan time.Duration KeepTagRevisions int - - CABundle string - RegistryUrlOverride string } // NewCmdPruneImages implements the OpenShift cli prune images command @@ -77,8 +70,6 @@ func NewCmdPruneImages(f *clientcmd.Factory, parentName, name string, out io.Wri cmd.Flags().BoolVar(&opts.Confirm, "confirm", opts.Confirm, "Specify that image pruning should proceed. Defaults to false, displaying what would be deleted but not actually deleting anything.") cmd.Flags().DurationVar(&opts.KeepYoungerThan, "keep-younger-than", opts.KeepYoungerThan, "Specify the minimum age of an image for it to be considered a candidate for pruning.") cmd.Flags().IntVar(&opts.KeepTagRevisions, "keep-tag-revisions", opts.KeepTagRevisions, "Specify the number of image revisions for a tag in an image stream that will be preserved.") - cmd.Flags().StringVar(&opts.CABundle, "certificate-authority", opts.CABundle, "The path to a certificate authority bundle to use when communicating with the managed Docker registries. Defaults to the certificate authority data from the current user's config file.") - cmd.Flags().StringVar(&opts.RegistryUrlOverride, "registry-url", opts.RegistryUrlOverride, "The address to use when contacting the registry, instead of using the default value. This is useful if you can't resolve or reach the registry (e.g.; the default is a cluster-internal URL) but you do have an alternative route that works.") return cmd } @@ -91,10 +82,18 @@ func (o *PruneImagesOptions) Complete(f *clientcmd.Factory, args []string, out i o.Out = out - osClient, kClient, registryClient, err := getClients(f, o.CABundle) + clientConfig, err := f.OpenShiftClientConfig.ClientConfig() + if err != nil { + return err + } + if len(clientConfig.BearerToken) > 0 { + return errors.New("You must use a client config with a token") + } + osClient, kClient, err := f.Clients() if err != nil { return err } + o.Client = osClient allImages, err := osClient.Images().List(labels.Everything(), fields.Everything()) @@ -147,8 +146,6 @@ func (o *PruneImagesOptions) Complete(f *clientcmd.Factory, args []string, out i Builds: allBuilds, DCs: allDCs, DryRun: o.Confirm == false, - RegistryClient: registryClient, - RegistryURL: o.RegistryUrlOverride, } o.Pruner = prune.NewImageRegistryPruner(options) @@ -178,21 +175,15 @@ func (o *PruneImagesOptions) RunPruneImages() error { imagePruner := &describingImagePruner{w: w} imageStreamPruner := &describingImageStreamPruner{w: w} - layerPruner := &describingLayerPruner{w: w} - blobPruner := &describingBlobPruner{w: w} - manifestPruner := &describingManifestPruner{w: w} if o.Confirm { imagePruner.delegate = prune.NewDeletingImagePruner(o.Client.Images()) imageStreamPruner.delegate = prune.NewDeletingImageStreamPruner(o.Client) - layerPruner.delegate = prune.NewDeletingLayerPruner() - blobPruner.delegate = prune.NewDeletingBlobPruner() - manifestPruner.delegate = prune.NewDeletingManifestPruner() } else { fmt.Fprintln(os.Stderr, "Dry run enabled - no modifications will be made. Add --confirm to remove images") } - return o.Pruner.Prune(imagePruner, imageStreamPruner, layerPruner, blobPruner, manifestPruner) + return o.Pruner.Prune(imagePruner, imageStreamPruner) } // describingImageStreamPruner prints information about each image stream update. 
@@ -256,177 +247,3 @@ func (p *describingImagePruner) PruneImage(image *imageapi.Image) error { return err } - -// describingLayerPruner prints information about each repo layer link being -// deleted. If a delegate exists, its PruneLayer function is invoked prior to -// returning. -type describingLayerPruner struct { - w io.Writer - delegate prune.LayerPruner - headerPrinted bool -} - -var _ prune.LayerPruner = &describingLayerPruner{} - -func (p *describingLayerPruner) PruneLayer(registryClient *http.Client, registryURL, repo, layer string) error { - if !p.headerPrinted { - p.headerPrinted = true - fmt.Fprintln(p.w, "\nDeleting registry repository layer links ...") - fmt.Fprintln(p.w, "REPO\tLAYER") - } - - fmt.Fprintf(p.w, "%s\t%s\n", repo, layer) - - if p.delegate == nil { - return nil - } - - err := p.delegate.PruneLayer(registryClient, registryURL, repo, layer) - if err != nil { - fmt.Fprintf(os.Stderr, "error deleting repository %s layer link %s from the registry: %v\n", repo, layer, err) - } - - return err -} - -// describingBlobPruner prints information about each blob being deleted. If a -// delegate exists, its PruneBlob function is invoked prior to returning. -type describingBlobPruner struct { - w io.Writer - delegate prune.BlobPruner - headerPrinted bool -} - -var _ prune.BlobPruner = &describingBlobPruner{} - -func (p *describingBlobPruner) PruneBlob(registryClient *http.Client, registryURL, layer string) error { - if !p.headerPrinted { - p.headerPrinted = true - fmt.Fprintln(p.w, "\nDeleting registry layer blobs ...") - fmt.Fprintln(p.w, "BLOB") - } - - fmt.Fprintf(p.w, "%s\n", layer) - - if p.delegate == nil { - return nil - } - - err := p.delegate.PruneBlob(registryClient, registryURL, layer) - if err != nil { - fmt.Fprintf(os.Stderr, "error deleting blob %s from the registry: %v\n", layer, err) - } - - return err -} - -// describingManifestPruner prints information about each repo manifest being -// deleted. If a delegate exists, its PruneManifest function is invoked prior -// to returning. -type describingManifestPruner struct { - w io.Writer - delegate prune.ManifestPruner - headerPrinted bool -} - -var _ prune.ManifestPruner = &describingManifestPruner{} - -func (p *describingManifestPruner) PruneManifest(registryClient *http.Client, registryURL, repo, manifest string) error { - if !p.headerPrinted { - p.headerPrinted = true - fmt.Fprintln(p.w, "\nDeleting registry repository manifest data ...") - fmt.Fprintln(p.w, "REPO\tIMAGE") - } - - fmt.Fprintf(p.w, "%s\t%s\n", repo, manifest) - - if p.delegate == nil { - return nil - } - - err := p.delegate.PruneManifest(registryClient, registryURL, repo, manifest) - if err != nil { - fmt.Fprintf(os.Stderr, "error deleting data for repository %s image manifest %s from the registry: %v\n", repo, manifest, err) - } - - return err -} - -// getClients returns a Kube client, OpenShift client, and registry client. 
-func getClients(f *clientcmd.Factory, caBundle string) (*client.Client, *kclient.Client, *http.Client, error) { - clientConfig, err := f.OpenShiftClientConfig.ClientConfig() - if err != nil { - return nil, nil, nil, err - } - - var ( - token string - osClient *client.Client - kClient *kclient.Client - registryClient *http.Client - ) - - switch { - case len(clientConfig.BearerToken) > 0: - osClient, kClient, err = f.Clients() - if err != nil { - return nil, nil, nil, err - } - token = clientConfig.BearerToken - default: - err = errors.New("You must use a client config with a token") - return nil, nil, nil, err - } - - // copy the config - registryClientConfig := *clientConfig - - // zero out everything we don't want to use - registryClientConfig.BearerToken = "" - registryClientConfig.CertFile = "" - registryClientConfig.CertData = []byte{} - registryClientConfig.KeyFile = "" - registryClientConfig.KeyData = []byte{} - - // we have to set a username to something for the Docker login - // but it's not actually used - registryClientConfig.Username = "unused" - - // set the "password" to be the token - registryClientConfig.Password = token - - tlsConfig, err := kclient.TLSConfigFor(®istryClientConfig) - if err != nil { - return nil, nil, nil, err - } - - // if the user specified a CA on the command line, add it to the - // client config's CA roots - if len(caBundle) > 0 { - data, err := ioutil.ReadFile(caBundle) - if err != nil { - return nil, nil, nil, err - } - - if tlsConfig.RootCAs == nil { - tlsConfig.RootCAs = x509.NewCertPool() - } - - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - - transport := http.Transport{ - TLSClientConfig: tlsConfig, - } - - wrappedTransport, err := kclient.HTTPWrappersForConfig(®istryClientConfig, &transport) - if err != nil { - return nil, nil, nil, err - } - - registryClient = &http.Client{ - Transport: wrappedTransport, - } - - return osClient, kClient, registryClient, nil -} diff --git a/pkg/cmd/cli/describe/describer.go b/pkg/cmd/cli/describe/describer.go index 03605db10d44..11c2377f1343 100644 --- a/pkg/cmd/cli/describe/describer.go +++ b/pkg/cmd/cli/describe/describer.go @@ -548,6 +548,7 @@ func (d *ImageStreamDescriber) Describe(namespace, name string) (string, error) return tabbedString(func(out *tabwriter.Writer) error { formatMeta(out, imageStream.ObjectMeta) formatString(out, "Docker Pull Spec", imageStream.Status.DockerImageRepository) + formatString(out, "Status", imageStream.Status.Phase) formatImageStreamTags(out, imageStream) return nil }) diff --git a/pkg/cmd/cli/describe/describer_test.go b/pkg/cmd/cli/describe/describer_test.go index 496d7f7fc85b..754222c6c4af 100644 --- a/pkg/cmd/cli/describe/describer_test.go +++ b/pkg/cmd/cli/describe/describer_test.go @@ -39,6 +39,7 @@ var DescriberCoverageExceptions = []reflect.Type{ reflect.TypeOf(&buildapi.BuildLogOptions{}), // normal users don't ever look at these reflect.TypeOf(&buildapi.BuildRequest{}), // normal users don't ever look at these reflect.TypeOf(&imageapi.DockerImage{}), // not a top level resource + reflect.TypeOf(&imageapi.ImageStreamDeletion{}), // normal users don't ever look at these reflect.TypeOf(&oauthapi.OAuthAccessToken{}), // normal users don't ever look at these reflect.TypeOf(&oauthapi.OAuthAuthorizeToken{}), // normal users don't ever look at these reflect.TypeOf(&oauthapi.OAuthClientAuthorization{}), // normal users don't ever look at these diff --git a/pkg/cmd/cli/describe/printer.go b/pkg/cmd/cli/describe/printer.go index 015deb1239e8..cee7e2984eea 100644 --- 
a/pkg/cmd/cli/describe/printer.go +++ b/pkg/cmd/cli/describe/printer.go @@ -28,10 +28,10 @@ import ( var ( buildColumns = []string{"NAME", "TYPE", "FROM", "STATUS", "STARTED"} buildConfigColumns = []string{"NAME", "TYPE", "FROM", "LATEST"} - imageColumns = []string{"NAME", "DOCKER REF"} + imageColumns = []string{"NAME", "DOCKER REF", "STATUS"} imageStreamTagColumns = []string{"NAME", "DOCKER REF", "UPDATED", "IMAGENAME"} imageStreamImageColumns = []string{"NAME", "DOCKER REF", "UPDATED", "IMAGENAME"} - imageStreamColumns = []string{"NAME", "DOCKER REPO", "TAGS", "UPDATED"} + imageStreamColumns = []string{"NAME", "DOCKER REPO", "TAGS", "UPDATED", "STATUS"} projectColumns = []string{"NAME", "DISPLAY NAME", "STATUS"} routeColumns = []string{"NAME", "HOST/PORT", "PATH", "SERVICE", "LABELS", "TLS TERMINATION"} deploymentColumns = []string{"NAME", "STATUS", "CAUSE"} @@ -71,6 +71,7 @@ func NewHumanReadablePrinter(noHeaders, withNamespace, wide bool, showAll bool, p.Handler(imageColumns, printImage) p.Handler(imageStreamTagColumns, printImageStreamTag) p.Handler(imageStreamImageColumns, printImageStreamImage) + p.Handler(imageStreamImageColumns, printImageStreamImageList) p.Handler(imageColumns, printImageList) p.Handler(imageStreamColumns, printImageStream) p.Handler(imageStreamColumns, printImageStreamList) @@ -282,7 +283,7 @@ func printBuildConfigList(buildList *buildapi.BuildConfigList, w io.Writer, with } func printImage(image *imageapi.Image, w io.Writer, withNamespace, wide, showAll bool, columnLabels []string) error { - _, err := fmt.Fprintf(w, "%s\t%s\n", image.Name, image.DockerImageReference) + _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", image.Name, image.DockerImageReference, image.Status.Phase) return err } @@ -308,6 +309,15 @@ func printImageStreamImage(isi *imageapi.ImageStreamImage, w io.Writer, withName return err } +func printImageStreamImageList(isImages *imageapi.ImageStreamImageList, w io.Writer, withNamespace, wide, showAll bool, columnLabels []string) error { + for _, isi := range isImages.Items { + if err := printImageStreamImage(&isi, w, withNamespace, wide, showAll, columnLabels); err != nil { + return err + } + } + return nil +} + func printImageList(images *imageapi.ImageList, w io.Writer, withNamespace, wide, showAll bool, columnLabels []string) error { for _, image := range images.Items { if err := printImage(&image, w, withNamespace, wide, showAll, columnLabels); err != nil { @@ -348,7 +358,7 @@ func printImageStream(stream *imageapi.ImageStream, w io.Writer, withNamespace, return err } } - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", stream.Name, stream.Status.DockerImageRepository, tags, latestTime) + _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", stream.Name, stream.Status.DockerImageRepository, tags, latestTime, stream.Status.Phase) return err } diff --git a/pkg/cmd/cli/describe/printer_test.go b/pkg/cmd/cli/describe/printer_test.go index 67462de3a082..aa0cce79e2cd 100644 --- a/pkg/cmd/cli/describe/printer_test.go +++ b/pkg/cmd/cli/describe/printer_test.go @@ -33,6 +33,10 @@ var PrinterCoverageExceptions = []reflect.Type{ reflect.TypeOf(&authorizationapi.ResourceAccessReview{}), reflect.TypeOf(&authorizationapi.LocalSubjectAccessReview{}), reflect.TypeOf(&authorizationapi.LocalResourceAccessReview{}), + + // just a marker of deleted image stream for registry pruner + reflect.TypeOf(&imageapi.ImageStreamDeletion{}), + reflect.TypeOf(&imageapi.ImageStreamDeletionList{}), } // MissingPrinterCoverageExceptions is the list of types that were missing printer methods 
when I started diff --git a/pkg/cmd/dockerregistry/dockerregistry.go b/pkg/cmd/dockerregistry/dockerregistry.go index 37f30616e135..e30e85e829d7 100644 --- a/pkg/cmd/dockerregistry/dockerregistry.go +++ b/pkg/cmd/dockerregistry/dockerregistry.go @@ -12,8 +12,6 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" "github.com/docker/distribution/registry/handlers" _ "github.com/docker/distribution/registry/storage/driver/filesystem" @@ -61,38 +59,16 @@ func Execute(configFile io.Reader) { } app.RegisterRoute( - // DELETE /admin/blobs/ - adminRouter.Path("/blobs/{digest:"+digest.DigestRegexp.String()+"}").Methods("DELETE"), + // POST /admin/prune + adminRouter.Path("/prune").Methods("POST"), // handler - server.BlobDispatcher, + server.PruneHandler, // repo name not required in url handlers.NameNotRequired, // custom access records pruneAccessRecords, ) - app.RegisterRoute( - // DELETE /admin//manifests/ - adminRouter.Path("/{name:"+v2.RepositoryNameRegexp.String()+"}/manifests/{digest:"+digest.DigestRegexp.String()+"}").Methods("DELETE"), - // handler - server.ManifestDispatcher, - // repo name required in url - handlers.NameRequired, - // custom access records - pruneAccessRecords, - ) - - app.RegisterRoute( - // DELETE /admin//layers/ - adminRouter.Path("/{name:"+v2.RepositoryNameRegexp.String()+"}/layers/{digest:"+digest.DigestRegexp.String()+"}").Methods("DELETE"), - // handler - server.LayerDispatcher, - // repo name required in url - handlers.NameRequired, - // custom access records - pruneAccessRecords, - ) - handler := gorillahandlers.CombinedLoggingHandler(os.Stdout, app) if config.HTTP.TLS.Certificate == "" { diff --git a/pkg/cmd/server/origin/master.go b/pkg/cmd/server/origin/master.go index ed2acb9523c2..090da5247e22 100644 --- a/pkg/cmd/server/origin/master.go +++ b/pkg/cmd/server/origin/master.go @@ -44,6 +44,7 @@ import ( imageetcd "github.com/openshift/origin/pkg/image/registry/image/etcd" "github.com/openshift/origin/pkg/image/registry/imagestream" imagestreametcd "github.com/openshift/origin/pkg/image/registry/imagestream/etcd" + imagestreamdeletionetcd "github.com/openshift/origin/pkg/image/registry/imagestreamdeletion/etcd" "github.com/openshift/origin/pkg/image/registry/imagestreamimage" "github.com/openshift/origin/pkg/image/registry/imagestreammapping" "github.com/openshift/origin/pkg/image/registry/imagestreamtag" @@ -374,15 +375,16 @@ func (c *MasterConfig) GetRestStorage() map[string]rest.Storage { resourceAccessReviewRegistry := resourceaccessreview.NewRegistry(resourceAccessReviewStorage) localResourceAccessReviewStorage := localresourceaccessreview.NewREST(resourceAccessReviewRegistry) - imageStorage := imageetcd.NewREST(c.EtcdHelper) - imageRegistry := image.NewRegistry(imageStorage) - imageStreamStorage, imageStreamStatusStorage := imagestreametcd.NewREST(c.EtcdHelper, imagestream.DefaultRegistryFunc(defaultRegistryFunc), subjectAccessReviewRegistry) - imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatusStorage) + imageStorage, imageStatusStorage, imageFinalizeStorage := imageetcd.NewREST(c.EtcdHelper) + imageRegistry := image.NewRegistry(imageStorage, imageStatusStorage, imageFinalizeStorage) + imageStreamStorage, imageStreamStatusStorage, imageStreamFinalizeStorage := 
imagestreametcd.NewREST(c.EtcdHelper, imagestream.DefaultRegistryFunc(defaultRegistryFunc), subjectAccessReviewRegistry) + imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatusStorage, imageStreamFinalizeStorage) imageStreamMappingStorage := imagestreammapping.NewREST(imageRegistry, imageStreamRegistry) imageStreamTagStorage := imagestreamtag.NewREST(imageRegistry, imageStreamRegistry) imageStreamTagRegistry := imagestreamtag.NewRegistry(imageStreamTagStorage) imageStreamImageStorage := imagestreamimage.NewREST(imageRegistry, imageStreamRegistry) imageStreamImageRegistry := imagestreamimage.NewRegistry(imageStreamImageStorage) + imageStreamDeletionStorage := imagestreamdeletionetcd.NewREST(c.EtcdHelper) buildGenerator := &buildgenerator.BuildGenerator{ Client: buildgenerator.Client{ @@ -434,12 +436,16 @@ func (c *MasterConfig) GetRestStorage() map[string]rest.Storage { ) storage := map[string]rest.Storage{ - "images": imageStorage, - "imageStreams": imageStreamStorage, - "imageStreams/status": imageStreamStatusStorage, - "imageStreamImages": imageStreamImageStorage, - "imageStreamMappings": imageStreamMappingStorage, - "imageStreamTags": imageStreamTagStorage, + "images": imageStorage, + "images/status": imageStatusStorage, + "images/finalize": imageFinalizeStorage, + "imageStreams": imageStreamStorage, + "imageStreams/status": imageStreamStatusStorage, + "imageStreams/finalize": imageStreamFinalizeStorage, + "imageStreamImages": imageStreamImageStorage, + "imageStreamMappings": imageStreamMappingStorage, + "imageStreamTags": imageStreamTagStorage, + "imageStreamDeletions": imageStreamDeletionStorage, "deploymentConfigs": deployConfigStorage, "generateDeploymentConfigs": deployconfiggenerator.NewREST(deployConfigGenerator, c.EtcdHelper.Codec()),
diff --git a/pkg/cmd/server/origin/master_config.go b/pkg/cmd/server/origin/master_config.go index bc3136baee00..ee731722ebc1 100644 --- a/pkg/cmd/server/origin/master_config.go +++ b/pkg/cmd/server/origin/master_config.go @@ -427,8 +427,8 @@ func (c *MasterConfig) ImageChangeControllerClient() *osclient.Client { return c.PrivilegedLoopbackOpenShiftClient } -// ImageImportControllerClient returns the deployment client object -func (c *MasterConfig) ImageImportControllerClient() *osclient.Client { +// ImageStreamControllerClient returns the client used by the image stream controller +func (c *MasterConfig) ImageStreamControllerClient() *osclient.Client { return c.PrivilegedLoopbackOpenShiftClient }
diff --git a/pkg/cmd/server/origin/run_components.go b/pkg/cmd/server/origin/run_components.go index e5d0c8b498ac..ca5f7a181494 100644 --- a/pkg/cmd/server/origin/run_components.go +++ b/pkg/cmd/server/origin/run_components.go @@ -339,10 +339,10 @@ func (c *MasterConfig) RunSDNController() { } } -// RunImageImportController starts the image import trigger controller process. -func (c *MasterConfig) RunImageImportController() { - osclient := c.ImageImportControllerClient() - factory := imagecontroller.ImportControllerFactory{ +// RunImageStreamController starts the image import trigger and stream termination controller process.
+func (c *MasterConfig) RunImageStreamController() { + osclient := c.ImageStreamControllerClient() + factory := imagecontroller.ImageStreamControllerFactory{ Client: osclient, } controller := factory.Create()
diff --git a/pkg/cmd/server/start/start_master.go b/pkg/cmd/server/start/start_master.go index 53e01ee9ee01..27a8789b41eb 100644 --- a/pkg/cmd/server/start/start_master.go +++ b/pkg/cmd/server/start/start_master.go @@ -558,7 +558,7 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro oc.RunDeploymentConfigController() oc.RunDeploymentConfigChangeController() oc.RunDeploymentImageChangeTriggerController() - oc.RunImageImportController() + oc.RunImageStreamController() oc.RunOriginNamespaceController() oc.RunSDNController()
diff --git a/pkg/dockerregistry/server/admin.go b/pkg/dockerregistry/server/admin.go index 2f5419bafaf8..aafb370eb106 100644 --- a/pkg/dockerregistry/server/admin.go +++ b/pkg/dockerregistry/server/admin.go @@ -4,148 +4,59 @@ import ( "fmt" "net/http" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/handlers" - storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" gorillahandlers "github.com/gorilla/handlers" ) -// BlobDispatcher takes the request context and builds the appropriate handler -// for handling blob requests. -func BlobDispatcher(ctx *handlers.Context, r *http.Request) http.Handler { - reference := ctxu.GetStringValue(ctx, "vars.digest") - dgst, _ := digest.ParseDigest(reference) - - blobHandler := &blobHandler{ +// PruneHandler takes the request context and builds an appropriate handler for +// handling prune requests. +func PruneHandler(ctx *handlers.Context, r *http.Request) http.Handler { + pruneHandler := &pruneHandler{ Context: ctx, - Digest: dgst, } return gorillahandlers.MethodHandler{ - "DELETE": http.HandlerFunc(blobHandler.Delete), + "POST": http.HandlerFunc(pruneHandler.Prune), } } -// blobHandler handles http operations on blobs. -type blobHandler struct { +// pruneHandler handles http operations on the registry +type pruneHandler struct { *handlers.Context - - Digest digest.Digest }
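Reviewer note: the per-object DELETE endpoints collapse into a single POST /admin/prune here. A client of the consolidated endpoint might look like the sketch below; triggerPrune, registryURL, and the pre-authenticated httpClient are hypothetical stand-ins, not part of this patch, and the client must carry the same admin credentials the old DELETE calls required.

package main

import (
	"fmt"
	"net/http"
)

// triggerPrune fires one prune run against the admin endpoint registered in
// dockerregistry.go. The handler answers 204 No Content when every orphaned
// object was removed.
func triggerPrune(httpClient *http.Client, registryURL string) error {
	req, err := http.NewRequest("POST", registryURL+"/admin/prune", nil)
	if err != nil {
		return err
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("prune failed: %s", resp.Status)
	}
	return nil
}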
-// Delete deletes the blob from the storage backend. -func (bh *blobHandler) Delete(w http.ResponseWriter, req *http.Request) { +// Prune collects orphaned objects and deletes them from registry's storage +// and OpenShift's etcd store. +func (ph *pruneHandler) Prune(w http.ResponseWriter, req *http.Request) { defer req.Body.Close() - if len(bh.Digest) == 0 { - bh.Errors.Push(v2.ErrorCodeBlobUnknown) - w.WriteHeader(http.StatusNotFound) + sd, err := factory.Create(ph.Config.Storage.Type(), ph.Config.Storage.Parameters()) + if err != nil { + ph.Errors = append(ph.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("failed to create storage driver: %v", err))) return } - err := bh.Registry().Blobs().Delete(bh.Digest) + reg, err := newRegistry(ph, ph.Namespace(), nil) if err != nil { - // Ignore PathNotFoundError - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - bh.Errors.PushErr(fmt.Errorf("error deleting blob %q: %v", bh.Digest, err)) - w.WriteHeader(http.StatusBadRequest) - return - } - } - - w.WriteHeader(http.StatusNoContent) -} - -// LayerDispatcher takes the request context and builds the appropriate handler -// for handling layer requests. -func LayerDispatcher(ctx *handlers.Context, r *http.Request) http.Handler { - reference := ctxu.GetStringValue(ctx, "vars.digest") - dgst, _ := digest.ParseDigest(reference) - - layerHandler := &layerHandler{ - Context: ctx, - Digest: dgst, - } - - return gorillahandlers.MethodHandler{ - "DELETE": http.HandlerFunc(layerHandler.Delete), - } -} - -// layerHandler handles http operations on layers. -type layerHandler struct { - *handlers.Context - - Digest digest.Digest -} - -// Delete deletes the layer link from the repository from the storage backend. -func (lh *layerHandler) Delete(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - - if len(lh.Digest) == 0 { - lh.Errors.Push(v2.ErrorCodeBlobUnknown) - w.WriteHeader(http.StatusNotFound) + ph.Errors = append(ph.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("failed to create registry: %v", err))) return } - err := lh.Repository.Layers().Delete(lh.Digest) + rg, err := LoadRegistryGraph(ph, reg.(*registry), sd) if err != nil { - // Ignore PathNotFoundError - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - lh.Errors.PushErr(fmt.Errorf("error unlinking layer %q from repo %q: %v", lh.Digest, lh.Repository.Name(), err)) - w.WriteHeader(http.StatusBadRequest) - return - } - } - - w.WriteHeader(http.StatusNoContent) -} - -// ManifestDispatcher takes the request context and builds the appropriate -// handler for handling manifest requests. -func ManifestDispatcher(ctx *handlers.Context, r *http.Request) http.Handler { - reference := ctxu.GetStringValue(ctx, "vars.digest") - dgst, _ := digest.ParseDigest(reference) - - manifestHandler := &manifestHandler{ - Context: ctx, - Digest: dgst, - } - - return gorillahandlers.MethodHandler{ - "DELETE": http.HandlerFunc(manifestHandler.Delete), - } -} - -// manifestHandler handles http operations on mainfests. -type manifestHandler struct { - *handlers.Context - - Digest digest.Digest -} - -// Delete deletes the manifest information from the repository from the storage -// backend.
-func (mh *manifestHandler) Delete(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - - if len(mh.Digest) == 0 { - mh.Errors.Push(v2.ErrorCodeManifestUnknown) - w.WriteHeader(http.StatusNotFound) + ph.Errors = append(ph.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("failed to read registry storage: %v", err))) return } - err := mh.Repository.Manifests().Delete(mh.Context, mh.Digest) - if err != nil { - // Ignore PathNotFoundError - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - mh.Errors.PushErr(fmt.Errorf("error deleting repo %q, manifest %q: %v", mh.Repository.Name(), mh.Digest, err)) - w.WriteHeader(http.StatusBadRequest) - return + rg.IgnoreErrors = true + errors := rg.PruneOrphanedObjects() + if len(errors) > 0 { + for _, err := range errors { + ph.Errors = append(ph.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("prune error: %v", err))) } + return } w.WriteHeader(http.StatusNoContent) diff --git a/pkg/dockerregistry/server/auth.go b/pkg/dockerregistry/server/auth.go index 457850c59562..f0f5bd9b2264 100644 --- a/pkg/dockerregistry/server/auth.go +++ b/pkg/dockerregistry/server/auth.go @@ -7,14 +7,13 @@ import ( "net/http" "strings" - kerrors "k8s.io/kubernetes/pkg/api/errors" - log "github.com/Sirupsen/logrus" - ctxu "github.com/docker/distribution/context" + context "github.com/docker/distribution/context" registryauth "github.com/docker/distribution/registry/auth" + kerrors "k8s.io/kubernetes/pkg/api/errors" + authorizationapi "github.com/openshift/origin/pkg/authorization/api" "github.com/openshift/origin/pkg/client" - "golang.org/x/net/context" ) func init() { @@ -76,17 +75,15 @@ func (ac *authChallenge) Error() string { return ac.err.Error() } -// ServeHTTP handles writing the challenge response -// by setting the challenge header and status code. -func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// SetHeaders sets the basic challenge header on the response. +func (ac *authChallenge) SetHeaders(w http.ResponseWriter) { // WWW-Authenticate response challenge header. 
// See https://tools.ietf.org/html/rfc6750#section-3 str := fmt.Sprintf("Basic realm=%s", ac.realm) if ac.err != nil { str = fmt.Sprintf("%s,error=%q", str, ac.Error()) } - w.Header().Add("WWW-Authenticate", str) - w.WriteHeader(http.StatusUnauthorized) + w.Header().Set("WWW-Authenticate", str) } // wrapErr wraps errors related to authorization in an authChallenge error that will present a WWW-Authenticate challenge response @@ -110,7 +107,7 @@ func (ac *AccessController) wrapErr(err error) error { // origin/pkg/cmd/dockerregistry/dockerregistry.go#Execute // docker/distribution/registry/handlers/app.go#appendAccessRecords func (ac *AccessController) Authorized(ctx context.Context, accessRecords ...registryauth.Access) (context.Context, error) { - req, err := ctxu.GetRequest(ctx) + req, err := context.GetRequest(ctx) if err != nil { return nil, ac.wrapErr(err) }
diff --git a/pkg/dockerregistry/server/graph.go b/pkg/dockerregistry/server/graph.go new file mode 100644 index 000000000000..3701c2f55723 --- /dev/null +++ b/pkg/dockerregistry/server/graph.go @@ -0,0 +1,558 @@ +package server + +import ( + "fmt" + "io" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/util/sets" + + imageapi "github.com/openshift/origin/pkg/image/api" +) + +// graph contains functions for collecting information about orphaned objects +// and their disposal. +// These functions will only reliably work on strongly consistent storage +// systems. +// https://en.wikipedia.org/wiki/Consistency_model + +// RefSet is a set of digests. +type RefSet map[digest.Digest]sets.Empty + +// NewRefSet creates a new RefSet instance with given items. +func NewRefSet(ds ...digest.Digest) RefSet { + rs := RefSet{} + rs.Insert(ds...) + return rs +} + +// Insert adds new digest entries into a set. +func (rs RefSet) Insert(ds ...digest.Digest) { + for _, d := range ds { + rs[d] = sets.Empty{} + } +} + +// Has returns true if given digest is contained in the set. +func (rs RefSet) Has(d digest.Digest) bool { + _, exists := rs[d] + return exists +} + +// Delete removes all given digests from a set if they exist. +func (rs RefSet) Delete(ds ...digest.Digest) { + for _, d := range ds { + delete(rs, d) + } +} + +// Difference returns a new set containing all the items in the rs set that are +// not present in the other. +func (rs RefSet) Difference(other RefSet) RefSet { + res := NewRefSet() + for d := range rs { + if !other.Has(d) { + res.Insert(d) + } + } + return res +} + +// Union returns a new set containing all the items from both sets. +func (rs RefSet) Union(other RefSet) RefSet { + res := NewRefSet() + for d := range rs { + res.Insert(d) + } + for d := range other { + res.Insert(d) + } + return res +} + +// ManifestRevisionGraphInfo stores information about a manifest revision. +type ManifestRevisionGraphInfo struct { + Tag string + Signatures []digest.Digest +} + +// ManifestRevisionReferences maps digests to corresponding manifest revision objects. +type ManifestRevisionReferences map[digest.Digest]*ManifestRevisionGraphInfo
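As an aside for reviewers, the RefSet helpers compose like an ordinary set type. A minimal sketch with made-up digests (usable only inside this package, where RefSet is defined):

func refSetDemo() {
	keep := NewRefSet(digest.Digest("sha256:aaa"), digest.Digest("sha256:bbb")) // made-up digests
	cand := NewRefSet(digest.Digest("sha256:bbb"), digest.Digest("sha256:ccc"))

	orphans := cand.Difference(keep) // {sha256:ccc}: candidates nothing keeps
	all := keep.Union(cand)          // all three digests

	_ = orphans.Has(digest.Digest("sha256:ccc")) // true
	all.Delete(digest.Digest("sha256:aaa"))      // removes the entry in place
}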
+// RepositoryGraphInfo contains information about a repository. +type RepositoryGraphInfo struct { + registryGraph *RegistryGraph + // LayersToDelete contains digests of repository layer links that are + // referenced only by manifest revisions marked for deletion. + LayersToDelete RefSet + // LayersToKeep contains digests of repository layer links that are + // referenced by at least one manifest revision that will be preserved. + LayersToKeep RefSet + // ManifestsToDelete contains manifest revisions for each image in + // OpenShift marked for deletion. + ManifestsToDelete ManifestRevisionReferences + // ManifestsToKeep contains manifest revisions loaded from OpenShift's etcd + // store not marked for deletion. + ManifestsToKeep ManifestRevisionReferences + + // TODO: collect tags +} + +// IsDirty returns true if the repository contains any orphaned objects. +func (rg *RepositoryGraphInfo) IsDirty() bool { + if len(rg.LayersToDelete) > 0 || len(rg.ManifestsToDelete) > 0 { + return true + } + return false +} + +// deleteLayerRefs marks given layer link and corresponding blob data +// references for deletion unless they are referenced by a manifest revision to +// be kept. +func (rg *RepositoryGraphInfo) deleteLayerRefs(refs ...digest.Digest) { + rs := NewRefSet(refs...) + toDelete := rs.Difference(rg.LayersToKeep) + rg.LayersToDelete = rg.LayersToDelete.Union(toDelete) + for lRef := range toDelete { + rg.registryGraph.deleteBlobRefs(lRef) + } +} + +// keepLayerRefs makes sure that given layer link and corresponding blob data +// references will be preserved. Overrides any prior and subsequent calls to +// deleteLayerRefs on the same reference. +func (rg *RepositoryGraphInfo) keepLayerRefs(refs ...digest.Digest) { + rg.LayersToDelete.Delete(refs...) + rg.LayersToKeep.Insert(refs...) + rg.registryGraph.keepBlobRefs(refs...) +} + +// newManifestRevisionInfo creates an abstraction for manifest revision and +// marks it either for deletion or preservation. +func (rg *RepositoryGraphInfo) newManifestRevisionInfo(ref digest.Digest, tag string, toKeep bool) (*ManifestRevisionGraphInfo, bool) { + var mi *ManifestRevisionGraphInfo + var isNew = false + + if mi, exists := rg.ManifestsToKeep[ref]; exists { + return mi, isNew + } + mi, exists := rg.ManifestsToDelete[ref] + if exists { + if toKeep { + delete(rg.ManifestsToDelete, ref) + } + } else { + mi = &ManifestRevisionGraphInfo{Tag: tag} + isNew = true + } + + if toKeep { + rg.ManifestsToKeep[ref] = mi + rg.registryGraph.keepBlobRefs(ref) + } else { + rg.ManifestsToDelete[ref] = mi + rg.registryGraph.deleteBlobRefs(ref) + } + + return mi, isNew +} + +// RegistryGraph stores which registry objects can be deleted and which must +// be preserved. Once an object is marked as preserved, neither it nor its +// dependencies can be marked for deletion. +type RegistryGraph struct { + ctx context.Context + driver driver.StorageDriver + reg *registry + // IgnoreErrors causes pruning process to continue when error occurs. + // All the errors will be accumulated and returned. + IgnoreErrors bool + + // BlobsToDelete holds digests of registry blobs that can be deleted from + // registry's storage. + BlobsToDelete RefSet + // BlobsToKeep holds digests of registry blobs that must be preserved. + BlobsToKeep RefSet + // ReposToDelete maps names of repositories to their abstraction objects. + // Corresponding image streams of contained entries were marked for + // deletion. + ReposToDelete map[string]*RepositoryGraphInfo + // ReposToKeep maps names of repositories that won't be deleted.
+ ReposToKeep map[string]*RepositoryGraphInfo +} + +// LoadRegistryGraph walks a filesystem and OpenShift's etcd store and collects +// all the information needed to clean up a registry. It returns a new instance +// of RegistryGraph. +func LoadRegistryGraph(ctx context.Context, reg *registry, driver driver.StorageDriver) (*RegistryGraph, error) { + rg := &RegistryGraph{ + ctx: ctx, + driver: driver, + reg: reg, + BlobsToDelete: NewRefSet(), + BlobsToKeep: NewRefSet(), + ReposToDelete: make(map[string]*RepositoryGraphInfo), + ReposToKeep: make(map[string]*RepositoryGraphInfo), + } + + // load imagestream deletions and their images from etcd + rg.reg.enumRepoKind = enumRepoDeletion + if err := rg.loadRepositories(false); err != nil { + return nil, err + } + + // load regular imagestreams and their images from etcd + rg.reg.enumRepoKind = enumRepoExisting + if err := rg.loadRepositories(true); err != nil { + return nil, err + } + + // load regular repositories from local storage and their images from etcd + rg.reg.enumRepoKind = enumRepoLocal + if err := rg.loadRepositories(true); err != nil { + return nil, err + } + + return rg, nil +} + +// PruneOrphanedObjects deletes: +// +// 1. Manifest revisions and their signatures for their corresponding images +// marked for deletion. +// 2. Repository layers belonging to images marked for deletion not referred +// to by any image being preserved. +// 3. Blob data referred to by any layer, signature or manifest revision being +// deleted and not referred to by any being preserved. +func (rg *RegistryGraph) PruneOrphanedObjects() []error { + errors := []error{} + + // collects all the image stream names whose repositories were successfully + // cleaned up and thus can be removed from imageStreamDeletions + imageStreamDeletionsToClean := make([]string, 0, len(rg.ReposToDelete)) + + for name, ri := range rg.ReposToDelete { + // delete local links and data from etcd + es := rg.cleanRepository(name) + errors = append(errors, es...) + if !rg.IgnoreErrors && len(es) > 0 { + return errors + } + if len(es) > 0 { + continue + } + + // delete repositories from local storage + if len(ri.ManifestsToKeep) == 0 { + v := storage.NewVacuum(rg.ctx, rg.driver) + err := v.RemoveRepository(name) + if err != nil { + errors = append(errors, err) + if !rg.IgnoreErrors { + return errors + } + continue + } + } + + imageStreamDeletionsToClean = append(imageStreamDeletionsToClean, name) + } + + rg.pruneImageStreamDeletions(imageStreamDeletionsToClean...) + + // clean up other repositories + for name := range rg.ReposToKeep { + errors = append(errors, rg.cleanRepository(name)...) + if !rg.IgnoreErrors && len(errors) > 0 { + return errors + } + } + + bd, err := storage.RegistryBlobDeleter(rg.reg.Namespace) + if err != nil { + return append(errors, err) + } + + // delete orphaned blobs + for ref := range rg.BlobsToDelete { + err := bd.Delete(rg.ctx, ref) + if err != nil { + errors = append(errors, err) + if !rg.IgnoreErrors { + return errors + } + } + } + + return errors +} + +// deleteBlobRefs marks given digests of blob data for deletion unless +// they are being preserved. +func (rg *RegistryGraph) deleteBlobRefs(refs ...digest.Digest) { + rs := NewRefSet(refs...) + toDelete := rs.Difference(rg.BlobsToKeep) + rg.BlobsToDelete = rg.BlobsToDelete.Union(toDelete) +}
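The two blob sets interact in one direction only: preservation wins over deletion, regardless of call order. A sketch of that invariant (package-internal, digest made up):

func keepWinsDemo() {
	rg := &RegistryGraph{BlobsToDelete: NewRefSet(), BlobsToKeep: NewRefSet()}
	d := digest.Digest("sha256:abc") // made-up digest

	rg.deleteBlobRefs(d) // queued for deletion
	rg.keepBlobRefs(d)   // rescued: dropped from BlobsToDelete, added to BlobsToKeep
	rg.deleteBlobRefs(d) // no-op: Difference(BlobsToKeep) filters it out again

	// now rg.BlobsToKeep.Has(d) == true and rg.BlobsToDelete.Has(d) == false
}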
+// keepBlobRefs marks given digests of blob data for preservation. Overrides +// any prior and subsequent call to deleteBlobRefs on any given digest. +func (rg *RegistryGraph) keepBlobRefs(refs ...digest.Digest) { + rg.BlobsToDelete.Delete(refs...) + rg.BlobsToKeep.Insert(refs...) +} + +// newRepositoryInfo creates a new graph abstraction for a repository and marks +// it either for deletion or preservation. +func (rg *RegistryGraph) newRepositoryInfo(name string, toKeep bool) (*RepositoryGraphInfo, bool) { + var rgi *RepositoryGraphInfo + var isNew = false + + if rgi, exists := rg.ReposToKeep[name]; exists { + return rgi, isNew + } + rgi, exists := rg.ReposToDelete[name] + if exists { + if toKeep { + delete(rg.ReposToDelete, name) + } + } else { + rgi = &RepositoryGraphInfo{ + registryGraph: rg, + LayersToDelete: NewRefSet(), + LayersToKeep: NewRefSet(), + ManifestsToDelete: make(ManifestRevisionReferences), + ManifestsToKeep: make(ManifestRevisionReferences), + } + isNew = true + } + + if toKeep { + rg.ReposToKeep[name] = rgi + } else { + rg.ReposToDelete[name] = rgi + } + + return rgi, isNew +} + +// loadRepositories iterates over repositories and collects information about +// all their manifest revisions, signatures and layers. Repository names are +// either fetched from etcd or local storage depending on registry's +// configuration. toKeep decides whether repositories will be marked for +// deletion or preservation. It has no influence on loaded manifest revisions +// and their dependent objects. +func (rg *RegistryGraph) loadRepositories(toKeep bool) error { + const bufLen = 512 + var ( + repoNames = make([]string, bufLen) + last = "" + ) + + for { + n, err := rg.reg.Repositories(rg.ctx, repoNames, last) + if err != nil && err != io.EOF { + return err + } + + // success + if err == io.EOF { + break + } + + for _, repoName := range repoNames[0:n] { + if _, exists := rg.ReposToKeep[repoName]; exists { + continue + } + if _, exists := rg.ReposToDelete[repoName]; exists { + continue + } + + repo, err := rg.reg.Repository(rg.ctx, repoName) + if err != nil { + log.Warnf("Failed to load repository %q: %v", repoName, err) + continue + } + _, err = rg.loadRepository(repoName, repo.(*repository), toKeep) + if err != nil { + log.Warnf("Failed to load repository %q: %v", repoName, err) + continue + } + } + + if n == 0 { + // defend against an empty page to avoid indexing repoNames[-1] + break + } + last = repoNames[n-1] + } + + return nil +} + +// loadRepository loads objects contained in the given repository and marks it +// either for deletion or preservation based on toKeep. +func (rg *RegistryGraph) loadRepository(repoName string, repo *repository, toKeep bool) (*RepositoryGraphInfo, error) { + ri, isNew := rg.newRepositoryInfo(repoName, toKeep) + if !isNew { + // all information already loaded + return ri, nil + } + + for _, kind := range []fields.Selector{ + // first enumerate images marked for deletion + enumManifestKindToDelete, + // then the regular ones + enumManifestKindToKeep} { + + manServ, err := repo.Manifests(rg.ctx, + makeGetCheckImageStreamOption(false), + makeChangeEnumKindOption(kind)) + if err != nil { + return nil, err + } + + manRefs, err := manServ.Enumerate() + if err != nil { + return nil, err + } + + for _, ref := range manRefs { + err := rg.loadManifestRevision(repo, repoName, ref, kind == enumManifestKindToKeep) + if err != nil { + log.Warnf("Failed to load manifest revision \"%s@%s\": %v", repoName, ref, err) + } + } + } + + return ri, nil +}
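loadRepositories above drives the paged catalog API exposed by Repositories. The general consumption pattern, distilled into a standalone sketch (drainCatalog is hypothetical; it assumes, as this vendored distribution does, that Namespace carries a Repositories method returning io.EOF on exhaustion):

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

func drainCatalog(ctx context.Context, reg distribution.Namespace) ([]string, error) {
	var names []string
	buf := make([]string, 512)
	last := ""
	for {
		n, err := reg.Repositories(ctx, buf, last)
		if err != nil && err != io.EOF {
			return names, err
		}
		names = append(names, buf[:n]...)
		if err == io.EOF || n == 0 {
			return names, nil // catalog exhausted
		}
		last = buf[n-1] // resume after the last name seen
	}
}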
+// loadManifestRevision loads a manifest from etcd and marks it either for +// deletion or preservation based on toKeep. All the dependent objects inherit +// this attribute if they are loaded for the first time. +func (rg *RegistryGraph) loadManifestRevision(repo distribution.Repository, repoName string, ref digest.Digest, toKeep bool) error { + var ri *RepositoryGraphInfo + manServ, err := repo.Manifests(rg.ctx, makeGetCheckImageStreamOption(false)) + if err != nil { + return err + } + manifest, err := manServ.Get(ref) + if err != nil { + return err + } + + ri, exists := rg.ReposToKeep[repoName] + if !exists { + ri, exists = rg.ReposToDelete[repoName] + if !exists { + return fmt.Errorf("cannot load manifest revision from unknown repository %q", repoName) + } + } + + mi, _ := ri.newManifestRevisionInfo(ref, manifest.Tag, toKeep) + + for _, layer := range manifest.FSLayers { + if toKeep { + ri.keepLayerRefs(layer.BlobSum) + } else { + ri.deleteLayerRefs(layer.BlobSum) + } + } + + signatures, err := repo.Signatures().Enumerate(ref) + if err != nil { + log.Warnf("Failed to list signatures of %s: %v", repoName+"@"+ref.String(), err) + } else { + if toKeep { + rg.keepBlobRefs(signatures...) + } else { + rg.deleteBlobRefs(signatures...) + } + mi.Signatures = signatures + } + + return nil +} + +// cleanRepository removes orphaned layers, signatures and manifest revisions +// from a repository. Manifests will be deleted from etcd and the rest from +// registry's storage. It doesn't remove the repository itself. +func (rg *RegistryGraph) cleanRepository(repoName string) []error { + var ( + errors []error + ri *RepositoryGraphInfo + ) + + repo, err := rg.reg.Repository(rg.ctx, repoName) + if err != nil { + return []error{err} + } + manServ, err := repo.Manifests(rg.ctx) + if err != nil { + return []error{err} + } + + ri, exists := rg.ReposToKeep[repoName] + if !exists { + ri, exists = rg.ReposToDelete[repoName] + } + if ri == nil || !ri.IsDirty() { + return errors + } + + for mRef, mi := range ri.ManifestsToDelete { + log.Infof("Deleting manifest revision %s:%s@%s with %d signatures", repoName, mi.Tag, mRef, len(mi.Signatures)) + err := manServ.Delete(mRef) + if err != nil { + log.Errorf("Failed to delete manifest revision %s:%s@%s: %v", repoName, mi.Tag, mRef, err) + errors = append(errors, err) + if !rg.IgnoreErrors { + return errors + } + // keep all dependent layers and signatures if the delete failed + rg.loadManifestRevision(repo, repoName, mRef, true) + } + } + + blobStore := repo.Blobs(rg.ctx) + for lRef := range ri.LayersToDelete { + err := blobStore.Delete(rg.ctx, lRef) + if err != nil { + errors = append(errors, err) + if !rg.IgnoreErrors && err != distribution.ErrBlobUnknown { + return errors + } + // don't delete layer data if a deletion of its link failed + rg.keepBlobRefs(lRef) + } + } + + return errors +}
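cleanRepository and PruneOrphanedObjects share one error-handling convention: accumulate failures when IgnoreErrors is set, otherwise stop at the first one but still return everything collected so far. Distilled into a self-contained sketch (runSteps is illustrative, not part of the patch):

func runSteps(steps []func() error, ignoreErrors bool) []error {
	var errs []error
	for _, step := range steps {
		if err := step(); err != nil {
			errs = append(errs, err)
			if !ignoreErrors {
				return errs // fail fast, reporting what was seen so far
			}
		}
	}
	return errs // best effort: all failures reported together
}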
+// pruneImageStreamDeletions removes image stream deletions from OpenShift's +// etcd store. Only names of repositories successfully purged should be among +// the arguments. +func (rg *RegistryGraph) pruneImageStreamDeletions(names ...string) []error { + errors := []error{} + + for _, name := range names { + nameParts := strings.SplitN(name, "/", 2) + if len(nameParts) != 2 { + errors = append(errors, fmt.Errorf("invalid repository name %q: it must be of the format <namespace>/<name>", name)) + continue + } + log.Debugf("Deleting image stream deletion %s", name) + deletionName := imageapi.DeletionNameForImageStream(nameParts[0], nameParts[1]) + err := rg.reg.osClient.ImageStreamDeletions().Delete(deletionName) + if err != nil { + errors = append(errors, fmt.Errorf("failed to delete image stream deletion %s: %v", name, err)) + if !rg.IgnoreErrors { + return errors + } + } + } + + return errors +}
diff --git a/pkg/dockerregistry/server/openshiftclient.go b/pkg/dockerregistry/server/openshiftclient.go index acbc186ac4ed..26025d60a8ed 100644 --- a/pkg/dockerregistry/server/openshiftclient.go +++ b/pkg/dockerregistry/server/openshiftclient.go @@ -22,23 +22,13 @@ func NewUserOpenShiftClient(bearerToken string) (*osclient.Client, error) { return client, nil } -func NewRegistryOpenShiftClient() (*osclient.Client, error) { +// NewRegistryOpenShiftClient returns a client interface for OpenShift +// resources built from environment variables. +func NewRegistryOpenShiftClient() (osclient.Interface, error) { config, err := openShiftClientConfig() if err != nil { return nil, err } - if !config.Insecure { - certData := os.Getenv("OPENSHIFT_CERT_DATA") - if len(certData) == 0 { - return nil, errors.New("OPENSHIFT_CERT_DATA is required") - } - certKeyData := os.Getenv("OPENSHIFT_KEY_DATA") - if len(certKeyData) == 0 { - return nil, errors.New("OPENSHIFT_KEY_DATA is required") - } - config.TLSClientConfig.CertData = []byte(certData) - config.TLSClientConfig.KeyData = []byte(certKeyData) - } client, err := osclient.New(config) if err != nil { return nil, fmt.Errorf("error creating Origin client: %s", err) @@ -46,6 +36,20 @@ func NewRegistryOpenShiftClient() (*osclient.Client, error) { return client, nil } +// NewRegistryKubernetesClient returns a client interface for Kubernetes +// resources built from environment variables.
+func NewRegistryKubernetesClient() (kclient.Interface, error) { + config, err := openShiftClientConfig() + if err != nil { + return nil, err + } + client, err := kclient.New(config) + if err != nil { + return nil, fmt.Errorf("error creating Kubernetes client: %s", err) + } + return client, nil +} + func openShiftClientConfig() (*kclient.Config, error) { openshiftAddr := os.Getenv("OPENSHIFT_MASTER") if len(openshiftAddr) == 0 { @@ -59,9 +63,19 @@ func openShiftClientConfig() (*kclient.Config, error) { if len(caData) == 0 { return nil, errors.New("OPENSHIFT_CA_DATA is required") } - tlsClientConfig = kclient.TLSClientConfig{ - CAData: []byte(caData), + tlsClientConfig.CAData = []byte(caData) + + certData := os.Getenv("OPENSHIFT_CERT_DATA") + if len(certData) == 0 { + return nil, errors.New("OPENSHIFT_CERT_DATA is required") + } + tlsClientConfig.CertData = []byte(certData) + + certKeyData := os.Getenv("OPENSHIFT_KEY_DATA") + if len(certKeyData) == 0 { + return nil, errors.New("OPENSHIFT_KEY_DATA is required") } + tlsClientConfig.KeyData = []byte(certKeyData) } return &kclient.Config{
diff --git a/pkg/dockerregistry/server/registrymiddleware.go b/pkg/dockerregistry/server/registrymiddleware.go new file mode 100644 index 000000000000..131bb2082cbd --- /dev/null +++ b/pkg/dockerregistry/server/registrymiddleware.go @@ -0,0 +1,212 @@ +package server + +import ( + "errors" + "fmt" + "os" + "path" + "sort" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/v2" + regmw "github.com/docker/distribution/registry/middleware/registry" + kapi "k8s.io/kubernetes/pkg/api" + kclient "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + + "github.com/openshift/origin/pkg/client" +) + +// enumRepoKind determines what kind of repositories the registry will enumerate. +type enumRepoKind int + +const ( + // enumRepoExisting makes Repositories fetch repository names from + // imageStreams folder in etcd store. + enumRepoExisting = iota + // enumRepoDeletion makes Repositories fetch repository names from + // imageStreamDeletions folder in etcd store. + enumRepoDeletion + // enumRepoLocal makes Repositories walk registry's storage. + enumRepoLocal +) + +func init() { + regmw.Register("openshift", regmw.InitFunc(newRegistry)) +} + +// Sort namespaces by name +type ByNSName []kapi.Namespace + +func (a ByNSName) Len() int { return len(a) } +func (a ByNSName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByNSName) Less(i, j int) bool { return a[i].Name < a[j].Name } + +type registry struct { + distribution.Namespace + + ctx context.Context + kubeClient kclient.Interface + osClient client.Interface + registryAddr string + enumRepoKind enumRepoKind +} + +// newRegistry returns a new registry middleware. +func newRegistry(ctx context.Context, reg distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) { + registryAddr := os.Getenv("REGISTRY_URL") + if len(registryAddr) == 0 { + return nil, errors.New("REGISTRY_URL is required") + } + + kclient, err := NewRegistryKubernetesClient() + if err != nil { + return nil, err + } + osClient, err := NewRegistryOpenShiftClient() + if err != nil { + return nil, err + } + + return &registry{ + Namespace: reg, + kubeClient: kclient, + osClient: osClient, + registryAddr: registryAddr, + }, nil +} + +// Scope returns the namespace scope for a registry.
The registry +// will only serve repositories contained within this scope. +func (reg *registry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +// Repository returns an instance of the repository tied to the registry. +// Instances should not be shared between goroutines but are cheap to +// allocate. In general, they should be request scoped. +func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) { + if err := v2.ValidateRepositoryName(name); err != nil { + return nil, distribution.ErrRepositoryNameInvalid{ + Name: name, + Reason: err, + } + } + + repo, err := reg.Namespace.Repository(ctx, name) + if err != nil { + return nil, err + } + + nameParts := strings.SplitN(repo.Name(), "/", 2) + if len(nameParts) != 2 { + return nil, fmt.Errorf("invalid repository name %q: it must be of the format <namespace>/<name>", repo.Name()) + } + + return &repository{ + Repository: repo, + ctx: ctx, + registryClient: reg.osClient, + registryAddr: reg.registryAddr, + namespace: nameParts[0], + name: nameParts[1], + }, nil +} + +// Repositories fills 'repos' with a lexicographically sorted catalog of repositories +// up to the size of 'repos' and returns the value 'n' for the number of entries +// which were filled. 'last' contains an offset in the catalog, and 'err' will be +// set to io.EOF if there are no more entries to obtain. +// Returned repository names will either be fetched from etcd or local storage based +// on enumRepoKind setting. +func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + if reg.enumRepoKind == enumRepoLocal { + return reg.Namespace.Repositories(ctx, repos, last) + } + + lastNS := "" + lastName := "" + if last != "" { + nameParts := strings.SplitN(last, "/", 2) + if len(nameParts) != 2 { + return 0, fmt.Errorf("invalid repository name %q: it must be of the format <namespace>/<name>", last) + } + lastNS = nameParts[0] + lastName = nameParts[1] + } + + if len(repos) == 0 { + return + } + + if reg.enumRepoKind == enumRepoExisting { + nsList, err := reg.kubeClient.Namespaces().List(nil, nil) + if err != nil { + return 0, err + } + + sort.Sort(ByNSName(nsList.Items)) + + for _, ns := range nsList.Items { + if lastNS != "" && ns.Name < lastNS { + continue + } + + isList, err := reg.osClient.ImageStreams(ns.Name).List(labels.Everything(), fields.Everything()) + if err != nil { + log.Warnf("Failed to list image streams of %q namespace: %v", ns.Name, err) + continue + } + isNames := make([]string, 0, len(isList.Items)) + for _, is := range isList.Items { + if lastNS == ns.Name && is.Name <= lastName { + continue + } + isNames = append(isNames, is.Name) + } + + sort.Sort(sort.StringSlice(isNames)) + + for _, name := range isNames { + repos[n] = path.Join(ns.Name, name) + n++ + if n >= len(repos) { + break + } + } + + if n >= len(repos) { + // stop once the caller's buffer is full to avoid writing past it + break + } + } + } else { + isdList, err := reg.osClient.ImageStreamDeletions().List(labels.Everything(), fields.Everything()) + if err != nil { + return 0, fmt.Errorf("Failed to list image stream deletions: %v", err) + } + isNames := make([]string, 0, len(isdList.Items)) + for _, isd := range isdList.Items { + isNameParts := strings.SplitN(isd.Name, ":", 2) + if len(isNameParts) != 2 { + log.Warnf("invalid name of image stream deletion %q expected <namespace>:<name>", isd.Name) + continue + } + name := path.Join(isNameParts...)
+ if name > last { + isNames = append(isNames, name) + } + } + + sort.Sort(sort.StringSlice(isNames)) + + for _, name := range isNames { + repos[n] = name + n++ + if n >= len(repos) { + break + } + } + } + + return +}
diff --git a/pkg/dockerregistry/server/repositorymiddleware.go b/pkg/dockerregistry/server/repositorymiddleware.go index 82c44a10da24..70a95a25ae97 100644 --- a/pkg/dockerregistry/server/repositorymiddleware.go +++ b/pkg/dockerregistry/server/repositorymiddleware.go @@ -10,32 +10,62 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" repomw "github.com/docker/distribution/registry/middleware/repository" "github.com/docker/libtrust" - "github.com/openshift/origin/pkg/client" - imageapi "github.com/openshift/origin/pkg/image/api" - "golang.org/x/net/context" kapi "k8s.io/kubernetes/pkg/api" kerrors "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + + "github.com/openshift/origin/pkg/client" + imageapi "github.com/openshift/origin/pkg/image/api" +) + +var ( + // enumManifestKindToKeep filters images having DeletionTimestamp unset. + enumManifestKindToKeep fields.Selector + // enumManifestKindToDelete filters images having DeletionTimestamp set. + enumManifestKindToDelete fields.Selector + // enumManifestKindAll makes the Enumerate method return all images found. + enumManifestKindAll fields.Selector ) func init() { repomw.Register("openshift", repomw.InitFunc(newRepository)) + + var err error + enumManifestKindToKeep, err = fields.ParseSelector("image.status.phase!=" + imageapi.ImagePurging) + if err != nil { + panic(err.Error()) + } + + enumManifestKindToDelete, err = fields.ParseSelector("image.status.phase==" + imageapi.ImagePurging) + if err != nil { + panic(err.Error()) + } + + enumManifestKindAll = fields.Everything() } type repository struct { distribution.Repository - registryClient *client.Client + ctx context.Context + registryClient client.Interface registryAddr string namespace string name string + // getNoCheckImageStream prevents Get() function from performing an + // existence check on the image stream. + getNoCheckImageStream bool + imageFieldSelector fields.Selector } // newRepository returns a new repository middleware. -func newRepository(repo distribution.Repository, options map[string]interface{}) (distribution.Repository, error) { +func newRepository(ctx context.Context, repo distribution.Repository, options map[string]interface{}) (distribution.Repository, error) { registryAddr := os.Getenv("REGISTRY_URL") if len(registryAddr) == 0 { return nil, errors.New("REGISTRY_URL is required") @@ -60,14 +90,52 @@ func newRepository(repo distribution.Repository, options map[string]interface{}) }, nil }
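The selectors initialized above are what getImages later feeds into the List call, so image phase filtering happens server-side. A hedged sketch of using the same mechanics directly (listPurging and its arguments are hypothetical):

func listPurging(osClient client.Interface, namespace string) (*imageapi.ImageStreamImageList, error) {
	sel, err := fields.ParseSelector("image.status.phase==" + imageapi.ImagePurging)
	if err != nil {
		return nil, err
	}
	// Only images already marked Purging come back, mirroring what
	// getImages does with r.imageFieldSelector.
	return osClient.ImageStreamImages(namespace).List(labels.Everything(), sel)
}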
-// Manifests returns r, which implements distribution.ManifestService. -func (r *repository) Manifests() distribution.ManifestService { - return r +// makeChangeEnumKindOption constructs a manifest service option causing the +// service to enumerate the chosen kind of manifest revisions. +func makeChangeEnumKindOption(kind fields.Selector) distribution.ManifestServiceOption { + return func(manServ distribution.ManifestService) error { + repo, ok := manServ.(*repository) + if !ok { + return fmt.Errorf("unsupported type of manifest service (%T != %T)", manServ, &repository{}) + } + repo.imageFieldSelector = kind + return nil + } +} + +// makeGetCheckImageStreamOption constructs a manifest service option controlling +// whether Get performs an existence check on the image stream. +func makeGetCheckImageStreamOption(check bool) distribution.ManifestServiceOption { + return func(manServ distribution.ManifestService) error { + repo, ok := manServ.(*repository) + if !ok { + return fmt.Errorf("unsupported type of manifest service (%T != %T)", manServ, &repository{}) + } + repo.getNoCheckImageStream = !check + return nil + } +} + +// Manifests returns manifest service with given options applied. +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + if r.ctx != ctx && len(options) == 0 { + return r, nil + } + repo := repository(*r) + repo.ctx = ctx + + for _, opt := range options { + if err := opt(&repo); err != nil { + return nil, err + } + } + + return &repo, nil } // Tags lists the tags under the named repository. -func (r *repository) Tags(ctx context.Context) ([]string, error) { - imageStream, err := r.getImageStream(ctx) +func (r *repository) Tags() ([]string, error) { + imageStream, err := r.getImageStream() if err != nil { return []string{}, nil } @@ -80,7 +148,7 @@ func (r *repository) Tags(ctx context.Context) ([]string, error) { } // Exists returns true if the manifest specified by dgst exists. -func (r *repository) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { +func (r *repository) Exists(dgst digest.Digest) (bool, error) { image, err := r.getImage(dgst) if err != nil { return false, err @@ -89,8 +157,8 @@ func (r *repository) Exists(ctx context.Context, dgst digest.Digest) (bool, erro } // ExistsByTag returns true if the manifest with tag `tag` exists. -func (r *repository) ExistsByTag(ctx context.Context, tag string) (bool, error) { - imageStream, err := r.getImageStream(ctx) +func (r *repository) ExistsByTag(tag string) (bool, error) { + imageStream, err := r.getImageStream() if err != nil { return false, err } @@ -99,10 +167,12 @@ func (r *repository) ExistsByTag(ctx context.Context, tag string) (bool, error) } // Get retrieves the manifest with digest `dgst`. -func (r *repository) Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) { - if _, err := r.getImageStreamImage(ctx, dgst); err != nil { - log.Errorf("Error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err) - return nil, err +func (r *repository) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + if !r.getNoCheckImageStream { + if _, err := r.getImageStreamImage(dgst); err != nil { + log.Errorf("Error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err) + return nil, err + } } image, err := r.getImage(dgst) @@ -114,9 +184,35 @@ func (r *repository) Get(ctx context.Context, dgst digest.Digest) (*manifest.Sig return r.manifestFromImage(image) }
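The two options compose; the pruner's combination in loadRepository condenses to the sketch below (enumerateDoomed is illustrative only; it leans on this vendored distribution's manifest service exposing Enumerate, as graph.go does):

func enumerateDoomed(ctx context.Context, repo distribution.Repository) ([]digest.Digest, error) {
	// Ask for a manifest service that sees only images marked for deletion
	// and skips the image stream existence check, as the pruner does.
	manServ, err := repo.Manifests(ctx,
		makeChangeEnumKindOption(enumManifestKindToDelete),
		makeGetCheckImageStreamOption(false),
	)
	if err != nil {
		return nil, err
	}
	return manServ.Enumerate()
}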
+// Enumerate retrieves digests of manifest revisions in this repository. +func (r *repository) Enumerate() ([]digest.Digest, error) { + images, err := r.getImages() + if err != nil { + log.Errorf("Error enumerating images: %v", err) + return nil, err + } + + res := make([]digest.Digest, 0, len(images.Items)) + for _, img := range images.Items { + dgst, err := digest.ParseDigest(img.Image.Name) + if err != nil { + log.Warnf("Failed to parse image name \"%s\" into digest: %v", img.Image.Name, err) + } else { + res = append(res, dgst) + } + } + + return res, nil +} + // GetByTag retrieves the named manifest with the provided tag -func (r *repository) GetByTag(ctx context.Context, tag string) (*manifest.SignedManifest, error) { - imageStreamTag, err := r.getImageStreamTag(ctx, tag) +func (r *repository) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + for _, opt := range options { + if err := opt(r); err != nil { + return nil, err + } + } + imageStreamTag, err := r.getImageStreamTag(tag) if err != nil { log.Errorf("Error getting ImageStreamTag %q: %v", tag, err) return nil, err @@ -139,7 +235,7 @@ func (r *repository) GetByTag(ctx context.Context, tag string) (*manifest.Signed } // Put creates or updates the named manifest. -func (r *repository) Put(ctx context.Context, manifest *manifest.SignedManifest) error { +func (r *repository) Put(manifest *manifest.SignedManifest) error { // Resolve the payload in the manifest. payload, err := manifest.Payload() if err != nil { @@ -191,7 +287,7 @@ func (r *repository) Put(ctx context.Context, manifest *manifest.SignedManifest) }, } - client, ok := UserClientFrom(ctx) + client, ok := UserClientFrom(r.ctx) if !ok { log.Errorf("Error creating user client to auto provision image stream: Origin user client unavailable") return statusErr @@ -228,12 +324,17 @@ func (r *repository) Put(ctx context.Context, manifest *manifest.SignedManifest) // Delete deletes the manifest with digest `dgst`. Note: Image resources // in OpenShift are deleted via 'oadm prune images'. This function deletes // the content related to the manifest in the registry's storage (signatures). -func (r *repository) Delete(ctx context.Context, dgst digest.Digest) error { - return r.Repository.Manifests().Delete(ctx, dgst) +func (r *repository) Delete(dgst digest.Digest) error { + manServ, err := r.Repository.Manifests(r.ctx) + if err != nil { + return err + } + // TODO: run finalize on image object + return manServ.Delete(dgst) } // getImageStream retrieves the ImageStream for r. -func (r *repository) getImageStream(ctx context.Context) (*imageapi.ImageStream, error) { +func (r *repository) getImageStream() (*imageapi.ImageStream, error) { return r.registryClient.ImageStreams(r.namespace).Get(r.name) } @@ -242,15 +343,20 @@ func (r *repository) getImage(dgst digest.Digest) (*imageapi.Image, error) { return r.registryClient.Images().Get(dgst.String()) } +// getImages retrieves repository's ImageStreamImageList. +func (r *repository) getImages() (*imageapi.ImageStreamImageList, error) { + return r.registryClient.ImageStreamImages(r.namespace).List(labels.Everything(), r.imageFieldSelector) +} + // getImageStreamTag retrieves the Image with tag `tag` for the ImageStream // associated with r.
-func (r *repository) getImageStreamTag(ctx context.Context, tag string) (*imageapi.ImageStreamTag, error) { +func (r *repository) getImageStreamTag(tag string) (*imageapi.ImageStreamTag, error) { return r.registryClient.ImageStreamTags(r.namespace).Get(r.name, tag) } // getImageStreamImage retrieves the Image with digest `dgst` for the ImageStream // associated with r. This ensures the image belongs to the image stream. -func (r *repository) getImageStreamImage(ctx context.Context, dgst digest.Digest) (*imageapi.ImageStreamImage, error) { +func (r *repository) getImageStreamImage(dgst digest.Digest) (*imageapi.ImageStreamImage, error) { return r.registryClient.ImageStreamImages(r.namespace).Get(r.name, dgst.String()) }
diff --git a/pkg/image/api/helper.go b/pkg/image/api/helper.go index 0d40eba502b7..144eb2ca83ae 100644 --- a/pkg/image/api/helper.go +++ b/pkg/image/api/helper.go @@ -5,9 +5,11 @@ import ( "fmt" "strings" + kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/sets" + "github.com/docker/distribution/digest" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/sets" ) // DockerDefaultNamespace is the value for namespace when a single segment name is provided. @@ -394,3 +396,19 @@ func ResolveImageID(stream *ImageStream, imageID string) sets.String { } return set } + +// DeletionNameForImageStream returns the name of the ImageStreamDeletion +// marker for the given image stream namespace and name. +func DeletionNameForImageStream(namespace string, name string) string { + return fmt.Sprintf("%s:%s", namespace, name) +} + +// NewDeletionForImageStream creates an ImageStreamDeletion marker for the +// given image stream. +func NewDeletionForImageStream(stream *ImageStream) (*ImageStreamDeletion, error) { + if stream.Namespace == "" || stream.Name == "" { + return nil, fmt.Errorf("cannot create ImageStreamDeletion for stream with namespace or name unset") + } + return &ImageStreamDeletion{ + ObjectMeta: kapi.ObjectMeta{ + Namespace: stream.Namespace, + Name: DeletionNameForImageStream(stream.Namespace, stream.Name), + }, + }, nil +}
diff --git a/pkg/image/api/register.go b/pkg/image/api/register.go index 006d3e531026..194fdd977eb6 100644 --- a/pkg/image/api/register.go +++ b/pkg/image/api/register.go @@ -13,15 +13,21 @@ func init() { &ImageStreamMapping{}, &ImageStreamTag{}, &ImageStreamImage{}, + &ImageStreamImageList{}, + &ImageStreamDeletion{}, + &ImageStreamDeletionList{}, &DockerImage{}, ) } -func (*Image) IsAnAPIObject() {} -func (*ImageList) IsAnAPIObject() {} -func (*DockerImage) IsAnAPIObject() {} -func (*ImageStream) IsAnAPIObject() {} -func (*ImageStreamList) IsAnAPIObject() {} -func (*ImageStreamMapping) IsAnAPIObject() {} -func (*ImageStreamTag) IsAnAPIObject() {} -func (*ImageStreamImage) IsAnAPIObject() {} +func (*Image) IsAnAPIObject() {} +func (*ImageList) IsAnAPIObject() {} +func (*DockerImage) IsAnAPIObject() {} +func (*ImageStream) IsAnAPIObject() {} +func (*ImageStreamList) IsAnAPIObject() {} +func (*ImageStreamMapping) IsAnAPIObject() {} +func (*ImageStreamTag) IsAnAPIObject() {} +func (*ImageStreamImage) IsAnAPIObject() {} +func (*ImageStreamImageList) IsAnAPIObject() {} +func (*ImageStreamDeletion) IsAnAPIObject() {} +func (*ImageStreamDeletionList) IsAnAPIObject() {}
diff --git a/pkg/image/api/types.go b/pkg/image/api/types.go index 69d86bfce601..cd0efa6b6a3a 100644 --- a/pkg/image/api/types.go +++ b/pkg/image/api/types.go @@ -29,6 +29,20 @@ const ( DefaultImageTag = "latest" ) +// ImageStatus is information about the current status of an Image. +type ImageStatus struct { + // Phase is the current lifecycle phase of the image. + Phase string +}
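The two helpers pair up: NewDeletionForImageStream produces the marker object and DeletionNameForImageStream defines its name. A small usage sketch (markStreamDeleted and the stream names are made up):

func markStreamDeleted(stream *imageapi.ImageStream) (*imageapi.ImageStreamDeletion, error) {
	// NewDeletionForImageStream refuses streams without a namespace or
	// name, so check the error.
	deletion, err := imageapi.NewDeletionForImageStream(stream)
	if err != nil {
		return nil, err
	}
	// For a stream "myproject/myapp", deletion.Name is "myproject:myapp",
	// i.e. DeletionNameForImageStream("myproject", "myapp").
	return deletion, nil
}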
+const ( + // ImageAvailable means the image is available for use in the system + ImageAvailable string = "Available" + // ImagePurging means the image is going to be deleted during the next run of the registry's pruner + ImagePurging string = "Purging" +) + // Image is an immutable representation of a Docker image and metadata at a point in time. type Image struct { kapi.TypeMeta @@ -42,6 +56,10 @@ type Image struct { DockerImageMetadataVersion string // The raw JSON of the manifest DockerImageManifest string + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []kapi.FinalizerName + // Status describes the current status of an Image + Status ImageStatus } // ImageStreamList is a list of ImageStream objects. @@ -71,6 +89,8 @@ type ImageStreamSpec struct { DockerImageRepository string // Tags map arbitrary string values to specific image locators Tags map[string]TagReference + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []kapi.FinalizerName } // TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. @@ -81,6 +101,14 @@ type TagReference struct { From *kapi.ObjectReference } +// These are the valid phases of an image stream. +const ( + // ImageStreamAvailable means the image stream is available for use in the system + ImageStreamAvailable string = "Available" + // ImageStreamTerminating means the image stream is being deleted + ImageStreamTerminating string = "Terminating" +) + // ImageStreamStatus contains information about the state of this image stream. type ImageStreamStatus struct { // DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server @@ -89,6 +117,8 @@ type ImageStreamStatus struct { // A historical record of images associated with each tag. The first entry in the TagEvent array is // the currently tagged image. Tags map[string]TagEventList + // Phase is the current lifecycle phase of the image stream. + Phase string } // TagEventList contains a historical record of images associated with a tag. @@ -129,6 +159,14 @@ type ImageStreamTag struct { Image Image } +// ImageStreamImageList is a list of image stream image objects. +type ImageStreamImageList struct { + kapi.TypeMeta + kapi.ListMeta + + Items []ImageStreamImage +} + // ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. type ImageStreamImage struct { kapi.TypeMeta @@ -138,6 +176,21 @@ type ImageStreamImage struct { Image Image } +// ImageStreamDeletionList is a list of image stream deletion objects. +type ImageStreamDeletionList struct { + kapi.TypeMeta + kapi.ListMeta + + Items []ImageStreamDeletion +} + +// ImageStreamDeletion represents an ImageStream that has been deleted from +// the etcd store and is awaiting garbage collection in the internal registry. +type ImageStreamDeletion struct { + kapi.TypeMeta + kapi.ObjectMeta +} + // DockerImageReference points to a Docker image.
type DockerImageReference struct { Registry string diff --git a/pkg/image/api/v1/conversion.go b/pkg/image/api/v1/conversion.go index 942be8673fda..691b9d4728a9 100644 --- a/pkg/image/api/v1/conversion.go +++ b/pkg/image/api/v1/conversion.go @@ -5,8 +5,10 @@ import ( "sort" kapi "k8s.io/kubernetes/pkg/api" + kapiv1 "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/conversion" + oapi "github.com/openshift/origin/pkg/api" newer "github.com/openshift/origin/pkg/image/api" ) @@ -30,7 +32,11 @@ func convert_api_Image_To_v1_Image(in *newer.Image, out *Image, s conversion.Sco out.DockerImageMetadata.RawJSON = data out.DockerImageMetadataVersion = version - return nil + out.Finalizers = make([]kapiv1.FinalizerName, 0, 1) + if err := s.Convert(&in.Finalizers, &out.Finalizers, 0); err != nil { + return err + } + return s.Convert(&in.Status, &out.Status, 0) } func convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Scope) error { @@ -60,13 +66,46 @@ func convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Sco } out.DockerImageMetadataVersion = version + out.Finalizers = make([]kapi.FinalizerName, 0, 1) + if err := s.Convert(&in.Finalizers, &out.Finalizers, 0); err != nil { + return err + } + + if err := s.Convert(&in.Status, &out.Status, 0); err != nil { + return err + } + + if out.DeletionTimestamp == nil { + // make sure that finalizers contain origin + hasOriginFinalizer := false + for _, finalizer := range out.Finalizers { + if finalizer == oapi.FinalizerOrigin { + hasOriginFinalizer = true + break + } + } + if !hasOriginFinalizer { + out.Finalizers = append(out.Finalizers, oapi.FinalizerOrigin) + } + // phase need to match DeletionTimestamp + out.Status.Phase = newer.ImageAvailable + } else { + out.Status.Phase = newer.ImagePurging + } return nil } -func convert_v1_ImageStreamSpec_To_api_ImageStreamSpec(in *ImageStreamSpec, out *newer.ImageStreamSpec, s conversion.Scope) error { - out.DockerImageRepository = in.DockerImageRepository - out.Tags = make(map[string]newer.TagReference) - return s.Convert(&in.Tags, &out.Tags, 0) +func convert_api_ImageStatus_To_v1_ImageStatus(in *newer.ImageStatus, out *ImageStatus, s conversion.Scope) error { + out.Phase = in.Phase + return nil +} + +func convert_v1_ImageStatus_To_api_ImageStatus(in *ImageStatus, out *newer.ImageStatus, s conversion.Scope) error { + out.Phase = in.Phase + if out.Phase == "" { + out.Phase = newer.ImageAvailable + } + return nil } func convert_api_ImageStreamSpec_To_v1_ImageStreamSpec(in *newer.ImageStreamSpec, out *ImageStreamSpec, s conversion.Scope) error { @@ -81,11 +120,29 @@ func convert_api_ImageStreamSpec_To_v1_ImageStreamSpec(in *newer.ImageStreamSpec } } out.Tags = make([]NamedTagReference, 0, 0) - return s.Convert(&in.Tags, &out.Tags, 0) + if err := s.Convert(&in.Tags, &out.Tags, 0); err != nil { + return err + } + out.Finalizers = make([]kapiv1.FinalizerName, 0, 1) + return s.Convert(&in.Finalizers, &out.Finalizers, 0) +} + +func convert_v1_ImageStreamSpec_To_api_ImageStreamSpec(in *ImageStreamSpec, out *newer.ImageStreamSpec, s conversion.Scope) error { + out.DockerImageRepository = in.DockerImageRepository + out.Tags = make(map[string]newer.TagReference) + if err := s.Convert(&in.Tags, &out.Tags, 0); err != nil { + return err + } + out.Finalizers = make([]kapi.FinalizerName, 0, 1) + return s.Convert(&in.Finalizers, &out.Finalizers, 0) } func convert_v1_ImageStreamStatus_To_api_ImageStreamStatus(in *ImageStreamStatus, out *newer.ImageStreamStatus, s conversion.Scope) error 
{ out.DockerImageRepository = in.DockerImageRepository + out.Phase = in.Phase + if out.Phase == "" { + out.Phase = newer.ImageStreamAvailable + } out.Tags = make(map[string]newer.TagEventList) return s.Convert(&in.Tags, &out.Tags, 0) } @@ -101,10 +158,51 @@ func convert_api_ImageStreamStatus_To_v1_ImageStreamStatus(in *newer.ImageStream } } } + out.Phase = in.Phase out.Tags = make([]NamedTagEventList, 0, 0) return s.Convert(&in.Tags, &out.Tags, 0) } +func convert_v1_ImageStream_To_api_ImageStream(in *ImageStream, out *newer.ImageStream, s conversion.Scope) error { + if err := s.DefaultConvert(in, out, conversion.IgnoreMissingFields); err != nil { + return err + } + if err := s.Convert(&in.Spec, &out.Spec, 0); err != nil { + return err + } + if err := s.Convert(&in.Status, &out.Status, 0); err != nil { + return err + } + if out.DeletionTimestamp == nil { + // make sure that finalizers contain origin + hasOriginFinalizer := false + for _, finalizer := range out.Spec.Finalizers { + if finalizer == oapi.FinalizerOrigin { + hasOriginFinalizer = true + break + } + } + if !hasOriginFinalizer { + out.Spec.Finalizers = append(out.Spec.Finalizers, oapi.FinalizerOrigin) + } + // phase need to match DeletionTimestamp + out.Status.Phase = newer.ImageStreamAvailable + } else { + out.Status.Phase = newer.ImageStreamTerminating + } + return nil +} + +func convert_api_ImageStream_To_v1_ImageStream(in *newer.ImageStream, out *ImageStream, s conversion.Scope) error { + if err := s.DefaultConvert(in, out, conversion.IgnoreMissingFields); err != nil { + return err + } + if err := s.Convert(&in.Spec, &out.Spec, 0); err != nil { + return err + } + return s.Convert(&in.Status, &out.Status, 0) +} + func convert_api_ImageStreamMapping_To_v1_ImageStreamMapping(in *newer.ImageStreamMapping, out *ImageStreamMapping, s conversion.Scope) error { return s.DefaultConvert(in, out, conversion.DestFromSource) } @@ -113,6 +211,22 @@ func convert_v1_ImageStreamMapping_To_api_ImageStreamMapping(in *ImageStreamMapp return s.DefaultConvert(in, out, conversion.SourceToDest) } +func convert_api_ImageStreamImage_To_v1_ImageStreamImage(in *newer.ImageStreamImage, out *ImageStreamImage, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.DestFromSource) +} + +func convert_v1_ImageStreamImage_To_api_ImageStreamImage(in *ImageStreamImage, out *newer.ImageStreamImage, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.SourceToDest) +} + +func convert_api_ImageStreamDeletion_To_v1_ImageStreamDeletion(in *newer.ImageStreamDeletion, out *ImageStreamDeletion, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.DestFromSource) +} + +func convert_v1_ImageStreamDeletion_To_api_ImageStreamDeletion(in *ImageStreamDeletion, out *newer.ImageStreamDeletion, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.SourceToDest) +} + func init() { err := kapi.Scheme.AddConversionFuncs( func(in *[]NamedTagEventList, out *map[string]newer.TagEventList, s conversion.Scope) error { @@ -180,12 +294,20 @@ func init() { convert_api_Image_To_v1_Image, convert_v1_Image_To_api_Image, - convert_v1_ImageStreamSpec_To_api_ImageStreamSpec, + convert_api_ImageStatus_To_v1_ImageStatus, + convert_v1_ImageStatus_To_api_ImageStatus, + convert_api_ImageStream_To_v1_ImageStream, + convert_v1_ImageStream_To_api_ImageStream, convert_api_ImageStreamSpec_To_v1_ImageStreamSpec, - convert_v1_ImageStreamStatus_To_api_ImageStreamStatus, + convert_v1_ImageStreamSpec_To_api_ImageStreamSpec, 
convert_api_ImageStreamStatus_To_v1_ImageStreamStatus, + convert_v1_ImageStreamStatus_To_api_ImageStreamStatus, + convert_api_ImageStreamImage_To_v1_ImageStreamImage, + convert_v1_ImageStreamImage_To_api_ImageStreamImage, convert_api_ImageStreamMapping_To_v1_ImageStreamMapping, convert_v1_ImageStreamMapping_To_api_ImageStreamMapping, + convert_api_ImageStreamDeletion_To_v1_ImageStreamDeletion, + convert_v1_ImageStreamDeletion_To_api_ImageStreamDeletion, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately. @@ -197,7 +319,18 @@ func init() { switch label { case "name": return "metadata.name", value, nil - case "metadata.name", "spec.dockerImageRepository", "status.dockerImageRepository": + case "metadata.name", "spec.dockerImageRepository", "status.dockerImageRepository", "status.phase": return label, value, nil default: return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + err = kapi.Scheme.AddFieldLabelConversionFunc("v1", "ImageStreamImage", + func(label, value string) (string, string, error) { + switch label { + case "name": + return "metadata.name", value, nil + case "metadata.name", "imagestream.name", "image.name", "image.status.phase": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) diff --git a/pkg/image/api/v1/conversion_test.go b/pkg/image/api/v1/conversion_test.go index a909e7192774..30a997bb11e3 100644 --- a/pkg/image/api/v1/conversion_test.go +++ b/pkg/image/api/v1/conversion_test.go @@ -7,6 +7,7 @@ import ( kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + oapi "github.com/openshift/origin/pkg/api" _ "github.com/openshift/origin/pkg/api/latest" newer "github.com/openshift/origin/pkg/image/api" ) @@ -24,6 +25,10 @@ func TestRoundTripVersionedObject(t *testing.T) { DockerImageMetadata: *d, DockerImageReference: "foo/bar/baz", + Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, + Status: newer.ImageStatus{ + Phase: newer.ImageAvailable, + }, } data, err := kapi.Scheme.EncodeToVersion(i, "v1") diff --git a/pkg/image/api/v1/register.go b/pkg/image/api/v1/register.go index d08d8b56f416..068b21b99630 100644 --- a/pkg/image/api/v1/register.go +++ b/pkg/image/api/v1/register.go @@ -16,13 +16,19 @@ func init() { &ImageStreamMapping{}, &ImageStreamTag{}, &ImageStreamImage{}, + &ImageStreamImageList{}, + &ImageStreamDeletion{}, + &ImageStreamDeletionList{}, ) } -func (*Image) IsAnAPIObject() {} -func (*ImageList) IsAnAPIObject() {} -func (*ImageStream) IsAnAPIObject() {} -func (*ImageStreamList) IsAnAPIObject() {} -func (*ImageStreamMapping) IsAnAPIObject() {} -func (*ImageStreamTag) IsAnAPIObject() {} -func (*ImageStreamImage) IsAnAPIObject() {} +func (*Image) IsAnAPIObject() {} +func (*ImageList) IsAnAPIObject() {} +func (*ImageStream) IsAnAPIObject() {} +func (*ImageStreamList) IsAnAPIObject() {} +func (*ImageStreamMapping) IsAnAPIObject() {} +func (*ImageStreamTag) IsAnAPIObject() {} +func (*ImageStreamImage) IsAnAPIObject() {} +func (*ImageStreamImageList) IsAnAPIObject() {} +func (*ImageStreamDeletion) IsAnAPIObject() {} +func (*ImageStreamDeletionList) IsAnAPIObject() {} diff --git a/pkg/image/api/v1/types.go b/pkg/image/api/v1/types.go index a6461e78079e..f88d941e2073 100644 --- a/pkg/image/api/v1/types.go +++ b/pkg/image/api/v1/types.go @@ -15,6 +15,20 @@ type ImageList struct { Items []Image `json:"items" description:"list of image objects"` } +// ImageStatus is information about the current status of an Image.
+type ImageStatus struct { + // Phase is the current lifecycle phase of the image. + Phase string `json:"phase,omitempty" description:"current lifecycle phase of the image"` +} + +// These are the valid phases of an image. +const ( + // ImageAvailable means the image is available for use in the system + ImageAvailable string = "Available" + // ImagePurging means the image is going to be deleted during the next run of the registry's pruner + ImagePurging string = "Purging" +) + // Image is an immutable representation of a Docker image and metadata at a point in time. type Image struct { kapi.TypeMeta `json:",inline"` @@ -28,6 +42,10 @@ type Image struct { DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" description:"conveys version of the object, if empty defaults to '1.0'"` // DockerImageManifest is the raw JSON of the manifest DockerImageManifest string `json:"dockerImageManifest,omitempty" description:"raw JSON of the manifest"` + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []kapi.FinalizerName `json:"finalizers,omitempty" description:"opaque list of values that must be empty to permanently remove object from storage"` + // Status describes the current status of an Image + Status ImageStatus `json:"status,omitempty" description:"current status of an Image"` } // ImageStreamList is a list of ImageStream objects. @@ -58,6 +76,8 @@ type ImageStreamSpec struct { DockerImageRepository string `json:"dockerImageRepository,omitempty" description:"optional field if specified this stream is backed by a Docker repository on this server"` // Tags map arbitrary string values to specific image locators Tags []NamedTagReference `json:"tags,omitempty" description:"map arbitrary string values to specific image locators"` + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []kapi.FinalizerName `json:"finalizers,omitempty" description:"opaque list of values that must be empty to permanently remove object from storage"` } // NamedTagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. @@ -70,6 +90,14 @@ type NamedTagReference struct { From *kapi.ObjectReference `json:"from,omitempty" description:"a reference to an image stream tag or image stream this tag should track"` } +// These are the valid phases of an image stream. +const ( + // ImageStreamAvailable means the image stream is available for use in the system + ImageStreamAvailable string = "Available" + // ImageStreamTerminating means the image stream is being deleted + ImageStreamTerminating string = "Terminating" +) + // ImageStreamStatus contains information about the state of this image stream. type ImageStreamStatus struct { // DockerImageRepository represents the effective location this stream may be accessed at. @@ -78,6 +106,8 @@ type ImageStreamStatus struct { // Tags are a historical record of images associated with each tag. The first entry in the // TagEvent array is the currently tagged image. Tags []NamedTagEventList `json:"tags,omitempty" description:"historical record of images associated with each tag, the first entry is the currently tagged image"` + // Phase is the current lifecycle phase of the image stream.
+ Phase string `json:"phase,omitempty" description:"phase is the current lifecycle phase of the image stream"` } // NamedTagEventList relates a tag to its image history. @@ -117,6 +147,15 @@ type ImageStreamTag struct { Image Image `json:"image" description:"the image associated with the ImageStream and tag"` } +// ImageStreamImageList is a list of image stream image objects. +type ImageStreamImageList struct { + kapi.TypeMeta `json:",inline"` + kapi.ListMeta `json:"metadata,omitempty"` + + // Items is a list of images stream images + Items []ImageStreamImage `json:"items" description:"list of image stream image objects"` +} + // ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. type ImageStreamImage struct { kapi.TypeMeta `json:",inline"` @@ -126,6 +165,22 @@ type ImageStreamImage struct { Image Image `json:"image" description:"the image associated with the ImageStream and image name"` } +// ImageStreamDeletionList is a list of image stream deletion objects. +type ImageStreamDeletionList struct { + kapi.TypeMeta `json:",inline"` + kapi.ListMeta `json:"metadata,omitempty"` + + // Items is a list of images stream images + Items []ImageStreamDeletion `json:"items" description:"list of image stream deletion objects"` +} + +// ImageStreamDeletion represents an ImageStream that have been deleted from +// etcd store and is awaiting a garbage collection in internal registry. +type ImageStreamDeletion struct { + kapi.TypeMeta `json:",inline"` + kapi.ObjectMeta `json:"metadata,omitempty"` +} + // DockerImageReference points to a Docker image. type DockerImageReference struct { Registry string diff --git a/pkg/image/api/v1beta3/conversion.go b/pkg/image/api/v1beta3/conversion.go index d39ccee7a5d6..5e3c784859e2 100644 --- a/pkg/image/api/v1beta3/conversion.go +++ b/pkg/image/api/v1beta3/conversion.go @@ -5,8 +5,10 @@ import ( "sort" kapi "k8s.io/kubernetes/pkg/api" + kapiv1beta3 "k8s.io/kubernetes/pkg/api/v1beta3" "k8s.io/kubernetes/pkg/conversion" + oapi "github.com/openshift/origin/pkg/api" newer "github.com/openshift/origin/pkg/image/api" ) @@ -30,7 +32,11 @@ func convert_api_Image_To_v1beta3_Image(in *newer.Image, out *Image, s conversio out.DockerImageMetadata.RawJSON = data out.DockerImageMetadataVersion = version - return nil + out.Finalizers = make([]kapiv1beta3.FinalizerName, 0, 1) + if err := s.Convert(&in.Finalizers, &out.Finalizers, 0); err != nil { + return err + } + return s.Convert(&in.Status, &out.Status, 0) } func convert_v1beta3_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Scope) error { @@ -60,10 +66,49 @@ func convert_v1beta3_Image_To_api_Image(in *Image, out *newer.Image, s conversio } out.DockerImageMetadataVersion = version + out.Finalizers = make([]kapi.FinalizerName, 0, 1) + if err := s.Convert(&in.Finalizers, &out.Finalizers, 0); err != nil { + return err + } + + if err := s.Convert(&in.Status, &out.Status, 0); err != nil { + return err + } + + if out.DeletionTimestamp == nil { + // make sure that finalizers contain origin + hasOriginFinalizer := false + for _, finalizer := range out.Finalizers { + if finalizer == oapi.FinalizerOrigin { + hasOriginFinalizer = true + break + } + } + if !hasOriginFinalizer { + out.Finalizers = append(out.Finalizers, oapi.FinalizerOrigin) + } + // phase need to match DeletionTimestamp + out.Status.Phase = newer.ImageAvailable + } else { + out.Status.Phase = newer.ImagePurging + } + return nil +} + +func convert_api_ImageStatus_To_v1beta3_ImageStatus(in *newer.ImageStatus, out 
*ImageStatus, s conversion.Scope) error { + out.Phase = in.Phase return nil } -func convert_v1beta3_ImageStreamSpec_To_api_ImageStreamSpec(in *ImageStreamSpec, out *newer.ImageStreamSpec, s conversion.Scope) error { +func convert_v1beta3_ImageStatus_To_api_ImageStatus(in *ImageStatus, out *newer.ImageStatus, s conversion.Scope) error { + out.Phase = in.Phase + if out.Phase == "" { + out.Phase = newer.ImageAvailable + } + return nil +} + +func convert_api_ImageStreamSpec_To_v1beta3_ImageStreamSpec(in *newer.ImageStreamSpec, out *ImageStreamSpec, s conversion.Scope) error { out.DockerImageRepository = in.DockerImageRepository if len(in.DockerImageRepository) > 0 { // ensure that stored image references have no tag or ID, which was possible from 1.0.0 until 1.0.7 @@ -74,14 +119,22 @@ func convert_v1beta3_ImageStreamSpec_To_api_ImageStreamSpec(in *ImageStreamSpec, } } } - out.Tags = make(map[string]newer.TagReference) - return s.Convert(&in.Tags, &out.Tags, 0) + out.Tags = make([]NamedTagReference, 0, 0) + if err := s.Convert(&in.Tags, &out.Tags, 0); err != nil { + return err + } + out.Finalizers = make([]kapiv1beta3.FinalizerName, 0, 1) + return s.Convert(&in.Finalizers, &out.Finalizers, 0) } -func convert_api_ImageStreamSpec_To_v1beta3_ImageStreamSpec(in *newer.ImageStreamSpec, out *ImageStreamSpec, s conversion.Scope) error { +func convert_v1beta3_ImageStreamSpec_To_api_ImageStreamSpec(in *ImageStreamSpec, out *newer.ImageStreamSpec, s conversion.Scope) error { out.DockerImageRepository = in.DockerImageRepository - out.Tags = make([]NamedTagReference, 0, 0) - return s.Convert(&in.Tags, &out.Tags, 0) + out.Tags = make(map[string]newer.TagReference) + if err := s.Convert(&in.Tags, &out.Tags, 0); err != nil { + return err + } + out.Finalizers = make([]kapi.FinalizerName, 0, 1) + return s.Convert(&in.Finalizers, &out.Finalizers, 0) } func convert_v1beta3_ImageStreamStatus_To_api_ImageStreamStatus(in *ImageStreamStatus, out *newer.ImageStreamStatus, s conversion.Scope) error { @@ -95,12 +148,14 @@ func convert_v1beta3_ImageStreamStatus_To_api_ImageStreamStatus(in *ImageStreamS } } } + out.Phase = in.Phase out.Tags = make(map[string]newer.TagEventList) return s.Convert(&in.Tags, &out.Tags, 0) } func convert_api_ImageStreamStatus_To_v1beta3_ImageStreamStatus(in *newer.ImageStreamStatus, out *ImageStreamStatus, s conversion.Scope) error { out.DockerImageRepository = in.DockerImageRepository + out.Phase = in.Phase out.Tags = make([]NamedTagEventList, 0, 0) return s.Convert(&in.Tags, &out.Tags, 0) } @@ -130,7 +185,28 @@ func convert_v1beta3_ImageStream_To_api_ImageStream(in *ImageStream, out *newer. 
if err := s.Convert(&in.Spec, &out.Spec, 0); err != nil { return err } - return s.Convert(&in.Status, &out.Status, 0) + if err := s.Convert(&in.Status, &out.Status, 0); err != nil { + return err + } + + if out.DeletionTimestamp == nil { + // make sure that finalizers contain origin + hasOriginFinalizer := false + for _, finalizer := range out.Spec.Finalizers { + if finalizer == oapi.FinalizerOrigin { + hasOriginFinalizer = true + break + } + } + if !hasOriginFinalizer { + out.Spec.Finalizers = append(out.Spec.Finalizers, oapi.FinalizerOrigin) + } + // phase need to match DeletionTimestamp + out.Status.Phase = newer.ImageStreamAvailable + } else { + out.Status.Phase = newer.ImageStreamTerminating + } + return nil } func convert_api_ImageStreamImage_To_v1beta3_ImageStreamImage(in *newer.ImageStreamImage, out *ImageStreamImage, s conversion.Scope) error { @@ -198,6 +274,15 @@ func convert_v1beta3_ImageStreamTag_To_api_ImageStreamTag(in *ImageStreamTag, ou return nil } + +func convert_api_ImageStreamDeletion_To_v1beta3_ImageStreamDeletion(in *newer.ImageStreamDeletion, out *ImageStreamDeletion, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.DestFromSource) +} + +func convert_v1beta3_ImageStreamDeletion_To_api_ImageStreamDeletion(in *ImageStreamDeletion, out *newer.ImageStreamDeletion, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.SourceToDest) +} + func init() { err := kapi.Scheme.AddConversionFuncs( func(in *[]NamedTagEventList, out *map[string]newer.TagEventList, s conversion.Scope) error { @@ -265,6 +350,8 @@ func init() { convert_api_Image_To_v1beta3_Image, convert_v1beta3_Image_To_api_Image, + convert_api_ImageStatus_To_v1beta3_ImageStatus, + convert_v1beta3_ImageStatus_To_api_ImageStatus, convert_v1beta3_ImageStreamSpec_To_api_ImageStreamSpec, convert_api_ImageStreamSpec_To_v1beta3_ImageStreamSpec, convert_v1beta3_ImageStreamStatus_To_api_ImageStreamStatus, @@ -277,6 +364,8 @@ func init() { convert_v1beta3_ImageStreamImage_To_api_ImageStreamImage, convert_api_ImageStreamTag_To_v1beta3_ImageStreamTag, convert_v1beta3_ImageStreamTag_To_api_ImageStreamTag, + convert_api_ImageStreamDeletion_To_v1beta3_ImageStreamDeletion, + convert_v1beta3_ImageStreamDeletion_To_api_ImageStreamDeletion, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately. 
diff --git a/pkg/image/api/v1beta3/conversion_test.go b/pkg/image/api/v1beta3/conversion_test.go index 9161215c517f..dc3daccddd9a 100644 --- a/pkg/image/api/v1beta3/conversion_test.go +++ b/pkg/image/api/v1beta3/conversion_test.go @@ -7,6 +7,7 @@ import ( kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + oapi "github.com/openshift/origin/pkg/api" _ "github.com/openshift/origin/pkg/api/latest" newer "github.com/openshift/origin/pkg/image/api" ) @@ -24,6 +25,10 @@ func TestRoundTripVersionedObject(t *testing.T) { DockerImageMetadata: *d, DockerImageReference: "foo/bar/baz", + Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, + Status: newer.ImageStatus{ + Phase: newer.ImageAvailable, + }, } data, err := kapi.Scheme.EncodeToVersion(i, "v1beta3") diff --git a/pkg/image/api/v1beta3/register.go b/pkg/image/api/v1beta3/register.go index 453bb8e2bdab..c65930376f88 100644 --- a/pkg/image/api/v1beta3/register.go +++ b/pkg/image/api/v1beta3/register.go @@ -16,12 +16,18 @@ func init() { &ImageStreamMapping{}, &ImageStreamTag{}, &ImageStreamImage{}, + &ImageStreamImageList{}, + &ImageStreamDeletion{}, + &ImageStreamDeletionList{}, ) } -func (*Image) IsAnAPIObject() {} -func (*ImageList) IsAnAPIObject() {} -func (*ImageStream) IsAnAPIObject() {} -func (*ImageStreamList) IsAnAPIObject() {} -func (*ImageStreamMapping) IsAnAPIObject() {} -func (*ImageStreamTag) IsAnAPIObject() {} +func (*Image) IsAnAPIObject() {} +func (*ImageList) IsAnAPIObject() {} +func (*ImageStream) IsAnAPIObject() {} +func (*ImageStreamList) IsAnAPIObject() {} +func (*ImageStreamImageList) IsAnAPIObject() {} +func (*ImageStreamMapping) IsAnAPIObject() {} +func (*ImageStreamTag) IsAnAPIObject() {} +func (*ImageStreamDeletion) IsAnAPIObject() {} +func (*ImageStreamDeletionList) IsAnAPIObject() {} diff --git a/pkg/image/api/v1beta3/types.go b/pkg/image/api/v1beta3/types.go index 6ea98458aa49..d597f23cf6bb 100644 --- a/pkg/image/api/v1beta3/types.go +++ b/pkg/image/api/v1beta3/types.go @@ -14,6 +14,12 @@ type ImageList struct { Items []Image `json:"items"` } +// ImageStatus is information about the current status of an Image. +type ImageStatus struct { + // Phase is the current lifecycle phase of the image. + Phase string `json:"phase,omitempty" description:"current lifecycle phase of the image"` +} + // Image is an immutable representation of a Docker image and metadata at a point in time. type Image struct { kapi.TypeMeta `json:",inline"` @@ -27,6 +33,10 @@ type Image struct { DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` // The raw JSON of the manifest DockerImageManifest string `json:"dockerImageManifest,omitempty"` + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []kapi.FinalizerName `json:"finalizers,omitempty" description:"opaque list of values that must be empty to permanently remove object from storage"` + // Status describes the current status of an Image + Status ImageStatus `json:"status,omitempty" description:"current status of an Image"` } // ImageStreamList is a list of ImageStream objects.
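Note: the ImageStreamDeletion objects registered above record deleted streams under a combined "namespace:name" string (built by DeletionNameForImageStream in pkg/image/api/helper.go and checked by ValidateImageStreamDeletion later in this patch). A small sketch of that naming convention follows; deletionName mirrors the helper, while splitDeletionName is a hypothetical inverse added here only for illustration and is not part of the patch.

package main

import (
	"fmt"
	"strings"
)

// deletionName mirrors DeletionNameForImageStream: "namespace:name".
func deletionName(namespace, name string) string {
	return fmt.Sprintf("%s:%s", namespace, name)
}

// splitDeletionName is a hypothetical inverse of deletionName, matching the
// SplitN(..., ":", 2) parsing done by ValidateImageStreamDeletion.
func splitDeletionName(deletion string) (namespace, name string, err error) {
	parts := strings.SplitN(deletion, ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("%q must have the following format: namespace:name", deletion)
	}
	return parts[0], parts[1], nil
}

func main() {
	n := deletionName("myproject", "mystream")
	ns, name, _ := splitDeletionName(n)
	fmt.Println(n, ns, name) // myproject:mystream myproject mystream
}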
@@ -56,6 +66,8 @@ type ImageStreamSpec struct { DockerImageRepository string `json:"dockerImageRepository,omitempty"` // Tags map arbitrary string values to specific image locators Tags []NamedTagReference `json:"tags,omitempty"` + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []kapi.FinalizerName `json:"finalizers,omitempty"` } // NamedTagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. @@ -65,6 +77,14 @@ type NamedTagReference struct { From *kapi.ObjectReference `json:"from,omitempty"` } +// These are the valid phases of an image stream. +const ( + // ImageStreamAvailable means the image stream is available for use in the system + ImageStreamAvailable string = "Available" + // ImageStreamTerminating means the image stream is being deleted + ImageStreamTerminating string = "Terminating" +) + // ImageStreamStatus contains information about the state of this image stream. type ImageStreamStatus struct { // Represents the effective location this stream may be accessed at. May be empty until the server @@ -73,6 +93,8 @@ type ImageStreamStatus struct { // A historical record of images associated with each tag. The first entry in the TagEvent array is // the currently tagged image. Tags []NamedTagEventList `json:"tags,omitempty"` + // Phase is the current lifecycle phase of the image stream. + Phase string `json:"phase" description:"phase is the current lifecycle phase of the image stream"` } // NamedTagEventList relates a tag to its image history. @@ -109,12 +131,37 @@ type ImageStreamTag struct { ImageName string `json:"imageName"` } +// ImageStreamImageList is a list of image stream image objects. +type ImageStreamImageList struct { + kapi.TypeMeta `json:",inline"` + kapi.ListMeta `json:"metadata,omitempty"` + + // Items is a list of image stream images + Items []ImageStreamImage `json:"items" description:"list of image stream image objects"` +} + // ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. type ImageStreamImage struct { Image `json:",inline"` ImageName string `json:"imageName"` } +// ImageStreamDeletionList is a list of image stream deletion objects. +type ImageStreamDeletionList struct { + kapi.TypeMeta `json:",inline"` + kapi.ListMeta `json:"metadata,omitempty"` + + // Items is a list of image stream deletions + Items []ImageStreamDeletion `json:"items" description:"list of image stream deletion objects"` +} + +// ImageStreamDeletion represents an ImageStream that has been deleted from +// the etcd store and is awaiting garbage collection in the internal registry. +type ImageStreamDeletion struct { + kapi.TypeMeta `json:",inline"` + kapi.ObjectMeta `json:"metadata,omitempty"` +} + // DockerImageReference points to a Docker image.
type DockerImageReference struct { Registry string diff --git a/pkg/image/api/validation/validation.go b/pkg/image/api/validation/validation.go index 8b326496a905..9a025bda01b5 100644 --- a/pkg/image/api/validation/validation.go +++ b/pkg/image/api/validation/validation.go @@ -2,23 +2,25 @@ package validation import ( "fmt" + "strings" "github.com/docker/distribution/registry/api/v2" + kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/fielderrors" oapi "github.com/openshift/origin/pkg/api" "github.com/openshift/origin/pkg/image/api" ) +var qualifiedNameErrorMsg string = fmt.Sprintf(`must be a qualified name (at most %d characters, matching regex %s), with an optional DNS subdomain prefix (at most %d characters, matching regex %s) and slash (/): e.g. "MyName" or "example.com/MyName"`, util.QualifiedNameMaxLength, util.QualifiedNameFmt, util.DNS1123SubdomainMaxLength, util.DNS1123SubdomainFmt) + func ValidateImageStreamName(name string, prefix bool) (bool, string) { if ok, reason := oapi.MinimalNameRequirements(name, prefix); !ok { return ok, reason } - if len(name) < v2.RepositoryNameComponentMinLength { - return false, fmt.Sprintf("must be at least %d characters long", v2.RepositoryNameComponentMinLength) - } if !v2.RepositoryNameComponentAnchoredRegexp.MatchString(name) { return false, fmt.Sprintf("must match %q", v2.RepositoryNameComponentRegexp.String()) } @@ -38,10 +40,18 @@ func ValidateImage(image *api.Image) fielderrors.ValidationErrorList { result = append(result, fielderrors.NewFieldInvalid("dockerImageReference", image.DockerImageReference, err.Error())) } } + if image.Status.Phase != api.ImageAvailable && image.Status.Phase != api.ImagePurging { + result = append(result, fielderrors.NewFieldInvalid("status.phase", image.Status.Phase, fmt.Sprintf("phase is not one of valid phases (%s, %s)", api.ImageAvailable, api.ImagePurging))) + } + if image.DeletionTimestamp == nil && image.Status.Phase == api.ImagePurging { + result = append(result, fielderrors.NewFieldInvalid("status.phase", image.Status.Phase, fmt.Sprintf("%s phase is valid only when DeletionTimestamp is set", api.ImagePurging))) + + } return result } +// ValidateImageUpdate validates an update of image func ValidateImageUpdate(newImage, oldImage *api.Image) fielderrors.ValidationErrorList { result := fielderrors.ValidationErrorList{} @@ -51,15 +61,48 @@ func ValidateImageUpdate(newImage, oldImage *api.Image) fielderrors.ValidationEr return result } +// ValidateImageStatusUpdate tests required fields for an Image status update. +func ValidateImageStatusUpdate(newImage, oldImage *api.Image) fielderrors.ValidationErrorList { + result := fielderrors.ValidationErrorList{} + result = append(result, validation.ValidateObjectMetaUpdate(&newImage.ObjectMeta, &oldImage.ObjectMeta).Prefix("metadata")...) 
+ if newImage.Status.Phase != api.ImageAvailable && newImage.Status.Phase != api.ImagePurging { + result = append(result, fielderrors.NewFieldInvalid("status.phase", newImage.Status.Phase, fmt.Sprintf("phase is not one of valid phases (%s, %s)", api.ImageAvailable, api.ImagePurging))) + } + if newImage.DeletionTimestamp == nil && newImage.Status.Phase == api.ImagePurging { + result = append(result, fielderrors.NewFieldInvalid("status.phase", newImage.Status.Phase, fmt.Sprintf("%s phase is valid only when DeletionTimestamp is set", api.ImagePurging))) + + } + + newImage.DockerImageReference = oldImage.DockerImageReference + newImage.DockerImageMetadata = oldImage.DockerImageMetadata + newImage.DockerImageMetadataVersion = oldImage.DockerImageMetadataVersion + newImage.DockerImageManifest = oldImage.DockerImageManifest + newImage.Finalizers = oldImage.Finalizers + return result +} + +// ValidateImageFinalizeUpdate tests to see if the update is legal for an end user to make. +// newImage is updated with fields that cannot be changed. +func ValidateImageFinalizeUpdate(newImage, oldImage *api.Image) fielderrors.ValidationErrorList { + allErrs := fielderrors.ValidationErrorList{} + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&newImage.ObjectMeta, &oldImage.ObjectMeta).Prefix("metadata")...) + for i := range newImage.Finalizers { + allErrs = append(allErrs, validateFinalizerName(string(newImage.Finalizers[i]))...) + } + newImage.DockerImageReference = oldImage.DockerImageReference + newImage.DockerImageMetadata = oldImage.DockerImageMetadata + newImage.DockerImageMetadataVersion = oldImage.DockerImageMetadataVersion + newImage.DockerImageManifest = oldImage.DockerImageManifest + newImage.Status = oldImage.Status + return allErrs +} + // ValidateImageStream tests required fields for an ImageStream. func ValidateImageStream(stream *api.ImageStream) fielderrors.ValidationErrorList { result := fielderrors.ValidationErrorList{} result = append(result, validation.ValidateObjectMeta(&stream.ObjectMeta, true, ValidateImageStreamName).Prefix("metadata")...) 
// Ensure we can generate a valid docker image repository from namespace/name - if len(stream.Namespace) > 0 && len(stream.Namespace) < v2.RepositoryNameComponentMinLength { - result = append(result, fielderrors.NewFieldInvalid("metadata.namespace", stream.Namespace, fmt.Sprintf("must be at least %d characters long", v2.RepositoryNameComponentMinLength))) - } if len(stream.Namespace+"/"+stream.Name) > v2.RepositoryNameTotalLengthMax { result = append(result, fielderrors.NewFieldInvalid("metadata.name", stream.Name, fmt.Sprintf("'namespace/name' cannot be longer than %d characters", v2.RepositoryNameTotalLengthMax))) } @@ -97,6 +140,14 @@ func ValidateImageStream(stream *api.ImageStream) fielderrors.ValidationErrorLis } } + if stream.Status.Phase != api.ImageStreamAvailable && stream.Status.Phase != api.ImageStreamTerminating { + result = append(result, fielderrors.NewFieldInvalid("status.phase", stream.Status.Phase, fmt.Sprintf("phase is not one of valid phases (%s, %s)", api.ImageStreamAvailable, api.ImageStreamTerminating))) + } + if stream.DeletionTimestamp == nil && stream.Status.Phase == api.ImageStreamTerminating { + result = append(result, fielderrors.NewFieldInvalid("status.phase", stream.Status.Phase, fmt.Sprintf("%s phase is valid only when DeletionTimestamp is set", api.ImageStreamTerminating))) + + } + return result } @@ -105,6 +156,8 @@ func ValidateImageStreamUpdate(newStream, oldStream *api.ImageStream) fielderror result = append(result, validation.ValidateObjectMetaUpdate(&newStream.ObjectMeta, &oldStream.ObjectMeta).Prefix("metadata")...) result = append(result, ValidateImageStream(newStream)...) + newStream.Status.Phase = oldStream.Status.Phase + newStream.Spec.Finalizers = oldStream.Spec.Finalizers return result } @@ -113,8 +166,29 @@ func ValidateImageStreamUpdate(newStream, oldStream *api.ImageStream) fielderror func ValidateImageStreamStatusUpdate(newStream, oldStream *api.ImageStream) fielderrors.ValidationErrorList { result := fielderrors.ValidationErrorList{} result = append(result, validation.ValidateObjectMetaUpdate(&newStream.ObjectMeta, &oldStream.ObjectMeta).Prefix("metadata")...) + if newStream.Status.Phase != api.ImageStreamAvailable && newStream.Status.Phase != api.ImageStreamTerminating { + result = append(result, fielderrors.NewFieldInvalid("status.phase", newStream.Status.Phase, fmt.Sprintf("phase is not one of valid phases (%s, %s)", api.ImageStreamAvailable, api.ImageStreamTerminating))) + } + if newStream.DeletionTimestamp == nil && newStream.Status.Phase == api.ImageStreamTerminating { + result = append(result, fielderrors.NewFieldInvalid("status.phase", newStream.Status.Phase, fmt.Sprintf("%s phase is valid only when DeletionTimestamp is set", api.ImageStreamTerminating))) + + } + newStream.Spec.Tags = oldStream.Spec.Tags + newStream.Spec.DockerImageRepository = oldStream.Spec.DockerImageRepository + newStream.Spec.Finalizers = oldStream.Spec.Finalizers + return result +} + +// ValidateImageStreamFinalizeUpdate tests whether stream's finalizers contain proper values. +func ValidateImageStreamFinalizeUpdate(newStream, oldStream *api.ImageStream) fielderrors.ValidationErrorList { + result := fielderrors.ValidationErrorList{} + result = append(result, validation.ValidateObjectMetaUpdate(&newStream.ObjectMeta, &oldStream.ObjectMeta).Prefix("metadata")...) + for i := range newStream.Spec.Finalizers { + result = append(result, validateFinalizerName(string(newStream.Spec.Finalizers[i]))...) 
+ } newStream.Spec.Tags = oldStream.Spec.Tags newStream.Spec.DockerImageRepository = oldStream.Spec.DockerImageRepository + newStream.Status = oldStream.Status return result } @@ -147,3 +221,39 @@ func ValidateImageStreamMapping(mapping *api.ImageStreamMapping) fielderrors.Val } return result } + +// ValidateImageStreamDeletion tests required fields for an ImageStreamDeletion. +func ValidateImageStreamDeletion(isd *api.ImageStreamDeletion) fielderrors.ValidationErrorList { + result := fielderrors.ValidationErrorList{} + result = append(result, validation.ValidateObjectMeta(&isd.ObjectMeta, false, oapi.MinimalNameRequirements).Prefix("metadata")...) + if isd.Name != "" { + nameParts := strings.SplitN(isd.Name, ":", 2) + if len(nameParts) != 2 { + result = append(result, fielderrors.NewFieldInvalid("name", isd.Name, "must have following format: namespace:name")) + } else { + if ok, msg := validation.ValidateNamespaceName(nameParts[0], false); !ok { + result = append(result, fielderrors.NewFieldInvalid("name", isd.Name, fmt.Sprintf("part before ':' is not a valid namespace: %v", msg))) + } + if len(isd.Namespace+"/"+isd.Name) > v2.RepositoryNameTotalLengthMax { + result = append(result, fielderrors.NewFieldInvalid("metadata.name", isd.Name, fmt.Sprintf("'namespace/name' cannot be longer than %d characters", v2.RepositoryNameTotalLengthMax))) + } + } + } + return result +} + +// validateFinalizerName checks whether finalizer name is valid +func validateFinalizerName(stringValue string) fielderrors.ValidationErrorList { + allErrs := fielderrors.ValidationErrorList{} + if !util.IsQualifiedName(stringValue) { + return append(allErrs, fielderrors.NewFieldInvalid("finalizers", stringValue, qualifiedNameErrorMsg)) + } + + if strings.Index(stringValue, "/") < 0 && !kapi.IsStandardFinalizerName(stringValue) { + allErrs = append(allErrs, fielderrors.NewFieldInvalid("finalizers", stringValue, fmt.Sprintf("finalizer name is neither a standard finalizer name nor is it fully qualified"))) + } else if strings.Index(stringValue, "/") >= 0 && stringValue != string(oapi.FinalizerOrigin) { + allErrs = append(allErrs, fielderrors.NewFieldInvalid("finalizers", stringValue, fmt.Sprintf("is fully qualified and doesn't match %s", oapi.FinalizerOrigin))) + } + + return allErrs +} diff --git a/pkg/image/api/validation/validation_test.go b/pkg/image/api/validation/validation_test.go index aefe53770f88..9509abec548c 100644 --- a/pkg/image/api/validation/validation_test.go +++ b/pkg/image/api/validation/validation_test.go @@ -15,6 +15,9 @@ func TestValidateImageOK(t *testing.T) { errs := ValidateImage(&api.Image{ ObjectMeta: kapi.ObjectMeta{Name: "foo"}, DockerImageReference: "openshift/ruby-19-centos", + Status: api.ImageStatus{ + Phase: api.ImageAvailable, + }, }) if len(errs) > 0 { t.Errorf("Unexpected non-empty error list: %#v", errs) @@ -186,23 +189,28 @@ func TestValidateImageStream(t *testing.T) { missingNameErr := fielderrors.NewFieldRequired("metadata.name") missingNameErr.Detail = "name or generateName is required" + now := util.Now() tests := map[string]struct { namespace string name string + deletionTimestamp *util.Time dockerImageRepository string specTags map[string]api.TagReference statusTags map[string]api.TagEventList + phase string expected fielderrors.ValidationErrorList }{ "missing name": { namespace: "foo", name: "", + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{missingNameErr}, }, "no slash in Name": { namespace: "foo", name: "foo/bar", + phase: 
api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldInvalid("metadata.name", "foo/bar", `name may not contain "/"`), }, @@ -210,6 +218,7 @@ func TestValidateImageStream(t *testing.T) { "no percent in Name": { namespace: "foo", name: "foo%%bar", + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldInvalid("metadata.name", "foo%%bar", `name may not contain "%"`), }, @@ -217,6 +226,7 @@ func TestValidateImageStream(t *testing.T) { "other invalid name": { namespace: "foo", name: "foo bar", + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldInvalid("metadata.name", "foo bar", `must match "[a-z0-9]+(?:[._-][a-z0-9]+)*"`), }, @@ -224,6 +234,7 @@ func TestValidateImageStream(t *testing.T) { "missing namespace": { namespace: "", name: "foo", + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldRequired("metadata.namespace"), }, @@ -231,21 +242,16 @@ func TestValidateImageStream(t *testing.T) { "invalid namespace": { namespace: "!$", name: "foo", + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldInvalid("metadata.namespace", "!$", `must be a DNS label (at most 63 characters, matching regex [a-z0-9]([-a-z0-9]*[a-z0-9])?): e.g. "my-name"`), }, }, - "short namespace": { - namespace: "f", - name: "foo", - expected: fielderrors.ValidationErrorList{ - fielderrors.NewFieldInvalid("metadata.namespace", "f", `must be at least 2 characters long`), - }, - }, "invalid dockerImageRepository": { namespace: "namespace", name: "foo", dockerImageRepository: "a-|///bbb", + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldInvalid("spec.dockerImageRepository", "a-|///bbb", "the docker pull spec \"a-|///bbb\" must be two or three segments separated by slashes"), }, @@ -254,21 +260,25 @@ func TestValidateImageStream(t *testing.T) { namespace: "namespace", name: "foo", dockerImageRepository: "a/b:tag", + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldInvalid("spec.dockerImageRepository", "a/b:tag", "the repository name may not contain a tag"), }, }, "invalid dockerImageRepository with ID": { - namespace: "namespace", - name: "foo", + namespace: "namespace", + name: "foo", + deletionTimestamp: &now, dockerImageRepository: "a/b@sha256:something", + phase: api.ImageStreamTerminating, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldInvalid("spec.dockerImageRepository", "a/b@sha256:something", "the repository name may not contain an ID"), }, }, "status tag missing dockerImageReference": { - namespace: "namespace", - name: "foo", + namespace: "namespace", + name: "foo", + deletionTimestamp: &now, statusTags: map[string]api.TagEventList{ "tag": { Items: []api.TagEvent{ @@ -278,11 +288,28 @@ func TestValidateImageStream(t *testing.T) { }, }, }, + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldRequired("status.tags[tag].Items[0].dockerImageReference"), fielderrors.NewFieldRequired("status.tags[tag].Items[2].dockerImageReference"), }, }, + "invalid phase": { + namespace: "namespace", + name: "foo", + phase: "InvalidPhase", + expected: fielderrors.ValidationErrorList{ + fielderrors.NewFieldInvalid("status.phase", "InvalidPhase", "phase is not one of valid phases (Available, Terminating)"), + }, + }, + "phase doesn't match deletion timestamp": { + namespace: 
"namespace", + name: "foo", + phase: api.ImageStreamTerminating, + expected: fielderrors.ValidationErrorList{ + fielderrors.NewFieldInvalid("status.phase", api.ImageStreamTerminating, "Terminating phase is valid only when DeletionTimestamp is set"), + }, + }, "valid": { namespace: "namespace", name: "foo", @@ -307,21 +334,25 @@ func TestValidateImageStream(t *testing.T) { }, }, }, + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{}, }, "all possible characters used": { namespace: "abcdefghijklmnopqrstuvwxyz-1234567890", name: "abcdefghijklmnopqrstuvwxyz-1234567890.dot_underscore-dash", + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{}, }, "max name and namespace length met": { namespace: namespace63Char, name: name191Char, + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{}, }, "max name and namespace length exceeded": { namespace: namespace63Char, name: name192Char, + phase: api.ImageStreamAvailable, expected: fielderrors.ValidationErrorList{ fielderrors.NewFieldInvalid("metadata.name", name192Char, "'namespace/name' cannot be longer than 255 characters"), }, @@ -331,15 +362,17 @@ func TestValidateImageStream(t *testing.T) { for name, test := range tests { stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ - Namespace: test.namespace, - Name: test.name, + Namespace: test.namespace, + Name: test.name, + DeletionTimestamp: test.deletionTimestamp, }, Spec: api.ImageStreamSpec{ DockerImageRepository: test.dockerImageRepository, Tags: test.specTags, }, Status: api.ImageStreamStatus{ - Tags: test.statusTags, + Tags: test.statusTags, + Phase: test.phase, }, } diff --git a/pkg/image/controller/controller.go b/pkg/image/controller/controller.go index af3af73bf3cc..58b8eec81bb2 100644 --- a/pkg/image/controller/controller.go +++ b/pkg/image/controller/controller.go @@ -2,20 +2,45 @@ package controller import ( "fmt" + "strings" "time" + "github.com/golang/glog" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" "github.com/openshift/origin/pkg/client" "github.com/openshift/origin/pkg/dockerregistry" "github.com/openshift/origin/pkg/image/api" + imageutil "github.com/openshift/origin/pkg/image/util" ) -type ImportController struct { - streams client.ImageStreamsNamespacer - mappings client.ImageStreamMappingsNamespacer +var ( + availableSelector fields.Selector + availableStreamSelector fields.Selector +) + +func init() { + var err error + availableSelector, err = fields.ParseSelector("image.status.phase!=" + api.ImagePurging) + if err != nil { + glog.Fatal(err.Error()) + } + availableStreamSelector, err = fields.ParseSelector("status.phase==" + api.ImageStreamAvailable) + if err != nil { + glog.Fatal(err.Error()) + } +} + +type ImageStreamController struct { + streams client.ImageStreamsNamespacer + streamDeletions client.ImageStreamDeletionsInterfacer + streamImages client.ImageStreamImagesNamespacer + images client.ImagesInterfacer + mappings client.ImageStreamMappingsNamespacer // injected for testing client dockerregistry.Client } @@ -39,7 +64,12 @@ const retryCount = 2 // the image stream is not modified (so it will be tried again later). If a permanent // failure occurs the image is marked with an annotation. The tags of the original spec image // are left as is (those are updated through status). 
-func (c *ImportController) Next(stream *api.ImageStream) error { +func (c *ImageStreamController) Next(stream *api.ImageStream) error { + + if stream.Status.Phase == api.ImageStreamTerminating { + return c.terminateImageStream(stream) + } + if !needsImport(stream) { return nil } @@ -156,7 +186,7 @@ func (c *ImportController) Next(stream *api.ImageStream) error { } // done marks the stream as being processed due to an error or failure condition -func (c *ImportController) done(stream *api.ImageStream, reason string, retry int) error { +func (c *ImageStreamController) done(stream *api.ImageStream, reason string, retry int) error { if len(reason) == 0 { reason = util.Now().UTC().Format(time.RFC3339) } @@ -183,3 +213,117 @@ func hasTag(tags []string, tag string) bool { } return false } + +// terminateImageStream handles a terminating image stream. It does the following: +// +// 1. marks for deletion all its images that aren't referenced by any other +// available image streams +// 2. creates an instance of ImageStreamDeletion +// 3. removes origin finalizer from its finalizers +// 4. deletes it if there are no other finalizers left +func (c *ImageStreamController) terminateImageStream(stream *api.ImageStream) (err error) { + glog.V(4).Infof("Handling termination of image stream %s/%s (%s)", stream.Namespace, stream.Name, stream.Status.Phase) + // if stream is not terminating, ignore it + if stream.Status.Phase != api.ImageStreamTerminating { + return nil + } + + if !imageutil.ImageStreamFinalized(stream) { + // finalize image stream + if err := c.markOrphanedImagesForDeletion(stream); err != nil { + return err + } + + glog.V(4).Infof("Creating image stream deletion for stream %s/%s", stream.Namespace, stream.Name) + isd, err := api.NewDeletionForImageStream(stream) + if err != nil { + return err + } + _, err = c.streamDeletions.ImageStreamDeletions().Create(isd) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + glog.V(4).Infof("Finalizing image stream %s/%s", stream.Namespace, stream.Name) + stream, err = imageutil.FinalizeImageStream(c.streams, stream) + if err != nil { + return err + } + } + + if len(stream.Spec.Finalizers) == 0 { + glog.V(4).Infof("Deleting image stream %s/%s", stream.Namespace, stream.Name) + return c.streams.ImageStreams(stream.Namespace).Delete(stream.Name) + } + + return nil +} + +// markOrphanedImagesForDeletion marks for deletion each image of the given image +// stream if it meets the following conditions: +// +// 1. it's an internally managed image (lives in internal registry) +// 2. it's available (not marked for deletion already) +// 3. it's referenced only by terminating image streams +func (c *ImageStreamController) markOrphanedImagesForDeletion(stream *api.ImageStream) (err error) { + glog.V(4).Infof("Marking images of image stream %s/%s for deletion", stream.Namespace, stream.Name) + + candidates := make(map[string]*api.Image) + images, err := c.streamImages.ImageStreamImages(stream.Namespace).List(labels.Everything(), availableSelector) + if err != nil { + return fmt.Errorf("Failed to list image stream images: %v", err) + } + for _, isi := range images.Items { + nameParts := strings.Split(isi.Name, "@") + imageStreamName := nameParts[0] + + if isi.Image.Annotations == nil || isi.Image.Annotations[api.ManagedByOpenShiftAnnotation] != "true" { + // skip images not managed internally + continue + } + if isi.Image.Status.Phase == api.ImagePurging { + // shouldn't happen + glog.V(4).Infof("Received image %q belonging to stream %s/%s which is already being purged", isi.Image.Name, stream.Namespace, imageStreamName) + continue + } + + if imageStreamName == stream.Name { + // copy the image out of the loop variable; taking &isi.Image directly would alias isi, leaving every map entry pointing at the last item + img := isi.Image + candidates[img.Name] = &img + } + } + + if len(candidates) == 0 { + return nil + } + + // remove all candidates referenced by available image streams + availableStreams, err := c.streams.ImageStreams("").List(labels.Everything(), availableStreamSelector) + if err != nil { + return fmt.Errorf("Failed to list available image streams: %v", err) + } + + for _, stream := range availableStreams.Items { + if stream.Status.Phase == api.ImageStreamTerminating { + // shouldn't happen + glog.V(4).Infof("Skipping available image stream %s/%s", stream.Namespace, stream.Name) + continue + } + images, err := c.streamImages.ImageStreamImages(stream.Namespace).List(labels.Everything(), availableSelector) + if err != nil { + return fmt.Errorf("Failed to list images of image stream %s/%s: %v", stream.Namespace, stream.Name, err) + } + + for _, isi := range images.Items { + delete(candidates, isi.Image.Name) + } + } + + for _, img := range candidates { + glog.V(4).Infof("Marking image %q for deletion", img.Name) + if err := c.images.Images().Delete(img.Name); err != nil { + return fmt.Errorf("Failed to delete image %s: %v", img.Name, err) + } + } + + return nil +} diff --git a/pkg/image/controller/controller_test.go b/pkg/image/controller/controller_test.go index 59bcdbef9aaa..38eb170f6564 100644 --- a/pkg/image/controller/controller_test.go +++ b/pkg/image/controller/controller_test.go @@ -68,7 +68,7 @@ func (f *fakeDockerRegistryClient) ImageByID(namespace, name, id string) (*docke func TestControllerNoDockerRepo(t *testing.T) { cli, fake := &fakeDockerRegistryClient{}, &client.Fake{} - c := ImportController{client: cli, streams: fake, mappings: fake} + c := ImageStreamController{client: cli, streams: fake, mappings: fake} stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ @@ -87,7 +87,7 @@ func TestControllerNoDockerRepo(t *testing.T) { func TestControllerRepoHandled(t *testing.T) { cli, fake := &fakeDockerRegistryClient{}, &client.Fake{} - c := ImportController{client: cli, streams: fake, mappings: fake} + c := ImageStreamController{client: cli, streams: fake, mappings: fake} stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ @@ -111,7 +111,7 @@ func TestControllerRepoHandled(t *testing.T) { func TestControllerTagRetrievalFails(t *testing.T) { cli, fake := &fakeDockerRegistryClient{Err: fmt.Errorf("test error")}, &client.Fake{} - c := ImportController{client: cli, streams: fake, mappings: fake} + c := ImageStreamController{client: cli, streams: fake,
mappings: fake} stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{Name: "test", Namespace: "other"}, @@ -132,7 +132,7 @@ func TestControllerTagRetrievalFails(t *testing.T) { func TestControllerRetrievesInsecure(t *testing.T) { cli, fake := &fakeDockerRegistryClient{Err: fmt.Errorf("test error")}, &client.Fake{} - c := ImportController{client: cli, streams: fake, mappings: fake} + c := ImageStreamController{client: cli, streams: fake, mappings: fake} stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ @@ -162,7 +162,7 @@ func TestControllerRetrievesInsecure(t *testing.T) { func TestControllerImageNotFoundError(t *testing.T) { cli, fake := &fakeDockerRegistryClient{Tags: map[string]string{api.DefaultImageTag: "not_found"}}, &client.Fake{} - c := ImportController{client: cli, streams: fake, mappings: fake} + c := ImageStreamController{client: cli, streams: fake, mappings: fake} stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{Name: "test", Namespace: "other"}, Spec: api.ImageStreamSpec{ @@ -190,7 +190,7 @@ func TestControllerImageWithGenericError(t *testing.T) { }, }, }, &client.Fake{} - c := ImportController{client: cli, streams: fake, mappings: fake} + c := ImageStreamController{client: cli, streams: fake, mappings: fake} stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{Name: "test", Namespace: "other"}, Spec: api.ImageStreamSpec{ @@ -223,7 +223,7 @@ func TestControllerWithImage(t *testing.T) { }, }, }, &client.Fake{} - c := ImportController{client: cli, streams: fake, mappings: fake} + c := ImageStreamController{client: cli, streams: fake, mappings: fake} stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{Name: "test", Namespace: "other"}, Spec: api.ImageStreamSpec{ @@ -288,7 +288,7 @@ func TestControllerWithSpecTags(t *testing.T) { }, }, }, &client.Fake{} - c := ImportController{client: cli, streams: fake, mappings: fake} + c := ImageStreamController{client: cli, streams: fake, mappings: fake} stream := api.ImageStream{ ObjectMeta: kapi.ObjectMeta{Name: "test", Namespace: "other"}, Spec: api.ImageStreamSpec{ diff --git a/pkg/image/controller/factory.go b/pkg/image/controller/factory.go index 181d1dc524e0..c28f3dc0a070 100644 --- a/pkg/image/controller/factory.go +++ b/pkg/image/controller/factory.go @@ -17,13 +17,13 @@ import ( "github.com/openshift/origin/pkg/image/api" ) -// ImportControllerFactory can create an ImportController. -type ImportControllerFactory struct { +// ImageStreamControllerFactory can create an ImageStreamController. +type ImageStreamControllerFactory struct { Client client.Interface } -// Create creates an ImportController. -func (f *ImportControllerFactory) Create() controller.RunnableController { +// Create creates an ImageStreamController. 
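+//
+// A minimal wiring sketch (osClient stands in for an assumed client.Interface;
+// the returned RunnableController is started with Run):
+//
+//	factory := &ImageStreamControllerFactory{Client: osClient}
+//	factory.Create().Run()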
+func (f *ImageStreamControllerFactory) Create() controller.RunnableController { lw := &cache.ListWatch{ ListFunc: func() (runtime.Object, error) { return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything()) @@ -35,9 +35,12 @@ func (f *ImportControllerFactory) Create() controller.RunnableController { q := cache.NewFIFO(cache.MetaNamespaceKeyFunc) cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run() - c := &ImportController{ - streams: f.Client, - mappings: f.Client, + c := &ImageStreamController{ + streams: f.Client, + streamDeletions: f.Client, + streamImages: f.Client, + images: f.Client, + mappings: f.Client, } return &controller.RetryController{ diff --git a/pkg/image/prune/imagepruner.go b/pkg/image/prune/imagepruner.go index 6ce41984b31f..8bd831852498 100644 --- a/pkg/image/prune/imagepruner.go +++ b/pkg/image/prune/imagepruner.go @@ -1,14 +1,12 @@ package prune import ( - "encoding/json" "fmt" - "net/http" "time" - "github.com/docker/distribution/registry/api/v2" "github.com/golang/glog" gonum "github.com/gonum/graph" + "github.com/openshift/origin/pkg/api/graph" kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" buildapi "github.com/openshift/origin/pkg/build/api" @@ -36,14 +34,10 @@ const ( // an ImageNode, with weak indicating that this particular edge does // not keep an ImageNode from being a candidate for pruning. WeakReferencedImageEdgeKind = "WeakReferencedImage" - - // ReferencedImageLayerEdgeKind defines an edge from an ImageStreamNode or an - // ImageNode to an ImageLayerNode. - ReferencedImageLayerEdgeKind = "ReferencedImageLayer" ) // pruneAlgorithm contains the various settings to use when evaluating images -// and layers for pruning. +// for pruning. type pruneAlgorithm struct { keepYoungerThan time.Duration keepTagRevisions int @@ -63,29 +57,6 @@ type ImageStreamPruner interface { PruneImageStream(stream *imageapi.ImageStream, image *imageapi.Image, updatedTags []string) (*imageapi.ImageStream, error) } -// BlobPruner knows how to delete a blob from the Docker registry. -type BlobPruner interface { - // PruneBlob uses registryClient to ask the registry at registryURL to delete - // the blob. - PruneBlob(registryClient *http.Client, registryURL, blob string) error -} - -// LayerPruner knows how to delete a repository layer link from the Docker -// registry. -type LayerPruner interface { - // PruneLayer uses registryClient to ask the registry at registryURL to - // delete the repository layer link. - PruneLayer(registryClient *http.Client, registryURL, repo, layer string) error -} - -// ManifestPruner knows how to delete image manifest data for a repository from -// the Docker registry. -type ManifestPruner interface { - // PruneManifest uses registryClient to ask the registry at registryURL to - // delete the repository's image manifest data. - PruneManifest(registryClient *http.Client, registryURL, repo, manifest string) error -} - // ImageRegistryPrunerOptions contains the fields used to initialize a new // ImageRegistryPruner. type ImageRegistryPrunerOptions struct { @@ -117,79 +88,25 @@ type ImageRegistryPrunerOptions struct { // DryRun indicates that no changes will be made to the cluster and nothing // will be removed. DryRun bool - // RegistryClient is the http.Client to use when contacting the registry. - RegistryClient *http.Client - // RegistryURL is the URL for the registry. - RegistryURL string } -// ImageRegistryPruner knows how to prune images and layers. 
+// ImageRegistryPruner knows how to prune images. type ImageRegistryPruner interface { - // Prune uses imagePruner, streamPruner, layerPruner, blobPruner, and - // manifestPruner to remove images that have been identified as candidates - // for pruning based on the ImageRegistryPruner's internal pruning algorithm. - // Please see NewImageRegistryPruner for details on the algorithm. - Prune(imagePruner ImagePruner, streamPruner ImageStreamPruner, layerPruner LayerPruner, blobPruner BlobPruner, manifestPruner ManifestPruner) error + // Prune uses imagePruner and streamPruner to remove images that have been + // identified as candidates for pruning based on the ImageRegistryPruner's + // internal pruning algorithm. Please see NewImageRegistryPruner for details + // on the algorithm. + Prune(imagePruner ImagePruner, streamPruner ImageStreamPruner) error } // imageRegistryPruner implements ImageRegistryPruner. type imageRegistryPruner struct { - g graph.Graph - algorithm pruneAlgorithm - registryPinger registryPinger - registryClient *http.Client - registryURL string + g graph.Graph + algorithm pruneAlgorithm } var _ ImageRegistryPruner = &imageRegistryPruner{} -// registryPinger performs a health check against a registry. -type registryPinger interface { - // ping performs a health check against registry. - ping(registry string) error -} - -// defaultRegistryPinger implements registryPinger. -type defaultRegistryPinger struct { - client *http.Client -} - -func (drp *defaultRegistryPinger) ping(registry string) error { - healthzCheck := func(proto, registry string) error { - healthzResponse, err := drp.client.Get(fmt.Sprintf("%s://%s/healthz", proto, registry)) - if err != nil { - return err - } - defer healthzResponse.Body.Close() - - if healthzResponse.StatusCode != http.StatusOK { - return fmt.Errorf("unexpected status code %d", healthzResponse.StatusCode) - } - - return nil - } - - var err error - for _, proto := range []string{"https", "http"} { - glog.V(4).Infof("Trying %s for %s", proto, registry) - err = healthzCheck(proto, registry) - if err == nil { - break - } - glog.V(4).Infof("Error with %s for %s: %v", proto, registry, err) - } - - return err -} - -// dryRunRegistryPinger implements registryPinger. -type dryRunRegistryPinger struct { -} - -func (*dryRunRegistryPinger) ping(registry string) error { - return nil -} - /* NewImageRegistryPruner creates a new ImageRegistryPruner. @@ -224,9 +141,6 @@ minutes ago and is *not* currently referenced by: When removing an image, remove all references to the image from all ImageStreams having a reference to the image in `status.tags`. - -Also automatically remove any image layer that is no longer referenced by any -images. */ func NewImageRegistryPruner(options ImageRegistryPrunerOptions) ImageRegistryPruner { g := graph.New() @@ -246,26 +160,15 @@ func NewImageRegistryPruner(options ImageRegistryPrunerOptions) ImageRegistryPru addBuildsToGraph(g, options.Builds) addDeploymentConfigsToGraph(g, options.DCs) - var rp registryPinger - if options.DryRun { - rp = &dryRunRegistryPinger{} - } else { - rp = &defaultRegistryPinger{options.RegistryClient} - } - return &imageRegistryPruner{ - g: g, - algorithm: algorithm, - registryPinger: rp, - registryClient: options.RegistryClient, - registryURL: options.RegistryURL, + g: g, + algorithm: algorithm, } } // addImagesToGraph adds all images to the graph that belong to one of the // registries in the algorithm and are at least as old as the minimum age -// threshold as specified by the algorithm. 
It also adds all the images' layers -// to the graph. +// threshold as specified by the algorithm. func addImagesToGraph(g graph.Graph, images *imageapi.ImageList, algorithm pruneAlgorithm) { for i := range images.Items { image := &images.Items[i] @@ -280,6 +183,10 @@ func addImagesToGraph(g graph.Graph, images *imageapi.ImageList, algorithm prune glog.V(4).Infof("Image %q with DockerImageReference %q belongs to an external registry - skipping", image.Name, image.DockerImageReference) continue } + if image.Status.Phase == imageapi.ImagePurging { + glog.V(4).Infof("Image %q with DockerImageReference %q already marked for deletion", image.Name, image.DockerImageReference) + continue + } age := util.Now().Sub(image.CreationTimestamp.Time) if age < algorithm.keepYoungerThan { @@ -288,19 +195,7 @@ func addImagesToGraph(g graph.Graph, images *imageapi.ImageList, algorithm prune } glog.V(4).Infof("Adding image %q to graph", image.Name) - imageNode := imagegraph.EnsureImageNode(g, image) - - manifest := imageapi.DockerImageManifest{} - if err := json.Unmarshal([]byte(image.DockerImageManifest), &manifest); err != nil { - util.HandleError(fmt.Errorf("unable to extract manifest from image: %v. This image's layers won't be pruned if the image is pruned now.", err)) - continue - } - - for _, layer := range manifest.FSLayers { - glog.V(4).Infof("Adding image layer %q to graph", layer.DockerBlobSum) - layerNode := imagegraph.EnsureImageLayerNode(g, layer.DockerBlobSum) - g.AddEdge(imageNode, layerNode, ReferencedImageLayerEdgeKind) - } + imagegraph.EnsureImageNode(g, image) } } @@ -310,9 +205,6 @@ func addImagesToGraph(g graph.Graph, images *imageapi.ImageList, algorithm prune // for pruning. if the image stream's age is at least as old as the minimum // threshold in algorithm. Otherwise, if the image stream is younger than the // threshold, all image revisions for that stream are ineligible for pruning. -// -// addImageStreamsToGraph also adds references from each stream to all the -// layers it references (via each image a stream references). func addImageStreamsToGraph(g graph.Graph, streams *imageapi.ImageStreamList, algorithm pruneAlgorithm) { for i := range streams.Items { stream := &streams.Items[i] @@ -358,16 +250,6 @@ func addImageStreamsToGraph(g graph.Graph, streams *imageapi.ImageStreamList, al glog.V(4).Infof("Adding edge (kind=%d) from %q to %q", kind, imageStreamNode.UniqueName.UniqueName(), imageNode.UniqueName.UniqueName()) g.AddEdge(imageStreamNode, imageNode, kind) - - glog.V(4).Infof("Adding stream->layer references") - // add stream -> layer references so we can prune them later - for _, s := range g.From(imageNode) { - if g.Kind(s) != imagegraph.ImageLayerNodeKind { - continue - } - glog.V(4).Infof("Adding reference from stream %q to layer %q", stream.Name, s.(*imagegraph.ImageLayerNode).Layer) - g.AddEdge(imageStreamNode, s, ReferencedImageLayerEdgeKind) - } } } } @@ -553,7 +435,7 @@ func edgeKind(g graph.Graph, from, to gonum.Node, desiredKind string) bool { return kinds.Has(desiredKind) } -// imageIsPrunable returns true iff the image node only has weak references +// imageIsPrunable returns true if the image node only has weak references // from its predecessors to it. 
A weak reference to an image is a reference // from an image stream to an image where the image is not the current image // for a tag and the image stream is at least as old as the minimum pruning @@ -593,47 +475,6 @@ func calculatePrunableImages(g graph.Graph, imageNodes []*imagegraph.ImageNode) return prunable, ids } -// subgraphWithoutPrunableImages creates a subgraph from g with prunable image -// nodes excluded. -func subgraphWithoutPrunableImages(g graph.Graph, prunableImageIDs graph.NodeSet) graph.Graph { - return g.Subgraph( - func(g graph.Interface, node gonum.Node) bool { - return !prunableImageIDs.Has(node.ID()) - }, - func(g graph.Interface, head, tail gonum.Node, edgeKinds sets.String) bool { - if prunableImageIDs.Has(head.ID()) { - return false - } - if prunableImageIDs.Has(tail.ID()) { - return false - } - return true - }, - ) -} - -// calculatePrunableLayers returns the list of prunable layers. -func calculatePrunableLayers(g graph.Graph) []*imagegraph.ImageLayerNode { - prunable := []*imagegraph.ImageLayerNode{} - - nodes := g.Nodes() - for i := range nodes { - layerNode, ok := nodes[i].(*imagegraph.ImageLayerNode) - if !ok { - continue - } - - glog.V(4).Infof("Examining layer %q", layerNode.Layer) - - if layerIsPrunable(g, layerNode) { - glog.V(4).Infof("Layer %q is prunable", layerNode.Layer) - prunable = append(prunable, layerNode) - } - } - - return prunable -} - // pruneStreams removes references from all image streams' status.tags entries // to prunable images, invoking streamPruner.PruneImageStream for each updated // stream. @@ -698,62 +539,21 @@ func pruneImages(g graph.Graph, imageNodes []*imagegraph.ImageNode, imagePruner return errs } -func (p *imageRegistryPruner) determineRegistry(imageNodes []*imagegraph.ImageNode) (string, error) { - if len(p.registryURL) > 0 { - return p.registryURL, nil - } - - // we only support a single internal registry, and all images have the same registry - // so we just take the 1st one and use it - pullSpec := imageNodes[0].Image.DockerImageReference - - ref, err := imageapi.ParseDockerImageReference(pullSpec) - if err != nil { - return "", fmt.Errorf("unable to parse %q: %v", pullSpec, err) - } - - if len(ref.Registry) == 0 { - return "", fmt.Errorf("%s does not include a registry", pullSpec) - } - - return ref.Registry, nil -} - // Run identifies images eligible for pruning, invoking imagePruneFunc for each -// image, and then it identifies layers eligible for pruning, invoking -// layerPruneFunc for each registry URL that has layers that can be pruned. -func (p *imageRegistryPruner) Prune(imagePruner ImagePruner, streamPruner ImageStreamPruner, layerPruner LayerPruner, blobPruner BlobPruner, manifestPruner ManifestPruner) error { - allNodes := p.g.Nodes() - - imageNodes := getImageNodes(allNodes) +// image. 
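+//
+// A minimal usage sketch (constructor arguments assumed; the deleting
+// pruners are defined later in this file):
+//
+//	pruner := NewImageRegistryPruner(options)
+//	err := pruner.Prune(
+//		NewDeletingImagePruner(osClient.Images()),
+//		NewDeletingImageStreamPruner(osClient))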
+func (p *imageRegistryPruner) Prune(imagePruner ImagePruner, streamPruner ImageStreamPruner) error {
+	imageNodes := getImageNodes(p.g.Nodes())
 	if len(imageNodes) == 0 {
 		return nil
 	}
 
-	registryURL, err := p.determineRegistry(imageNodes)
-	if err != nil {
-		return fmt.Errorf("unable to determine registry: %v", err)
-	}
-	glog.V(1).Infof("Using registry: %s", registryURL)
-
-	if err := p.registryPinger.ping(registryURL); err != nil {
-		return fmt.Errorf("error communicating with registry: %v", err)
-	}
-
-	prunableImageNodes, prunableImageIDs := calculatePrunableImages(p.g, imageNodes)
-	graphWithoutPrunableImages := subgraphWithoutPrunableImages(p.g, prunableImageIDs)
-	prunableLayers := calculatePrunableLayers(graphWithoutPrunableImages)
+	prunableImageNodes, _ := calculatePrunableImages(p.g, imageNodes)
 
 	errs := []error{}
 	errs = append(errs, pruneStreams(p.g, prunableImageNodes, streamPruner)...)
-	errs = append(errs, pruneLayers(p.g, p.registryClient, registryURL, prunableLayers, layerPruner)...)
-	errs = append(errs, pruneBlobs(p.g, p.registryClient, registryURL, prunableLayers, blobPruner)...)
-	errs = append(errs, pruneManifests(p.g, p.registryClient, registryURL, prunableImageNodes, manifestPruner)...)
 
 	if len(errs) > 0 {
-		// If we had any errors removing image references from image streams or deleting
-		// layers, blobs, or manifest data from the registry, stop here and don't
+		// If we had any errors removing image references from image streams, stop here and don't
 		// delete any images. This way, you can rerun prune and retry things that failed.
 		return kerrors.NewAggregate(errs)
 	}
@@ -762,98 +562,6 @@ func (p *imageRegistryPruner) Prune(imagePruner ImagePruner, streamPruner ImageS
 	return kerrors.NewAggregate(errs)
 }
 
-// layerIsPrunable returns true if the layer is not referenced by any images.
-func layerIsPrunable(g graph.Graph, layerNode *imagegraph.ImageLayerNode) bool {
-	for _, predecessor := range g.To(layerNode) {
-		glog.V(4).Infof("Examining layer predecessor %#v", predecessor)
-		if g.Kind(predecessor) == imagegraph.ImageNodeKind {
-			glog.V(4).Infof("Layer has an image predecessor")
-			return false
-		}
-	}
-
-	return true
-}
-
-// streamLayerReferences returns a list of ImageStreamNodes that reference a
-// given ImageLayerNode.
-func streamLayerReferences(g graph.Graph, layerNode *imagegraph.ImageLayerNode) []*imagegraph.ImageStreamNode {
-	ret := []*imagegraph.ImageStreamNode{}
-
-	for _, predecessor := range g.To(layerNode) {
-		if g.Kind(predecessor) != imagegraph.ImageStreamNodeKind {
-			continue
-		}
-
-		ret = append(ret, predecessor.(*imagegraph.ImageStreamNode))
-	}
-
-	return ret
-}
-
-// pruneLayers invokes layerPruner.PruneLayer for each repository layer link to
-// be deleted from the registry.
-func pruneLayers(g graph.Graph, registryClient *http.Client, registryURL string, layerNodes []*imagegraph.ImageLayerNode, layerPruner LayerPruner) []error { - errs := []error{} - - for _, layerNode := range layerNodes { - // get streams that reference layer - streamNodes := streamLayerReferences(g, layerNode) - - for _, streamNode := range streamNodes { - stream := streamNode.ImageStream - streamName := fmt.Sprintf("%s/%s", stream.Namespace, stream.Name) - - glog.V(4).Infof("Pruning registry=%q, repo=%q, layer=%q", registryURL, streamName, layerNode.Layer) - if err := layerPruner.PruneLayer(registryClient, registryURL, streamName, layerNode.Layer); err != nil { - errs = append(errs, fmt.Errorf("error pruning repo %q layer link %q: %v", streamName, layerNode.Layer, err)) - } - } - } - - return errs -} - -// pruneBlobs invokes blobPruner.PruneBlob for each blob to be deleted from the -// registry. -func pruneBlobs(g graph.Graph, registryClient *http.Client, registryURL string, layerNodes []*imagegraph.ImageLayerNode, blobPruner BlobPruner) []error { - errs := []error{} - - for _, layerNode := range layerNodes { - glog.V(4).Infof("Pruning registry=%q, blob=%q", registryURL, layerNode.Layer) - if err := blobPruner.PruneBlob(registryClient, registryURL, layerNode.Layer); err != nil { - errs = append(errs, fmt.Errorf("error pruning blob %q: %v", layerNode.Layer, err)) - } - } - - return errs -} - -// pruneManifests invokes manifestPruner.PruneManifest for each repository -// manifest to be deleted from the registry. -func pruneManifests(g graph.Graph, registryClient *http.Client, registryURL string, imageNodes []*imagegraph.ImageNode, manifestPruner ManifestPruner) []error { - errs := []error{} - - for _, imageNode := range imageNodes { - for _, n := range g.To(imageNode) { - streamNode, ok := n.(*imagegraph.ImageStreamNode) - if !ok { - continue - } - - stream := streamNode.ImageStream - repoName := fmt.Sprintf("%s/%s", stream.Namespace, stream.Name) - - glog.V(4).Infof("Pruning manifest for registry %q, repo %q, image %q", registryURL, repoName, imageNode.Image.Name) - if err := manifestPruner.PruneManifest(registryClient, registryURL, repoName, imageNode.Image.Name); err != nil { - errs = append(errs, fmt.Errorf("error pruning manifest for registry %q, repo %q, image %q: %v", registryURL, repoName, imageNode.Image.Name, err)) - } - } - } - - return errs -} - // deletingImagePruner deletes an image from OpenShift. type deletingImagePruner struct { images client.ImageInterface @@ -869,7 +577,7 @@ func NewDeletingImagePruner(images client.ImageInterface) ImagePruner { } func (p *deletingImagePruner) PruneImage(image *imageapi.Image) error { - glog.V(4).Infof("Deleting image %q", image.Name) + glog.V(4).Infof("Marking image %q for deletion", image.Name) return p.images.Delete(image.Name) } @@ -892,100 +600,3 @@ func (p *deletingImageStreamPruner) PruneImageStream(stream *imageapi.ImageStrea glog.V(5).Infof("Updated stream: %#v", stream) return p.streams.ImageStreams(stream.Namespace).UpdateStatus(stream) } - -// deleteFromRegistry uses registryClient to send a DELETE request to the -// provided url. It attempts an https request first; if that fails, it fails -// back to http. 
-func deleteFromRegistry(registryClient *http.Client, url string) error { - deleteFunc := func(proto, url string) error { - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - glog.Errorf("Error creating request: %v", err) - return fmt.Errorf("error creating request: %v", err) - } - - glog.V(4).Infof("Sending request to registry") - resp, err := registryClient.Do(req) - if err != nil { - return fmt.Errorf("error sending request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusNoContent { - glog.V(1).Infof("Unexpected status code in response: %d", resp.StatusCode) - decoder := json.NewDecoder(resp.Body) - var response v2.Errors - decoder.Decode(&response) - glog.V(1).Infof("Response: %#v", response) - return &response - } - - return nil - } - - var err error - for _, proto := range []string{"https", "http"} { - glog.V(4).Infof("Trying %s for %s", proto, url) - err = deleteFunc(proto, fmt.Sprintf("%s://%s", proto, url)) - if err == nil { - return nil - } - - if _, ok := err.(*v2.Errors); ok { - // we got a response back from the registry, so return it - return err - } - - // we didn't get a success or a v2.Errors response back from the registry - glog.V(4).Infof("Error with %s for %s: %v", proto, url, err) - } - return err -} - -// deletingLayerPruner deletes a repository layer link from the registry. -type deletingLayerPruner struct { -} - -var _ LayerPruner = &deletingLayerPruner{} - -// NewDeletingLayerPruner creates a new deletingLayerPruner. -func NewDeletingLayerPruner() LayerPruner { - return &deletingLayerPruner{} -} - -func (p *deletingLayerPruner) PruneLayer(registryClient *http.Client, registryURL, repoName, layer string) error { - glog.V(4).Infof("Pruning registry %q, repo %q, layer %q", registryURL, repoName, layer) - return deleteFromRegistry(registryClient, fmt.Sprintf("%s/admin/%s/layers/%s", registryURL, repoName, layer)) -} - -// deletingBlobPruner deletes a blob from the registry. -type deletingBlobPruner struct { -} - -var _ BlobPruner = &deletingBlobPruner{} - -// NewDeletingLayerPruner creates a new deletingBlobPruner. -func NewDeletingBlobPruner() BlobPruner { - return &deletingBlobPruner{} -} - -func (p *deletingBlobPruner) PruneBlob(registryClient *http.Client, registryURL, blob string) error { - glog.V(4).Infof("Pruning registry %q, blob %q", registryURL, blob) - return deleteFromRegistry(registryClient, fmt.Sprintf("%s/admin/blobs/%s", registryURL, blob)) -} - -// deletingManifestPruner deletes repository manifest data from the registry. -type deletingManifestPruner struct { -} - -var _ ManifestPruner = &deletingManifestPruner{} - -// NewDeletingManifestPruner creates a new deletingManifestPruner. 
-func NewDeletingManifestPruner() ManifestPruner { - return &deletingManifestPruner{} -} - -func (p *deletingManifestPruner) PruneManifest(registryClient *http.Client, registryURL, repoName, manifest string) error { - glog.V(4).Infof("Pruning manifest for registry %q, repo %q, manifest %q", registryURL, repoName, manifest) - return deleteFromRegistry(registryClient, fmt.Sprintf("%s/admin/%s/manifests/%s", registryURL, repoName, manifest)) -} diff --git a/pkg/image/prune/imagepruner_test.go b/pkg/image/prune/imagepruner_test.go index d48a3eab6aa7..996b2205911c 100644 --- a/pkg/image/prune/imagepruner_test.go +++ b/pkg/image/prune/imagepruner_test.go @@ -2,10 +2,8 @@ package prune import ( "encoding/json" - "errors" "flag" "fmt" - "net/http" "reflect" "testing" "time" @@ -350,37 +348,6 @@ type fakeBlobPruner struct { err error } -var _ BlobPruner = &fakeBlobPruner{} - -func (p *fakeBlobPruner) PruneBlob(registryClient *http.Client, registryURL, blob string) error { - p.invocations.Insert(fmt.Sprintf("%s|%s", registryURL, blob)) - return p.err -} - -type fakeLayerPruner struct { - invocations sets.String - err error -} - -var _ LayerPruner = &fakeLayerPruner{} - -func (p *fakeLayerPruner) PruneLayer(registryClient *http.Client, registryURL, repo, layer string) error { - p.invocations.Insert(fmt.Sprintf("%s|%s|%s", registryURL, repo, layer)) - return p.err -} - -type fakeManifestPruner struct { - invocations sets.String - err error -} - -var _ ManifestPruner = &fakeManifestPruner{} - -func (p *fakeManifestPruner) PruneManifest(registryClient *http.Client, registryURL, repo, manifest string) error { - p.invocations.Insert(fmt.Sprintf("%s|%s|%s", registryURL, repo, manifest)) - return p.err -} - var logLevel = flag.Int("loglevel", 0, "") var testCase = flag.String("testcase", "", "") @@ -682,15 +649,11 @@ func TestImagePruning(t *testing.T) { DCs: &test.dcs, } p := NewImageRegistryPruner(options) - p.(*imageRegistryPruner).registryPinger = &fakeRegistryPinger{} imagePruner := &fakeImagePruner{invocations: sets.NewString()} streamPruner := &fakeImageStreamPruner{invocations: sets.NewString()} - layerPruner := &fakeLayerPruner{invocations: sets.NewString()} - blobPruner := &fakeBlobPruner{invocations: sets.NewString()} - manifestPruner := &fakeManifestPruner{invocations: sets.NewString()} - p.Prune(imagePruner, streamPruner, layerPruner, blobPruner, manifestPruner) + p.Prune(imagePruner, streamPruner) expectedDeletions := sets.NewString(test.expectedDeletions...) 
if !reflect.DeepEqual(expectedDeletions, imagePruner.invocations) { @@ -747,10 +710,7 @@ func TestRegistryPruning(t *testing.T) { tests := map[string]struct { images imageapi.ImageList streams imageapi.ImageStreamList - expectedLayerDeletions sets.String - expectedBlobDeletions sets.String expectedManifestDeletions sets.String - pingErr error }{ "layers unique to id1 pruned": { images: imageList( @@ -770,17 +730,6 @@ func TestRegistryPruning(t *testing.T) { ), )), ), - expectedLayerDeletions: sets.NewString( - "registry1|foo/bar|layer1", - "registry1|foo/bar|layer2", - ), - expectedBlobDeletions: sets.NewString( - "registry1|layer1", - "registry1|layer2", - ), - expectedManifestDeletions: sets.NewString( - "registry1|foo/bar|id1", - ), }, "no pruning when no images are pruned": { images: imageList( @@ -793,8 +742,6 @@ func TestRegistryPruning(t *testing.T) { ), )), ), - expectedLayerDeletions: sets.NewString(), - expectedBlobDeletions: sets.NewString(), expectedManifestDeletions: sets.NewString(), }, "blobs pruned when streams have already been deleted": { @@ -802,16 +749,6 @@ func TestRegistryPruning(t *testing.T) { imageWithLayers("id1", "registry1/foo/bar@id1", "layer1", "layer2", "layer3", "layer4"), imageWithLayers("id2", "registry1/foo/bar@id2", "layer3", "layer4", "layer5", "layer6"), ), - expectedLayerDeletions: sets.NewString(), - expectedBlobDeletions: sets.NewString( - "registry1|layer1", - "registry1|layer2", - "registry1|layer3", - "registry1|layer4", - "registry1|layer5", - "registry1|layer6", - ), - expectedManifestDeletions: sets.NewString(), }, "ping error": { images: imageList( @@ -831,10 +768,6 @@ func TestRegistryPruning(t *testing.T) { ), )), ), - expectedLayerDeletions: sets.NewString(), - expectedBlobDeletions: sets.NewString(), - expectedManifestDeletions: sets.NewString(), - pingErr: errors.New("foo"), }, } @@ -858,24 +791,10 @@ func TestRegistryPruning(t *testing.T) { DCs: &deployapi.DeploymentConfigList{}, } p := NewImageRegistryPruner(options) - p.(*imageRegistryPruner).registryPinger = &fakeRegistryPinger{err: test.pingErr} imagePruner := &fakeImagePruner{invocations: sets.NewString()} streamPruner := &fakeImageStreamPruner{invocations: sets.NewString()} - layerPruner := &fakeLayerPruner{invocations: sets.NewString()} - blobPruner := &fakeBlobPruner{invocations: sets.NewString()} - manifestPruner := &fakeManifestPruner{invocations: sets.NewString()} - - p.Prune(imagePruner, streamPruner, layerPruner, blobPruner, manifestPruner) - if !reflect.DeepEqual(test.expectedLayerDeletions, layerPruner.invocations) { - t.Errorf("%s: expected layer deletions %#v, got %#v", name, test.expectedLayerDeletions, layerPruner.invocations) - } - if !reflect.DeepEqual(test.expectedBlobDeletions, blobPruner.invocations) { - t.Errorf("%s: expected blob deletions %#v, got %#v", name, test.expectedBlobDeletions, blobPruner.invocations) - } - if !reflect.DeepEqual(test.expectedManifestDeletions, manifestPruner.invocations) { - t.Errorf("%s: expected manifest deletions %#v, got %#v", name, test.expectedManifestDeletions, manifestPruner.invocations) - } + p.Prune(imagePruner, streamPruner) } } diff --git a/pkg/image/registry/image/etcd/etcd.go b/pkg/image/registry/image/etcd/etcd.go index 5d63feed6bd0..f9437ec516ec 100644 --- a/pkg/image/registry/image/etcd/etcd.go +++ b/pkg/image/registry/image/etcd/etcd.go @@ -2,25 +2,30 @@ package etcd import ( "errors" + "fmt" - "github.com/openshift/origin/pkg/image/api" - "github.com/openshift/origin/pkg/image/registry/image" kapi 
"k8s.io/kubernetes/pkg/api" + kapierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/storage" + "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/watch" + + "github.com/openshift/origin/pkg/image/api" + "github.com/openshift/origin/pkg/image/registry/image" ) // REST implements a RESTStorage for images against etcd. type REST struct { - store *etcdgeneric.Etcd + store *etcdgeneric.Etcd + status *etcdgeneric.Etcd } // NewREST returns a new REST. -func NewREST(s storage.Interface) *REST { +func NewREST(s storage.Interface) (*REST, *StatusREST, *FinalizeREST) { prefix := "/images" store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.Image{} }, @@ -41,11 +46,18 @@ func NewREST(s storage.Interface) *REST { CreateStrategy: image.Strategy, UpdateStrategy: image.Strategy, - ReturnDeletedObject: false, + ReturnDeletedObject: true, Storage: s, } - return &REST{store: store} + + statusStore := *store + statusStore.UpdateStrategy = image.StatusStrategy + + finalizeStore := *store + finalizeStore.UpdateStrategy = image.FinalizeStrategy + + return &REST{store: store, status: &statusStore}, &StatusREST{store: &statusStore}, &FinalizeREST{store: &finalizeStore} } // New returns a new object @@ -88,5 +100,64 @@ func (r *REST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object, boo // Delete deletes an existing image specified by its ID. func (r *REST) Delete(ctx kapi.Context, name string, options *kapi.DeleteOptions) (runtime.Object, error) { - return r.store.Delete(ctx, name, options) + imgObj, err := r.Get(ctx, name) + if err != nil { + return nil, err + } + + img := imgObj.(*api.Image) + // TODO: don't treat external images differently here -- take care of them with image controller + isExternal := false + if img.Annotations == nil { + isExternal = true + } else if value, ok := img.Annotations[api.ManagedByOpenShiftAnnotation]; !ok || value != "true" { + isExternal = true + } + if isExternal { + return r.store.Delete(ctx, name, nil) + } + + // upon first request to delete an internally managed image, we switch the phase to start image termination + if img.DeletionTimestamp.IsZero() { + now := util.Now() + img.DeletionTimestamp = &now + img.Status.Phase = api.ImagePurging + result, _, err := r.status.Update(ctx, img) + return result, err + } + + // prior to final deletion, we must ensure that finalizers is empty + if len(img.Finalizers) != 0 { + err = kapierrors.NewConflict("Image", img.Name, fmt.Errorf("The system is deleting image layers. Upon completion, this image will automatically be purged.")) + return nil, err + } + return r.store.Delete(ctx, name, nil) +} + +// StatusREST implements the REST endpoint for changing the status of an image. +type StatusREST struct { + store *etcdgeneric.Etcd +} + +func (r *StatusREST) New() runtime.Object { + return r.store.New() +} + +// Update alters the status subset of an object. +func (r *StatusREST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object, bool, error) { + return r.store.Update(ctx, obj) +} + +// FinalizeREST implements the REST endpoint for finalizing an image. +type FinalizeREST struct { + store *etcdgeneric.Etcd +} + +func (r *FinalizeREST) New() runtime.Object { + return r.store.New() +} + +// Update alters the status finalizers subset of an object. 
+func (r *FinalizeREST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object, bool, error) { + return r.store.Update(ctx, obj) } diff --git a/pkg/image/registry/image/etcd/etcd_test.go b/pkg/image/registry/image/etcd/etcd_test.go index 3e671e9fa335..fe59b42586cf 100644 --- a/pkg/image/registry/image/etcd/etcd_test.go +++ b/pkg/image/registry/image/etcd/etcd_test.go @@ -7,9 +7,6 @@ import ( "time" "github.com/coreos/go-etcd/etcd" - "github.com/openshift/origin/pkg/api/latest" - "github.com/openshift/origin/pkg/image/api" - "github.com/openshift/origin/pkg/image/registry/image" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/rest" @@ -21,8 +18,14 @@ import ( etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" - "k8s.io/kubernetes/pkg/util" + kutil "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/watch" + + oapi "github.com/openshift/origin/pkg/api" + "github.com/openshift/origin/pkg/api/latest" + "github.com/openshift/origin/pkg/image/api" + "github.com/openshift/origin/pkg/image/registry/image" + "github.com/openshift/origin/pkg/image/util" ) // This copy and paste is not pure ignorance. This is that we can be sure that the key is getting made as we @@ -67,15 +70,15 @@ func newHelper(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { return fakeEtcdClient, helper } -func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { +func newStorage(t *testing.T) (*REST, *StatusREST, *FinalizeREST, *tools.FakeEtcdClient) { etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") - return NewREST(etcdStorage), fakeClient + rest, statusRest, finalizeRest := NewREST(etcdStorage) + return rest, statusRest, finalizeRest, fakeClient } func TestStorage(t *testing.T) { _, helper := newHelper(t) - storage := NewREST(helper) - image.NewRegistry(storage) + image.NewRegistry(NewREST(helper)) } func validNewImage() *api.Image { @@ -84,11 +87,13 @@ func validNewImage() *api.Image { Name: "foo", }, DockerImageReference: "openshift/origin", + Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, + Status: api.ImageStatus{Phase: api.ImageAvailable}, } } func TestCreate(t *testing.T) { - storage, fakeClient := newStorage(t) + storage, _, _, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.store).ClusterScope() image := validNewImage() image.ObjectMeta = kapi.ObjectMeta{GenerateName: "foo"} @@ -103,7 +108,7 @@ func TestCreate(t *testing.T) { func TestCreateRegistryError(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Err = fmt.Errorf("test error") - storage := NewREST(helper) + storage, _, _ := NewREST(helper) image := validNewImage() _, err := storage.Create(kapi.NewDefaultContext(), image) @@ -116,7 +121,7 @@ func TestCreateAlreadyExists(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.TestIndex = true - storage := NewREST(helper) + storage, _, _ := NewREST(helper) existingImage := &api.Image{ ObjectMeta: kapi.ObjectMeta{ @@ -152,7 +157,7 @@ func TestCreateAlreadyExists(t *testing.T) { func TestListError(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Err = fmt.Errorf("test error") - storage := NewREST(helper) + storage, _, _ := NewREST(helper) images, err := storage.List(kapi.NewDefaultContext(), labels.Everything(), fields.Everything()) if err != fakeEtcdClient.Err { t.Fatalf("Expected %#v, Got %#v", fakeEtcdClient.Err, err) @@ -169,7 +174,7 @@ func 
TestListEmptyList(t *testing.T) {
 		R: &etcd.Response{},
 		E: fakeEtcdClient.NewError(tools.EtcdErrorCodeNotFound),
 	}
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 	images, err := storage.List(kapi.NewDefaultContext(), labels.Everything(), fields.Everything())
 	if err != nil {
 		t.Errorf("Unexpected non-nil error: %#v", err)
@@ -197,7 +202,7 @@ func TestListPopulatedList(t *testing.T) {
 		},
 	}
 
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 
 	list, err := storage.List(kapi.NewDefaultContext(), labels.Everything(), fields.Everything())
 	if err != nil {
@@ -239,7 +244,7 @@ func TestListFiltered(t *testing.T) {
 		},
 		E: nil,
 	}
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 	list, err := storage.List(kapi.NewDefaultContext(), labels.SelectorFromSet(labels.Set{"env": "dev"}), fields.Everything())
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -253,7 +258,7 @@ func TestListFiltered(t *testing.T) {
 
 func TestCreateMissingID(t *testing.T) {
 	_, helper := newHelper(t)
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 
 	obj, err := storage.Create(kapi.NewDefaultContext(), &api.Image{})
 	if obj != nil {
@@ -266,7 +271,7 @@ func TestCreateMissingID(t *testing.T) {
 
 func TestCreateOK(t *testing.T) {
 	_, helper := newHelper(t)
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 
 	obj, err := storage.Create(kapi.NewDefaultContext(), &api.Image{
 		ObjectMeta: kapi.ObjectMeta{Name: "foo"},
@@ -291,7 +296,7 @@ func TestCreateOK(t *testing.T) {
 func TestGetError(t *testing.T) {
 	fakeEtcdClient, helper := newHelper(t)
 	fakeEtcdClient.Err = fmt.Errorf("bad")
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 
 	image, err := storage.Get(kapi.NewDefaultContext(), "foo")
 	if image != nil {
@@ -304,7 +309,7 @@ func TestGetError(t *testing.T) {
 
 func TestGetNotFound(t *testing.T) {
 	fakeEtcdClient, helper := newHelper(t)
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 	fakeEtcdClient.Data["/images/foo"] = tools.EtcdResponseWithError{
 		R: &etcd.Response{
 			Node: nil,
@@ -334,7 +339,7 @@ func TestGetOK(t *testing.T) {
 			},
 		},
 	}
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 
 	image, err := storage.Get(kapi.NewDefaultContext(), "foo")
 	if image == nil {
@@ -353,27 +358,114 @@ func TestDelete(t *testing.T) {
 	fakeEtcdClient.Data["/images/foo"] = tools.EtcdResponseWithError{
 		R: &etcd.Response{
 			Node: &etcd.Node{
-				Value: runtime.EncodeOrDie(latest.Codec, &api.Image{}),
+				Value: runtime.EncodeOrDie(latest.Codec, validNewImage()),
 			},
 		},
 	}
-	storage := NewREST(helper)
+	storage, _, _ := NewREST(helper)
 
 	obj, err := storage.Delete(kapi.NewDefaultContext(), "foo", nil)
+	if obj == nil {
+		t.Error("Unexpected nil obj")
+	}
+	if err != nil {
+		t.Fatalf("Unexpected non-nil error: %#v", err)
+	}
+
+	_, ok := obj.(*api.Image)
+	if !ok {
+		t.Fatalf("Expected image type, got: %#v", obj)
+	}
+
+	// try to delete it a second time
+	obj, err = storage.Delete(kapi.NewDefaultContext(), "foo", nil)
+	if obj != nil {
+		t.Errorf("Unexpected non-nil obj: %#v", obj)
+	}
+	if err == nil {
+		t.Errorf("Unexpected nil error")
+	}
+	if !errors.IsNotFound(err) {
+		t.Errorf("Expected 'not found' error, got %#v", err)
+	}
+
+	if len(fakeEtcdClient.DeletedKeys) != 1 {
+		t.Errorf("Expected 1 delete, found %#v", fakeEtcdClient.DeletedKeys)
+	} else if key := "/images/foo"; fakeEtcdClient.DeletedKeys[0] != key {
+		t.Errorf("Unexpected key: %s, expected %s", fakeEtcdClient.DeletedKeys[0], key)
+	}
+}
+
+func TestDeleteInternallyManagedImage(t *testing.T) {
+	fakeEtcdClient,
helper := newHelper(t) + img := validNewImage() + img.Annotations = map[string]string{api.ManagedByOpenShiftAnnotation: "true"} + fakeEtcdClient.Data["/images/foo"] = tools.EtcdResponseWithError{ + R: &etcd.Response{ + Node: &etcd.Node{ + Value: runtime.EncodeOrDie(latest.Codec, img), + ModifiedIndex: 2, + }, + }, + } + storage, _, finalizeStorage := NewREST(helper) + + // first delete - causes just an update of image's status + obj, err := storage.Delete(kapi.NewDefaultContext(), "foo", nil) if obj == nil { t.Error("Unexpected nil obj") } if err != nil { t.Errorf("Unexpected non-nil error: %#v", err) } + image, ok := obj.(*api.Image) + if !ok { + t.Fatalf("Expected image type, got: %#v", obj) + } - status, ok := obj.(*kapi.Status) + // second delete without finalization shall result in conflict + obj, err = storage.Delete(kapi.NewDefaultContext(), "foo", nil) + if obj != nil { + t.Error("Unexpected non-nil obj") + } + if err == nil { + t.Fatalf("Unexpected nil error") + } + if !errors.IsConflict(err) { + t.Fatalf("Expected conflict error, got: %#v", err) + } + + // finalize the image + if util.ImageFinalized(image) { + t.Fatalf("Expected image to have incomplete finalizers") + } + image.Finalizers = nil + obj, _, err = finalizeStorage.Update(kapi.NewDefaultContext(), image) + if obj == nil { + t.Error("Unexpected nil obj") + } + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + image, ok = obj.(*api.Image) if !ok { - t.Fatalf("Expected status type, got: %#v", obj) + t.Fatalf("Expected image type, got: %#v", obj) + } + if !util.ImageFinalized(image) { + t.Fatalf("Expected image to be finalized") } - if status.Status != kapi.StatusSuccess { - t.Errorf("Expected status=success, got: %#v", status) + + // delete after finalization shall succeed + obj, err = storage.Delete(kapi.NewDefaultContext(), "foo", nil) + if obj == nil { + t.Error("Unexpected nil obj") + } + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + _, ok = obj.(*api.Image) + if !ok { + t.Fatalf("Expected image type, got: %#v", obj) } if len(fakeEtcdClient.DeletedKeys) != 1 { t.Errorf("Expected 1 delete, found %#v", fakeEtcdClient.DeletedKeys) @@ -385,7 +477,7 @@ func TestDelete(t *testing.T) { func TestDeleteNotFound(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Err = tools.EtcdErrorNotFound - storage := NewREST(helper) + storage, _, _ := NewREST(helper) _, err := storage.Delete(kapi.NewDefaultContext(), "foo", nil) if err == nil { t.Error("Unexpected non-error") @@ -398,7 +490,7 @@ func TestDeleteNotFound(t *testing.T) { func TestDeleteImageError(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Err = fmt.Errorf("Some error") - storage := NewREST(helper) + storage, _, _ := NewREST(helper) _, err := storage.Delete(kapi.NewDefaultContext(), "foo", nil) if err == nil { t.Error("Unexpected non-error") @@ -407,7 +499,7 @@ func TestDeleteImageError(t *testing.T) { func TestWatchErrorWithFieldSet(t *testing.T) { _, helper := newHelper(t) - storage := NewREST(helper) + storage, _, _ := NewREST(helper) _, err := storage.Watch(kapi.NewDefaultContext(), labels.Everything(), fields.SelectorFromSet(fields.Set{"foo": "bar"}), "1") if err == nil { @@ -420,7 +512,7 @@ func TestWatchErrorWithFieldSet(t *testing.T) { func TestWatchOK(t *testing.T) { fakeEtcdClient, helper := newHelper(t) - storage := NewREST(helper) + storage, _, _ := NewREST(helper) var tests = []struct { label labels.Selector @@ -430,9 +522,9 @@ func TestWatchOK(t *testing.T) { { 
labels.Everything(), []*api.Image{ - {ObjectMeta: kapi.ObjectMeta{Name: "a"}, DockerImageMetadata: api.DockerImage{}}, - {ObjectMeta: kapi.ObjectMeta{Name: "b"}, DockerImageMetadata: api.DockerImage{}}, - {ObjectMeta: kapi.ObjectMeta{Name: "c"}, DockerImageMetadata: api.DockerImage{}}, + {ObjectMeta: kapi.ObjectMeta{Name: "a"}, DockerImageMetadata: api.DockerImage{}, Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, Status: api.ImageStatus{Phase: api.ImageAvailable}}, + {ObjectMeta: kapi.ObjectMeta{Name: "b"}, DockerImageMetadata: api.DockerImage{}, Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, Status: api.ImageStatus{Phase: api.ImageAvailable}}, + {ObjectMeta: kapi.ObjectMeta{Name: "c"}, DockerImageMetadata: api.DockerImage{}, Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, Status: api.ImageStatus{Phase: api.ImageAvailable}}, }, []bool{ true, @@ -443,9 +535,9 @@ func TestWatchOK(t *testing.T) { { labels.SelectorFromSet(labels.Set{"color": "blue"}), []*api.Image{ - {ObjectMeta: kapi.ObjectMeta{Name: "a", Labels: map[string]string{"color": "blue"}}, DockerImageMetadata: api.DockerImage{}}, - {ObjectMeta: kapi.ObjectMeta{Name: "b", Labels: map[string]string{"color": "green"}}, DockerImageMetadata: api.DockerImage{}}, - {ObjectMeta: kapi.ObjectMeta{Name: "c", Labels: map[string]string{"color": "blue"}}, DockerImageMetadata: api.DockerImage{}}, + {ObjectMeta: kapi.ObjectMeta{Name: "a", Labels: map[string]string{"color": "blue"}}, DockerImageMetadata: api.DockerImage{}, Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, Status: api.ImageStatus{Phase: api.ImageAvailable}}, + {ObjectMeta: kapi.ObjectMeta{Name: "b", Labels: map[string]string{"color": "green"}}, DockerImageMetadata: api.DockerImage{}, Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, Status: api.ImageStatus{Phase: api.ImageAvailable}}, + {ObjectMeta: kapi.ObjectMeta{Name: "c", Labels: map[string]string{"color": "blue"}}, DockerImageMetadata: api.DockerImage{}, Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, Status: api.ImageStatus{Phase: api.ImageAvailable}}, }, []bool{ true, @@ -483,7 +575,7 @@ func TestWatchOK(t *testing.T) { } image.DockerImageMetadataVersion = "1.0" if e, a := image, event.Object; !reflect.DeepEqual(e, a) { - t.Errorf("Objects did not match: %s", util.ObjectDiff(e, a)) + t.Errorf("Objects did not match: %s", kutil.ObjectDiff(e, a)) } case <-time.After(50 * time.Millisecond): if tt.expected[testIndex] { @@ -520,7 +612,7 @@ func (fakeStrategy) PrepareForCreate(obj runtime.Object) { func TestStrategyPrepareMethods(t *testing.T) { _, helper := newHelper(t) - storage := NewREST(helper) + storage, _, _ := NewREST(helper) img := validNewImage() strategy := fakeStrategy{image.Strategy} diff --git a/pkg/image/registry/image/registry.go b/pkg/image/registry/image/registry.go index bc1154007aba..bed7f4b975d6 100644 --- a/pkg/image/registry/image/registry.go +++ b/pkg/image/registry/image/registry.go @@ -18,10 +18,14 @@ type Registry interface { GetImage(ctx kapi.Context, id string) (*api.Image, error) // CreateImage creates a new image. CreateImage(ctx kapi.Context, image *api.Image) error + // UpdateImageStatus updates status of an image + UpdateImageStatus(ctx kapi.Context, image *api.Image) (*api.Image, error) // DeleteImage deletes an image. DeleteImage(ctx kapi.Context, id string) error // WatchImages watches for new or deleted images. 
WatchImages(ctx kapi.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error)
+	// FinalizeImage updates the image's finalizers
+	FinalizeImage(ctx kapi.Context, image *api.Image) (*api.Image, error)
 }
 
 // Storage is an interface for a standard REST Storage backend
@@ -37,12 +41,14 @@ type Storage interface {
 
 // storage puts strong typing around storage calls
 type storage struct {
 	Storage
+	status    rest.Updater
+	finalizer rest.Updater
 }
 
 // NewRegistry returns a new Registry interface for the given Storage. Any mismatched
 // types will panic.
-func NewRegistry(s Storage) Registry {
-	return &storage{s}
+func NewRegistry(s Storage, status rest.Updater, finalizer rest.Updater) Registry {
+	return &storage{s, status, finalizer}
 }
 
 func (s *storage) ListImages(ctx kapi.Context, label labels.Selector) (*api.ImageList, error) {
@@ -66,6 +72,14 @@ func (s *storage) CreateImage(ctx kapi.Context, image *api.Image) error {
 	return err
 }
 
+func (s *storage) UpdateImageStatus(ctx kapi.Context, image *api.Image) (*api.Image, error) {
+	obj, _, err := s.status.Update(ctx, image)
+	if err != nil {
+		return nil, err
+	}
+	return obj.(*api.Image), nil
+}
+
 func (s *storage) DeleteImage(ctx kapi.Context, imageID string) error {
 	_, err := s.Delete(ctx, imageID, nil)
 	return err
@@ -74,3 +88,11 @@ func (s *storage) DeleteImage(ctx kapi.Context, imageID string) error {
 func (s *storage) WatchImages(ctx kapi.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
 	return s.Watch(ctx, label, field, resourceVersion)
 }
+
+func (s *storage) FinalizeImage(ctx kapi.Context, image *api.Image) (*api.Image, error) {
+	obj, _, err := s.finalizer.Update(ctx, image)
+	if err != nil {
+		return nil, err
+	}
+	return obj.(*api.Image), nil
+}
diff --git a/pkg/image/registry/image/strategy.go b/pkg/image/registry/image/strategy.go
index 020b7760f5c2..402aab01abec 100644
--- a/pkg/image/registry/image/strategy.go
+++ b/pkg/image/registry/image/strategy.go
@@ -32,6 +32,10 @@ func (imageStrategy) NamespaceScoped() bool {
 
 // PrepareForCreate clears fields that are not allowed to be set by end users on creation.
 func (imageStrategy) PrepareForCreate(obj runtime.Object) {
+	image := obj.(*api.Image)
+	image.Status = api.ImageStatus{
+		Phase: api.ImageAvailable,
+	}
 }
 
 // Validate validates a new image.
@@ -57,6 +61,8 @@ func (imageStrategy) PrepareForUpdate(obj, old runtime.Object) {
 	newImage.DockerImageMetadata = oldImage.DockerImageMetadata
 	newImage.DockerImageManifest = oldImage.DockerImageManifest
 	newImage.DockerImageMetadataVersion = oldImage.DockerImageMetadataVersion
+	newImage.Finalizers = oldImage.Finalizers
+	newImage.Status = oldImage.Status
 }
 
 // ValidateUpdate is the default update validation for an end user.
@@ -64,6 +70,47 @@ func (imageStrategy) ValidateUpdate(ctx kapi.Context, obj, old runtime.Object) e return validation.ValidateImageUpdate(old.(*api.Image), obj.(*api.Image)) } +type imageStatusStrategy struct { + imageStrategy +} + +var StatusStrategy = imageStatusStrategy{Strategy} + +func (imageStatusStrategy) PrepareForUpdate(obj, old runtime.Object) { + newImage := obj.(*api.Image) + oldImage := old.(*api.Image) + newImage.DockerImageReference = oldImage.DockerImageReference + newImage.DockerImageMetadata = oldImage.DockerImageMetadata + newImage.DockerImageMetadataVersion = oldImage.DockerImageMetadataVersion + newImage.DockerImageManifest = oldImage.DockerImageManifest + newImage.Finalizers = oldImage.Finalizers +} + +func (imageStatusStrategy) ValidateUpdate(ctx kapi.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { + return validation.ValidateImageStatusUpdate(obj.(*api.Image), old.(*api.Image)) +} + +type imageFinalizeStrategy struct { + imageStrategy +} + +var FinalizeStrategy = imageFinalizeStrategy{Strategy} + +func (imageFinalizeStrategy) ValidateUpdate(ctx kapi.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { + return validation.ValidateImageFinalizeUpdate(obj.(*api.Image), old.(*api.Image)) +} + +// PrepareForUpdate clears fields that are not allowed to be set by end users on update. +func (imageFinalizeStrategy) PrepareForUpdate(obj, old runtime.Object) { + newImage := obj.(*api.Image) + oldImage := old.(*api.Image) + newImage.DockerImageReference = oldImage.DockerImageReference + newImage.DockerImageMetadata = oldImage.DockerImageMetadata + newImage.DockerImageMetadataVersion = oldImage.DockerImageMetadataVersion + newImage.DockerImageManifest = oldImage.DockerImageManifest + newImage.Status = oldImage.Status +} + // MatchImage returns a generic matcher for a given label and field selector. func MatchImage(label labels.Selector, field fields.Selector) generic.Matcher { return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { @@ -79,6 +126,7 @@ func MatchImage(label labels.Selector, field fields.Selector) generic.Matcher { // ImageToSelectableFields returns a label set that represents the object. func ImageToSelectableFields(image *api.Image) labels.Set { return labels.Set{ - "name": image.Name, + "name": image.Name, + "status.phase": image.Status.Phase, } } diff --git a/pkg/image/registry/imagestream/etcd/etcd.go b/pkg/image/registry/imagestream/etcd/etcd.go index d7b63510e7c6..770d1ad6cafa 100644 --- a/pkg/image/registry/imagestream/etcd/etcd.go +++ b/pkg/image/registry/imagestream/etcd/etcd.go @@ -1,28 +1,34 @@ package etcd import ( - "github.com/openshift/origin/pkg/authorization/registry/subjectaccessreview" - "github.com/openshift/origin/pkg/image/api" - "github.com/openshift/origin/pkg/image/registry/imagestream" + "fmt" + kapi "k8s.io/kubernetes/pkg/api" + kapierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/storage" + "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/watch" + + "github.com/openshift/origin/pkg/authorization/registry/subjectaccessreview" + "github.com/openshift/origin/pkg/image/api" + "github.com/openshift/origin/pkg/image/registry/imagestream" ) // REST implements a RESTStorage for image streams against etcd. 
type REST struct {
 	store *etcdgeneric.Etcd
+	status *etcdgeneric.Etcd
 	subjectAccessReviewRegistry subjectaccessreview.Registry
 }
 
 // NewREST returns a new REST.
-func NewREST(s storage.Interface, defaultRegistry imagestream.DefaultRegistry, subjectAccessReviewRegistry subjectaccessreview.Registry) (*REST, *StatusREST) {
+func NewREST(s storage.Interface, defaultRegistry imagestream.DefaultRegistry, subjectAccessReviewRegistry subjectaccessreview.Registry) (*REST, *StatusREST, *FinalizeREST) {
 	prefix := "/imagestreams"
-	store := etcdgeneric.Etcd{
+	store := &etcdgeneric.Etcd{
 		NewFunc:     func() runtime.Object { return &api.ImageStream{} },
 		NewListFunc: func() runtime.Object { return &api.ImageStreamList{} },
 		KeyRootFunc: func(ctx kapi.Context) string {
@@ -36,7 +42,7 @@ func NewREST(s storage.Interface, defaultRegistry imagestream.DefaultRegistry, s
 		},
 		EndpointName: "imageStream",
 
-		ReturnDeletedObject: false,
+		ReturnDeletedObject: true,
 
 		Storage: s,
 	}
 
@@ -44,16 +50,20 @@ func NewREST(s storage.Interface, defaultRegistry imagestream.DefaultRegistry, s
 	rest := &REST{subjectAccessReviewRegistry: subjectAccessReviewRegistry}
 	strategy.ImageStreamGetter = rest
 
-	statusStore := store
+	statusStore := *store
 	statusStore.UpdateStrategy = imagestream.NewStatusStrategy(strategy)
 
 	store.CreateStrategy = strategy
 	store.UpdateStrategy = strategy
 	store.Decorator = strategy.Decorate
 
-	rest.store = &store
+	finalizeStore := *store
+	finalizeStore.UpdateStrategy = imagestream.NewFinalizeStrategy(strategy)
+
+	rest.store = store
+	rest.status = &statusStore
 
-	return rest, &StatusREST{store: &statusStore}
+	return rest, &StatusREST{store: &statusStore}, &FinalizeREST{store: &finalizeStore}
 }
 
 // New returns a new object
@@ -93,7 +103,27 @@ func (r *REST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object, boo
 
 // Delete deletes an existing image stream specified by its ID.
 func (r *REST) Delete(ctx kapi.Context, name string, options *kapi.DeleteOptions) (runtime.Object, error) {
-	return r.store.Delete(ctx, name, options)
+	streamObj, err := r.Get(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+	stream := streamObj.(*api.ImageStream)
+
+	// upon first request to delete an image stream, we switch the phase to start image stream termination
+	if stream.DeletionTimestamp.IsZero() {
+		now := util.Now()
+		stream.DeletionTimestamp = &now
+		stream.Status.Phase = api.ImageStreamTerminating
+		result, _, err := r.status.Update(ctx, stream)
+		return result, err
+	}
+
+	// prior to final deletion, we must ensure that finalizers is empty
+	if len(stream.Spec.Finalizers) != 0 {
+		err = kapierrors.NewConflict("ImageStream", stream.Name, fmt.Errorf("The system is scheduling dependent images for deletion. Upon completion, this image stream will automatically be purged by the system."))
+		return nil, err
+	}
+	return r.store.Delete(ctx, name, nil)
 }
 
 // StatusREST implements the REST endpoint for changing the status of an image stream.
@@ -109,3 +139,17 @@ func (r *StatusREST) New() runtime.Object {
 }
+
+// FinalizeREST implements the REST endpoint for finalizing an image stream.
+type FinalizeREST struct {
+	store *etcdgeneric.Etcd
+}
+
+func (r *FinalizeREST) New() runtime.Object {
+	return r.store.New()
+}
+
+// Update alters the status finalizers subset of an object.
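+//
+// A sketch of the intended lifecycle (stream name hypothetical): the first
+// Delete flips Status.Phase to ImageStreamTerminating, the image stream
+// controller finalizes the stream and empties Spec.Finalizers, and a second
+// Delete then removes the object:
+//
+//	streamREST.Delete(ctx, "ruby", nil) // phase -> ImageStreamTerminating
+//	streamREST.Delete(ctx, "ruby", nil) // succeeds once Spec.Finalizers is empty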
+func (r *FinalizeREST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object, bool, error) { + return r.store.Update(ctx, obj) +} diff --git a/pkg/image/registry/imagestream/etcd/etcd_test.go b/pkg/image/registry/imagestream/etcd/etcd_test.go index 649582ec0a44..dc57560deca1 100644 --- a/pkg/image/registry/imagestream/etcd/etcd_test.go +++ b/pkg/image/registry/imagestream/etcd/etcd_test.go @@ -6,11 +6,6 @@ import ( "testing" "github.com/coreos/go-etcd/etcd" - "github.com/openshift/origin/pkg/api/latest" - authorizationapi "github.com/openshift/origin/pkg/authorization/api" - "github.com/openshift/origin/pkg/authorization/registry/subjectaccessreview" - "github.com/openshift/origin/pkg/image/api" - "github.com/openshift/origin/pkg/image/registry/imagestream" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/auth/user" @@ -21,6 +16,15 @@ import ( etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" + kutil "k8s.io/kubernetes/pkg/util" + + oapi "github.com/openshift/origin/pkg/api" + "github.com/openshift/origin/pkg/api/latest" + authorizationapi "github.com/openshift/origin/pkg/authorization/api" + "github.com/openshift/origin/pkg/authorization/registry/subjectaccessreview" + "github.com/openshift/origin/pkg/image/api" + "github.com/openshift/origin/pkg/image/registry/imagestream" + "github.com/openshift/origin/pkg/image/util" ) var ( @@ -50,18 +54,25 @@ func newHelper(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { return fakeEtcdClient, helper } -func validNewStream() *api.ImageStream { +func validNewStream(namespace string) *api.ImageStream { return &api.ImageStream{ ObjectMeta: kapi.ObjectMeta{ - Name: "foo", + Name: "foo", + Namespace: namespace, + }, + Spec: api.ImageStreamSpec{ + Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, + }, + Status: api.ImageStreamStatus{ + Phase: api.ImageStreamAvailable, }, } } func TestCreate(t *testing.T) { _, helper := newHelper(t) - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) - stream := validNewStream() + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + stream := validNewStream("default") ctx := kapi.WithUser(kapi.NewDefaultContext(), &fakeUser{}) _, err := storage.Create(ctx, stream) if err != nil { @@ -69,10 +80,43 @@ func TestCreate(t *testing.T) { } } +func TestCreateSetsFields(t *testing.T) { + _, helper := newHelper(t) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + stream := &api.ImageStream{ + ObjectMeta: kapi.ObjectMeta{ + Name: "foo", + }, + } + + ctx := kapi.WithUser(kapi.NewDefaultContext(), &fakeUser{}) + _, err := storage.Create(ctx, stream) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + object, err := storage.Get(ctx, "foo") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + actual := object.(*api.ImageStream) + if actual.Name != stream.Name { + t.Errorf("unexpected image stream: %#v", actual) + } + if actual.Status.Phase != api.ImageStreamAvailable { + t.Errorf("expected image stream phase to be set to available, but %v", actual.Status.Phase) + } + if len(actual.Spec.Finalizers) != 1 { + t.Errorf("unexpected length of image stream finalizers (%d != %d): %v", len(actual.Spec.Finalizers), 1, actual.Spec.Finalizers) + } else if actual.Spec.Finalizers[0] != oapi.FinalizerOrigin { + t.Errorf("expected image stream 
finalizers to contain %q, not %q", oapi.FinalizerOrigin, actual.Spec.Finalizers[0]) + } +} + func TestGetImageStreamError(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Err = fmt.Errorf("foo") - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) image, err := storage.Get(kapi.NewDefaultContext(), "image1") if image != nil { @@ -85,7 +129,7 @@ func TestGetImageStreamError(t *testing.T) { func TestGetImageStreamOK(t *testing.T) { fakeEtcdClient, helper := newHelper(t) - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) ctx := kapi.NewDefaultContext() repoName := "foo" @@ -108,7 +152,7 @@ func TestGetImageStreamOK(t *testing.T) { func TestListImageStreamsError(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Err = fmt.Errorf("foo") - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) imageStreams, err := storage.List(kapi.NewDefaultContext(), nil, nil) if err != fakeEtcdClient.Err { @@ -127,7 +171,7 @@ func TestListImageStreamsEmptyList(t *testing.T) { R: &etcd.Response{}, E: fakeEtcdClient.NewError(tools.EtcdErrorCodeNotFound), } - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) imageStreams, err := storage.List(kapi.NewDefaultContext(), labels.Everything(), fields.Everything()) if err != nil { @@ -143,7 +187,7 @@ func TestListImageStreamsEmptyList(t *testing.T) { func TestListImageStreamsPopulatedList(t *testing.T) { fakeEtcdClient, helper := newHelper(t) - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) fakeEtcdClient.Data["/imagestreams/default"] = tools.EtcdResponseWithError{ R: &etcd.Response{ @@ -187,7 +231,7 @@ func (u *fakeUser) GetGroups() []string { func TestCreateImageStreamOK(t *testing.T) { _, helper := newHelper(t) - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) stream := &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "foo"}} ctx := kapi.WithUser(kapi.NewDefaultContext(), &fakeUser{}) @@ -244,7 +288,7 @@ func TestCreateImageStreamSpecTagsFromSet(t *testing.T) { sarRegistry := &fakeSubjectAccessReviewRegistry{ allow: test.sarAllowed, } - storage, _ := NewREST(helper, noDefaultRegistry, sarRegistry) + storage, _, _ := NewREST(helper, noDefaultRegistry, sarRegistry) otherNamespace := test.otherNamespace if len(otherNamespace) == 0 { @@ -319,7 +363,7 @@ func TestCreateImageStreamSpecTagsFromSet(t *testing.T) { func TestCreateRegistryErrorSaving(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Err = fmt.Errorf("foo") - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) ctx := kapi.WithUser(kapi.NewDefaultContext(), &fakeUser{}) _, err := storage.Create(ctx, &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "foo"}}) @@ -330,7 +374,7 @@ func 
TestCreateRegistryErrorSaving(t *testing.T) { func TestUpdateImageStreamMissingID(t *testing.T) { _, helper := newHelper(t) - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) obj, created, err := storage.Update(kapi.NewDefaultContext(), &api.ImageStream{}) if obj != nil || created { @@ -344,7 +388,7 @@ func TestUpdateImageStreamMissingID(t *testing.T) { func TestUpdateRegistryErrorSaving(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Err = fmt.Errorf("foo") - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) _, created, err := storage.Update(kapi.NewDefaultContext(), &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "bar"}}) if err != fakeEtcdClient.Err || created { @@ -364,7 +408,7 @@ func TestUpdateImageStreamOK(t *testing.T) { }, }, } - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) ctx := kapi.WithUser(kapi.NewDefaultContext(), &fakeUser{}) obj, created, err := storage.Update(ctx, &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "bar", ResourceVersion: "1"}}) @@ -414,7 +458,7 @@ func TestUpdateImageStreamSpecTagsFromSet(t *testing.T) { sarRegistry := &fakeSubjectAccessReviewRegistry{ allow: test.sarAllowed, } - storage, _ := NewREST(helper, noDefaultRegistry, sarRegistry) + storage, _, _ := NewREST(helper, noDefaultRegistry, sarRegistry) fakeEtcdClient.Data["/imagestreams/default/foo"] = tools.EtcdResponseWithError{ R: &etcd.Response{ @@ -502,25 +546,70 @@ func TestDeleteImageStream(t *testing.T) { fakeEtcdClient.Data["/imagestreams/default/foo"] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(latest.Codec, &api.ImageStream{ - ObjectMeta: kapi.ObjectMeta{Name: "foo", Namespace: "default"}, - }), + Value: runtime.EncodeOrDie(latest.Codec, validNewStream("default")), ModifiedIndex: 2, }, }, } - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, finalizeStorage := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) obj, err := storage.Delete(kapi.NewDefaultContext(), "foo", nil) if err != nil { t.Fatalf("Unexpected non-nil error: %#v", err) } - status, ok := obj.(*kapi.Status) + stream, ok := obj.(*api.ImageStream) if !ok { - t.Fatalf("Expected status, got %#v", obj) + t.Fatalf("Expected image stream, got %#v", obj) } - if status.Status != kapi.StatusSuccess { - t.Errorf("Expected status=success, got %#v", status) + + // second delete without finalization shall result in conflict + obj, err = storage.Delete(kapi.NewDefaultContext(), "foo", nil) + if obj != nil { + t.Error("Unexpected non-nil obj") + } + if err == nil { + t.Fatalf("Unexpected nil error") + } + if !errors.IsConflict(err) { + t.Fatalf("Expected conflict error, got: %#v", err) + } + + // finalize the image stream + if util.ImageStreamFinalized(stream) { + t.Fatalf("Expected image stream to have incomplete finalizers") + } + stream.Spec.Finalizers = nil + obj, _, err = finalizeStorage.Update(kapi.NewDefaultContext(), stream) + if obj == nil { + t.Error("Unexpected nil obj") + } + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + stream, ok = obj.(*api.ImageStream) + if !ok { + t.Fatalf("Expected 
image stream type, got: %#v", obj) + } + if !util.ImageStreamFinalized(stream) { + t.Fatalf("Expected image stream to be finalized") + } + + // delete after finalization shall succeed + obj, err = storage.Delete(kapi.NewDefaultContext(), "foo", nil) + if obj == nil { + t.Error("Unexpected nil obj") + } + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + _, ok = obj.(*api.ImageStream) + if !ok { + t.Fatalf("Expected image stream type, got: %#v", obj) + } + if len(fakeEtcdClient.DeletedKeys) != 1 { + t.Errorf("Expected 1 delete, found %#v", fakeEtcdClient.DeletedKeys) + } else if key := "/imagestreams/default/foo"; fakeEtcdClient.DeletedKeys[0] != key { + t.Errorf("Unexpected key: %s, expected %s", fakeEtcdClient.DeletedKeys[0], key) } } @@ -536,7 +625,7 @@ func TestUpdateImageStreamConflictingNamespace(t *testing.T) { }, }, } - storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "legal-name"), &fakeUser{}) obj, created, err := storage.Update(ctx, &api.ImageStream{ @@ -561,6 +650,56 @@ func checkExpectedNamespaceError(t *testing.T, err error) { } +func TestDeleteImageStreamWithIncompleteFinalizers(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + key := etcdtest.AddPrefix("imagestreams/foo") + ctx := kapi.NewContext() + now := kutil.Now() + stream := &api.ImageStream{ + ObjectMeta: kapi.ObjectMeta{ + Namespace: "some-value", + Name: "foo", + DeletionTimestamp: &now, + }, + Spec: api.ImageStreamSpec{ + Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, + }, + Status: api.ImageStreamStatus{Phase: api.ImageStreamTerminating}, + } + if _, err := fakeEtcdClient.Set(key, runtime.EncodeOrDie(latest.Codec, stream), 0); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, err := storage.Delete(ctx, "foo", nil); err == nil { + t.Errorf("expected an error due to incomplete finalizers, got none") + } +} + +func TestDeleteImageStreamWithCompleteFinalizers(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + storage, _, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + key := etcdtest.AddPrefix("imagestreams/default/foo") + ctx := kapi.NewDefaultContext() + now := kutil.Now() + stream := &api.ImageStream{ + ObjectMeta: kapi.ObjectMeta{ + Namespace: "default", + Name: "foo", + DeletionTimestamp: &now, + }, + Spec: api.ImageStreamSpec{ + Finalizers: []kapi.FinalizerName{}, + }, + Status: api.ImageStreamStatus{Phase: api.ImageStreamTerminating}, + } + if _, err := fakeEtcdClient.Set(key, runtime.EncodeOrDie(latest.Codec, stream), 0); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, err := storage.Delete(ctx, "foo", nil); err != nil { + t.Errorf("unexpected error: %v", err) + } +} + /* func TestEtcdListImagesStreamsEmpty(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) @@ -1069,8 +1208,8 @@ func (fakeStrategy) PrepareForUpdate(obj, old runtime.Object) { func TestStrategyPrepareMethods(t *testing.T) { _, helper := newHelper(t) - storage, _ := NewREST(helper, testDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) - stream := validNewStream() + storage, _, _ := NewREST(helper, testDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + stream := validNewStream("default") strategy := fakeStrategy{imagestream.NewStrategy(testDefaultRegistry, 
&fakeSubjectAccessReviewRegistry{})} storage.store.CreateStrategy = strategy diff --git a/pkg/image/registry/imagestream/registry.go b/pkg/image/registry/imagestream/registry.go index 5a024edcba17..5249c3305b53 100644 --- a/pkg/image/registry/imagestream/registry.go +++ b/pkg/image/registry/imagestream/registry.go @@ -27,6 +27,8 @@ type Registry interface { DeleteImageStream(ctx kapi.Context, id string) (*kapi.Status, error) // WatchImageStreams watches for new/changed/deleted image streams. WatchImageStreams(ctx kapi.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) + // FinalizeImageStream updates image stream's finalizers + FinalizeImageStream(ctx kapi.Context, repo *api.ImageStream) (*api.ImageStream, error) } // Storage is an interface for a standard REST Storage backend @@ -43,13 +45,14 @@ type Storage interface { // storage puts strong typing around storage calls type storage struct { Storage - status rest.Updater + status rest.Updater + finalizer rest.Updater } // NewRegistry returns a new Registry interface for the given Storage. Any mismatched // types will panic. -func NewRegistry(s Storage, status rest.Updater) Registry { - return &storage{s, status} +func NewRegistry(s Storage, status rest.Updater, finalizer rest.Updater) Registry { + return &storage{s, status, finalizer} } func (s *storage) ListImageStreams(ctx kapi.Context, label labels.Selector) (*api.ImageStreamList, error) { @@ -103,3 +106,11 @@ func (s *storage) DeleteImageStream(ctx kapi.Context, imageStreamID string) (*ka func (s *storage) WatchImageStreams(ctx kapi.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { return s.Watch(ctx, label, field, resourceVersion) } + +func (s *storage) FinalizeImageStream(ctx kapi.Context, imageStream *api.ImageStream) (*api.ImageStream, error) { + obj, _, err := s.finalizer.Update(ctx, imageStream) + if err != nil { + return nil, err + } + return obj.(*api.ImageStream), nil +} diff --git a/pkg/image/registry/imagestream/strategy.go b/pkg/image/registry/imagestream/strategy.go index e17ef87c82c8..7164be7e2aea 100644 --- a/pkg/image/registry/imagestream/strategy.go +++ b/pkg/image/registry/imagestream/strategy.go @@ -58,7 +58,8 @@ func (s Strategy) PrepareForCreate(obj runtime.Object) { stream := obj.(*api.ImageStream) stream.Status = api.ImageStreamStatus{ DockerImageRepository: s.dockerImageRepository(stream), - Tags: make(map[string]api.TagEventList), + Tags: make(map[string]api.TagEventList), + Phase: api.ImageStreamAvailable, } } @@ -335,6 +336,7 @@ func (s Strategy) PrepareForUpdate(obj, old runtime.Object) { stream.Status = oldStream.Status stream.Status.DockerImageRepository = s.dockerImageRepository(stream) + stream.Spec.Finalizers = oldStream.Spec.Finalizers } // ValidateUpdate is the default update validation for an end user. 
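Note: the Strategy changes above (setting Phase to Available in PrepareForCreate and carrying Spec.Finalizers forward in PrepareForUpdate) are what make the finalizer-gated deletion in the REST layer work. For readers unfamiliar with the pattern, which this patch borrows from Kubernetes namespace termination, here is a minimal, self-contained sketch of the lifecycle being enforced (the types and names below are illustrative only and are not part of the patch):

package main

import "fmt"

// stream is a toy stand-in for api.ImageStream: just the fields that
// participate in finalizer-gated deletion.
type stream struct {
	deletionRequested bool
	phase             string
	finalizers        []string
}

// deleteStream mirrors what REST.Delete does earlier in this patch: the first
// call only marks the stream Terminating, and actual removal is refused with
// a conflict until the finalizer list has been cleared.
func deleteStream(s *stream) error {
	if !s.deletionRequested {
		s.deletionRequested = true // the real code sets DeletionTimestamp
		s.phase = "Terminating"
		return nil // the stream is persisted in Terminating phase, not removed
	}
	if len(s.finalizers) != 0 {
		return fmt.Errorf("conflict: dependent images are still scheduled for deletion")
	}
	return nil // finalizers are empty, so the object may be removed from etcd
}

func main() {
	s := &stream{phase: "Available", finalizers: []string{"origin"}}
	fmt.Println(deleteStream(s), s.phase) // <nil> Terminating
	fmt.Println(deleteStream(s))          // conflict: still finalizing
	s.finalizers = nil                    // the FinalizeREST update clears this
	fmt.Println(deleteStream(s))          // <nil>: the stream may now be deleted
}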
@@ -373,6 +375,9 @@ func NewStatusStrategy(strategy Strategy) StatusStrategy { } func (StatusStrategy) PrepareForUpdate(obj, old runtime.Object) { + newImageStream := obj.(*api.ImageStream) + oldImageStream := old.(*api.ImageStream) + newImageStream.Spec = oldImageStream.Spec } func (StatusStrategy) AllowUnconditionalUpdate() bool { @@ -384,6 +389,27 @@ func (StatusStrategy) ValidateUpdate(ctx kapi.Context, obj, old runtime.Object) return validation.ValidateImageStreamStatusUpdate(obj.(*api.ImageStream), old.(*api.ImageStream)) } +type FinalizeStrategy struct { + Strategy +} + +func NewFinalizeStrategy(strategy Strategy) FinalizeStrategy { + return FinalizeStrategy{strategy} +} + +func (FinalizeStrategy) ValidateUpdate(ctx kapi.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { + return validation.ValidateImageStreamFinalizeUpdate(obj.(*api.ImageStream), old.(*api.ImageStream)) +} + +// PrepareForUpdate clears fields that are not allowed to be set by end users on update. +func (FinalizeStrategy) PrepareForUpdate(obj, old runtime.Object) { + newStream := obj.(*api.ImageStream) + oldStream := old.(*api.ImageStream) + newStream.Status = oldStream.Status + newStream.Spec.DockerImageRepository = oldStream.Spec.DockerImageRepository + newStream.Spec.Tags = oldStream.Spec.Tags +} + // MatchImageStream returns a generic matcher for a given label and field selector. func MatchImageStream(label labels.Selector, field fields.Selector) generic.Matcher { return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { @@ -402,6 +428,7 @@ func ImageStreamToSelectableFields(ir *api.ImageStream) labels.Set { "metadata.name": ir.Name, "spec.dockerImageRepository": ir.Spec.DockerImageRepository, "status.dockerImageRepository": ir.Status.DockerImageRepository, + "status.phase": ir.Status.Phase, } } diff --git a/pkg/image/registry/imagestreamdeletion/etcd/etcd.go b/pkg/image/registry/imagestreamdeletion/etcd/etcd.go new file mode 100644 index 000000000000..6d7e203c92f4 --- /dev/null +++ b/pkg/image/registry/imagestreamdeletion/etcd/etcd.go @@ -0,0 +1,77 @@ +package etcd + +import ( + kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + + "github.com/openshift/origin/pkg/image/api" + "github.com/openshift/origin/pkg/image/registry/imagestreamdeletion" +) + +// REST implements a RESTStorage for image stream deletions against etcd. +type REST struct { + store *etcdgeneric.Etcd +} + +// NewREST returns a new REST. 
+func NewREST(s storage.Interface) *REST { + prefix := "/imagestreamdeletions" + store := &etcdgeneric.Etcd{ + NewFunc: func() runtime.Object { return &api.ImageStreamDeletion{} }, + NewListFunc: func() runtime.Object { return &api.ImageStreamDeletionList{} }, + KeyRootFunc: func(ctx kapi.Context) string { + // image stream deletions are not namespace scoped + return prefix + }, + KeyFunc: func(ctx kapi.Context, name string) (string, error) { + // image stream deletions are not namespace scoped + return prefix + "/" + name, nil + }, + ObjectNameFunc: func(obj runtime.Object) (string, error) { + return obj.(*api.ImageStreamDeletion).Name, nil + }, + EndpointName: "imageStreamDeletion", + + ReturnDeletedObject: false, + + CreateStrategy: imagestreamdeletion.Strategy, + + Storage: s, + } + + return &REST{store: store} +} + +// New returns a new object +func (r *REST) New() runtime.Object { + return r.store.NewFunc() +} + +// NewList returns a new list object +func (r *REST) NewList() runtime.Object { + return r.store.NewListFunc() +} + +// List obtains a list of image stream deletions with labels that match selector. +func (r *REST) List(ctx kapi.Context, label labels.Selector, field fields.Selector) (runtime.Object, error) { + return r.store.ListPredicate(ctx, imagestreamdeletion.MatchImageStreamDeletion(label, field)) +} + +// Get gets a specific image stream deletion specified by its ID. +func (r *REST) Get(ctx kapi.Context, name string) (runtime.Object, error) { + return r.store.Get(ctx, name) +} + +// Create creates an image stream deletion based on a specification. +func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) { + return r.store.Create(ctx, obj) +} + +// Delete deletes an existing image stream deletion specified by its ID. 
+func (r *REST) Delete(ctx kapi.Context, name string, options *kapi.DeleteOptions) (runtime.Object, error) { + return r.store.Delete(ctx, name, options) +} diff --git a/pkg/image/registry/imagestreamdeletion/etcd/etcd_test.go b/pkg/image/registry/imagestreamdeletion/etcd/etcd_test.go new file mode 100644 index 000000000000..e8139072bb16 --- /dev/null +++ b/pkg/image/registry/imagestreamdeletion/etcd/etcd_test.go @@ -0,0 +1,206 @@ +package etcd + +import ( + "fmt" + "testing" + + "github.com/coreos/go-etcd/etcd" + kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" + "k8s.io/kubernetes/pkg/tools" + "k8s.io/kubernetes/pkg/tools/etcdtest" + + "github.com/openshift/origin/pkg/api/latest" + "github.com/openshift/origin/pkg/image/api" +) + +func newHelper(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { + fakeEtcdClient := tools.NewFakeEtcdClient(t) + fakeEtcdClient.TestIndex = true + helper := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + return fakeEtcdClient, helper +} + +func validNewDeletion() *api.ImageStreamDeletion { + return &api.ImageStreamDeletion{ + ObjectMeta: kapi.ObjectMeta{ + Name: "ns:foo", + }, + } +} + +func TestCreate(t *testing.T) { + _, helper := newHelper(t) + storage := NewREST(helper) + deletion := validNewDeletion() + _, err := storage.Create(kapi.NewContext(), deletion) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestGetImageStreamDeletionError(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + fakeEtcdClient.Err = fmt.Errorf("ns:foo") + storage := NewREST(helper) + + deletion, err := storage.Get(kapi.NewContext(), "deletion") + if deletion != nil { + t.Errorf("Unexpected non-nil image stream deletion: %#v", deletion) + } + if err != fakeEtcdClient.Err { + t.Errorf("Expected %#v, got %#v", fakeEtcdClient.Err, err) + } +} + +func TestGetImageStreamDeletionOK(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + storage := NewREST(helper) + + ctx := kapi.NewContext() + deletionName := "ns:foo" + key, _ := storage.store.KeyFunc(ctx, deletionName) + fakeEtcdClient.Set(key, runtime.EncodeOrDie(latest.Codec, &api.ImageStreamDeletion{ObjectMeta: kapi.ObjectMeta{Name: deletionName}}), 0) + + obj, err := storage.Get(kapi.NewContext(), deletionName) + if obj == nil { + t.Fatalf("Unexpected nil deletion") + } + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + deletion := obj.(*api.ImageStreamDeletion) + if e, a := deletionName, deletion.Name; e != a { + t.Errorf("Expected %#v, got %#v", e, a) + } +} + +func TestListImageStreamDeletionsError(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + fakeEtcdClient.Err = fmt.Errorf("ns:foo") + storage := NewREST(helper) + + deletions, err := storage.List(kapi.NewContext(), nil, nil) + if err != fakeEtcdClient.Err { + t.Errorf("Expected %#v, Got %#v", fakeEtcdClient.Err, err) + } + + if deletions != nil { + t.Errorf("Unexpected non-nil imageStreamDeletions list: %#v", deletions) + } +} + +func TestListImageStreamDeletionsEmptyList(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + fakeEtcdClient.ChangeIndex = 1 + fakeEtcdClient.Data["/imagestreamdeletions"] = tools.EtcdResponseWithError{ + R: &etcd.Response{}, + E: fakeEtcdClient.NewError(tools.EtcdErrorCodeNotFound), + } + storage := NewREST(helper) + + deletions, err := 
storage.List(kapi.NewContext(), labels.Everything(), fields.Everything()) + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + if len(deletions.(*api.ImageStreamDeletionList).Items) != 0 { + t.Errorf("Unexpected non-zero imageStreamDeletions list: %#v", deletions) + } + if deletions.(*api.ImageStreamDeletionList).ResourceVersion != "1" { + t.Errorf("Unexpected resource version: %#v", deletions) + } +} + +func TestListImageStreamDeletionsPopulatedList(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + storage := NewREST(helper) + + fakeEtcdClient.Data["/imagestreamdeletions"] = tools.EtcdResponseWithError{ + R: &etcd.Response{ + Node: &etcd.Node{ + Nodes: []*etcd.Node{ + {Value: runtime.EncodeOrDie(latest.Codec, &api.ImageStreamDeletion{ObjectMeta: kapi.ObjectMeta{Name: "ns:foo"}})}, + {Value: runtime.EncodeOrDie(latest.Codec, &api.ImageStreamDeletion{ObjectMeta: kapi.ObjectMeta{Name: "ns:bar"}})}, + }, + }, + }, + } + + list, err := storage.List(kapi.NewContext(), labels.Everything(), fields.Everything()) + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + + deletions := list.(*api.ImageStreamDeletionList) + + if e, a := 2, len(deletions.Items); e != a { + t.Errorf("Expected %v, got %v", e, a) + } +} + +func TestCreateImageStreamDeletionOK(t *testing.T) { + _, helper := newHelper(t) + storage := NewREST(helper) + + stream := &api.ImageStreamDeletion{ObjectMeta: kapi.ObjectMeta{Name: "ns:foo"}} + _, err := storage.Create(kapi.NewContext(), stream) + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + + actual := &api.ImageStreamDeletion{} + if err := helper.Get("/imagestreamdeletions/ns:foo", actual, false); err != nil { + t.Fatalf("unexpected extraction error: %v", err) + } + if actual.Name != stream.Name { + t.Errorf("unexpected stream: %#v", actual) + } + if stream.CreationTimestamp.IsZero() { + t.Error("Unexpected zero CreationTimestamp") + } +} + +func TestCreateRegistryErrorSaving(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + fakeEtcdClient.Err = fmt.Errorf("foo") + storage := NewREST(helper) + + _, err := storage.Create(kapi.NewContext(), &api.ImageStreamDeletion{ObjectMeta: kapi.ObjectMeta{Name: "ns:foo"}}) + if err != fakeEtcdClient.Err { + t.Fatalf("Unexpected non-nil error: %#v", err) + } +} + +func TestDeleteImageStreamDeletion(t *testing.T) { + fakeEtcdClient, helper := newHelper(t) + fakeEtcdClient.Data["/imagestreamdeletions/ns:foo"] = tools.EtcdResponseWithError{ + R: &etcd.Response{ + Node: &etcd.Node{ + Value: runtime.EncodeOrDie(latest.Codec, validNewDeletion()), + ModifiedIndex: 2, + }, + }, + } + storage := NewREST(helper) + + obj, err := storage.Delete(kapi.NewContext(), "ns:foo", nil) + if err != nil { + t.Fatalf("Unexpected non-nil error: %#v", err) + } + status, ok := obj.(*kapi.Status) + if !ok { + t.Fatalf("Expected status type, got: %#v", obj) + } + if status.Status != kapi.StatusSuccess { + t.Errorf("Expected status=success, got: %#v", status) + } + if len(fakeEtcdClient.DeletedKeys) != 1 { + t.Errorf("Expected 1 delete, found %#v", fakeEtcdClient.DeletedKeys) + } else if key := "/imagestreamdeletions/ns:foo"; fakeEtcdClient.DeletedKeys[0] != key { + t.Errorf("Unexpected key: %s, expected %s", fakeEtcdClient.DeletedKeys[0], key) + } +} diff --git a/pkg/image/registry/imagestreamdeletion/registry.go b/pkg/image/registry/imagestreamdeletion/registry.go new file mode 100644 index 000000000000..9a30746ef1b3 --- /dev/null +++ b/pkg/image/registry/imagestreamdeletion/registry.go @@ -0,0 
+1,71 @@ +package imagestreamdeletion + +import ( + kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + + "github.com/openshift/origin/pkg/image/api" +) + +// Registry is an interface for things that know how to store ImageStreamDeletion objects. +type Registry interface { + ListImageStreamDeletions(ctx kapi.Context, selector labels.Selector) (*api.ImageStreamDeletionList, error) + GetImageStreamDeletion(ctx kapi.Context, id string) (*api.ImageStreamDeletion, error) + CreateImageStreamDeletion(ctx kapi.Context, deletion *api.ImageStreamDeletion) (*api.ImageStreamDeletion, error) + DeleteImageStreamDeletion(ctx kapi.Context, id string) (*kapi.Status, error) +} + +// Storage is an interface for a standard REST Storage backend +type Storage interface { + rest.GracefulDeleter + rest.Lister + rest.Getter + rest.Creater +} + +// storage puts strong typing around storage calls +type storage struct { + Storage +} + +var _ Storage = &storage{} + +// NewRegistry returns a new Registry interface for the given Storage. Any mismatched +// types will panic. +func NewRegistry(s Storage) Registry { + return &storage{s} +} + +func (s *storage) ListImageStreamDeletions(ctx kapi.Context, label labels.Selector) (*api.ImageStreamDeletionList, error) { + obj, err := s.List(ctx, label, fields.Everything()) + if err != nil { + return nil, err + } + return obj.(*api.ImageStreamDeletionList), nil +} + +func (s *storage) GetImageStreamDeletion(ctx kapi.Context, imageStreamID string) (*api.ImageStreamDeletion, error) { + obj, err := s.Get(ctx, imageStreamID) + if err != nil { + return nil, err + } + return obj.(*api.ImageStreamDeletion), nil +} + +func (s *storage) CreateImageStreamDeletion(ctx kapi.Context, deletion *api.ImageStreamDeletion) (*api.ImageStreamDeletion, error) { + obj, err := s.Create(ctx, deletion) + if err != nil { + return nil, err + } + return obj.(*api.ImageStreamDeletion), nil +} + +func (s *storage) DeleteImageStreamDeletion(ctx kapi.Context, imageStreamID string) (*kapi.Status, error) { + obj, err := s.Delete(ctx, imageStreamID, nil) + if err != nil { + return nil, err + } + return obj.(*kapi.Status), nil +} diff --git a/pkg/image/registry/imagestreamdeletion/strategy.go b/pkg/image/registry/imagestreamdeletion/strategy.go new file mode 100644 index 000000000000..f589cfc9e750 --- /dev/null +++ b/pkg/image/registry/imagestreamdeletion/strategy.go @@ -0,0 +1,57 @@ +package imagestreamdeletion + +import ( + "fmt" + + kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/fielderrors" + + "github.com/openshift/origin/pkg/image/api" + "github.com/openshift/origin/pkg/image/api/validation" +) + +// imageStreamDeletionStrategy implements behavior for ImageStreamDeletions. +type imageStreamDeletionStrategy struct { + runtime.ObjectTyper + kapi.NameGenerator +} + +// Strategy is the default logic that applies when creating +// ImageStreamDeletion objects via the REST API. +var Strategy = imageStreamDeletionStrategy{kapi.Scheme, kapi.SimpleNameGenerator} + +// NamespaceScoped is false for image stream deletions. +func (s imageStreamDeletionStrategy) NamespaceScoped() bool { + return false +} + +// PrepareForCreate clears fields that are not allowed to be set by end users on creation. 
+func (s imageStreamDeletionStrategy) PrepareForCreate(obj runtime.Object) { +} + +// Validate validates a new image stream deletion. +func (s imageStreamDeletionStrategy) Validate(ctx kapi.Context, obj runtime.Object) fielderrors.ValidationErrorList { + deletion := obj.(*api.ImageStreamDeletion) + return validation.ValidateImageStreamDeletion(deletion) +} + +// MatchImageStreamDeletion returns a generic matcher for a given label and field selector. +func MatchImageStreamDeletion(label labels.Selector, field fields.Selector) generic.Matcher { + return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { + ir, ok := obj.(*api.ImageStreamDeletion) + if !ok { + return false, fmt.Errorf("not an ImageStreamDeletion") + } + fields := ImageStreamDeletionToSelectableFields(ir) + return label.Matches(labels.Set(ir.Labels)) && field.Matches(fields), nil + }) +} + +// ImageStreamDeletionToSelectableFields returns a label set that represents the object. +func ImageStreamDeletionToSelectableFields(deletion *api.ImageStreamDeletion) labels.Set { + return labels.Set{"metadata.name": deletion.Name} +} diff --git a/pkg/image/registry/imagestreamimage/registry.go b/pkg/image/registry/imagestreamimage/registry.go index 7eb83cc68b7b..8956ba565403 100644 --- a/pkg/image/registry/imagestreamimage/registry.go +++ b/pkg/image/registry/imagestreamimage/registry.go @@ -1,19 +1,24 @@ package imagestreamimage import ( - "github.com/openshift/origin/pkg/image/api" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + + "github.com/openshift/origin/pkg/image/api" ) // Registry is an interface for things that know how to store ImageStreamImage objects. type Registry interface { GetImageStreamImage(ctx kapi.Context, nameAndTag string) (*api.ImageStreamImage, error) + ListImageStreamImages(ctx kapi.Context, label labels.Selector, field fields.Selector) (*api.ImageStreamImageList, error) } // Storage is an interface for a standard REST Storage backend type Storage interface { rest.Getter + rest.Lister } // storage puts strong typing around storage calls @@ -34,3 +39,11 @@ func (s *storage) GetImageStreamImage(ctx kapi.Context, name string) (*api.Image } return obj.(*api.ImageStreamImage), nil } + +func (s *storage) ListImageStreamImages(ctx kapi.Context, label labels.Selector, field fields.Selector) (*api.ImageStreamImageList, error) { + obj, err := s.List(ctx, label, field) + if err != nil { + return nil, err + } + return obj.(*api.ImageStreamImageList), nil +} diff --git a/pkg/image/registry/imagestreamimage/rest.go b/pkg/image/registry/imagestreamimage/rest.go index 80a31649760e..c98555276a6b 100644 --- a/pkg/image/registry/imagestreamimage/rest.go +++ b/pkg/image/registry/imagestreamimage/rest.go @@ -5,9 +5,12 @@ import ( "strings" "github.com/docker/distribution/digest" - + "github.com/golang/glog" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" "k8s.io/kubernetes/pkg/runtime" "github.com/openshift/origin/pkg/image/api" @@ -103,3 +106,80 @@ func (r *REST) Get(ctx kapi.Context, id string) (runtime.Object, error) { return nil, errors.NewConflict("imageStreamImage", imageID, fmt.Errorf("multiple images match the prefix %q: %s", imageID, strings.Join(set.List(), ", "))) } } + +// NewList returns a new list object +func (r *REST) NewList() runtime.Object { + return &api.ImageStreamImageList{} +} 
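The List implementation that follows flattens every status tag of every visible image stream into ImageStreamImage items named "<stream>@<short image id>", shortening any parseable digest to a 7-character hex prefix (which is why the test below expects the name "repo@id": "id" is not a parseable digest and is too short to truncate). A standalone sketch of that shortening, under the assumption that splitting on ':' approximates the digest.ParseDigest call from github.com/docker/distribution/digest that the real code uses:

package main

import (
	"fmt"
	"strings"
)

// shortImageName approximates the id shortening in the List method below:
// take the hex portion of a digest-like name and keep at most 7 characters.
func shortImageName(name string) string {
	if i := strings.LastIndex(name, ":"); i >= 0 && i+1 < len(name) {
		name = name[i+1:] // e.g. "sha256:2d24f8..." becomes "2d24f8..."
	}
	if len(name) > 7 {
		name = name[:7]
	}
	return name
}

func main() {
	fmt.Println(shortImageName("sha256:2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7")) // "2d24f82"
	fmt.Println(shortImageName("id")) // "id" (too short to truncate, kept as-is)
}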
+ +func (r *REST) List(ctx kapi.Context, label labels.Selector, field fields.Selector) (runtime.Object, error) { + repos, err := r.imageStreamRegistry.ListImageStreams(ctx, labels.Everything()) + if err != nil { + return nil, err + } + + list := api.ImageStreamImageList{} + for _, repo := range repos.Items { + if repo.Status.Tags == nil { + continue + } + + for _, history := range repo.Status.Tags { + for _, tagging := range history.Items { + imageName := tagging.Image + image, err := r.imageRegistry.GetImage(ctx, imageName) + if err != nil { + glog.V(4).Infof("Failed to get image %q of image stream %s: %v", imageName, repo.Name, err) + continue + } + imageWithMetadata, err := api.ImageWithMetadata(*image) + if err != nil { + glog.V(4).Infof("Failed to get metadata of image %q of image stream %s: %v", imageName, repo.Name, err) + continue + } + + if d, err := digest.ParseDigest(imageName); err == nil { + imageName = d.Hex() + } + if len(imageName) > 7 { + imageName = imageName[:7] + } + + isi := api.ImageStreamImage{ + ObjectMeta: kapi.ObjectMeta{ + Namespace: repo.Namespace, + Name: fmt.Sprintf("%s@%s", repo.Name, imageName), + }, + Image: *imageWithMetadata, + } + + list.Items = append(list.Items, isi) + } + } + } + + return generic.FilterList(&list, MatchImageStreamImage(label, field), generic.DecoratorFunc(nil)) +} + +// MatchImageStreamImage returns a generic matcher for a given label and field selector. +func MatchImageStreamImage(label labels.Selector, field fields.Selector) generic.Matcher { + return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { + ir, ok := obj.(*api.ImageStreamImage) + if !ok { + return false, fmt.Errorf("not an ImageStreamImage") + } + fields := ImageStreamImageToSelectableFields(ir) + return label.Matches(labels.Set(ir.Labels)) && field.Matches(fields), nil + }) +} + +// ImageStreamImageToSelectableFields returns a label set that represents the object. 
+func ImageStreamImageToSelectableFields(ir *api.ImageStreamImage) labels.Set { + nameParts := strings.Split(ir.Name, "@") + return labels.Set{ + "metadata.name": ir.Name, + "imagestream.name": nameParts[0], + "image.name": ir.Image.Name, + "image.status.phase": ir.Image.Status.Phase, + } +} diff --git a/pkg/image/registry/imagestreamimage/rest_test.go b/pkg/image/registry/imagestreamimage/rest_test.go index 38a8eb68def7..a07874db99fa 100644 --- a/pkg/image/registry/imagestreamimage/rest_test.go +++ b/pkg/image/registry/imagestreamimage/rest_test.go @@ -4,6 +4,15 @@ import ( "testing" "github.com/coreos/go-etcd/etcd" + kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + kstorage "k8s.io/kubernetes/pkg/storage" + etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" + "k8s.io/kubernetes/pkg/tools" + "k8s.io/kubernetes/pkg/tools/etcdtest" + "github.com/openshift/origin/pkg/api/latest" authorizationapi "github.com/openshift/origin/pkg/authorization/api" "github.com/openshift/origin/pkg/authorization/registry/subjectaccessreview" @@ -12,15 +21,72 @@ import ( imageetcd "github.com/openshift/origin/pkg/image/registry/image/etcd" "github.com/openshift/origin/pkg/image/registry/imagestream" imagestreametcd "github.com/openshift/origin/pkg/image/registry/imagestream/etcd" - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" - kstorage "k8s.io/kubernetes/pkg/storage" - etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" - "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" ) -var testDefaultRegistry = imagestream.DefaultRegistryFunc(func() (string, bool) { return "defaultregistry:5000", true }) +var ( + testDefaultRegistry = imagestream.DefaultRegistryFunc(func() (string, bool) { return "defaultregistry:5000", true }) + testImage = &api.Image{ + ObjectMeta: kapi.ObjectMeta{ + Name: "id", + }, + DockerImageManifest: `{ + "name": "library/ubuntu", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + { + "blobSum": "tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0" + }, + { + "blobSum": "tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171" + }, + { + "blobSum": "tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd" + }, + { + "blobSum": "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7\",\"parent\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"created\":\"2015-02-21T02:11:06.735146646Z\",\"container\":\"c9a3eda5951d28aa8dbe5933be94c523790721e4f80886d0a8e7a710132a38ec\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[/bin/bash]\"],\"Image\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"parent\":\"1c8294cc516082dfbb731f062806b76b82679ce38864dd87635f08869c993e45\",\"created\":\"2015-02-21T02:11:02.473113442Z\",\"container\":\"b4d4c42c196081cdb8e032446e9db92c0ce8ddeeeb1ef4e582b0275ad62b9af2\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"sed -i 's/^#\\\\s*\\\\(deb.*universe\\\\)$/\\\\1/g' /etc/apt/sources.list\"],\"Image\":\"1c8294cc516082dfbb731f062806b76b82679ce38864dd87635f08869c993e45\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"1c8294cc516082dfbb731f062806b76b82679ce38864dd87635f08869c993e45\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0\",\"Size\":1895}\n" + }, + { + "v1Compatibility": "{\"id\":\"1c8294cc516082dfbb731f062806b76b82679ce38864dd87635f08869c993e45\",\"parent\":\"fa4fd76b09ce9b87bfdc96515f9a5dd5121c01cc996cf5379050d8e13d4a864b\",\"created\":\"2015-02-21T02:10:56.648624065Z\",\"container\":\"d6323d4f92bf13dd2a7d2957e8970c8dc8ba3c2df08ffebdf4a04b4b658c83fb\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"echo 
'#!/bin/sh' \\u003e /usr/sbin/policy-rc.d \\u0009\\u0026\\u0026 echo 'exit 101' \\u003e\\u003e /usr/sbin/policy-rc.d \\u0009\\u0026\\u0026 chmod +x /usr/sbin/policy-rc.d \\u0009\\u0009\\u0026\\u0026 dpkg-divert --local --rename --add /sbin/initctl \\u0009\\u0026\\u0026 cp -a /usr/sbin/policy-rc.d /sbin/initctl \\u0009\\u0026\\u0026 sed -i 's/^exit.*/exit 0/' /sbin/initctl \\u0009\\u0009\\u0026\\u0026 echo 'force-unsafe-io' \\u003e /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \\u0009\\u0009\\u0026\\u0026 echo 'DPkg::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e /etc/apt/apt.conf.d/docker-clean \\u0009\\u0026\\u0026 echo 'APT::Update::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\u0009\\u0026\\u0026 echo 'Dir::Cache::pkgcache \\\"\\\"; Dir::Cache::srcpkgcache \\\"\\\";' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\u0009\\u0009\\u0026\\u0026 echo 'Acquire::Languages \\\"none\\\";' \\u003e /etc/apt/apt.conf.d/docker-no-languages \\u0009\\u0009\\u0026\\u0026 echo 'Acquire::GzipIndexes \\\"true\\\"; Acquire::CompressionTypes::Order:: \\\"gz\\\";' \\u003e /etc/apt/apt.conf.d/docker-gzip-indexes\"],\"Image\":\"fa4fd76b09ce9b87bfdc96515f9a5dd5121c01cc996cf5379050d8e13d4a864b\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"fa4fd76b09ce9b87bfdc96515f9a5dd5121c01cc996cf5379050d8e13d4a864b\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171\",\"Size\":194533}\n" + }, + { + "v1Compatibility": "{\"id\":\"fa4fd76b09ce9b87bfdc96515f9a5dd5121c01cc996cf5379050d8e13d4a864b\",\"parent\":\"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158\",\"created\":\"2015-02-21T02:10:48.627646094Z\",\"container\":\"43bd710ec89aa1d61685c5ddb8737b315f2152d04c9e127ea2a9cf79f950fa5d\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:0018ff77d038472f52512fd66ae2466e6325e5d91289e10a6b324f40582298df in 
/\"],\"Image\":\"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd\",\"Size\":188097705}\n" + }, + { + "v1Compatibility": "{\"id\":\"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158\",\"comment\":\"Imported from -\",\"created\":\"2013-06-13T14:03:50.821769-07:00\",\"container_config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null},\"docker_version\":\"0.4.0\",\"architecture\":\"x86_64\",\"checksum\":\"tarsum.dev+sha256:324d4cf44ee7daa46266c1df830c61a7df615c0632176a339e7310e34723d67a\",\"Size\":0}\n" + } + ], + "schemaVersion": 1, + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "JafVC022gLJbK8fEp9UrW-GWAi53nD4Xf0-fmb766nV6zGTq7g9RutSWHcpv3OqhV8t5b-4j-bXhvnGBjXA7Yg", + "protected": "eyJmb3JtYXRMZW5ndGgiOjEwMDc2LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTUtMDMtMDdUMTI6Mzk6MjJaIn0" + } + ] +}`, + } +) type fakeSubjectAccessReviewRegistry struct { } @@ -35,10 +101,10 @@ func setup(t *testing.T) (*tools.FakeEtcdClient, kstorage.Interface, *REST) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true helper := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) - imageStorage := imageetcd.NewREST(helper) - imageRegistry := image.NewRegistry(imageStorage) - imageStreamStorage, imageStreamStatus := imagestreametcd.NewREST(helper, testDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) - imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatus) + imageStorage, imageStatusStorage, imageFinalizeStorage := imageetcd.NewREST(helper) + imageRegistry := image.NewRegistry(imageStorage, imageStatusStorage, imageFinalizeStorage) + imageStreamStorage, imageStreamStatus, imageStreamFinalize := imagestreametcd.NewREST(helper, testDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatus, imageStreamFinalize) storage := NewREST(imageRegistry, imageStreamRegistry) return fakeEtcdClient, helper, storage } @@ -167,67 +233,7 @@ 
func TestGet(t *testing.T) { }, }, }, - image: &api.Image{ - ObjectMeta: kapi.ObjectMeta{ - Name: "id", - }, - DockerImageManifest: `{ - "name": "library/ubuntu", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - }, - { - "blobSum": "tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0" - }, - { - "blobSum": "tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171" - }, - { - "blobSum": "tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd" - }, - { - "blobSum": "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7\",\"parent\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"created\":\"2015-02-21T02:11:06.735146646Z\",\"container\":\"c9a3eda5951d28aa8dbe5933be94c523790721e4f80886d0a8e7a710132a38ec\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/bin/bash]\"],\"Image\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"parent\":\"1c8294cc516082dfbb731f062806b76b82679ce38864dd87635f08869c993e45\",\"created\":\"2015-02-21T02:11:02.473113442Z\",\"container\":\"b4d4c42c196081cdb8e032446e9db92c0ce8ddeeeb1ef4e582b0275ad62b9af2\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"sed -i 's/^#\\\\s*\\\\(deb.*universe\\\\)$/\\\\1/g' 
/etc/apt/sources.list\"],\"Image\":\"1c8294cc516082dfbb731f062806b76b82679ce38864dd87635f08869c993e45\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"1c8294cc516082dfbb731f062806b76b82679ce38864dd87635f08869c993e45\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0\",\"Size\":1895}\n" - }, - { - "v1Compatibility": "{\"id\":\"1c8294cc516082dfbb731f062806b76b82679ce38864dd87635f08869c993e45\",\"parent\":\"fa4fd76b09ce9b87bfdc96515f9a5dd5121c01cc996cf5379050d8e13d4a864b\",\"created\":\"2015-02-21T02:10:56.648624065Z\",\"container\":\"d6323d4f92bf13dd2a7d2957e8970c8dc8ba3c2df08ffebdf4a04b4b658c83fb\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"echo '#!/bin/sh' \\u003e /usr/sbin/policy-rc.d \\u0009\\u0026\\u0026 echo 'exit 101' \\u003e\\u003e /usr/sbin/policy-rc.d \\u0009\\u0026\\u0026 chmod +x /usr/sbin/policy-rc.d \\u0009\\u0009\\u0026\\u0026 dpkg-divert --local --rename --add /sbin/initctl \\u0009\\u0026\\u0026 cp -a /usr/sbin/policy-rc.d /sbin/initctl \\u0009\\u0026\\u0026 sed -i 's/^exit.*/exit 0/' /sbin/initctl \\u0009\\u0009\\u0026\\u0026 echo 'force-unsafe-io' \\u003e /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \\u0009\\u0009\\u0026\\u0026 echo 'DPkg::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e /etc/apt/apt.conf.d/docker-clean \\u0009\\u0026\\u0026 echo 'APT::Update::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\u0009\\u0026\\u0026 echo 'Dir::Cache::pkgcache \\\"\\\"; Dir::Cache::srcpkgcache \\\"\\\";' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\u0009\\u0009\\u0026\\u0026 echo 'Acquire::Languages \\\"none\\\";' \\u003e /etc/apt/apt.conf.d/docker-no-languages \\u0009\\u0009\\u0026\\u0026 echo 'Acquire::GzipIndexes \\\"true\\\"; Acquire::CompressionTypes::Order:: \\\"gz\\\";' \\u003e 
/etc/apt/apt.conf.d/docker-gzip-indexes\"],\"Image\":\"fa4fd76b09ce9b87bfdc96515f9a5dd5121c01cc996cf5379050d8e13d4a864b\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"fa4fd76b09ce9b87bfdc96515f9a5dd5121c01cc996cf5379050d8e13d4a864b\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171\",\"Size\":194533}\n" - }, - { - "v1Compatibility": "{\"id\":\"fa4fd76b09ce9b87bfdc96515f9a5dd5121c01cc996cf5379050d8e13d4a864b\",\"parent\":\"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158\",\"created\":\"2015-02-21T02:10:48.627646094Z\",\"container\":\"43bd710ec89aa1d61685c5ddb8737b315f2152d04c9e127ea2a9cf79f950fa5d\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:0018ff77d038472f52512fd66ae2466e6325e5d91289e10a6b324f40582298df in /\"],\"Image\":\"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd\",\"Size\":188097705}\n" - }, - { - "v1Compatibility": "{\"id\":\"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158\",\"comment\":\"Imported from 
-\",\"created\":\"2013-06-13T14:03:50.821769-07:00\",\"container_config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null},\"docker_version\":\"0.4.0\",\"architecture\":\"x86_64\",\"checksum\":\"tarsum.dev+sha256:324d4cf44ee7daa46266c1df830c61a7df615c0632176a339e7310e34723d67a\",\"Size\":0}\n" - } - ], - "schemaVersion": 1, - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "JafVC022gLJbK8fEp9UrW-GWAi53nD4Xf0-fmb766nV6zGTq7g9RutSWHcpv3OqhV8t5b-4j-bXhvnGBjXA7Yg", - "protected": "eyJmb3JtYXRMZW5ndGgiOjEwMDc2LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTUtMDMtMDdUMTI6Mzk6MjJaIn0" - } - ] -}`, - }, + image: testImage, }, } @@ -293,3 +299,103 @@ func TestGet(t *testing.T) { } } } + +func TestList(t *testing.T) { + fakeEtcdClient, _, storage := setup(t) + + fakeEtcdClient.Data["/imagestreams/default"] = tools.EtcdResponseWithError{ + R: &etcd.Response{ + Node: &etcd.Node{ + Nodes: []*etcd.Node{ + {Value: runtime.EncodeOrDie(latest.Codec, &api.ImageStream{ + ObjectMeta: kapi.ObjectMeta{ + Namespace: "ns", + Name: "reponoimage", + }, + Status: api.ImageStreamStatus{ + Tags: map[string]api.TagEventList{ + "latest": { + Items: []api.TagEvent{ + {Image: "anotherid"}, + }, + }, + }, + }, + }), + }, + + {Value: runtime.EncodeOrDie(latest.Codec, &api.ImageStream{ + ObjectMeta: kapi.ObjectMeta{ + Namespace: "ns", + Name: "repo", + }, + Status: api.ImageStreamStatus{ + Tags: map[string]api.TagEventList{ + "latest": { + Items: []api.TagEvent{ + {Image: "anotherid"}, + {Image: "anotherid2"}, + {Image: "id"}, + }, + }, + }, + }, + }), + }, + }, + }, + }, + } + + fakeEtcdClient.Data["/images/id"] = tools.EtcdResponseWithError{ + R: &etcd.Response{ + Node: &etcd.Node{ + Value: runtime.EncodeOrDie(latest.Codec, testImage), + }, + }, + } + + fakeEtcdClient.Data["/images/unreferenced"] = tools.EtcdResponseWithError{ + R: &etcd.Response{ + Node: &etcd.Node{ + Value: runtime.EncodeOrDie(latest.Codec, &api.Image{ + ObjectMeta: kapi.ObjectMeta{ + Name: "unreferenced", + }, + DockerImageManifest: `{ + "name": "foo/bar", + "tag": "1.2.3", + "architecture": "amd64", + "fsLayers": [], + "history": [], + "schemaVersion": 1, + "signatures": [], + }`, + }, + )}, + }, + } + + list, err := storage.List(kapi.NewDefaultContext(), labels.Everything(), fields.Everything()) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + isiList := list.(*api.ImageStreamImageList) + if len(isiList.Items) != 1 { + t.Fatalf("expected 1 item, got %d", len(isiList.Items)) + } + isi := isiList.Items[0] + // validate a couple of the fields + if isi.Namespace != "ns" { + t.Errorf("got unexpected namespace %q", isi.Namespace, "ns") + } + expectedImageName := "repo@id" + if isi.Name != expectedImageName { + t.Errorf("got unexpected name (%q != %q)", isi.Name, expectedImageName) + } + expectedMID := "2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7" + if isi.Image.DockerImageMetadata.ID != 
expectedMID { + t.Errorf("got unexpected image id (%q != %q)", isi.Image.DockerImageMetadata.ID, expectedMID) + } +} diff --git a/pkg/image/registry/imagestreammapping/rest_test.go b/pkg/image/registry/imagestreammapping/rest_test.go index 4558eab9e43f..e818f01ea548 100644 --- a/pkg/image/registry/imagestreammapping/rest_test.go +++ b/pkg/image/registry/imagestreammapping/rest_test.go @@ -15,6 +15,7 @@ import ( "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" + oapi "github.com/openshift/origin/pkg/api" "github.com/openshift/origin/pkg/api/latest" authorizationapi "github.com/openshift/origin/pkg/authorization/api" "github.com/openshift/origin/pkg/authorization/registry/subjectaccessreview" @@ -40,22 +41,13 @@ func setup(t *testing.T) (*tools.FakeEtcdClient, kstorage.Interface, *REST) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true helper := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) - imageStorage := imageetcd.NewREST(helper) - imageRegistry := image.NewRegistry(imageStorage) - imageStreamStorage, imageStreamStatus := imagestreametcd.NewREST(helper, testDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) - imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatus) + imageRegistry := image.NewRegistry(imageetcd.NewREST(helper)) + imageStreamStorage, imageStreamStatus, imageStreamFinalize := imagestreametcd.NewREST(helper, testDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) + imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatus, imageStreamFinalize) storage := NewREST(imageRegistry, imageStreamRegistry) return fakeEtcdClient, helper, storage } -func validImageStream() *api.ImageStream { - return &api.ImageStream{ - ObjectMeta: kapi.ObjectMeta{ - Name: "test", - }, - } -} - func validNewMappingWithName() *api.ImageStreamMapping { return &api.ImageStreamMapping{ ObjectMeta: kapi.ObjectMeta{ @@ -77,6 +69,10 @@ func validNewMappingWithName() *api.ImageStreamMapping { WorkingDir: "/workingDir", }, }, + Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, + Status: api.ImageStatus{ + Phase: api.ImageAvailable, + }, }, Tag: "latest", } @@ -356,8 +352,10 @@ func TestTrackingTags(t *testing.T) { }, }, }, + Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, }, Status: api.ImageStreamStatus{ + Phase: api.ImageStreamAvailable, Tags: map[string]api.TagEventList{ "tracking": { Items: []api.TagEvent{ @@ -396,6 +394,10 @@ func TestTrackingTags(t *testing.T) { Name: "5678", }, DockerImageReference: "foo/bar@sha256:5678", + Finalizers: []kapi.FinalizerName{oapi.FinalizerOrigin}, + Status: api.ImageStatus{ + Phase: api.ImageAvailable, + }, } mapping := api.ImageStreamMapping{ diff --git a/pkg/image/registry/imagestreamtag/rest_test.go b/pkg/image/registry/imagestreamtag/rest_test.go index 7f6f95fe8f7d..8e9ecbfda318 100644 --- a/pkg/image/registry/imagestreamtag/rest_test.go +++ b/pkg/image/registry/imagestreamtag/rest_test.go @@ -58,10 +58,9 @@ func setup(t *testing.T) (*tools.FakeEtcdClient, kstorage.Interface, *REST) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true helper := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) - imageStorage := imageetcd.NewREST(helper) - imageRegistry := image.NewRegistry(imageStorage) - imageStreamStorage, imageStreamStatus := imagestreametcd.NewREST(helper, testDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) - imageStreamRegistry := 
imagestream.NewRegistry(imageStreamStorage, imageStreamStatus)
+	imageRegistry := image.NewRegistry(imageetcd.NewREST(helper))
+	imageStreamStorage, imageStreamStatus, imageStreamFinalize := imagestreametcd.NewREST(helper, testDefaultRegistry, &fakeSubjectAccessReviewRegistry{})
+	imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatus, imageStreamFinalize)
 	storage := NewREST(imageRegistry, imageStreamRegistry)
 	return fakeEtcdClient, helper, storage
 }
@@ -272,6 +271,7 @@ func TestGetImageStreamTag(t *testing.T) {
 }
 
 func TestDeleteImageStreamTag(t *testing.T) {
+	now := util.Now()
 	tests := map[string]struct {
 		repo        *api.ImageStream
 		expectError bool
@@ -310,8 +310,9 @@ func TestDeleteImageStreamTag(t *testing.T) {
 		"happy path": {
 			repo: &api.ImageStream{
 				ObjectMeta: kapi.ObjectMeta{
-					Namespace: "default",
-					Name:      "test",
+					Namespace:         "default",
+					Name:              "test",
+					DeletionTimestamp: &now,
 				},
 				Spec: api.ImageStreamSpec{
 					Tags: map[string]api.TagReference{
@@ -331,6 +332,7 @@
 				},
 				Status: api.ImageStreamStatus{
 					DockerImageRepository: "registry.default.local/default/test",
+					Phase: api.ImageStreamTerminating,
 					Tags: map[string]api.TagEventList{
 						"another": {
 							Items: []api.TagEvent{
diff --git a/pkg/image/util/util.go b/pkg/image/util/util.go
new file mode 100644
index 000000000000..30b00240821e
--- /dev/null
+++ b/pkg/image/util/util.go
@@ -0,0 +1,131 @@
+package util
+
+import (
+	kapi "k8s.io/kubernetes/pkg/api"
+	kerrors "k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/util/sets"
+
+	oapi "github.com/openshift/origin/pkg/api"
+	"github.com/openshift/origin/pkg/client"
+	"github.com/openshift/origin/pkg/image/api"
+)
+
+// ImageFinalized returns true if the finalizers do not contain the origin finalizer
+func ImageFinalized(image *api.Image) bool {
+	for i := range image.Finalizers {
+		if oapi.FinalizerOrigin == image.Finalizers[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// FinalizeImage will remove the origin finalizer from the image
+func FinalizeImage(oClient client.ImagesInterfacer, image *api.Image) (result *api.Image, err error) {
+	if ImageFinalized(image) {
+		return image, nil
+	}
+
+	// there is a potential for a resource conflict with the base kubernetes finalizer;
+	// as a result, we handle resource conflicts in case multiple finalizers try
+	// to finalize at the same time
+	for {
+		result, err = finalizeImageInternal(oClient, image, false)
+		if err == nil {
+			return result, nil
+		}
+
+		if !kerrors.IsConflict(err) {
+			return nil, err
+		}
+
+		image, err = oClient.Images().Get(image.Name)
+		if err != nil {
+			return nil, err
+		}
+	}
+}
+
+// finalizeImageInternal will update the image finalizer list to either include or omit the origin finalizer
+func finalizeImageInternal(oClient client.ImagesInterfacer, image *api.Image, withOrigin bool) (*api.Image, error) {
+	imageFinalize := api.Image{}
+	imageFinalize.ObjectMeta = image.ObjectMeta
+	imageFinalize.Finalizers = image.Finalizers
+
+	finalizerSet := sets.NewString()
+	for i := range image.Finalizers {
+		finalizerSet.Insert(string(image.Finalizers[i]))
+	}
+
+	if withOrigin {
+		finalizerSet.Insert(string(oapi.FinalizerOrigin))
+	} else {
+		finalizerSet.Delete(string(oapi.FinalizerOrigin))
+	}
+
+	imageFinalize.Finalizers = make([]kapi.FinalizerName, 0, len(finalizerSet))
+	for _, value := range finalizerSet.List() {
+		imageFinalize.Finalizers = append(imageFinalize.Finalizers, kapi.FinalizerName(value))
+	}
+	return 
oClient.Images().Finalize(&imageFinalize)
+}
+
+// ImageStreamFinalized returns true if the spec.finalizers does not contain the origin finalizer
+func ImageStreamFinalized(imageStream *api.ImageStream) bool {
+	for i := range imageStream.Spec.Finalizers {
+		if oapi.FinalizerOrigin == imageStream.Spec.Finalizers[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// FinalizeImageStream will remove the origin finalizer from the image stream
+func FinalizeImageStream(oClient client.ImageStreamsNamespacer, imageStream *api.ImageStream) (result *api.ImageStream, err error) {
+	if ImageStreamFinalized(imageStream) {
+		return imageStream, nil
+	}
+
+	// there is a potential for a resource conflict with the base kubernetes finalizer;
+	// as a result, we handle resource conflicts in case multiple finalizers try
+	// to finalize at the same time
+	for {
+		result, err = finalizeImageStreamInternal(oClient, imageStream, false)
+		if err == nil {
+			return result, nil
+		}
+
+		if !kerrors.IsConflict(err) {
+			return nil, err
+		}
+
+		imageStream, err = oClient.ImageStreams(imageStream.Namespace).Get(imageStream.Name)
+		if err != nil {
+			return nil, err
+		}
+	}
+}
+
+// finalizeImageStreamInternal will update the image stream finalizer list to either include or omit the origin finalizer
+func finalizeImageStreamInternal(oClient client.ImageStreamsNamespacer, imageStream *api.ImageStream, withOrigin bool) (*api.ImageStream, error) {
+	imageStreamFinalize := api.ImageStream{}
+	imageStreamFinalize.ObjectMeta = imageStream.ObjectMeta
+	imageStreamFinalize.Spec.Finalizers = imageStream.Spec.Finalizers
+
+	finalizerSet := sets.NewString()
+	for i := range imageStream.Spec.Finalizers {
+		finalizerSet.Insert(string(imageStream.Spec.Finalizers[i]))
+	}
+
+	if withOrigin {
+		finalizerSet.Insert(string(oapi.FinalizerOrigin))
+	} else {
+		finalizerSet.Delete(string(oapi.FinalizerOrigin))
+	}
+
+	imageStreamFinalize.Spec.Finalizers = make([]kapi.FinalizerName, 0, len(finalizerSet))
+	for _, value := range finalizerSet.List() {
+		imageStreamFinalize.Spec.Finalizers = append(imageStreamFinalize.Spec.Finalizers, kapi.FinalizerName(value))
+	}
+	return oClient.ImageStreams(imageStream.Namespace).Finalize(&imageStreamFinalize)
+}
diff --git a/pkg/project/api/types.go b/pkg/project/api/types.go
index 84b8ed4decc2..cd40a4c755c9 100644
--- a/pkg/project/api/types.go
+++ b/pkg/project/api/types.go
@@ -11,11 +11,6 @@ type ProjectList struct {
 	Items []Project
 }
 
-const (
-	// These are internal finalizer values to Origin
-	FinalizerOrigin kapi.FinalizerName = "openshift.io/origin"
-)
-
 // ProjectSpec describes the attributes on a Project
 type ProjectSpec struct {
 	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage
diff --git a/pkg/project/controller/controller.go b/pkg/project/controller/controller.go
index 926f700de744..861ee75cf9f1 100644
--- a/pkg/project/controller/controller.go
+++ b/pkg/project/controller/controller.go
@@ -1,6 +1,7 @@
 package controller
 
 import (
+	"fmt"
 	kapi "k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
@@ -44,6 +45,14 @@ func (c *NamespaceController) Handle(namespace *kapi.Namespace) (err error) {
 		return err
 	}
 
+	streams, err := c.Client.ImageStreams(namespace.Name).List(labels.Everything(), fields.Everything())
+	if err != nil {
+		return err
+	}
+	if len(streams.Items) > 0 {
+		return fmt.Errorf("waiting for image streams to be finalized")
+	}
+
 	// we have removed content, so mark it finalized by us
 	_, err 
= projectutil.Finalize(c.KubeClient, namespace) if err != nil { diff --git a/pkg/project/controller/controller_test.go b/pkg/project/controller/controller_test.go index 38209861317a..0bc79d61b84e 100644 --- a/pkg/project/controller/controller_test.go +++ b/pkg/project/controller/controller_test.go @@ -3,11 +3,12 @@ package controller import ( "testing" - "github.com/openshift/origin/pkg/client/testclient" - "github.com/openshift/origin/pkg/project/api" kapi "k8s.io/kubernetes/pkg/api" ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient" "k8s.io/kubernetes/pkg/util" + + oapi "github.com/openshift/origin/pkg/api" + "github.com/openshift/origin/pkg/client/testclient" ) func TestSyncNamespaceThatIsTerminating(t *testing.T) { @@ -25,7 +26,7 @@ func TestSyncNamespaceThatIsTerminating(t *testing.T) { DeletionTimestamp: &now, }, Spec: kapi.NamespaceSpec{ - Finalizers: []kapi.FinalizerName{kapi.FinalizerKubernetes, api.FinalizerOrigin}, + Finalizers: []kapi.FinalizerName{kapi.FinalizerKubernetes, oapi.FinalizerOrigin}, }, Status: kapi.NamespaceStatus{ Phase: kapi.NamespaceTerminating, @@ -40,6 +41,9 @@ func TestSyncNamespaceThatIsTerminating(t *testing.T) { expectedActionSet := []ktestclient.Action{ ktestclient.NewListAction("buildconfigs", "", nil, nil), ktestclient.NewListAction("policies", "", nil, nil), + // 1st time for deleting + ktestclient.NewListAction("imagestreams", "", nil, nil), + // 2nd time to check whether they are deleted ktestclient.NewListAction("imagestreams", "", nil, nil), ktestclient.NewListAction("policybindings", "", nil, nil), ktestclient.NewListAction("rolebindings", "", nil, nil), @@ -76,7 +80,7 @@ func TestSyncNamespaceThatIsActive(t *testing.T) { ResourceVersion: "1", }, Spec: kapi.NamespaceSpec{ - Finalizers: []kapi.FinalizerName{kapi.FinalizerKubernetes, api.FinalizerOrigin}, + Finalizers: []kapi.FinalizerName{kapi.FinalizerKubernetes, oapi.FinalizerOrigin}, }, Status: kapi.NamespaceStatus{ Phase: kapi.NamespaceActive, diff --git a/pkg/project/registry/project/rest_test.go b/pkg/project/registry/project/rest_test.go index 002eaae4b086..b9256afe5aac 100644 --- a/pkg/project/registry/project/rest_test.go +++ b/pkg/project/registry/project/rest_test.go @@ -21,6 +21,7 @@ import ( kapi "k8s.io/kubernetes/pkg/api" + oapi "github.com/openshift/origin/pkg/api" "github.com/openshift/origin/pkg/project/api" ) @@ -36,7 +37,7 @@ func TestProjectStrategy(t *testing.T) { ObjectMeta: kapi.ObjectMeta{Name: "foo", ResourceVersion: "10"}, } Strategy.PrepareForCreate(project) - if len(project.Spec.Finalizers) != 1 || project.Spec.Finalizers[0] != api.FinalizerOrigin { + if len(project.Spec.Finalizers) != 1 || project.Spec.Finalizers[0] != oapi.FinalizerOrigin { t.Errorf("Prepare For Create should have added project finalizer") } errs := Strategy.Validate(ctx, project) @@ -48,7 +49,7 @@ func TestProjectStrategy(t *testing.T) { } // ensure we copy spec.finalizers from old to new Strategy.PrepareForUpdate(invalidProject, project) - if len(invalidProject.Spec.Finalizers) != 1 || invalidProject.Spec.Finalizers[0] != api.FinalizerOrigin { + if len(invalidProject.Spec.Finalizers) != 1 || invalidProject.Spec.Finalizers[0] != oapi.FinalizerOrigin { t.Errorf("PrepareForUpdate should have preserved old.spec.finalizers") } errs = Strategy.ValidateUpdate(ctx, invalidProject, project) diff --git a/pkg/project/registry/project/strategy.go b/pkg/project/registry/project/strategy.go index 79b279fc6362..767b966e8b13 100644 --- a/pkg/project/registry/project/strategy.go +++ 
b/pkg/project/registry/project/strategy.go @@ -5,6 +5,7 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/fielderrors" + oapi "github.com/openshift/origin/pkg/api" "github.com/openshift/origin/pkg/project/api" "github.com/openshift/origin/pkg/project/api/validation" ) @@ -29,16 +30,16 @@ func (projectStrategy) PrepareForCreate(obj runtime.Object) { project := obj.(*api.Project) hasProjectFinalizer := false for i := range project.Spec.Finalizers { - if project.Spec.Finalizers[i] == api.FinalizerOrigin { + if project.Spec.Finalizers[i] == oapi.FinalizerOrigin { hasProjectFinalizer = true break } } if !hasProjectFinalizer { if len(project.Spec.Finalizers) == 0 { - project.Spec.Finalizers = []kapi.FinalizerName{api.FinalizerOrigin} + project.Spec.Finalizers = []kapi.FinalizerName{oapi.FinalizerOrigin} } else { - project.Spec.Finalizers = append(project.Spec.Finalizers, api.FinalizerOrigin) + project.Spec.Finalizers = append(project.Spec.Finalizers, oapi.FinalizerOrigin) } } } diff --git a/pkg/project/util/util.go b/pkg/project/util/util.go index e62c11b83e28..aa04423ca952 100644 --- a/pkg/project/util/util.go +++ b/pkg/project/util/util.go @@ -6,13 +6,13 @@ import ( kclient "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/util/sets" - "github.com/openshift/origin/pkg/project/api" + oapi "github.com/openshift/origin/pkg/api" ) // Associated returns true if the spec.finalizers contains the origin finalizer func Associated(namespace *kapi.Namespace) bool { for i := range namespace.Spec.Finalizers { - if api.FinalizerOrigin == namespace.Spec.Finalizers[i] { + if oapi.FinalizerOrigin == namespace.Spec.Finalizers[i] { return true } } @@ -30,7 +30,7 @@ func Associate(kubeClient kclient.Interface, namespace *kapi.Namespace) (*kapi.N // Finalized returns true if the spec.finalizers does not contain the origin finalizer func Finalized(namespace *kapi.Namespace) bool { for i := range namespace.Spec.Finalizers { - if api.FinalizerOrigin == namespace.Spec.Finalizers[i] { + if oapi.FinalizerOrigin == namespace.Spec.Finalizers[i] { return false } } @@ -75,9 +75,9 @@ func finalizeInternal(kubeClient kclient.Interface, namespace *kapi.Namespace, w } if withOrigin { - finalizerSet.Insert(string(api.FinalizerOrigin)) + finalizerSet.Insert(string(oapi.FinalizerOrigin)) } else { - finalizerSet.Delete(string(api.FinalizerOrigin)) + finalizerSet.Delete(string(oapi.FinalizerOrigin)) } namespaceFinalize.Spec.Finalizers = make([]kapi.FinalizerName, 0, len(finalizerSet)) diff --git a/test/integration/deploy_trigger_test.go b/test/integration/deploy_trigger_test.go index baff452b702c..18e505963ca5 100644 --- a/test/integration/deploy_trigger_test.go +++ b/test/integration/deploy_trigger_test.go @@ -354,6 +354,8 @@ func NewTestDeployOpenshift(t *testing.T) *testDeployOpenshift { imageStreamTagStorage := imagestreamtag.NewREST(imageRegistry, imageStreamRegistry) //imageStreamTagRegistry := imagestreamtag.NewRegistry(imageStreamTagStorage) + imageStreamDeletionStorage := imagestreamdeletion.NewREST(etcdHelper) + deployConfigStorage := deployconfigetcd.NewStorage(etcdHelper) deployConfigRegistry := deployconfigregistry.NewRegistry(deployConfigStorage) @@ -371,6 +373,7 @@ func NewTestDeployOpenshift(t *testing.T) *testDeployOpenshift { "imageStreamImages": imageStreamImageStorage, "imageStreamMappings": imageStreamMappingStorage, "imageStreamTags": imageStreamTagStorage, + "imageStreamDeletions": imageStreamDeletionStorage, "deploymentConfigs": deployConfigStorage, 
"generateDeploymentConfigs": deployconfiggenerator.NewREST(deployConfigGenerator, latest.Codec), }