From 77d413427b27abd4b827f593b12267056a74e780 Mon Sep 17 00:00:00 2001
From: Arnaud Rebillout
Date: Mon, 16 Sep 2019 05:18:11 +0100
Subject: [PATCH 1/1] Import docker.io_18.09.9+dfsg1.orig-distribution.tar.xz

[dgit import orig docker.io_18.09.9+dfsg1.orig-distribution.tar.xz]
---
 .mailmap | 18 +
 AUTHORS | 182 ++
 BUILDING.md | 117 +
 CHANGELOG.md | 108 +
 CONTRIBUTING.md | 148 +
 Dockerfile | 21 +
 LICENSE | 202 ++
 MAINTAINERS | 58 +
 Makefile | 99 +
 README.md | 130 +
 RELEASE-CHECKLIST.md | 44 +
 ROADMAP.md | 267 ++
 blobs.go | 257 ++
 circle.yml | 94 +
 cmd/digest/main.go | 100 +
 cmd/registry-api-descriptor-template/main.go | 131 +
 cmd/registry/config-cache.yml | 55 +
 cmd/registry/config-dev.yml | 69 +
 cmd/registry/config-example.yml | 18 +
 cmd/registry/main.go | 25 +
 configuration/configuration.go | 663 +++++
 configuration/configuration_test.go | 549 ++++
 configuration/parser.go | 283 ++
 context/context.go | 73 +
 context/doc.go | 88 +
 context/http.go | 367 +++
 context/http_test.go | 285 ++
 context/logger.go | 121 +
 context/trace.go | 105 +
 context/trace_test.go | 85 +
 context/util.go | 25 +
 context/version.go | 22 +
 context/version_test.go | 19 +
 contrib/apache/README.MD | 36 +
 contrib/apache/apache.conf | 127 +
 contrib/compose/README.md | 147 +
 contrib/compose/docker-compose.yml | 15 +
 contrib/compose/nginx/Dockerfile | 6 +
 contrib/compose/nginx/docker-registry-v2.conf | 6 +
 contrib/compose/nginx/docker-registry.conf | 7 +
 contrib/compose/nginx/nginx.conf | 27 +
 contrib/compose/nginx/registry.conf | 41 +
 contrib/docker-integration/Dockerfile | 9 +
 contrib/docker-integration/README.md | 63 +
 contrib/docker-integration/docker-compose.yml | 91 +
 contrib/docker-integration/golem.conf | 18 +
 contrib/docker-integration/helpers.bash | 127 +
 contrib/docker-integration/install_certs.sh | 50 +
 .../malevolent-certs/ca.pem | 18 +
 .../malevolent-certs/localregistry.cert | 19 +
 .../malevolent-certs/localregistry.key | 27 +
 contrib/docker-integration/malevolent.bats | 192 ++
 contrib/docker-integration/nginx/Dockerfile | 10 +
 .../nginx/docker-registry-v2.conf | 6 +
 contrib/docker-integration/nginx/nginx.conf | 61 +
 .../nginx/registry-basic.conf | 8 +
 .../nginx/registry-noauth.conf | 5 +
 .../docker-integration/nginx/registry.conf | 260 ++
 .../nginx/ssl/registry-ca+ca.pem | 29 +
 .../nginx/ssl/registry-ca+client-cert.pem | 29 +
 .../nginx/ssl/registry-ca+client-key.pem | 51 +
 .../nginx/ssl/registry-ca+localhost-cert.pem | 29 +
 .../nginx/ssl/registry-ca+localhost-key.pem | 51 +
 .../ssl/registry-ca+localregistry-cert.pem | 30 +
 .../ssl/registry-ca+localregistry-key.pem | 51 +
 .../nginx/ssl/registry-noca+client-cert.pem | 29 +
 .../nginx/ssl/registry-noca+client-key.pem | 51 +
 .../ssl/registry-noca+localhost-cert.pem | 29 +
 .../nginx/ssl/registry-noca+localhost-key.pem | 51 +
 .../ssl/registry-noca+localregistry-cert.pem | 30 +
 .../ssl/registry-noca+localregistry-key.pem | 51 +
 contrib/docker-integration/nginx/test.passwd | 1 +
 .../docker-integration/nginx/v1/search.json | 1 +
 contrib/docker-integration/plugins.bats | 103 +
 .../docker-integration/run_multiversion.sh | 67 +
 contrib/docker-integration/tls.bats | 108 +
 contrib/docker-integration/token.bats | 129 +
 .../tokenserver-oauth/.htpasswd | 1 +
 .../tokenserver-oauth/Dockerfile | 8 +
 .../certs/auth.localregistry.cert | 19 +
 .../certs/auth.localregistry.key | 27 +
 .../tokenserver-oauth/certs/ca.pem | 18 +
 .../certs/localregistry.cert | 19 +
 .../tokenserver-oauth/certs/localregistry.key | 27 +
 .../tokenserver-oauth/certs/signing.cert | 18 +
 .../tokenserver-oauth/certs/signing.key | 27 +
 .../registry-config-notls.yml | 15 +
 .../tokenserver-oauth/registry-config.yml | 18 +
 .../docker-integration/tokenserver/.htpasswd | 1 +
 .../docker-integration/tokenserver/Dockerfile | 8 +
 .../tokenserver/certs/auth.localregistry.cert | 19 +
 .../tokenserver/certs/auth.localregistry.key | 27 +
 .../tokenserver/certs/ca.pem | 18 +
 .../tokenserver/certs/localregistry.cert | 19 +
 .../tokenserver/certs/localregistry.key | 27 +
 .../tokenserver/certs/signing.cert | 18 +
 .../tokenserver/certs/signing.key | 27 +
 .../tokenserver/registry-config.yml | 18 +
 contrib/token-server/errors.go | 38 +
 contrib/token-server/main.go | 426 +++
 contrib/token-server/token.go | 220 ++
 coverpkg.sh | 7 +
 digestset/set.go | 247 ++
 digestset/set_test.go | 371 +++
 doc.go | 7 +
 errors.go | 115 +
 health/api/api.go | 37 +
 health/api/api_test.go | 86 +
 health/checks/checks.go | 73 +
 health/checks/checks_test.go | 25 +
 health/doc.go | 136 +
 health/health.go | 306 ++
 health/health_test.go | 107 +
 manifest/doc.go | 1 +
 manifest/manifestlist/manifestlist.go | 155 +
 manifest/manifestlist/manifestlist_test.go | 111 +
 manifest/schema1/config_builder.go | 287 ++
 manifest/schema1/config_builder_test.go | 277 ++
 manifest/schema1/manifest.go | 184 ++
 manifest/schema1/manifest_test.go | 136 +
 manifest/schema1/reference_builder.go | 98 +
 manifest/schema1/reference_builder_test.go | 108 +
 manifest/schema1/sign.go | 68 +
 manifest/schema1/verify.go | 32 +
 manifest/schema2/builder.go | 85 +
 manifest/schema2/builder_test.go | 210 ++
 manifest/schema2/manifest.go | 138 +
 manifest/schema2/manifest_test.go | 111 +
 manifest/versioned.go | 12 +
 manifests.go | 125 +
 metrics/prometheus.go | 13 +
 notifications/bridge.go | 214 ++
 notifications/bridge_test.go | 222 ++
 notifications/endpoint.go | 96 +
 notifications/event.go | 160 +
 notifications/event_test.go | 157 +
 notifications/http.go | 150 +
 notifications/http_test.go | 201 ++
 notifications/listener.go | 216 ++
 notifications/listener_test.go | 205 ++
 notifications/metrics.go | 152 +
 notifications/metrics_test.go | 28 +
 notifications/sinks.go | 392 +++
 notifications/sinks_test.go | 260 ++
 project/dev-image/Dockerfile | 20 +
 project/hooks/README.md | 6 +
 project/hooks/configure-hooks.sh | 18 +
 project/hooks/pre-commit | 29 +
 reference/helpers.go | 42 +
 reference/normalize.go | 170 ++
 reference/normalize_test.go | 625 ++++
 reference/reference.go | 433 +++
 reference/reference_test.go | 659 +++++
 reference/regexp.go | 143 +
 reference/regexp_test.go | 553 ++++
 registry.go | 113 +
 registry/api/errcode/errors.go | 267 ++
 registry/api/errcode/errors_test.go | 185 ++
 registry/api/errcode/handler.go | 40 +
 registry/api/errcode/register.go | 138 +
 registry/api/v2/descriptors.go | 1596 ++++++++++
 registry/api/v2/doc.go | 9 +
 registry/api/v2/errors.go | 136 +
 registry/api/v2/headerparser.go | 161 +
 registry/api/v2/headerparser_test.go | 161 +
 registry/api/v2/routes.go | 49 +
 registry/api/v2/routes_test.go | 355 +++
 registry/api/v2/urls.go | 266 ++
 registry/api/v2/urls_test.go | 520 ++++
 registry/auth/auth.go | 201 ++
 registry/auth/htpasswd/access.go | 116 +
 registry/auth/htpasswd/access_test.go | 122 +
 registry/auth/htpasswd/htpasswd.go | 82 +
 registry/auth/htpasswd/htpasswd_test.go | 85 +
 registry/auth/silly/access.go | 102 +
 registry/auth/silly/access_test.go | 71 +
 registry/auth/token/accesscontroller.go | 273 ++
 registry/auth/token/stringset.go | 35 +
 registry/auth/token/token.go | 378 +++
 registry/auth/token/token_test.go | 531 ++++
 registry/auth/token/util.go | 58 +
 registry/client/auth/api_version.go | 58 +
 registry/client/auth/challenge/addr.go | 27 +
 .../client/auth/challenge/authchallenge.go | 237 ++
 .../auth/challenge/authchallenge_test.go | 127 +
 registry/client/auth/session.go | 532 ++++
 registry/client/auth/session_test.go | 866 ++++++
 registry/client/blob_writer.go | 162 +
 registry/client/blob_writer_test.go | 211 ++
 registry/client/errors.go | 139 +
 registry/client/errors_test.go | 104 +
 registry/client/repository.go | 869 ++++++
 registry/client/repository_test.go | 1362 +++++++++
 registry/client/transport/http_reader.go | 251 ++
 registry/client/transport/transport.go | 147 +
 registry/doc.go | 2 +
 registry/handlers/api_test.go | 2625 +++++++++++++++++
 registry/handlers/app.go | 1065 +++++++
 registry/handlers/app_test.go | 279 ++
 registry/handlers/basicauth.go | 11 +
 registry/handlers/basicauth_prego14.go | 41 +
 registry/handlers/blob.go | 99 +
 registry/handlers/blobupload.go | 368 +++
 registry/handlers/catalog.go | 98 +
 registry/handlers/context.go | 92 +
 registry/handlers/health_test.go | 210 ++
 registry/handlers/helpers.go | 72 +
 registry/handlers/hmac.go | 74 +
 registry/handlers/hmac_test.go | 117 +
 registry/handlers/hooks.go | 53 +
 registry/handlers/mail.go | 45 +
 registry/handlers/manifests.go | 480 +++
 registry/handlers/tags.go | 62 +
 registry/listener/listener.go | 74 +
 registry/middleware/registry/middleware.go | 54 +
 registry/middleware/repository/middleware.go | 40 +
 registry/proxy/proxyauth.go | 83 +
 registry/proxy/proxyblobstore.go | 225 ++
 registry/proxy/proxyblobstore_test.go | 416 +++
 registry/proxy/proxymanifeststore.go | 96 +
 registry/proxy/proxymanifeststore_test.go | 275 ++
 registry/proxy/proxymetrics.go | 74 +
 registry/proxy/proxyregistry.go | 263 ++
 registry/proxy/proxytagservice.go | 66 +
 registry/proxy/proxytagservice_test.go | 182 ++
 registry/proxy/scheduler/scheduler.go | 260 ++
 registry/proxy/scheduler/scheduler_test.go | 211 ++
 registry/registry.go | 370 +++
 registry/registry_test.go | 30 +
 registry/root.go | 89 +
 registry/storage/blob_test.go | 614 ++++
 registry/storage/blobcachemetrics.go | 66 +
 registry/storage/blobserver.go | 78 +
 registry/storage/blobstore.go | 222 ++
 registry/storage/blobwriter.go | 397 +++
 registry/storage/blobwriter_nonresumable.go | 17 +
 registry/storage/blobwriter_resumable.go | 145 +
 registry/storage/cache/cache.go | 35 +
 registry/storage/cache/cachecheck/suite.go | 180 ++
 .../cache/cachedblobdescriptorstore.go | 129 +
 registry/storage/cache/memory/memory.go | 179 ++
 registry/storage/cache/memory/memory_test.go | 13 +
 registry/storage/cache/redis/redis.go | 268 ++
 registry/storage/cache/redis/redis_test.go | 53 +
 registry/storage/catalog.go | 148 +
 registry/storage/catalog_test.go | 324 ++
 registry/storage/digester_resumable_test.go | 22 +
 registry/storage/doc.go | 3 +
 registry/storage/driver/azure/azure.go | 507 ++++
 registry/storage/driver/azure/azure_test.go | 63 +
 registry/storage/driver/base/base.go | 240 ++
 registry/storage/driver/base/regulator.go | 141 +
 .../storage/driver/base/regulator_test.go | 67 +
 registry/storage/driver/factory/factory.go | 64 +
 registry/storage/driver/fileinfo.go | 79 +
 registry/storage/driver/filesystem/driver.go | 446 +++
 .../storage/driver/filesystem/driver_test.go | 113 +
 registry/storage/driver/gcs/doc.go | 3 +
 registry/storage/driver/gcs/gcs.go | 876 ++++++
 registry/storage/driver/gcs/gcs_test.go | 311 ++
 registry/storage/driver/inmemory/driver.go | 318 ++
 .../storage/driver/inmemory/driver_test.go | 19 +
 registry/storage/driver/inmemory/mfs.go | 338 +++
 .../middleware/cloudfront/middleware.go | 204 ++
 .../driver/middleware/cloudfront/s3filter.go | 223 ++
 .../middleware/cloudfront/s3filter_test.go | 401 +++
 .../driver/middleware/redirect/middleware.go | 50 +
 .../middleware/redirect/middleware_test.go | 58 +
 .../driver/middleware/storagemiddleware.go | 39 +
 registry/storage/driver/oss/doc.go | 3 +
 registry/storage/driver/oss/oss.go | 694 +++++
 registry/storage/driver/oss/oss_test.go | 142 +
 registry/storage/driver/s3-aws/s3.go | 1324 +++++++++
 registry/storage/driver/s3-aws/s3_test.go | 315 ++
 .../storage/driver/s3-aws/s3_v2_signer.go | 222 ++
 registry/storage/driver/s3-goamz/s3.go | 766 +++++
 registry/storage/driver/s3-goamz/s3_test.go | 201 ++
 registry/storage/driver/storagedriver.go | 171 ++
 registry/storage/driver/swift/swift.go | 934 ++++++
 registry/storage/driver/swift/swift_test.go | 245 ++
 .../storage/driver/testdriver/testdriver.go | 72 +
 .../storage/driver/testsuites/testsuites.go | 1272 ++++++++
 registry/storage/driver/walk.go | 52 +
 registry/storage/error.go | 9 +
 registry/storage/filereader.go | 177 ++
 registry/storage/filereader_test.go | 194 ++
 registry/storage/garbagecollect.go | 154 +
 registry/storage/garbagecollect_test.go | 502 ++++
 registry/storage/io.go | 71 +
 registry/storage/linkedblobstore.go | 465 +++
 registry/storage/linkedblobstore_test.go | 216 ++
 registry/storage/manifestlisthandler.go | 93 +
 registry/storage/manifeststore.go | 142 +
 registry/storage/manifeststore_test.go | 391 +++
 registry/storage/paths.go | 490 +++
 registry/storage/paths_test.go | 135 +
 registry/storage/purgeuploads.go | 139 +
 registry/storage/purgeuploads_test.go | 166 ++
 registry/storage/registry.go | 306 ++
 registry/storage/schema2manifesthandler.go | 137 +
 .../storage/schema2manifesthandler_test.go | 136 +
 registry/storage/signedmanifesthandler.go | 142 +
 registry/storage/tagstore.go | 198 ++
 registry/storage/tagstore_test.go | 209 ++
 registry/storage/util.go | 22 +
 registry/storage/vacuum.go | 102 +
 tags.go | 27 +
 testutil/handler.go | 148 +
 testutil/manifests.go | 87 +
 testutil/tarfile.go | 115 +
 uuid/uuid.go | 126 +
 uuid/uuid_test.go | 48 +
 vendor.conf | 49 +
 vendor/github.com/spf13/cobra/LICENSE.txt | 174 ++
 vendor/github.com/spf13/cobra/README.md | 485 +++
 .../spf13/cobra/bash_completions.go | 357 +++
 vendor/github.com/spf13/cobra/cobra.go | 112 +
 vendor/github.com/spf13/cobra/command.go | 1031 +++++++
 vendor/github.com/spf13/cobra/md_docs.go | 138 +
 vendor/github.com/spf13/pflag/LICENSE | 28 +
 vendor/github.com/spf13/pflag/README.md | 191 ++
 vendor/github.com/spf13/pflag/bool.go | 83 +
 vendor/github.com/spf13/pflag/duration.go | 71 +
 vendor/github.com/spf13/pflag/flag.go | 695 +++++
 vendor/github.com/spf13/pflag/float32.go | 74 +
 vendor/github.com/spf13/pflag/float64.go | 74 +
 vendor/github.com/spf13/pflag/int.go | 74 +
 vendor/github.com/spf13/pflag/int32.go | 74 +
 vendor/github.com/spf13/pflag/int64.go | 74 +
 vendor/github.com/spf13/pflag/int8.go | 74 +
 vendor/github.com/spf13/pflag/ip.go | 76 +
 vendor/github.com/spf13/pflag/ipmask.go | 86 +
 vendor/github.com/spf13/pflag/string.go | 69 +
 vendor/github.com/spf13/pflag/uint.go | 74 +
 vendor/github.com/spf13/pflag/uint16.go | 72 +
 vendor/github.com/spf13/pflag/uint32.go | 72 +
 vendor/github.com/spf13/pflag/uint64.go | 74 +
 vendor/github.com/spf13/pflag/uint8.go | 74 +
 version/print.go | 26 +
 version/version.go | 11 +
 version/version.sh | 22 +
 341 files changed, 60276 insertions(+)
 create mode 100644 .mailmap
 create mode 100644 AUTHORS
 create mode 100644 BUILDING.md
 create mode 100644 CHANGELOG.md
 create mode 100644 CONTRIBUTING.md
 create mode 100644 Dockerfile
 create mode 100644 LICENSE
 create mode 100644 MAINTAINERS
 create mode 100644 Makefile
 create mode 100644 README.md
 create mode 100644 RELEASE-CHECKLIST.md
 create mode 100644 ROADMAP.md
 create mode 100644 blobs.go
 create mode 100644 circle.yml
 create mode 100644 cmd/digest/main.go
 create mode 100644 cmd/registry-api-descriptor-template/main.go
 create mode 100644 cmd/registry/config-cache.yml
 create mode 100644 cmd/registry/config-dev.yml
 create mode 100644 cmd/registry/config-example.yml
 create mode 100644 cmd/registry/main.go
 create mode 100644 configuration/configuration.go
 create mode 100644 configuration/configuration_test.go
 create mode 100644 configuration/parser.go
 create mode 100644 context/context.go
 create mode 100644 context/doc.go
 create mode 100644 context/http.go
 create mode 100644 context/http_test.go
 create mode 100644 context/logger.go
 create mode 100644 context/trace.go
 create mode 100644 context/trace_test.go
 create mode 100644 context/util.go
 create mode 100644 context/version.go
 create mode 100644 context/version_test.go
 create mode 100644 contrib/apache/README.MD
 create mode 100644 contrib/apache/apache.conf
 create mode 100644 contrib/compose/README.md
 create mode 100644 contrib/compose/docker-compose.yml
 create mode 100644 contrib/compose/nginx/Dockerfile
 create mode 100644 contrib/compose/nginx/docker-registry-v2.conf
 create mode 100644 contrib/compose/nginx/docker-registry.conf
 create mode 100644 contrib/compose/nginx/nginx.conf
 create mode 100644 contrib/compose/nginx/registry.conf
 create mode 100644 contrib/docker-integration/Dockerfile
 create mode 100644 contrib/docker-integration/README.md
 create mode 100644 contrib/docker-integration/docker-compose.yml
 create mode 100644 contrib/docker-integration/golem.conf
 create mode 100644 contrib/docker-integration/helpers.bash
 create mode 100644 contrib/docker-integration/install_certs.sh
 create mode 100644 contrib/docker-integration/malevolent-certs/ca.pem
 create mode 100644 contrib/docker-integration/malevolent-certs/localregistry.cert
 create mode 100644 contrib/docker-integration/malevolent-certs/localregistry.key
 create mode 100644 contrib/docker-integration/malevolent.bats
 create mode 100644 contrib/docker-integration/nginx/Dockerfile
 create mode 100644 contrib/docker-integration/nginx/docker-registry-v2.conf
 create mode 100644 contrib/docker-integration/nginx/nginx.conf
 create mode 100644 contrib/docker-integration/nginx/registry-basic.conf
 create mode 100644 contrib/docker-integration/nginx/registry-noauth.conf
 create mode 100644 contrib/docker-integration/nginx/registry.conf
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-ca+ca.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem
 create mode 100644 contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem
 create mode 100644 contrib/docker-integration/nginx/test.passwd
 create mode 100644 contrib/docker-integration/nginx/v1/search.json
 create mode 100644 contrib/docker-integration/plugins.bats
 create mode 100755 contrib/docker-integration/run_multiversion.sh
 create mode 100644 contrib/docker-integration/tls.bats
 create mode 100644 contrib/docker-integration/token.bats
 create mode 100644 contrib/docker-integration/tokenserver-oauth/.htpasswd
 create mode 100644 contrib/docker-integration/tokenserver-oauth/Dockerfile
 create mode 100644 contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert
 create mode 100644 contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key
 create mode 100644 contrib/docker-integration/tokenserver-oauth/certs/ca.pem
 create mode 100644 contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert
 create mode 100644 contrib/docker-integration/tokenserver-oauth/certs/localregistry.key
 create mode 100644 contrib/docker-integration/tokenserver-oauth/certs/signing.cert
 create mode 100644 contrib/docker-integration/tokenserver-oauth/certs/signing.key
 create mode 100644 contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml
 create mode 100644 contrib/docker-integration/tokenserver-oauth/registry-config.yml
 create mode 100644 contrib/docker-integration/tokenserver/.htpasswd
 create mode 100644 contrib/docker-integration/tokenserver/Dockerfile
 create mode 100644 contrib/docker-integration/tokenserver/certs/auth.localregistry.cert
 create mode 100644 contrib/docker-integration/tokenserver/certs/auth.localregistry.key
 create mode 100644 contrib/docker-integration/tokenserver/certs/ca.pem
 create mode 100644 contrib/docker-integration/tokenserver/certs/localregistry.cert
 create mode 100644 contrib/docker-integration/tokenserver/certs/localregistry.key
 create mode 100644 contrib/docker-integration/tokenserver/certs/signing.cert
 create mode 100644 contrib/docker-integration/tokenserver/certs/signing.key
 create mode 100644 contrib/docker-integration/tokenserver/registry-config.yml
 create mode 100644 contrib/token-server/errors.go
 create mode 100644 contrib/token-server/main.go
 create mode 100644 contrib/token-server/token.go
 create mode 100755 coverpkg.sh
 create mode 100644 digestset/set.go
 create mode 100644 digestset/set_test.go
 create mode 100644 doc.go
 create mode 100644 errors.go
 create mode 100644 health/api/api.go
 create mode 100644 health/api/api_test.go
 create mode 100644 health/checks/checks.go
 create mode 100644 health/checks/checks_test.go
 create mode 100644 health/doc.go
 create mode 100644 health/health.go
 create mode 100644 health/health_test.go
 create mode 100644 manifest/doc.go
 create mode 100644 manifest/manifestlist/manifestlist.go
 create mode 100644 manifest/manifestlist/manifestlist_test.go
 create mode 100644 manifest/schema1/config_builder.go
 create mode 100644 manifest/schema1/config_builder_test.go
 create mode 100644 manifest/schema1/manifest.go
 create mode 100644 manifest/schema1/manifest_test.go
 create mode 100644 manifest/schema1/reference_builder.go
 create mode 100644 manifest/schema1/reference_builder_test.go
 create mode 100644 manifest/schema1/sign.go
 create mode 100644 manifest/schema1/verify.go
 create mode 100644 manifest/schema2/builder.go
 create mode 100644 manifest/schema2/builder_test.go
 create mode 100644 manifest/schema2/manifest.go
 create mode 100644 manifest/schema2/manifest_test.go
 create mode 100644 manifest/versioned.go
 create mode 100644 manifests.go
 create mode 100644 metrics/prometheus.go
 create mode 100644 notifications/bridge.go
 create mode 100644 notifications/bridge_test.go
 create mode 100644 notifications/endpoint.go
 create mode 100644 notifications/event.go
 create mode 100644 notifications/event_test.go
 create mode 100644 notifications/http.go
 create mode 100644 notifications/http_test.go
 create mode 100644 notifications/listener.go
 create mode 100644 notifications/listener_test.go
 create mode 100644 notifications/metrics.go
 create mode 100644 notifications/metrics_test.go
 create mode 100644 notifications/sinks.go
 create mode 100644 notifications/sinks_test.go
 create mode 100644 project/dev-image/Dockerfile
 create mode 100644 project/hooks/README.md
 create mode 100755 project/hooks/configure-hooks.sh
 create mode 100755 project/hooks/pre-commit
 create mode 100644 reference/helpers.go
 create mode 100644 reference/normalize.go
 create mode 100644 reference/normalize_test.go
 create mode 100644 reference/reference.go
 create mode 100644 reference/reference_test.go
 create mode 100644 reference/regexp.go
 create mode 100644 reference/regexp_test.go
 create mode 100644 registry.go
 create mode 100644 registry/api/errcode/errors.go
 create mode 100644 registry/api/errcode/errors_test.go
 create mode 100644 registry/api/errcode/handler.go
 create mode 100644 registry/api/errcode/register.go
 create mode 100644 registry/api/v2/descriptors.go
 create mode 100644 registry/api/v2/doc.go
 create mode 100644 registry/api/v2/errors.go
 create mode 100644 registry/api/v2/headerparser.go
 create mode 100644 registry/api/v2/headerparser_test.go
 create mode 100644 registry/api/v2/routes.go
 create mode 100644 registry/api/v2/routes_test.go
 create mode 100644 registry/api/v2/urls.go
 create mode 100644 registry/api/v2/urls_test.go
 create mode 100644 registry/auth/auth.go
 create mode 100644 registry/auth/htpasswd/access.go
 create mode 100644 registry/auth/htpasswd/access_test.go
 create mode 100644 registry/auth/htpasswd/htpasswd.go
 create mode 100644 registry/auth/htpasswd/htpasswd_test.go
 create mode 100644 registry/auth/silly/access.go
 create mode 100644 registry/auth/silly/access_test.go
 create mode 100644 registry/auth/token/accesscontroller.go
 create mode 100644 registry/auth/token/stringset.go
 create mode 100644 registry/auth/token/token.go
 create mode 100644 registry/auth/token/token_test.go
 create mode 100644 registry/auth/token/util.go
 create mode 100644 registry/client/auth/api_version.go
 create mode 100644 registry/client/auth/challenge/addr.go
 create mode 100644 registry/client/auth/challenge/authchallenge.go
 create mode 100644 registry/client/auth/challenge/authchallenge_test.go
 create mode 100644 registry/client/auth/session.go
 create mode 100644 registry/client/auth/session_test.go
 create mode 100644 registry/client/blob_writer.go
 create mode 100644 registry/client/blob_writer_test.go
 create mode 100644 registry/client/errors.go
 create mode 100644 registry/client/errors_test.go
 create mode 100644 registry/client/repository.go
 create mode 100644 registry/client/repository_test.go
 create mode 100644 registry/client/transport/http_reader.go
 create mode 100644 registry/client/transport/transport.go
 create mode 100644 registry/doc.go
 create mode 100644 registry/handlers/api_test.go
 create mode 100644 registry/handlers/app.go
 create mode 100644 registry/handlers/app_test.go
 create mode 100644 registry/handlers/basicauth.go
 create mode 100644 registry/handlers/basicauth_prego14.go
 create mode 100644 registry/handlers/blob.go
 create mode 100644 registry/handlers/blobupload.go
 create mode 100644 registry/handlers/catalog.go
 create mode 100644 registry/handlers/context.go
 create mode 100644 registry/handlers/health_test.go
 create mode 100644 registry/handlers/helpers.go
 create mode 100644 registry/handlers/hmac.go
 create mode 100644 registry/handlers/hmac_test.go
 create mode 100644 registry/handlers/hooks.go
 create mode 100644 registry/handlers/mail.go
 create mode 100644 registry/handlers/manifests.go
 create mode 100644 registry/handlers/tags.go
 create mode 100644 registry/listener/listener.go
 create mode 100644 registry/middleware/registry/middleware.go
 create mode 100644 registry/middleware/repository/middleware.go
 create mode 100644 registry/proxy/proxyauth.go
 create mode 100644 registry/proxy/proxyblobstore.go
 create mode 100644 registry/proxy/proxyblobstore_test.go
 create mode 100644 registry/proxy/proxymanifeststore.go
 create mode 100644 registry/proxy/proxymanifeststore_test.go
 create mode 100644 registry/proxy/proxymetrics.go
 create mode 100644 registry/proxy/proxyregistry.go
 create mode 100644 registry/proxy/proxytagservice.go
 create mode 100644 registry/proxy/proxytagservice_test.go
 create mode 100644 registry/proxy/scheduler/scheduler.go
 create mode 100644 registry/proxy/scheduler/scheduler_test.go
 create mode 100644 registry/registry.go
 create mode 100644 registry/registry_test.go
 create mode 100644 registry/root.go
 create mode 100644 registry/storage/blob_test.go
 create mode 100644 registry/storage/blobcachemetrics.go
 create mode 100644 registry/storage/blobserver.go
 create mode 100644 registry/storage/blobstore.go
 create mode 100644 registry/storage/blobwriter.go
 create mode 100644 registry/storage/blobwriter_nonresumable.go
 create mode 100644 registry/storage/blobwriter_resumable.go
 create mode 100644 registry/storage/cache/cache.go
 create mode 100644 registry/storage/cache/cachecheck/suite.go
 create mode 100644 registry/storage/cache/cachedblobdescriptorstore.go
 create mode 100644 registry/storage/cache/memory/memory.go
 create mode 100644 registry/storage/cache/memory/memory_test.go
 create mode 100644 registry/storage/cache/redis/redis.go
 create mode 100644 registry/storage/cache/redis/redis_test.go
 create mode 100644 registry/storage/catalog.go
 create mode 100644 registry/storage/catalog_test.go
 create mode 100644 registry/storage/digester_resumable_test.go
 create mode 100644 registry/storage/doc.go
 create mode 100644 registry/storage/driver/azure/azure.go
 create mode 100644 registry/storage/driver/azure/azure_test.go
 create mode 100644 registry/storage/driver/base/base.go
 create mode 100644 registry/storage/driver/base/regulator.go
 create mode 100644 registry/storage/driver/base/regulator_test.go
 create mode 100644 registry/storage/driver/factory/factory.go
 create mode 100644 registry/storage/driver/fileinfo.go
 create mode 100644 registry/storage/driver/filesystem/driver.go
 create mode 100644 registry/storage/driver/filesystem/driver_test.go
 create mode 100644 registry/storage/driver/gcs/doc.go
 create mode 100644 registry/storage/driver/gcs/gcs.go
 create mode 100644 registry/storage/driver/gcs/gcs_test.go
 create mode 100644 registry/storage/driver/inmemory/driver.go
 create mode 100644 registry/storage/driver/inmemory/driver_test.go
 create mode 100644 registry/storage/driver/inmemory/mfs.go
 create mode 100644 registry/storage/driver/middleware/cloudfront/middleware.go
 create mode 100644 registry/storage/driver/middleware/cloudfront/s3filter.go
 create mode 100644 registry/storage/driver/middleware/cloudfront/s3filter_test.go
 create mode 100644 registry/storage/driver/middleware/redirect/middleware.go
 create mode 100644 registry/storage/driver/middleware/redirect/middleware_test.go
 create mode 100644 registry/storage/driver/middleware/storagemiddleware.go
 create mode 100644 registry/storage/driver/oss/doc.go
 create mode 100644 registry/storage/driver/oss/oss.go
 create mode 100644 registry/storage/driver/oss/oss_test.go
 create mode 100644 registry/storage/driver/s3-aws/s3.go
 create mode 100644 registry/storage/driver/s3-aws/s3_test.go
 create mode 100644 registry/storage/driver/s3-aws/s3_v2_signer.go
 create mode 100644 registry/storage/driver/s3-goamz/s3.go
 create mode 100644 registry/storage/driver/s3-goamz/s3_test.go
 create mode 100644 registry/storage/driver/storagedriver.go
 create mode 100644 registry/storage/driver/swift/swift.go
 create mode 100644 registry/storage/driver/swift/swift_test.go
 create mode 100644 registry/storage/driver/testdriver/testdriver.go
 create mode 100644 registry/storage/driver/testsuites/testsuites.go
 create mode 100644 registry/storage/driver/walk.go
 create mode 100644 registry/storage/error.go
 create mode 100644 registry/storage/filereader.go
 create mode 100644 registry/storage/filereader_test.go
 create mode 100644 registry/storage/garbagecollect.go
 create mode 100644 registry/storage/garbagecollect_test.go
 create mode 100644 registry/storage/io.go
 create mode 100644 registry/storage/linkedblobstore.go
 create mode 100644 registry/storage/linkedblobstore_test.go
 create mode 100644 registry/storage/manifestlisthandler.go
 create mode 100644 registry/storage/manifeststore.go
 create mode 100644 registry/storage/manifeststore_test.go
 create mode 100644 registry/storage/paths.go
 create mode 100644 registry/storage/paths_test.go
 create mode 100644 registry/storage/purgeuploads.go
 create mode 100644 registry/storage/purgeuploads_test.go
 create mode 100644 registry/storage/registry.go
 create mode 100644 registry/storage/schema2manifesthandler.go
 create mode 100644 registry/storage/schema2manifesthandler_test.go
 create mode 100644 registry/storage/signedmanifesthandler.go
 create mode 100644 registry/storage/tagstore.go
 create mode 100644 registry/storage/tagstore_test.go
 create mode 100644 registry/storage/util.go
 create mode 100644 registry/storage/vacuum.go
 create mode 100644 tags.go
 create mode 100644 testutil/handler.go
 create mode 100644 testutil/manifests.go
 create mode 100644 testutil/tarfile.go
 create mode 100644 uuid/uuid.go
 create mode 100644 uuid/uuid_test.go
 create mode 100644 vendor.conf
 create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt
 create mode 100644 vendor/github.com/spf13/cobra/README.md
 create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra.go
 create mode 100644 vendor/github.com/spf13/cobra/command.go
 create mode 100644 vendor/github.com/spf13/cobra/md_docs.go
 create mode 100644 vendor/github.com/spf13/pflag/LICENSE
 create mode 100644 vendor/github.com/spf13/pflag/README.md
 create mode 100644 vendor/github.com/spf13/pflag/bool.go
 create mode 100644 vendor/github.com/spf13/pflag/duration.go
 create mode 100644 vendor/github.com/spf13/pflag/flag.go
 create mode 100644 vendor/github.com/spf13/pflag/float32.go
 create mode 100644 vendor/github.com/spf13/pflag/float64.go
 create mode 100644 vendor/github.com/spf13/pflag/int.go
 create mode 100644 vendor/github.com/spf13/pflag/int32.go
 create mode 100644 vendor/github.com/spf13/pflag/int64.go
 create mode 100644 vendor/github.com/spf13/pflag/int8.go
 create mode 100644 vendor/github.com/spf13/pflag/ip.go
 create mode 100644 vendor/github.com/spf13/pflag/ipmask.go
 create mode 100644 vendor/github.com/spf13/pflag/string.go
 create mode 100644 vendor/github.com/spf13/pflag/uint.go
 create mode 100644 vendor/github.com/spf13/pflag/uint16.go
 create mode 100644 vendor/github.com/spf13/pflag/uint32.go
 create mode 100644 vendor/github.com/spf13/pflag/uint64.go
 create mode 100644 vendor/github.com/spf13/pflag/uint8.go
 create mode 100644 version/print.go
 create mode 100644 version/version.go
 create mode 100755 version/version.sh

diff --git a/.mailmap b/.mailmap
new file mode 100644
index 00000000..d9910601
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,18 @@
+Stephen J Day Stephen Day
+Stephen J Day Stephen Day
+Olivier Gambier Olivier Gambier
+Brian Bland Brian Bland
+Brian Bland Brian Bland
+Josh Hawn Josh Hawn
+Richard Scothern Richard
+Richard Scothern Richard Scothern
+Andrew Meredith Andrew Meredith
+harche harche
+Jessie Frazelle
+Sharif Nassar Sharif Nassar
+Sven Dowideit Sven Dowideit
+Vincent Giersch Vincent Giersch
+davidli davidli
+Omer Cohen Omer Cohen
+Eric Yang Eric Yang
+Nikita Tarasov Nikita
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 00000000..252ff8aa
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,182 @@
+a-palchikov
+Aaron Lehmann
+Aaron Schlesinger
+Aaron Vinson
+Adam Duke
+Adam Enger
+Adrian Mouat
+Ahmet Alp Balkan
+Alex Chan
+Alex Elman
+Alexey Gladkov
+allencloud
+amitshukla
+Amy Lindburg
+Andrew Hsu
+Andrew Meredith
+Andrew T Nguyen
+Andrey Kostov
+Andy Goldstein
+Anis Elleuch
+Anton Tiurin
+Antonio Mercado
+Antonio Murdaca
+Anusha Ragunathan
+Arien Holthuizen
+Arnaud Porterie
+Arthur Baars
+Asuka Suzuki
+Avi Miller
+Ayose Cazorla
+BadZen
+Ben Bodenmiller
+Ben Firshman
+bin liu
+Brian Bland
+burnettk
+Carson A
+Cezar Sa Espinola
+Charles Smith
+Chris Dillon
+cuiwei13
+cyli
+Daisuke Fujita
+Daniel Huhn
+Darren Shepherd
+Dave Trombley
+Dave Tucker
+David Lawrence
+David Verhasselt
+David Xia
+davidli
+Dejan Golja
+Derek McGowan
+Diogo Mónica
+DJ Enriquez
+Donald Huang
+Doug Davis
+Edgar Lee
+Eric Yang
+Fabio Berchtold
+Fabio Huser
+farmerworking
+Felix Yan
+Florentin Raud
+Frank Chen
+Frederick F. Kautz IV
+gabriell nascimento
+Gleb Schukin
+harche
+Henri Gomez
+Hu Keping
+Hua Wang
+HuKeping
+Ian Babrou
+igayoso
+Jack Griffin
+James Findley
+Jason Freidman
+Jason Heiss
+Jeff Nickoloff
+Jess Frazelle
+Jessie Frazelle
+jhaohai
+Jianqing Wang
+Jihoon Chung
+Joao Fernandes
+John Mulhausen
+John Starks
+Jon Johnson
+Jon Poler
+Jonathan Boulle
+Jordan Liggitt
+Josh Chorlton
+Josh Hawn
+Julien Fernandez
+Ke Xu
+Keerthan Mala
+Kelsey Hightower
+Kenneth Lim
+Kenny Leung
+Li Yi
+Liu Hua
+liuchang0812
+Lloyd Ramey
+Louis Kottmann
+Luke Carpenter
+Marcus Martins
+Mary Anthony
+Matt Bentley
+Matt Duch
+Matt Moore
+Matt Robenolt
+Matthew Green
+Michael Prokop
+Michal Minar
+Michal Minář
+Mike Brown
+Miquel Sabaté
+Misty Stanley-Jones
+Misty Stanley-Jones
+Morgan Bauer
+moxiegirl
+Nathan Sullivan
+nevermosby
+Nghia Tran
+Nikita Tarasov
+Noah Treuhaft
+Nuutti Kotivuori
+Oilbeater
+Olivier Gambier
+Olivier Jacques
+Omer Cohen
+Patrick Devine
+Phil Estes
+Philip Misiowiec
+Pierre-Yves Ritschard
+Qiao Anran
+Randy Barlow
+Richard Scothern
+Rodolfo Carvalho
+Rusty Conover
+Sean Boran
+Sebastiaan van Stijn
+Sebastien Coavoux
+Serge Dubrouski
+Sharif Nassar
+Shawn Falkner-Horine
+Shreyas Karnik
+Simon Thulbourn
+spacexnice
+Spencer Rinehart
+Stan Hu
+Stefan Majewsky
+Stefan Weil
+Stephen J Day
+Sungho Moon
+Sven Dowideit
+Sylvain Baubeau
+Ted Reed
+tgic
+Thomas Sjögren
+Tianon Gravi
+Tibor Vass
+Tonis Tiigi
+Tony Holdstock-Brown
+Trevor Pounds
+Troels Thomsen
+Victor Vieux
+Victoria Bialas
+Vincent Batts
+Vincent Demeester
+Vincent Giersch
+W. Trevor King
+weiyuan.yl
+xg.song
+xiekeyang
+Yann ROBERT
+yaoyao.xyy
+yuexiao-wang
+yuzou
+zhouhaibing089
+姜继忠
diff --git a/BUILDING.md b/BUILDING.md
new file mode 100644
index 00000000..2981d016
--- /dev/null
+++ b/BUILDING.md
@@ -0,0 +1,117 @@
+
+# Building the registry source
+
+## Use-case
+
+This is useful if you intend to actively work on the registry.
+
+### Alternatives
+
+Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
+
+People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
+
+OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
+
+### Gotchas
+
+You are expected to know your way around Go and git.
+
+If you are a casual user with no development experience and no prior knowledge of Go, building from source is probably not a good solution for you.
+
+## Build the development environment
+
+The first prerequisite for properly building distribution targets is a working
+Go development environment. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
+to set one up. If done correctly, you should have a GOROOT and GOPATH set in the
+environment.
+
+Once a Go development environment is set up, one can use `go get` to install the
+`registry` command from the current latest:
+
+    go get github.com/docker/distribution/cmd/registry
+
+The above will install the source repository into the `GOPATH`.
+
+Now create the directory for the registry data (this might require you to set permissions properly):
+
+    mkdir -p /var/lib/registry
+
+... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data in another location.
+
+The `registry` binary can then be run with the following:
+
+    $ $GOPATH/bin/registry --version
+    $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
+
+> __NOTE:__ While you do not need to use `go get` to check out the distribution
+> project, for these build instructions to work, the project must be checked
+> out in the correct location in the `GOPATH`. This should almost always be
+> `$GOPATH/src/github.com/docker/distribution`.
+
+The registry can be run with the default config using the following
+incantation:
+
+    $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
+    INFO[0000] endpoint local-5003 disabled, skipping  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+    INFO[0000] endpoint local-8083 disabled, skipping  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+    INFO[0000] listening on :5000  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+    INFO[0000] debug server listening localhost:5001
+
+If it is working, one should see the above log messages.
+
+### Repeatable Builds
+
+For the full development experience, one should `cd` into
+`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
+commands, such as `go test`, should work per package (please see
+[Developing](#developing) if they don't work).
+
+A `Makefile` has been provided as a convenience to support repeatable builds.
+Please install the following into `GOPATH` for it to work:
+
+    go get github.com/golang/lint/golint
+
+Once these commands are available in the `GOPATH`, run `make` to get a full
+build:
+
+    $ make
+    + clean
+    + fmt
+    + vet
+    + lint
+    + build
+    github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
+    github.com/sirupsen/logrus
+    github.com/docker/libtrust
+    ...
+    github.com/yvasiyarov/gorelic
+    github.com/docker/distribution/registry/handlers
+    github.com/docker/distribution/cmd/registry
+    + test
+    ...
+    ok github.com/docker/distribution/digest 7.875s
+    ok github.com/docker/distribution/manifest 0.028s
+    ok github.com/docker/distribution/notifications 17.322s
+    ? github.com/docker/distribution/registry [no test files]
+    ok github.com/docker/distribution/registry/api/v2 0.101s
+    ? github.com/docker/distribution/registry/auth [no test files]
+    ok github.com/docker/distribution/registry/auth/silly 0.011s
+    ...
+    + /Users/sday/go/src/github.com/docker/distribution/bin/registry
+    + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
+    + binaries
+
+The above provides a repeatable build using the contents of the vendor
+directory. This includes formatting, vetting, linting, building,
+testing and generating tagged binaries. We can verify this worked by running
+the registry binary generated in the "./bin" directory:
+
+    $ ./bin/registry --version
+    ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
+
+### Optional build tags
+
+Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
+the environment variable `DOCKER_BUILDTAGS`.
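+
+For example, to produce binaries with the Alibaba OSS and Google Cloud Storage
+drivers compiled in, one can set the same tags this repository's Dockerfile
+uses (adjust the tag list to the drivers you actually need):
+
+    # enable optional storage drivers, then rebuild the tagged binaries
+    export DOCKER_BUILDTAGS='include_oss include_gcs'
+    make clean binaries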
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..e7b16b3c
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,108 @@
+# Changelog
+
+## 2.6.0 (2017-01-18)
+
+#### Storage
+- S3: fixed bug in delete due to read-after-write inconsistency
+- S3: allow EC2 IAM roles to be used when authorizing region endpoints
+- S3: add Object ACL Support
+- S3: fix delete method's notion of subpaths
+- S3: use multipart upload API in `Move` method for performance
+- S3: add v2 signature signing for legacy S3 clones
+- Swift: add simple heuristic to detect incomplete DLOs during read ops
+- Swift: support different user and tenant domains
+- Swift: bulk deletes in chunks
+- Aliyun OSS: fix delete method's notion of subpaths
+- Aliyun OSS: optimize data copy after upload finishes
+- Azure: close leaking response body
+- Fix storage drivers dropping non-EOF errors when listing repositories
+- Compare path properly when listing repositories in catalog
+- Add a foreign layer URL host whitelist
+- Improve catalog enumerate runtime
+
+#### Registry
+- Export `storage.CreateOptions` in top-level package
+- Enable notifications to endpoints that use self-signed certificates
+- Properly validate multi-URL foreign layers
+- Add control over validation of URLs in pushed manifests
+- Proxy mode: fix socket leak when pull is cancelled
+- Tag service: properly handle error responses on HEAD request
+- Support for custom authentication URL in proxying registry
+- Add configuration option to disable access logging
+- Add notification filtering by target media type
+- Manifest: `References()` returns all children
+- Honor `X-Forwarded-Port` and Forwarded headers
+- Reference: Preserve tag and digest in With* functions
+- Add policy configuration for enforcing repository classes
+
+#### Client
+- Changes the client Tags `All()` method to follow links
+- Allow registry clients to connect via HTTP2
+- Better handling of OAuth errors in client
+
+#### Spec
+- Manifest: clarify relationship between urls and foreign layers
+- Authorization: add support for repository classes
+
+#### Manifest
+- Override media type returned from `Stat()` for existing manifests
+- Add plugin mediatype to distribution manifest
+
+#### Docs
+- Document `TOOMANYREQUESTS` error code
+- Document required Let's Encrypt port
+- Improve documentation around implementation of OAuth2
+- Improve documentation for configuration
+
+#### Auth
+- Add support for registry type in scope
+- Add support for using v2 ping challenges for v1
+- Add leeway to JWT `nbf` and `exp` checking
+- htpasswd: dynamically parse htpasswd file
+- Fix missing auth headers with PATCH HTTP request when pushing to default port
+
+#### Dockerfile
+- Update to go1.7
+- Reorder Dockerfile steps for better layer caching
+
+#### Notes
+
+Documentation has moved to the documentation repository at
+`github.com/docker/docker.github.io/tree/master/registry`
+
+The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` checks.
+ + +## 2.5.0 (2016-06-14) + +#### Storage +- Ensure uploads directory is cleaned after upload is committed +- Add ability to cap concurrent operations in filesystem driver +- S3: Add 'us-gov-west-1' to the valid region list +- Swift: Handle ceph not returning Last-Modified header for HEAD requests +- Add redirect middleware + +#### Registry +- Add support for blobAccessController middleware +- Add support for layers from foreign sources +- Remove signature store +- Add support for Let's Encrypt +- Correct yaml key names in configuration + +#### Client +- Add option to get content digest from manifest get + +#### Spec +- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported +- Clarify API documentation around catalog fetch behavior + +#### API +- Support returning HTTP 429 (Too Many Requests) + +#### Documentation +- Update auth documentation examples to show "expires in" as int + +#### Docker Image +- Use Alpine Linux as base image + + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..4c067d9e --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +### Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry --version` + 3. copy the command line you used to launch your Registry + 4. 
restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. + +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. 
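+
+For example, one way to fold review fixups back into your original commit
+before updating the PR (the commit count and branch name below are
+placeholders, adjust them to your own PR):
+
+    git rebase -i HEAD~3    # mark the fixup commits as "squash" or "fixup"
+    git push --force-with-lease origin my-feature-branch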
+
+## Contributing new features
+
+You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
+
+If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
+If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
+
+Then you should submit your implementation, clearly linking to the issue (and possible proposal).
+
+Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
+
+It's mandatory to:
+
+ - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
+ - address maintainers' comments and modify your submission accordingly
+ - write tests for any new code
+
+Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
+
+Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and make the code base better than they left it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](http://golang.org/doc/effective_go.html). The
+[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..ac8dbca2
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,21 @@
+FROM golang:1.8-alpine
+
+ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
+ENV DOCKER_BUILDTAGS include_oss include_gcs
+
+ARG GOOS=linux
+ARG GOARCH=amd64
+
+RUN set -ex \
+    && apk add --no-cache make git
+
+WORKDIR $DISTRIBUTION_DIR
+COPY . $DISTRIBUTION_DIR
+COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
+
+RUN make PREFIX=/go clean binaries
+
+VOLUME ["/var/lib/registry"]
+EXPOSE 5000
+ENTRYPOINT ["registry"]
+CMD ["serve", "/etc/docker/registry/config.yml"]
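+
+# One possible way to build and run this image; the image tag below is
+# illustrative, while port 5000 and /var/lib/registry match the EXPOSE and
+# VOLUME directives above:
+#
+#   docker build --build-arg GOOS=linux --build-arg GOARCH=amd64 -t registry:dev .
+#   docker run -d -p 5000:5000 -v /var/lib/registry:/var/lib/registry registry:dev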
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 00000000..bda40015 --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,58 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. 
+# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "dmcgowan", + "dmp42", + "richardscothern", + "shykes", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.richardscothern] + Name = "Richard Scothern" + Email = "richard.scothern@gmail.com" + GitHub = "richardscothern" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..7c6f9c7a --- /dev/null +++ b/Makefile @@ -0,0 +1,99 @@ +# Set an output prefix, which is the local directory if not specified +PREFIX?=$(shell pwd) + + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" + +.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet +.DEFAULT: all +all: fmt vet lint build test binaries + +AUTHORS: .mailmap .git/HEAD + git log --format='%aN <%aE>' | sort -fu > $@ + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + ./version/version.sh > $@ + +# Required for go 1.5 to build +GO15VENDOREXPERIMENT := 1 + +# Go files +GOFILES=$(shell find . -type f -name '*.go') + +# Package list +PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) + +# Resolving binary dependencies for specific targets +GOLINT=$(shell which golint || echo '') +VNDR=$(shell which vndr || echo '') + +${PREFIX}/bin/registry: $(GOFILES) + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry + +${PREFIX}/bin/digest: $(GOFILES) + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest + +${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) + @echo "+ $@" + @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template + +docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template + ./bin/registry-api-descriptor-template $< > $@ + +vet: + @echo "+ $@" + @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +fmt: + @echo "+ $@" + @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ + (echo >&2 "+ please format Go code with 'gofmt -s'" && false) + +lint: + @echo "+ $@" + $(if $(GOLINT), , \ + $(error Please install golint: `go get -u github.com/golang/lint/golint`)) + @test -z "$$($(GOLINT) ./... 
2>&1 | grep -v ^vendor/ | tee /dev/stderr)" + +build: + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) + +test: + @echo "+ $@" + @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +test-full: + @echo "+ $@" + @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template + @echo "+ $@" + +clean: + @echo "+ $@" + @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" + +dep-validate: + @echo "+ $@" + $(if $(VNDR), , \ + $(error Please install vndr: go get github.com/lk4d4/vndr)) + @rm -Rf .vendor.bak + @mv vendor .vendor.bak + @$(VNDR) + @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ + (echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false) + @rm -Rf vendor + @mv .vendor.bak vendor diff --git a/README.md b/README.md new file mode 100644 index 00000000..99887885 --- /dev/null +++ b/README.md @@ -0,0 +1,130 @@ +# Distribution + +The Docker toolset to pack, ship, store, and deliver content. + +This repository's main product is the Docker Registry 2.0 implementation +for storing and distributing Docker images. It supersedes the +[docker/docker-registry](https://github.com/docker/docker-registry) +project with a new API design, focused around security and performance. + + + +[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) +[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) + +This repository contains the following components: + +|**Component** |Description | +|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | + +### How does this integrate with Docker engine? + +This project should provide an implementation to a V2 API for use in the [Docker +core project](https://github.com/docker/docker). The API should be embeddable +and simplify the process of securely pulling and pushing content from `docker` +daemons. + +### What are the long term goals of the Distribution project? + +The _Distribution_ project has the further long term goal of providing a +secure tool chain for distributing content. The specifications, APIs and tools +should be as useful with Docker as they are without. 
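As a small illustration of that tool-kit aim, the `reference` package in this repository can already be used on its own to normalize image names the way the engine does. A minimal sketch (the image name is only an example):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Expand a short image name into its fully qualified form, using the
	// same normalization rules the docker engine applies.
	named, err := reference.ParseNormalizedNamed("redis:5.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.Name())   // docker.io/library/redis
	fmt.Println(named.String()) // docker.io/library/redis:5.0

	// TagNameOnly fills in the "latest" tag when none was given.
	fmt.Println(reference.TagNameOnly(named).String())
}
```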
+
+Our goal is to design a professional-grade, extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secure and reliable way to store, manage, package and
+  exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own home-made solutions through good specs and solid
+  extension mechanisms.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For these
+use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see [docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](BUILDING.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+| Channel       | Where                                                                     |
+|---------------|---------------------------------------------------------------------------|
+| IRC           | #docker-distribution on FreeNode                                          |
+| Issue Tracker | github.com/docker/distribution/issues                                     |
+| Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution  |
+| Mailing List  | docker@dockerproject.org                                                  |
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/RELEASE-CHECKLIST.md b/RELEASE-CHECKLIST.md
new file mode 100644
index 00000000..73eba5a8
--- /dev/null
+++ b/RELEASE-CHECKLIST.md
@@ -0,0 +1,44 @@
+## Registry Release Checklist
+
+10. Compile release notes detailing features and bugfixes since the last release.
+
+   Update the `CHANGELOG.md` file and create a PR to master with the updates.
+Once that PR has been approved by maintainers, the change may be cherry-picked
+to the release branch (new release branches may be forked from this commit).
+
+20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`
+
+30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.
+
+```
+make AUTHORS
+```
+
+40. Create a signed tag.
+
+   Distribution uses semantic versioning. Tags are of the format
+`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added
+to your Github account. The comment for the tag should include the release
+notes; use previous tags as a guide for consistent formatting. Run
+`git tag -s vx.y.z[-rcn]` to create the tag and `git tag -v vx.y.z[-rcn]` to
+verify the tag, checking the comment and the commit hash.
+
+50. Push the signed tag.
+
+60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.
+
+70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request.
+
+80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to the new patch release if necessary.
+e.g. to release `2.3.1`:
+
+   `2.3.1 (new)`
+
+   `2.3.0 -> 2.3.0` can be removed
+
+   `2 -> 2.3.1`
+
+   `2.3 -> 2.3.1`
+
+90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.
+
diff --git a/ROADMAP.md b/ROADMAP.md
new file mode 100644
index 00000000..701127af
--- /dev/null
+++ b/ROADMAP.md
@@ -0,0 +1,267 @@
+# Roadmap
+
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the release
+relationship to the Docker Platform.
+
+* [Distribution Goals](#distribution-goals)
+* [Distribution Components](#distribution-components)
+* [Project Planning](#project-planning): release relationship to the Docker Platform.
+
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
+## Distribution Goals
+
+- Replace the existing [docker registry](https://github.com/docker/docker-registry)
+  implementation as the primary implementation.
+- Replace the existing push and pull code in the docker engine with the
+  distribution package.
+- Define a strong data model for distributing docker images
+- Provide a flexible distribution tool kit for use in the docker platform
+- Unlock new distribution models
+
+## Distribution Components
+
+Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
+features and bugfixes for a component will be added to the relevant milestone. If a feature or
+bugfix is not part of a milestone, it is currently unscheduled for
+implementation.
+
+* [Registry](#registry)
+* [Distribution Package](#distribution-package)
+
+***
+
+### Registry
+
+The new Docker registry is the main portion of the distribution repository.
+Registry 2.0 is the first release of the next-generation registry. This was
+primarily focused on implementing the [new registry
+API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
+with a focus on security and performance.
+
+Following from the Distribution project goals above, we have a set of goals
+for registry v2 that we would like to follow in the design. New features
+should be compared against these goals.
+
+#### Data Storage and Distribution First
+
+The registry's first goal is to provide a reliable, consistent storage
+location for Docker images. The registry should only provide the minimal
+amount of indexing required to fetch image data and no more.
+
+This means we should be selective in new features and API additions, including
+those that may require expensive, ever-growing indexes. Requests should be
+servable in "constant time".
+
+#### Content Addressability
+
+All data objects used in the registry API should be content addressable.
+Content identifiers should be secure and verifiable. This provides a secure,
+reliable base from which to build more advanced content distribution systems.
+
+#### Content Agnostic
+
+In the past, changes to the image format would require large changes in Docker
+and the Registry. By decoupling the distribution and image format, we can
+allow the formats to progress without having to coordinate between the two.
+This means that we should be focused on decoupling Docker from the registry
+just as much as decoupling the registry from Docker. Such an approach will
+allow us to unlock new distribution models that haven't been possible before.
+
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses, and that model can be used to work with content.
+
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points to add functionality, keeping the
+core scope narrow while still providing the ability to grow.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
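Before turning to the individual discussions, the content-addressability goal above can be made concrete with the `go-digest` package this project builds on. A minimal sketch, not taken from the registry code itself:

```go
package main

import (
	// Blank import registers sha256 with the stdlib crypto registry,
	// through which go-digest resolves hash implementations.
	_ "crypto/sha256"
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	payload := []byte(`{"schemaVersion": 2}`)

	// The digest doubles as the blob's name and its verification value.
	dgst := digest.FromBytes(payload)
	fmt.Println(dgst) // sha256:...

	// Any byte stream claiming to be this blob can be checked against it.
	verifier := dgst.Verifier()
	verifier.Write(payload)
	fmt.Println(verifier.Verified()) // true
}
```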
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigorous design process before
+landing in the registry.
+
+##### Proxying to other Registries
+
+A _pull-through caching_ mode exists for the registry, but is restricted from
+within the docker client to only mirror the official Docker Hub. This functionality
+can be expanded when image provenance has been specified and implemented in the
+distribution project.
+
+##### Metadata storage
+
+Metadata for the registry is currently stored with the manifest and layer data on
+the storage backend. While this is a big win for simplicity and reliably maintaining
+state, it comes at the cost of consistency and higher latency. The mutable registry
+metadata operations should be abstracted behind an API which will allow ACID-compliant
+storage systems to handle metadata.
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 since we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone will attempt to address the problem with the existing
+tools and propose it as a standard search API, or use it to inform a
+standardization process. Once this has been explored, we integrate with the
+docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much-requested feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal, but ideally both are supported. Generally, according to this rule, we
+err on holding data longer than needed, ensuring that it is only removed when
+we can be certain that it can be removed. With the current behavior, we opt to
+hold onto the data forever, ensuring that data cannot be incorrectly removed.
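To make the garbage-collection framing of the next paragraphs concrete, here is a toy mark-and-sweep sketch; the types are invented for illustration and are not the registry's real data model:

```go
package main

import "fmt"

// manifest is a stand-in for a registry manifest: a named object that
// references blobs by digest.
type manifest struct {
	name  string
	blobs []string
}

// unreferenced returns the digests in all that no manifest references:
// the classic mark (collect live references) and sweep (everything else)
// split. The hard part in a real registry is doing this against an
// eventually consistent store while uploads happen concurrently.
func unreferenced(all map[string]bool, manifests []manifest) []string {
	marked := map[string]bool{}
	for _, m := range manifests {
		for _, d := range m.blobs {
			marked[d] = true // mark phase
		}
	}
	var candidates []string
	for d := range all {
		if !marked[d] {
			candidates = append(candidates, d) // sweep candidates
		}
	}
	return candidates
}

func main() {
	blobs := map[string]bool{"sha256:aaa": true, "sha256:bbb": true, "sha256:ccc": true}
	manifests := []manifest{{name: "app:latest", blobs: []string{"sha256:aaa", "sha256:bbb"}}}
	fmt.Println(unreferenced(blobs, manifests)) // [sha256:ccc]
}
```

The race described below is exactly a new manifest adding a reference between the mark and the sweep.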
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in
+reverse-dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob, we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we can also try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: (1) maintaining a consistent consensus
+  of reference counts across a set of Registries and (2) building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to the above but, instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments, though it would create a bottleneck
+  for registry metadata. However, metadata is generally not the main
+  bottleneck when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple, but the amount of
+coordination work may offset the extra work it might take to build a
+_Centralized Oracle_. We'll accept proposals for any solution, but please
+coordinate with us before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
+
+### Distribution Package
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation.
+
+The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
+
+For feature additions, please see the Registry section. In the future, we may break out a
+separate Roadmap for distribution-specific features that apply to more than
+just the registry.
+
+***
+
+### Project Planning
+
+An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
+
diff --git a/blobs.go b/blobs.go
new file mode 100644
index 00000000..145b0785
--- /dev/null
+++ b/blobs.go
@@ -0,0 +1,257 @@
+package distribution
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution/reference"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrBlobExists is returned when the blob already exists.
+	ErrBlobExists = errors.New("blob exists")
+
+	// ErrBlobDigestUnsupported is returned when the blob digest is an
+	// unsupported version.
+	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+	// ErrBlobUnknown is returned when the blob is not found.
+	ErrBlobUnknown = errors.New("unknown blob")
+
+	// ErrBlobUploadUnknown is returned when the upload is not found.
+	ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+	// ErrBlobInvalidLength is returned when the blob's length on commit does
+	// not match the descriptor or is otherwise an invalid value.
+	ErrBlobInvalidLength = errors.New("blob invalid length")
+)
+
+// ErrBlobInvalidDigest is returned when the digest check fails.
+type ErrBlobInvalidDigest struct {
+	Digest digest.Digest
+	Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+		err.Digest, err.Reason)
+}
+
+// ErrBlobMounted is returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+	From       reference.Canonical
+	Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+	return fmt.Sprintf("blob mounted from: %v to: %v",
+		err.From, err.Descriptor)
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+	// MediaType describes the type of the content. All text-based formats are
+	// encoded as utf-8.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
+
+	// Digest uniquely identifies the content. A byte stream can be verified
+	// against this digest.
+	Digest digest.Digest `json:"digest,omitempty"`
+
+	// URLs contains the source URLs of this content.
+	URLs []string `json:"urls,omitempty"`
+
+	// NOTE: Before adding a field here, please ensure that all
+	// other options have been exhausted. Many of the type relationships
+	// depend on the simplicity of this type.
+}
+
+// Descriptor returns the descriptor, to make it satisfy the Describable
+// interface. Note that implementations of Describable are generally objects
+// which can be described, not simply descriptors; this exception is in place
+// to make it more convenient to pass actual descriptors to functions that
+// expect Describable objects.
+func (d Descriptor) Descriptor() Descriptor {
+	return d
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+	// Stat provides metadata about a blob identified by the digest. If the
+	// blob is unknown to the describer, ErrBlobUnknown will be returned.
+	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobEnumerator enables iterating over blobs from storage.
+type BlobEnumerator interface {
+	Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+	BlobStatter
+
+	// SetDescriptor assigns the descriptor to the digest. The provided digest and
+	// the digest in the descriptor must map to identical content, but they may
+	// differ on their algorithm. The descriptor must have the canonical
+	// digest of the content and the digest algorithm must match the
+	// annotator's canonical algorithm.
+	//
+	// Such a facility can be used to map blobs between digest domains, with
+	// the restriction that the algorithm of the descriptor must match the
+	// canonical algorithm (i.e. sha256) of the annotator.
+ SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error + + // Clear enables descriptors to be unlinked + Clear(ctx context.Context, dgst digest.Digest) error +} + +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + +// ReadSeekCloser is the primary reader type for blob data, combining +// io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// BlobProvider describes operations for getting blob data. +type BlobProvider interface { + // Get returns the entire blob identified by digest along with the descriptor. + Get(ctx context.Context, dgst digest.Digest) ([]byte, error) + + // Open provides a ReadSeekCloser to the blob identified by the provided + // descriptor. If the blob is not known to the service, an error will be + // returned. + Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) +} + +// BlobServer can serve blobs via http. +type BlobServer interface { + // ServeBlob attempts to serve the blob, identified by dgst, via http. The + // service may decide to redirect the client elsewhere or serve the data + // directly. + // + // This handler only issues successful responses, such as 2xx or 3xx, + // meaning it serves data or issues a redirect. If the blob is not + // available, an error will be returned and the caller may still issue a + // response. + // + // The implementation may serve the same blob from a different digest + // domain. The appropriate headers will be set for the blob, unless they + // have already been set by the caller. + ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error +} + +// BlobIngester ingests blob data. +type BlobIngester interface { + // Put inserts the content p into the blob service, returning a descriptor + // or an error. + Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) + + // Create allocates a new blob writer to add a blob to this service. The + // returned handle can be written to and later resumed using an opaque + // identifier. With this approach, one can Close and Resume a BlobWriter + // multiple times until the BlobWriter is committed or cancelled. + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) + + // Resume attempts to resume a write to a blob, identified by an id. + Resume(ctx context.Context, id string) (BlobWriter, error) +} + +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + +// CreateOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type CreateOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + // Stat allows to pass precalculated descriptor to link and return. + // Blob access check will be skipped if set. + Stat *Descriptor + } +} + +// BlobWriter provides a handle for inserting data into a blob store. +// Instances should be obtained from BlobWriteService.Writer and +// BlobWriteService.Resume. 
If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+	io.WriteCloser
+	io.ReaderFrom
+
+	// Size returns the number of bytes written to this blob.
+	Size() int64
+
+	// ID returns the identifier for this writer. The ID can be used with the
+	// Blob service to later resume the write.
+	ID() string
+
+	// StartedAt returns the time this blob write was started.
+	StartedAt() time.Time
+
+	// Commit completes the blob writer process. The content is verified
+	// against the provided provisional descriptor, which may result in an
+	// error. Depending on the implementation, written data may be validated
+	// against the provisional descriptor fields. If MediaType is not present,
+	// the implementation may reject the commit or assign "application/octet-stream"
+	// to the blob. The returned descriptor may have a different
+	// digest depending on the blob store, referred to as the canonical
+	// descriptor.
+	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+	// Cancel ends the blob write without storing any data and frees any
+	// associated resources. Any data written thus far will be lost. Cancel
+	// implementations should allow multiple calls, even after a commit, in
+	// which case they result in a no-op. This allows use of Cancel in a defer
+	// statement, increasing the assurance that it is correctly called.
+	Cancel(ctx context.Context) error
+}
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
+type BlobService interface {
+	BlobStatter
+	BlobProvider
+	BlobIngester
+}
+
+// BlobStore represents the entire suite of blob-related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface {
+	BlobService
+	BlobServer
+	BlobDeleter
+}
diff --git a/circle.yml b/circle.yml
new file mode 100644
index 00000000..ddc76c86
--- /dev/null
+++ b/circle.yml
@@ -0,0 +1,94 @@
+# Pony-up!
+machine: + pre: + # Install gvm + - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) + # Install codecov for coverage + - pip install --user codecov + + post: + # go + - gvm install go1.8 --prefer-binary --name=stable + + environment: + # Convenient shortcuts to "common" locations + CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME + BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + # Trick circle brainflat "no absolute path" behavior + BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR + DOCKER_BUILDTAGS: "include_oss include_gcs" + # Workaround Circle parsing dumb bugs and/or YAML wonkyness + CIRCLE_PAIN: "mode: set" + + hosts: + # Not used yet + fancy: 127.0.0.1 + +dependencies: + pre: + # Copy the code to the gopath of all go versions + - > + gvm use stable && + mkdir -p "$(dirname $BASE_STABLE)" && + cp -R "$CHECKOUT" "$BASE_STABLE" + + override: + # Install dependencies for every copied clone/go version + - gvm use stable && go get github.com/lk4d4/vndr: + pwd: $BASE_STABLE + + post: + # For the stable go version, additionally install linting tools + - > + gvm use stable && + go get github.com/axw/gocov/gocov github.com/golang/lint/golint + +test: + pre: + # Output the go versions we are going to test + # - gvm use old && go version + - gvm use stable && go version + + # Ensure validation of dependencies + - git fetch origin: + pwd: $BASE_STABLE + - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi: + pwd: $BASE_STABLE + + # First thing: build everything. This will catch compile errors, and it's + # also necessary for go vet to work properly (see #807). + - gvm use stable && go install $(go list ./... | grep -v "/vendor/"): + pwd: $BASE_STABLE + + # FMT + - gvm use stable && make fmt: + pwd: $BASE_STABLE + + # VET + - gvm use stable && make vet: + pwd: $BASE_STABLE + + # LINT + - gvm use stable && make lint: + pwd: $BASE_STABLE + + override: + # Test stable, and report + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + timeout: 1000 + pwd: $BASE_STABLE + + # Test stable with race + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': + timeout: 1000 + pwd: $BASE_STABLE + post: + # Report to codecov + - bash <(curl -s https://codecov.io/bash): + pwd: $BASE_STABLE + + ## Notes + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... 
| tee /dev/stderr)" + # http://labix.org/gocheck diff --git a/cmd/digest/main.go b/cmd/digest/main.go new file mode 100644 index 00000000..308be461 --- /dev/null +++ b/cmd/digest/main.go @@ -0,0 +1,100 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/docker/distribution/version" + "github.com/opencontainers/go-digest" + + _ "crypto/sha256" + _ "crypto/sha512" +) + +var ( + algorithm = digest.Canonical + showVersion bool +) + +type job struct { + name string + reader io.Reader +} + +func init() { + flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)") + flag.Var(&algorithm, "algorithm", "select the digest algorithm") + flag.BoolVar(&showVersion, "version", false, "show the version and exit") + + log.SetFlags(0) + log.SetPrefix(os.Args[0] + ": ") +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) + fmt.Fprint(os.Stderr, ` +Calculate the digest of one or more input files, emitting the result +to standard out. If no files are provided, the digest of stdin will +be calculated. + +`) + flag.PrintDefaults() +} + +func unsupported() { + log.Fatalf("unsupported digest algorithm: %v", algorithm) +} + +func main() { + var jobs []job + + flag.Usage = usage + flag.Parse() + if showVersion { + version.PrintVersion() + return + } + + var fail bool // if we fail on one item, foul the exit code + if flag.NArg() > 0 { + for _, path := range flag.Args() { + fp, err := os.Open(path) + + if err != nil { + log.Printf("%s: %v", path, err) + fail = true + continue + } + defer fp.Close() + + jobs = append(jobs, job{name: path, reader: fp}) + } + } else { + // just read stdin + jobs = append(jobs, job{name: "-", reader: os.Stdin}) + } + + digestFn := algorithm.FromReader + + if !algorithm.Available() { + unsupported() + } + + for _, job := range jobs { + dgst, err := digestFn(job.reader) + if err != nil { + log.Printf("%s: %v", job.name, err) + fail = true + continue + } + + fmt.Printf("%v\t%s\n", dgst, job.name) + } + + if fail { + os.Exit(1) + } +} diff --git a/cmd/registry-api-descriptor-template/main.go b/cmd/registry-api-descriptor-template/main.go new file mode 100644 index 00000000..e9cbc42a --- /dev/null +++ b/cmd/registry-api-descriptor-template/main.go @@ -0,0 +1,131 @@ +// registry-api-descriptor-template uses the APIDescriptor defined in the +// api/v2 package to execute templates passed to the command line. +// +// For example, to generate a new API specification, one would execute the +// following command from the repo root: +// +// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md +// +// The templates are passed in the api/v2.APIDescriptor object. Please see the +// package documentation for fields available on that object. The template +// syntax is from Go's standard library text/template package. For information +// on Go's template syntax, please see golang.org/pkg/text/template. 
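+//
+// For illustration only (this example is not part of the shipped tool): with
+// the helper functions registered below, a template fragment such as
+//
+//	{{range .RouteDescriptors}}{{.Name}}: {{prettygorilla .Path}}{{end}}
+//
+// would emit each route's name alongside its gorilla/mux pattern with the
+// embedded regular expressions stripped by prettyGorillaMuxPath.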
+package main + +import ( + "log" + "net/http" + "os" + "path/filepath" + "regexp" + "text/template" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" +) + +var spaceRegex = regexp.MustCompile(`\n\s*`) + +func main() { + + if len(os.Args) != 2 { + log.Fatalln("please specify a template to execute.") + } + + path := os.Args[1] + filename := filepath.Base(path) + + funcMap := template.FuncMap{ + "removenewlines": func(s string) string { + return spaceRegex.ReplaceAllString(s, " ") + }, + "statustext": http.StatusText, + "prettygorilla": prettyGorillaMuxPath, + } + + tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) + + data := struct { + RouteDescriptors []v2.RouteDescriptor + ErrorDescriptors []errcode.ErrorDescriptor + }{ + RouteDescriptors: v2.APIDescriptor.RouteDescriptors, + ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"), + // The following are part of the specification but provided by errcode default. + errcode.ErrorCodeUnauthorized.Descriptor(), + errcode.ErrorCodeDenied.Descriptor(), + errcode.ErrorCodeUnsupported.Descriptor()), + } + + if err := tmpl.Execute(os.Stdout, data); err != nil { + log.Fatalln(err) + } +} + +// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux +// route string, making it suitable for documentation. +func prettyGorillaMuxPath(s string) string { + // Stateful parser that removes regular expressions from gorilla + // routes. It correctly handles balanced bracket pairs. + + var output string + var label string + var level int + +start: + if s[0] == '{' { + s = s[1:] + level++ + goto capture + } + + output += string(s[0]) + s = s[1:] + + goto end +capture: + switch s[0] { + case '{': + level++ + case '}': + level-- + + if level == 0 { + s = s[1:] + goto label + } + case ':': + s = s[1:] + goto skip + default: + label += string(s[0]) + } + s = s[1:] + goto capture +skip: + switch s[0] { + case '{': + level++ + case '}': + level-- + } + s = s[1:] + + if level == 0 { + goto label + } + + goto skip +label: + if label != "" { + output += "<" + label + ">" + label = "" + } +end: + if s != "" { + goto start + } + + return output + +} diff --git a/cmd/registry/config-cache.yml b/cmd/registry/config-cache.yml new file mode 100644 index 00000000..7a274ea5 --- /dev/null +++ b/cmd/registry/config-cache.yml @@ -0,0 +1,55 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development +storage: + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry-cache + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-8082 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +proxy: + remoteurl: https://registry-1.docker.io + username: username + password: password +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/cmd/registry/config-dev.yml b/cmd/registry/config-dev.yml new file mode 100644 index 00000000..e6b09064 --- /dev/null +++ 
b/cmd/registry/config-dev.yml @@ -0,0 +1,69 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com +storage: + delete: + enabled: true + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + debug: + addr: :5001 + prometheus: + enabled: true + path: /metrics + headers: + X-Content-Type-Options: [nosniff] +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-5003 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/cmd/registry/config-example.yml b/cmd/registry/config-example.yml new file mode 100644 index 00000000..3277f9a2 --- /dev/null +++ b/cmd/registry/config-example.yml @@ -0,0 +1,18 @@ +version: 0.1 +log: + fields: + service: registry +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /var/lib/registry +http: + addr: :5000 + headers: + X-Content-Type-Options: [nosniff] +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/cmd/registry/main.go b/cmd/registry/main.go new file mode 100644 index 00000000..c077a0c1 --- /dev/null +++ b/cmd/registry/main.go @@ -0,0 +1,25 @@ +package main + +import ( + _ "net/http/pprof" + + "github.com/docker/distribution/registry" + _ "github.com/docker/distribution/registry/auth/htpasswd" + _ "github.com/docker/distribution/registry/auth/silly" + _ "github.com/docker/distribution/registry/auth/token" + _ "github.com/docker/distribution/registry/proxy" + _ "github.com/docker/distribution/registry/storage/driver/azure" + _ "github.com/docker/distribution/registry/storage/driver/filesystem" + _ "github.com/docker/distribution/registry/storage/driver/gcs" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" + _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" + _ "github.com/docker/distribution/registry/storage/driver/middleware/redirect" + _ "github.com/docker/distribution/registry/storage/driver/oss" + _ "github.com/docker/distribution/registry/storage/driver/s3-aws" + _ "github.com/docker/distribution/registry/storage/driver/s3-goamz" + _ "github.com/docker/distribution/registry/storage/driver/swift" +) + +func main() { + registry.RootCmd.Execute() +} diff --git a/configuration/configuration.go b/configuration/configuration.go new file mode 100644 index 00000000..621d29ae --- /dev/null +++ b/configuration/configuration.go @@ -0,0 +1,663 @@ +package configuration + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "time" +) + +// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and +// optionally modified by environment variables. +// +// Note that yaml field names should never include _ characters, since this is the separator used +// in environment variable names. 
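+//
+// For example (an illustrative note, not normative documentation): with the
+// structure below, the value at the YAML path log.level can be overridden by
+// setting the environment variable REGISTRY_LOG_LEVEL, i.e. the path
+// uppercased and joined with underscores under the REGISTRY prefix.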
+type Configuration struct {
+	// Version is the version which defines the format of the rest of the configuration
+	Version Version `yaml:"version"`
+
+	// Log supports setting various parameters related to the logging
+	// subsystem.
+	Log struct {
+		// AccessLog configures access logging.
+		AccessLog struct {
+			// Disabled disables access logging.
+			Disabled bool `yaml:"disabled,omitempty"`
+		} `yaml:"accesslog,omitempty"`
+
+		// Level is the granularity at which registry operations are logged.
+		Level Loglevel `yaml:"level"`
+
+		// Formatter overrides the default formatter with another. Options
+		// include "text", "json" and "logstash".
+		Formatter string `yaml:"formatter,omitempty"`
+
+		// Fields allows users to specify static string fields to include in
+		// the logger context.
+		Fields map[string]interface{} `yaml:"fields,omitempty"`
+
+		// Hooks allows users to configure log hooks that run additional
+		// handlers when log messages at the configured levels are emitted.
+		Hooks []LogHook `yaml:"hooks,omitempty"`
+	}
+
+	// Loglevel is the level at which registry operations are logged. This is
+	// deprecated. Please use Log.Level in the future.
+	Loglevel Loglevel `yaml:"loglevel,omitempty"`
+
+	// Storage is the configuration for the registry's storage driver
+	Storage Storage `yaml:"storage"`
+
+	// Auth allows configuration of various authorization methods that may be
+	// used to gate requests.
+	Auth Auth `yaml:"auth,omitempty"`
+
+	// Middleware lists all middlewares to be used by the registry.
+	Middleware map[string][]Middleware `yaml:"middleware,omitempty"`
+
+	// Reporting is the configuration for error reporting
+	Reporting Reporting `yaml:"reporting,omitempty"`
+
+	// HTTP contains configuration parameters for the registry's http
+	// interface.
+	HTTP struct {
+		// Addr specifies the bind address for the registry instance.
+		Addr string `yaml:"addr,omitempty"`
+
+		// Net specifies the net portion of the bind address. A default empty value means tcp.
+		Net string `yaml:"net,omitempty"`
+
+		// Host specifies an externally-reachable address for the registry, as a fully
+		// qualified URL.
+		Host string `yaml:"host,omitempty"`
+
+		Prefix string `yaml:"prefix,omitempty"`
+
+		// Secret specifies the secret key which HMAC tokens are created with.
+		Secret string `yaml:"secret,omitempty"`
+
+		// RelativeURLs specifies that relative URLs should be returned in
+		// Location headers
+		RelativeURLs bool `yaml:"relativeurls,omitempty"`
+
+		// TLS instructs the http server to listen with a TLS configuration.
+		// This only supports a simple TLS configuration with a cert and key.
+		// Mostly, this is useful for testing situations or simple deployments
+		// that require TLS. If more complex configurations are required, use
+		// a proxy or make a proposal to add support here.
+		TLS struct {
+			// Certificate specifies the path to an x509 certificate file to
+			// be used for TLS.
+			Certificate string `yaml:"certificate,omitempty"`
+
+			// Key specifies the path to the x509 key file, which should
+			// contain the private portion for the file specified in
+			// Certificate.
+			Key string `yaml:"key,omitempty"`
+
+			// ClientCAs specifies the CA certs for client authentication.
+			// A file may contain multiple CA certificates encoded as PEM.
+			ClientCAs []string `yaml:"clientcas,omitempty"`
+
+			// LetsEncrypt is used to configure TLS through
+			// Let's Encrypt instead of manually specifying certificate and
+			// key.
+			// If a TLS certificate is specified, the Let's Encrypt
+			// section will not be used.
+			LetsEncrypt struct {
+				// CacheFile specifies the cache file to use for Let's Encrypt
+				// certificates and keys.
+				CacheFile string `yaml:"cachefile,omitempty"`
+
+				// Email is the email to use during Let's Encrypt registration
+				Email string `yaml:"email,omitempty"`
+
+				// Hosts specifies the hosts which are allowed to obtain Let's
+				// Encrypt certificates.
+				Hosts []string `yaml:"hosts,omitempty"`
+			} `yaml:"letsencrypt,omitempty"`
+		} `yaml:"tls,omitempty"`
+
+		// Headers is a set of headers to include in HTTP responses. A common
+		// use case for this would be security headers such as
+		// Strict-Transport-Security. The map keys are the header names, and
+		// the values are the associated header payloads.
+		Headers http.Header `yaml:"headers,omitempty"`
+
+		// Debug configures the http debug interface, if specified. This can
+		// include services such as pprof, expvar and other data that should
+		// not be exposed externally. Left disabled by default.
+		Debug struct {
+			// Addr specifies the bind address for the debug server.
+			Addr string `yaml:"addr,omitempty"`
+			// Prometheus configures the Prometheus telemetry endpoint.
+			Prometheus struct {
+				Enabled bool   `yaml:"enabled,omitempty"`
+				Path    string `yaml:"path,omitempty"`
+			} `yaml:"prometheus,omitempty"`
+		} `yaml:"debug,omitempty"`
+
+		// HTTP2 configuration options
+		HTTP2 struct {
+			// Specifies whether the registry should disallow clients attempting
+			// to connect via http2. If set to true, only http/1.1 is supported.
+			Disabled bool `yaml:"disabled,omitempty"`
+		} `yaml:"http2,omitempty"`
+	} `yaml:"http,omitempty"`
+
+	// Notifications specifies configuration about the various endpoints to
+	// which registry events are dispatched.
+	Notifications Notifications `yaml:"notifications,omitempty"`
+
+	// Redis configures the redis pool available to the registry webapp.
+	Redis struct {
+		// Addr specifies the redis instance available to the application.
+		Addr string `yaml:"addr,omitempty"`
+
+		// Password string to use when making a connection.
+		Password string `yaml:"password,omitempty"`
+
+		// DB specifies the database to connect to on the redis instance.
+		DB int `yaml:"db,omitempty"`
+
+		DialTimeout  time.Duration `yaml:"dialtimeout,omitempty"`  // timeout for connect
+		ReadTimeout  time.Duration `yaml:"readtimeout,omitempty"`  // timeout for reads of data
+		WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data
+
+		// Pool configures the behavior of the redis connection pool.
+		Pool struct {
+			// MaxIdle sets the maximum number of idle connections.
+			MaxIdle int `yaml:"maxidle,omitempty"`
+
+			// MaxActive sets the maximum number of connections that should be
+			// opened before blocking a connection request.
+			MaxActive int `yaml:"maxactive,omitempty"`
+
+			// IdleTimeout sets the amount of time to wait before closing
+			// inactive connections.
+			IdleTimeout time.Duration `yaml:"idletimeout,omitempty"`
+		} `yaml:"pool,omitempty"`
+	} `yaml:"redis,omitempty"`
+
+	Health Health `yaml:"health,omitempty"`
+
+	Proxy Proxy `yaml:"proxy,omitempty"`
+
+	// Compatibility is used for configuring compatibility with older or
+	// deprecated features.
+	Compatibility struct {
+		// Schema1 configures how schema1 manifests will be handled
+		Schema1 struct {
+			// TrustKey is the signing key to use for adding the signature to
+			// schema1 manifests.
+			TrustKey string `yaml:"signingkeyfile,omitempty"`
+		} `yaml:"schema1,omitempty"`
+	} `yaml:"compatibility,omitempty"`
+
+	// Validation configures validation options for the registry.
+	Validation struct {
+		// Enabled enables the other options in this section. This field is
+		// deprecated in favor of Disabled.
+		Enabled bool `yaml:"enabled,omitempty"`
+		// Disabled disables the other options in this section.
+		Disabled bool `yaml:"disabled,omitempty"`
+		// Manifests configures manifest validation.
+		Manifests struct {
+			// URLs configures validation for URLs in pushed manifests.
+			URLs struct {
+				// Allow specifies regular expressions (https://godoc.org/regexp/syntax)
+				// that URLs in pushed manifests must match.
+				Allow []string `yaml:"allow,omitempty"`
+				// Deny specifies regular expressions (https://godoc.org/regexp/syntax)
+				// that URLs in pushed manifests must not match.
+				Deny []string `yaml:"deny,omitempty"`
+			} `yaml:"urls,omitempty"`
+		} `yaml:"manifests,omitempty"`
+	} `yaml:"validation,omitempty"`
+
+	// Policy configures registry policy options.
+	Policy struct {
+		// Repository configures policies for repositories
+		Repository struct {
+			// Classes is a list of repository classes which the
+			// registry allows content for. This class is matched
+			// against the configuration media type inside uploaded
+			// manifests. When non-empty, the registry will enforce
+			// the class in authorized resources.
+			Classes []string `yaml:"classes"`
+		} `yaml:"repository,omitempty"`
+	} `yaml:"policy,omitempty"`
+}
+
+// LogHook is composed of a hook Level and Type. Once hooks are configured,
+// they run follow-up handling automatically when log messages are emitted
+// at the configured levels. For example, a hook can send an email
+// notification when an error is logged in the application.
+type LogHook struct {
+	// Disabled lets the user enable or disable the hook.
+	Disabled bool `yaml:"disabled,omitempty"`
+
+	// Type allows the user to select which type of hook handler they want.
+	Type string `yaml:"type,omitempty"`
+
+	// Levels sets the log levels at which the hook is executed.
+	Levels []string `yaml:"levels,omitempty"`
+
+	// MailOptions allows the user to configure email parameters.
+	MailOptions MailOptions `yaml:"options,omitempty"`
+}
+
+// MailOptions provides the configuration for the mail hook handler.
+type MailOptions struct {
+	SMTP struct {
+		// Addr defines the smtp host address
+		Addr string `yaml:"addr,omitempty"`
+
+		// Username defines the user name for the smtp host
+		Username string `yaml:"username,omitempty"`
+
+		// Password defines the password of the login user
+		Password string `yaml:"password,omitempty"`
+
+		// Insecure defines whether the smtp login skips certificate verification.
+		Insecure bool `yaml:"insecure,omitempty"`
+	} `yaml:"smtp,omitempty"`
+
+	// From defines the mail sending address
+	From string `yaml:"from,omitempty"`
+
+	// To defines the mail receiving addresses
+	To []string `yaml:"to,omitempty"`
+}
+
+// FileChecker is a type of entry in the health section for checking files.
+type FileChecker struct {
+	// Interval is the duration in between checks
+	Interval time.Duration `yaml:"interval,omitempty"`
+	// File is the path to check
+	File string `yaml:"file,omitempty"`
+	// Threshold is the number of times a check must fail to trigger an
+	// unhealthy state
+	Threshold int `yaml:"threshold,omitempty"`
+}
+
+// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
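+//
+// A hedged yaml sketch (field names taken from the struct tags below; the
+// uri value is illustrative only and not part of the original source):
+//
+//	health:
+//	  http:
+//	    - uri: http://localhost:5001/debug/health
+//	      interval: 10s
+//	      threshold: 3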
+type HTTPChecker struct {
+	// Timeout is the duration to wait before timing out the HTTP request
+	Timeout time.Duration `yaml:"timeout,omitempty"`
+	// StatusCode is the expected status code
+	StatusCode int
+	// Interval is the duration in between checks
+	Interval time.Duration `yaml:"interval,omitempty"`
+	// URI is the HTTP URI to check
+	URI string `yaml:"uri,omitempty"`
+	// Headers lists static headers that should be added to all requests
+	Headers http.Header `yaml:"headers"`
+	// Threshold is the number of times a check must fail to trigger an
+	// unhealthy state
+	Threshold int `yaml:"threshold,omitempty"`
+}
+
+// TCPChecker is a type of entry in the health section for checking TCP servers.
+type TCPChecker struct {
+	// Timeout is the duration to wait before timing out the TCP connection
+	Timeout time.Duration `yaml:"timeout,omitempty"`
+	// Interval is the duration in between checks
+	Interval time.Duration `yaml:"interval,omitempty"`
+	// Addr is the TCP address to check
+	Addr string `yaml:"addr,omitempty"`
+	// Threshold is the number of times a check must fail to trigger an
+	// unhealthy state
+	Threshold int `yaml:"threshold,omitempty"`
+}
+
+// Health provides the configuration section for health checks.
+type Health struct {
+	// FileCheckers is a list of paths to check
+	FileCheckers []FileChecker `yaml:"file,omitempty"`
+	// HTTPCheckers is a list of URIs to check
+	HTTPCheckers []HTTPChecker `yaml:"http,omitempty"`
+	// TCPCheckers is a list of TCP addresses to check
+	TCPCheckers []TCPChecker `yaml:"tcp,omitempty"`
+	// StorageDriver configures a health check on the configured storage
+	// driver
+	StorageDriver struct {
+		// Enabled turns on the health check for the storage driver
+		Enabled bool `yaml:"enabled,omitempty"`
+		// Interval is the duration in between checks
+		Interval time.Duration `yaml:"interval,omitempty"`
+		// Threshold is the number of times a check must fail to trigger an
+		// unhealthy state
+		Threshold int `yaml:"threshold,omitempty"`
+	} `yaml:"storagedriver,omitempty"`
+}
+
+// v0_1Configuration is a Version 0.1 Configuration struct
+// This is currently aliased to Configuration, as it is the current version
+type v0_1Configuration Configuration
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent unsigned integers
+func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var versionString string
+	err := unmarshal(&versionString)
+	if err != nil {
+		return err
+	}
+
+	newVersion := Version(versionString)
+	if _, err := newVersion.major(); err != nil {
+		return err
+	}
+
+	if _, err := newVersion.minor(); err != nil {
+		return err
+	}
+
+	*version = newVersion
+	return nil
+}
+
+// CurrentVersion is the most recent Version that can be parsed
+var CurrentVersion = MajorMinorVersion(0, 1)
+
+// Loglevel is the level at which operations are logged
+// This can be error, warn, info, or debug
+type Loglevel string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
+// valid loglevel
+func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var loglevelString string
+	err := unmarshal(&loglevelString)
+	if err != nil {
+		return err
+	}
+
+	loglevelString = strings.ToLower(loglevelString)
+	switch loglevelString {
+	case "error", "warn", "info", "debug":
+	default:
+		return fmt.Errorf("invalid loglevel %s, must be one of [error, warn, info, debug]", loglevelString)
+	}
+
+	*loglevel = Loglevel(loglevelString)
+	return nil
+}
+
+// Parameters defines a key-value parameters mapping
+type Parameters map[string]interface{}
+
+// Storage defines the configuration for registry object storage
+type Storage map[string]Parameters
+
+// Type returns the storage driver type, such as filesystem or s3
+func (storage Storage) Type() string {
+	var storageType []string
+
+	// Return the only non-reserved key in this map
+	for k := range storage {
+		switch k {
+		case "maintenance":
+			// allow configuration of maintenance
+		case "cache":
+			// allow configuration of caching
+		case "delete":
+			// allow configuration of delete
+		case "redirect":
+			// allow configuration of redirect
+		default:
+			storageType = append(storageType, k)
+		}
+	}
+	if len(storageType) > 1 {
+		panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", "))
+	}
+	if len(storageType) == 1 {
+		return storageType[0]
+	}
+	return ""
+}
+
+// Parameters returns the Parameters map for a Storage configuration
+func (storage Storage) Parameters() Parameters {
+	return storage[storage.Type()]
+}
+
+// setParameter changes the parameter at the provided key to the new value
+func (storage Storage) setParameter(key string, value interface{}) {
+	storage[storage.Type()][key] = value
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
+func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var storageMap map[string]Parameters
+	err := unmarshal(&storageMap)
+	if err == nil {
+		if len(storageMap) > 1 {
+			types := make([]string, 0, len(storageMap))
+			for k := range storageMap {
+				switch k {
+				case "maintenance":
+					// allow for configuration of maintenance
+				case "cache":
+					// allow configuration of caching
+				case "delete":
+					// allow configuration of delete
+				case "redirect":
+					// allow configuration of redirect
+				default:
+					types = append(types, k)
+				}
+			}
+
+			if len(types) > 1 {
+				return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
+			}
+		}
+		*storage = storageMap
+		return nil
+	}
+
+	var storageType string
+	err = unmarshal(&storageType)
+	if err == nil {
+		*storage = Storage{storageType: Parameters{}}
+		return nil
+	}
+
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface
+func (storage Storage) MarshalYAML() (interface{}, error) {
+	if storage.Parameters() == nil {
+		return storage.Type(), nil
+	}
+	return map[string]Parameters(storage), nil
+}
+
+// Auth defines the configuration for registry authorization.
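+//
+// For example, the test configuration in configuration_test.go below uses a
+// single "silly" auth section:
+//
+//	auth:
+//	  silly:
+//	    realm: silly
+//	    service: silly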
+type Auth map[string]Parameters
+
+// Type returns the auth type, such as htpasswd or token
+func (auth Auth) Type() string {
+	// Return the only key in this map
+	for k := range auth {
+		return k
+	}
+	return ""
+}
+
+// Parameters returns the Parameters map for an Auth configuration
+func (auth Auth) Parameters() Parameters {
+	return auth[auth.Type()]
+}
+
+// setParameter changes the parameter at the provided key to the new value
+func (auth Auth) setParameter(key string, value interface{}) {
+	auth[auth.Type()][key] = value
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
+func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var m map[string]Parameters
+	err := unmarshal(&m)
+	if err == nil {
+		if len(m) > 1 {
+			types := make([]string, 0, len(m))
+			for k := range m {
+				types = append(types, k)
+			}
+
+			// TODO(stevvooe): May want to change this slightly for
+			// authorization to allow multiple challenges.
+			return fmt.Errorf("must provide exactly one type. Provided: %v", types)
+
+		}
+		*auth = m
+		return nil
+	}
+
+	var authType string
+	err = unmarshal(&authType)
+	if err == nil {
+		*auth = Auth{authType: Parameters{}}
+		return nil
+	}
+
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface
+func (auth Auth) MarshalYAML() (interface{}, error) {
+	if auth.Parameters() == nil {
+		return auth.Type(), nil
+	}
+	return map[string]Parameters(auth), nil
+}
+
+// Notifications configures multiple http endpoints.
+type Notifications struct {
+	// Endpoints is a list of http configurations for endpoints that
+	// respond to webhook notifications. In the future, we may allow other
+	// kinds of endpoints, such as external queues.
+	Endpoints []Endpoint `yaml:"endpoints,omitempty"`
+}
+
+// Endpoint describes the configuration of an http webhook notification
+// endpoint.
+type Endpoint struct {
+	Name              string        `yaml:"name"`              // identifies the endpoint in the registry instance.
+	Disabled          bool          `yaml:"disabled"`          // disables the endpoint
+	URL               string        `yaml:"url"`               // post url for the endpoint.
+	Headers           http.Header   `yaml:"headers"`           // static headers that should be added to all requests
+	Timeout           time.Duration `yaml:"timeout"`           // HTTP timeout
+	Threshold         int           `yaml:"threshold"`         // circuit breaker threshold before backing off on failure
+	Backoff           time.Duration `yaml:"backoff"`           // backoff duration
+	IgnoredMediaTypes []string      `yaml:"ignoredmediatypes"` // target media types to ignore
+	Ignore            Ignore        `yaml:"ignore"`            // ignore event types
+}
+
+// Ignore configures the mediaTypes and actions of events that should not be
+// propagated.
+type Ignore struct {
+	MediaTypes []string `yaml:"mediatypes"` // target media types to ignore
+	Actions    []string `yaml:"actions"`    // ignore action types
+}
+
+// Reporting defines error reporting methods.
+type Reporting struct {
+	// Bugsnag configures error reporting for Bugsnag (bugsnag.com).
+	Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"`
+	// NewRelic configures error reporting for NewRelic (newrelic.com)
+	NewRelic NewRelicReporting `yaml:"newrelic,omitempty"`
+}
+
+// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com).
+type BugsnagReporting struct {
+	// APIKey is the Bugsnag api key.
+	APIKey string `yaml:"apikey,omitempty"`
+	// ReleaseStage tracks where the registry is deployed.
+ // Examples: production, staging, development + ReleaseStage string `yaml:"releasestage,omitempty"` + // Endpoint is used for specifying an enterprise Bugsnag endpoint. + Endpoint string `yaml:"endpoint,omitempty"` +} + +// NewRelicReporting configures error reporting for NewRelic (newrelic.com) +type NewRelicReporting struct { + // LicenseKey is the NewRelic user license key + LicenseKey string `yaml:"licensekey,omitempty"` + // Name is the component name of the registry in NewRelic + Name string `yaml:"name,omitempty"` + // Verbose configures debug output to STDOUT + Verbose bool `yaml:"verbose,omitempty"` +} + +// Middleware configures named middlewares to be applied at injection points. +type Middleware struct { + // Name the middleware registers itself as + Name string `yaml:"name"` + // Flag to disable middleware easily + Disabled bool `yaml:"disabled,omitempty"` + // Map of parameters that will be passed to the middleware's initialization function + Options Parameters `yaml:"options"` +} + +// Proxy configures the registry as a pull through cache +type Proxy struct { + // RemoteURL is the URL of the remote registry + RemoteURL string `yaml:"remoteurl"` + + // Username of the hub user + Username string `yaml:"username"` + + // Password of the hub user + Password string `yaml:"password"` +} + +// Parse parses an input configuration yaml document into a Configuration struct +// This should generally be capable of handling old configuration format versions +// +// Environment variables may be used to override configuration parameters other than version, +// following the scheme below: +// Configuration.Abc may be replaced by the value of REGISTRY_ABC, +// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth +func Parse(rd io.Reader) (*Configuration, error) { + in, err := ioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + p := NewParser("registry", []VersionedParseInfo{ + { + Version: MajorMinorVersion(0, 1), + ParseAs: reflect.TypeOf(v0_1Configuration{}), + ConversionFunc: func(c interface{}) (interface{}, error) { + if v0_1, ok := c.(*v0_1Configuration); ok { + if v0_1.Loglevel == Loglevel("") { + v0_1.Loglevel = Loglevel("info") + } + if v0_1.Storage.Type() == "" { + return nil, errors.New("No storage configuration provided") + } + return (*Configuration)(v0_1), nil + } + return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) + }, + }, + }) + + config := new(Configuration) + err = p.Parse(in, config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go new file mode 100644 index 00000000..e74b92c4 --- /dev/null +++ b/configuration/configuration_test.go @@ -0,0 +1,549 @@ +package configuration + +import ( + "bytes" + "net/http" + "os" + "reflect" + "strings" + "testing" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +// Hook up gocheck into the "go test" runner +func Test(t *testing.T) { TestingT(t) } + +// configStruct is a canonical example configuration, which should map to configYamlV0_1 +var configStruct = Configuration{ + Version: "0.1", + Log: struct { + AccessLog struct { + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"accesslog,omitempty"` + Level Loglevel `yaml:"level"` + Formatter string `yaml:"formatter,omitempty"` + Fields map[string]interface{} `yaml:"fields,omitempty"` + Hooks []LogHook `yaml:"hooks,omitempty"` + }{ + Fields: map[string]interface{}{"environment": "test"}, + }, + Loglevel: "info", + Storage: Storage{ + "s3": Parameters{ + "region": "us-east-1", + "bucket": "my-bucket", + "rootdirectory": "/registry", + "encrypt": true, + "secure": false, + "accesskey": "SAMPLEACCESSKEY", + "secretkey": "SUPERSECRET", + "host": nil, + "port": 42, + }, + }, + Auth: Auth{ + "silly": Parameters{ + "realm": "silly", + "service": "silly", + }, + }, + Reporting: Reporting{ + Bugsnag: BugsnagReporting{ + APIKey: "BugsnagApiKey", + }, + }, + Notifications: Notifications{ + Endpoints: []Endpoint{ + { + Name: "endpoint-1", + URL: "http://example.com", + Headers: http.Header{ + "Authorization": []string{"Bearer "}, + }, + IgnoredMediaTypes: []string{"application/octet-stream"}, + Ignore: Ignore{ + MediaTypes: []string{"application/octet-stream"}, + Actions: []string{"pull"}, + }, + }, + }, + }, + HTTP: struct { + Addr string `yaml:"addr,omitempty"` + Net string `yaml:"net,omitempty"` + Host string `yaml:"host,omitempty"` + Prefix string `yaml:"prefix,omitempty"` + Secret string `yaml:"secret,omitempty"` + RelativeURLs bool `yaml:"relativeurls,omitempty"` + TLS struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + LetsEncrypt struct { + CacheFile string `yaml:"cachefile,omitempty"` + Email string `yaml:"email,omitempty"` + Hosts []string `yaml:"hosts,omitempty"` + } `yaml:"letsencrypt,omitempty"` + } `yaml:"tls,omitempty"` + Headers http.Header `yaml:"headers,omitempty"` + Debug struct { + Addr string `yaml:"addr,omitempty"` + Prometheus struct { + Enabled bool `yaml:"enabled,omitempty"` + Path string `yaml:"path,omitempty"` + } `yaml:"prometheus,omitempty"` + } `yaml:"debug,omitempty"` + HTTP2 struct { + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"http2,omitempty"` + }{ + TLS: struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + LetsEncrypt struct { + CacheFile string `yaml:"cachefile,omitempty"` + Email string `yaml:"email,omitempty"` + Hosts []string `yaml:"hosts,omitempty"` + } `yaml:"letsencrypt,omitempty"` + }{ + ClientCAs: []string{"/path/to/ca.pem"}, + }, + Headers: http.Header{ + "X-Content-Type-Options": []string{"nosniff"}, + }, + HTTP2: struct { + Disabled bool `yaml:"disabled,omitempty"` + }{ + Disabled: false, + }, + }, +} + +// configYamlV0_1 is a Version 0.1 yaml document representing configStruct +var configYamlV0_1 = ` +version: 0.1 +log: + fields: + environment: test +loglevel: info +storage: + s3: + region: us-east-1 + bucket: my-bucket + rootdirectory: /registry + encrypt: true + secure: false + accesskey: SAMPLEACCESSKEY + secretkey: SUPERSECRET + host: ~ + port: 42 +auth: + silly: + realm: silly + service: silly +notifications: + endpoints: + - name: endpoint-1 + url: http://example.com + headers: + Authorization: [Bearer ] + 
ignoredmediatypes: + - application/octet-stream + ignore: + mediatypes: + - application/octet-stream + actions: + - pull +reporting: + bugsnag: + apikey: BugsnagApiKey +http: + clientcas: + - /path/to/ca.pem + headers: + X-Content-Type-Options: [nosniff] +` + +// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory +// storage driver with no parameters +var inmemoryConfigYamlV0_1 = ` +version: 0.1 +loglevel: info +storage: inmemory +auth: + silly: + realm: silly + service: silly +notifications: + endpoints: + - name: endpoint-1 + url: http://example.com + headers: + Authorization: [Bearer ] + ignoredmediatypes: + - application/octet-stream + ignore: + mediatypes: + - application/octet-stream + actions: + - pull +http: + headers: + X-Content-Type-Options: [nosniff] +` + +type ConfigSuite struct { + expectedConfig *Configuration +} + +var _ = Suite(new(ConfigSuite)) + +func (suite *ConfigSuite) SetUpTest(c *C) { + os.Clearenv() + suite.expectedConfig = copyConfig(configStruct) +} + +// TestMarshalRoundtrip validates that configStruct can be marshaled and +// unmarshaled without changing any parameters +func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + config, err := Parse(bytes.NewReader(configBytes)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseSimple validates that configYamlV0_1 can be parsed into a struct +// matching configStruct +func (suite *ConfigSuite) TestParseSimple(c *C) { + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInmemory validates that configuration yaml with storage provided as +// a string can be parsed into a Configuration struct with no storage parameters +func (suite *ConfigSuite) TestParseInmemory(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Log.Fields = nil + + config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseIncomplete validates that an incomplete yaml configuration cannot +// be parsed without providing environment variables to fill in the missing +// components. 
+func (suite *ConfigSuite) TestParseIncomplete(c *C) { + incompleteConfigYaml := "version: 0.1" + _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, NotNil) + + suite.expectedConfig.Log.Fields = nil + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} + suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} + suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Notifications = Notifications{} + suite.expectedConfig.HTTP.Headers = nil + + // Note: this also tests that REGISTRY_STORAGE and + // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + os.Setenv("REGISTRY_AUTH", "silly") + os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly") + + config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvStorage validates that providing environment variables +// that match the given storage type will only include environment-defined +// parameters and remove yaml-defined parameters +func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { + suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}} + + os.Setenv("REGISTRY_STORAGE", "s3") + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change +// and add to the given storage parameters will change and add parameters to the parsed +// Configuration struct +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { + suite.expectedConfig.Storage.setParameter("region", "us-west-1") + suite.expectedConfig.Storage.setParameter("secure", true) + suite.expectedConfig.Storage.setParameter("newparam", "some Value") + + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") + os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") + os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageType validates that providing an environment variable that +// changes the storage type will be reflected in the parsed Configuration struct +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + + os.Setenv("REGISTRY_STORAGE", "inmemory") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable +// that changes the storage type will be reflected in the parsed Configuration struct and that +// environment storage parameters will also be included +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}} + suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot") + + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + + config, err := 
Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log +// level to the same as the one provided in the yaml will not change the parsed Configuration struct +func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { + os.Setenv("REGISTRY_LOGLEVEL", "info") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the +// log level will override the value provided in the yaml document +func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { + suite.expectedConfig.Loglevel = "error" + + os.Setenv("REGISTRY_LOGLEVEL", "error") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInvalidLoglevel validates that the parser will fail to parse a +// configuration if the loglevel is malformed +func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { + invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory" + _, err := Parse(bytes.NewReader([]byte(invalidConfigYaml))) + c.Assert(err, NotNil) + + os.Setenv("REGISTRY_LOGLEVEL", "derp") + + _, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) + +} + +// TestParseWithDifferentEnvReporting validates that environment variables +// properly override reporting parameters +func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { + suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" + suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" + suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey" + suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME" + + os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey") + os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration +// version than the CurrentVersion +func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { + suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + _, err = Parse(bytes.NewReader(configBytes)) + c.Assert(err, NotNil) +} + +// TestParseExtraneousVars validates that environment variables referring to +// nonexistent variables don't cause side effects. 
+func (suite *ConfigSuite) TestParseExtraneousVars(c *C) { + suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" + + // A valid environment variable + os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") + + // Environment variables which shouldn't set config items + os.Setenv("registry_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") + os.Setenv("REPORTING_NEWRELIC_NAME", "some NewRelic NAME") + os.Setenv("REGISTRY_DUCKS", "quack") + os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseEnvVarImplicitMaps validates that environment variables can set +// values in maps that don't already exist. +func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *C) { + readonly := make(map[string]interface{}) + readonly["enabled"] = true + + maintenance := make(map[string]interface{}) + maintenance["readonly"] = readonly + + suite.expectedConfig.Storage["maintenance"] = maintenance + + os.Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a +// string over existing map fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeMap(c *C) { + os.Setenv("REGISTRY_STORAGE_S3", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvWrongTypeStruct validates that incorrectly attempting to +// unmarshal a string into a struct fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeStruct(c *C) { + os.Setenv("REGISTRY_STORAGE_LOG", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvWrongTypeSlice validates that incorrectly attempting to +// unmarshal a string into a slice fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeSlice(c *C) { + os.Setenv("REGISTRY_LOG_HOOKS", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvMany tests several environment variable overrides. +// The result is not checked - the goal of this test is to detect panics +// from misuse of reflection. 
+func (suite *ConfigSuite) TestParseEnvMany(c *C) { + os.Setenv("REGISTRY_VERSION", "0.1") + os.Setenv("REGISTRY_LOG_LEVEL", "debug") + os.Setenv("REGISTRY_LOG_FORMATTER", "json") + os.Setenv("REGISTRY_LOG_HOOKS", "json") + os.Setenv("REGISTRY_LOG_FIELDS", "abc: xyz") + os.Setenv("REGISTRY_LOG_HOOKS", "- type: asdf") + os.Setenv("REGISTRY_LOGLEVEL", "debug") + os.Setenv("REGISTRY_STORAGE", "s3") + os.Setenv("REGISTRY_AUTH_PARAMS", "param1: value1") + os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2") + os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) +} + +func checkStructs(c *C, t reflect.Type, structsChecked map[string]struct{}) { + for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return + } + if _, present := structsChecked[t.String()]; present { + // Already checked this type + return + } + + structsChecked[t.String()] = struct{}{} + + byUpperCase := make(map[string]int) + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + + // Check that the yaml tag does not contain an _. + yamlTag := sf.Tag.Get("yaml") + if strings.Contains(yamlTag, "_") { + c.Fatalf("yaml field name includes _ character: %s", yamlTag) + } + upper := strings.ToUpper(sf.Name) + if _, present := byUpperCase[upper]; present { + c.Fatalf("field name collision in configuration object: %s", sf.Name) + } + byUpperCase[upper] = i + + checkStructs(c, sf.Type, structsChecked) + } +} + +// TestValidateConfigStruct makes sure that the config struct has no members +// with yaml tags that would be ambiguous to the environment variable parser. +func (suite *ConfigSuite) TestValidateConfigStruct(c *C) { + structsChecked := make(map[string]struct{}) + checkStructs(c, reflect.TypeOf(Configuration{}), structsChecked) +} + +func copyConfig(config Configuration) *Configuration { + configCopy := new(Configuration) + + configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) + configCopy.Loglevel = config.Loglevel + configCopy.Log = config.Log + configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields)) + for k, v := range config.Log.Fields { + configCopy.Log.Fields[k] = v + } + + configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} + for k, v := range config.Storage.Parameters() { + configCopy.Storage.setParameter(k, v) + } + configCopy.Reporting = Reporting{ + Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, + NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose}, + } + + configCopy.Auth = Auth{config.Auth.Type(): Parameters{}} + for k, v := range config.Auth.Parameters() { + configCopy.Auth.setParameter(k, v) + } + + configCopy.Notifications = Notifications{Endpoints: []Endpoint{}} + for _, v := range config.Notifications.Endpoints { + configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v) + } + + configCopy.HTTP.Headers = make(http.Header) + for k, v := range config.HTTP.Headers { + configCopy.HTTP.Headers[k] = v + } + + return configCopy +} diff --git a/configuration/parser.go b/configuration/parser.go new file mode 100644 index 00000000..b46f7326 --- /dev/null +++ b/configuration/parser.go @@ -0,0 +1,283 @@ +package configuration + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + 
+ "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +// Version is a major/minor version pair of the form Major.Minor +// Major version upgrades indicate structure or type changes +// Minor version upgrades should be strictly additive +type Version string + +// MajorMinorVersion constructs a Version from its Major and Minor components +func MajorMinorVersion(major, minor uint) Version { + return Version(fmt.Sprintf("%d.%d", major, minor)) +} + +func (version Version) major() (uint, error) { + majorPart := strings.Split(string(version), ".")[0] + major, err := strconv.ParseUint(majorPart, 10, 0) + return uint(major), err +} + +// Major returns the major version portion of a Version +func (version Version) Major() uint { + major, _ := version.major() + return major +} + +func (version Version) minor() (uint, error) { + minorPart := strings.Split(string(version), ".")[1] + minor, err := strconv.ParseUint(minorPart, 10, 0) + return uint(minor), err +} + +// Minor returns the minor version portion of a Version +func (version Version) Minor() uint { + minor, _ := version.minor() + return minor +} + +// VersionedParseInfo defines how a specific version of a configuration should +// be parsed into the current version +type VersionedParseInfo struct { + // Version is the version which this parsing information relates to + Version Version + // ParseAs defines the type which a configuration file of this version + // should be parsed into + ParseAs reflect.Type + // ConversionFunc defines a method for converting the parsed configuration + // (of type ParseAs) into the current configuration version + // Note: this method signature is very unclear with the absence of generics + ConversionFunc func(interface{}) (interface{}, error) +} + +type envVar struct { + name string + value string +} + +type envVars []envVar + +func (a envVars) Len() int { return len(a) } +func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name } + +// Parser can be used to parse a configuration file and environment of a defined +// version into a unified output structure +type Parser struct { + prefix string + mapping map[Version]VersionedParseInfo + env envVars +} + +// NewParser returns a *Parser with the given environment prefix which handles +// versioned configurations which match the given parseInfos +func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { + p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)} + + for _, parseInfo := range parseInfos { + p.mapping[parseInfo.Version] = parseInfo + } + + for _, env := range os.Environ() { + envParts := strings.SplitN(env, "=", 2) + p.env = append(p.env, envVar{envParts[0], envParts[1]}) + } + + // We must sort the environment variables lexically by name so that + // more specific variables are applied before less specific ones + // (i.e. REGISTRY_STORAGE before + // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a + // lot simpler and easier to get right than unmarshalling map entries + // into temporaries and merging with the existing entry. 
+ sort.Sort(p.env) + + return &p +} + +// Parse reads in the given []byte and environment and writes the resulting +// configuration into the input v +// +// Environment variables may be used to override configuration parameters other +// than version, following the scheme below: +// v.Abc may be replaced by the value of PREFIX_ABC, +// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth +func (p *Parser) Parse(in []byte, v interface{}) error { + var versionedStruct struct { + Version Version + } + + if err := yaml.Unmarshal(in, &versionedStruct); err != nil { + return err + } + + parseInfo, ok := p.mapping[versionedStruct.Version] + if !ok { + return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) + } + + parseAs := reflect.New(parseInfo.ParseAs) + err := yaml.Unmarshal(in, parseAs.Interface()) + if err != nil { + return err + } + + for _, envVar := range p.env { + pathStr := envVar.name + if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") { + path := strings.Split(pathStr, "_") + + err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value) + if err != nil { + return err + } + } + } + + c, err := parseInfo.ConversionFunc(parseAs.Interface()) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) + return nil +} + +// overwriteFields replaces configuration values with alternate values specified +// through the environment. Precondition: an empty path slice must never be +// passed in. +func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + panic("encountered nil pointer while handling environment variable " + fullpath) + } + v = reflect.Indirect(v) + } + switch v.Kind() { + case reflect.Struct: + return p.overwriteStruct(v, fullpath, path, payload) + case reflect.Map: + return p.overwriteMap(v, fullpath, path, payload) + case reflect.Interface: + if v.NumMethod() == 0 { + if !v.IsNil() { + return p.overwriteFields(v.Elem(), fullpath, path, payload) + } + // Interface was empty; create an implicit map + var template map[string]interface{} + wrappedV := reflect.MakeMap(reflect.TypeOf(template)) + v.Set(wrappedV) + return p.overwriteMap(wrappedV, fullpath, path, payload) + } + } + return nil +} + +func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error { + // Generate case-insensitive map of struct fields + byUpperCase := make(map[string]int) + for i := 0; i < v.NumField(); i++ { + sf := v.Type().Field(i) + upper := strings.ToUpper(sf.Name) + if _, present := byUpperCase[upper]; present { + panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) + } + byUpperCase[upper] = i + } + + fieldIndex, present := byUpperCase[path[0]] + if !present { + logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath) + return nil + } + field := v.Field(fieldIndex) + sf := v.Type().Field(fieldIndex) + + if len(path) == 1 { + // Env var specifies this field directly + fieldVal := reflect.New(sf.Type) + err := yaml.Unmarshal([]byte(payload), fieldVal.Interface()) + if err != nil { + return err + } + field.Set(reflect.Indirect(fieldVal)) + return nil + } + + // If the field is nil, must create an object + switch sf.Type.Kind() { + case reflect.Map: + if field.IsNil() { + field.Set(reflect.MakeMap(sf.Type)) + } + case reflect.Ptr: + if field.IsNil() { + field.Set(reflect.New(sf.Type)) + } + } + + err := p.overwriteFields(field, 
fullpath, path[1:], payload) + if err != nil { + return err + } + + return nil +} + +func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error { + if m.Type().Key().Kind() != reflect.String { + // non-string keys unsupported + logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath) + return nil + } + + if len(path) > 1 { + // If a matching key exists, get its value and continue the + // overwriting process. + for _, k := range m.MapKeys() { + if strings.ToUpper(k.String()) == path[0] { + mapValue := m.MapIndex(k) + // If the existing value is nil, we want to + // recreate it instead of using this value. + if (mapValue.Kind() == reflect.Ptr || + mapValue.Kind() == reflect.Interface || + mapValue.Kind() == reflect.Map) && + mapValue.IsNil() { + break + } + return p.overwriteFields(mapValue, fullpath, path[1:], payload) + } + } + } + + // (Re)create this key + var mapValue reflect.Value + if m.Type().Elem().Kind() == reflect.Map { + mapValue = reflect.MakeMap(m.Type().Elem()) + } else { + mapValue = reflect.New(m.Type().Elem()) + } + if len(path) > 1 { + err := p.overwriteFields(mapValue, fullpath, path[1:], payload) + if err != nil { + return err + } + } else { + err := yaml.Unmarshal([]byte(payload), mapValue.Interface()) + if err != nil { + return err + } + } + + m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue)) + + return nil +} diff --git a/context/context.go b/context/context.go new file mode 100644 index 00000000..ab686546 --- /dev/null +++ b/context/context.go @@ -0,0 +1,73 @@ +package context + +import ( + "context" + "sync" + + "github.com/docker/distribution/uuid" +) + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + context.Context + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + ic.once.Do(func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. For various reasons random could not be available + // https://github.com/docker/distribution/issues/782 + ic.id = uuid.Generate().String() + }) + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() context.Context { + return background +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. 
+	for k, v := range m {
+		mo[k] = v
+	}
+
+	return stringMapContext{
+		Context: ctx,
+		m:       mo,
+	}
+}
+
+func (smc stringMapContext) Value(key interface{}) interface{} {
+	if ks, ok := key.(string); ok {
+		if v, ok := smc.m[ks]; ok {
+			return v
+		}
+	}
+
+	return smc.Context.Value(key)
+}
diff --git a/context/doc.go b/context/doc.go
new file mode 100644
index 00000000..0c631a9c
--- /dev/null
+++ b/context/doc.go
@@ -0,0 +1,88 @@
+// Package context provides several utilities for working with
+// Go's context in http requests. Primarily, the focus is on logging relevant
+// request information but this package is not limited to that purpose.
+//
+// The easiest way to get started is to get the background context:
+//
+//	ctx := context.Background()
+//
+// The returned context should be passed around your application and be the
+// root of all other context instances. If the application has a version, this
+// line should be called before anything else:
+//
+//	ctx := context.WithVersion(context.Background(), version)
+//
+// The above will store the version in the context and will be available to
+// the logger.
+//
+// Logging
+//
+// The most useful aspect of this package is GetLogger. This function takes
+// any context.Context interface and returns the current logger from the
+// context. Canonical usage looks like this:
+//
+//	GetLogger(ctx).Infof("something interesting happened")
+//
+// GetLogger also takes optional key arguments. The keys will be looked up in
+// the context and reported with the logger. The following example would
+// return a logger that prints the version with each log message:
+//
+//	ctx := context.WithValue(context.Background(), "version", version)
+//	GetLogger(ctx, "version").Infof("this log message has a version field")
+//
+// The above would print out a log message like this:
+//
+//	INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
+//
+// When used with WithLogger, we gain the ability to decorate the context with
+// loggers that have information from disparate parts of the call stack.
+// Following from the version example, we can build a new context with the
+// configured logger such that we always print the version field:
+//
+//	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
+//
+// Since the logger has been pushed to the context, we can now get the version
+// field for free with our log messages. Future calls to GetLogger on the new
+// context will have the version field:
+//
+//	GetLogger(ctx).Infof("this log message has a version field")
+//
+// This becomes more powerful when we start stacking loggers. Let's say we
+// have the version logger from above but also want a request id. Using the
+// context above, in our request scoped function, we place another logger in
+// the context:
+//
+//	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
+//	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
+//
+// When GetLogger is called on the new context, "http.request.id" will be
+// included as a logger field, along with the original "version" field:
+//
+//	INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
+//
+// Note that this only affects the new context; the previous context, with the
+// version field, can be used independently. Put another way, the new logger,
+// added to the request context, is unique to that context and can have
+// request scoped variables.
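+//
+// For example (an illustrative sketch only; "requestCtx" is a hypothetical
+// name for the context carrying the request scoped logger):
+//
+//	GetLogger(ctx).Infof("prints only the version field")
+//	GetLogger(requestCtx).Infof("prints the version and http.request.id fields")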
+// +// HTTP Requests +// +// This package also contains several methods for working with http requests. +// The concepts are very similar to those described above. We simply place the +// request in the context using WithRequest. This makes the request variables +// available. GetRequestLogger can then be called to get request specific +// variables in a log line: +// +// ctx = WithRequest(ctx, req) +// GetRequestLogger(ctx).Infof("request variables") +// +// Like above, if we want to include the request data in all log messages in +// the context, we push the logger to a new context and use that one: +// +// ctx = WithLogger(ctx, GetRequestLogger(ctx)) +// +// The concept is fairly powerful and ensures that calls throughout the stack +// can be traced in log messages. Using the fields like "http.request.id", one +// can analyze call flow for a particular request with a simple grep of the +// logs. +package context diff --git a/context/http.go b/context/http.go new file mode 100644 index 00000000..b7447800 --- /dev/null +++ b/context/http.go @@ -0,0 +1,367 @@ +package context + +import ( + "context" + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx context.Context, r *http.Request) context.Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. 
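+//
+// A minimal usage sketch:
+//
+//	req, err := GetRequest(ctx)
+//	if err != nil {
+//		// no http request was attached to this context
+//	}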
+func GetRequest(ctx context.Context) (*http.Request, error) {
+	if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
+		return r, nil
+	}
+	return nil, ErrNoRequestContext
+}
+
+// GetRequestID attempts to resolve the current request id, if possible. An
+// empty string is returned if it is not available on the context.
+func GetRequestID(ctx context.Context) string {
+	return GetStringValue(ctx, "http.request.id")
+}
+
+// WithResponseWriter returns a new context and response writer that makes
+// interesting response statistics available within the context.
+func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) {
+	if closeNotifier, ok := w.(http.CloseNotifier); ok {
+		irwCN := &instrumentedResponseWriterCN{
+			instrumentedResponseWriter: instrumentedResponseWriter{
+				ResponseWriter: w,
+				Context:        ctx,
+			},
+			CloseNotifier: closeNotifier,
+		}
+
+		return irwCN, irwCN
+	}
+
+	irw := instrumentedResponseWriter{
+		ResponseWriter: w,
+		Context:        ctx,
+	}
+	return &irw, &irw
+}
+
+// GetResponseWriter returns the http.ResponseWriter from the provided
+// context. If not present, ErrNoResponseWriterContext is returned. The
+// returned instance provides instrumentation in the context.
+func GetResponseWriter(ctx context.Context) (http.ResponseWriter, error) {
+	v := ctx.Value("http.response")
+
+	rw, ok := v.(http.ResponseWriter)
+	if !ok || rw == nil {
+		return nil, ErrNoResponseWriterContext
+	}
+
+	return rw, nil
+}
+
+// getVarsFromRequest lets us change the request vars implementation for
+// testing and maybe future changes.
+var getVarsFromRequest = mux.Vars
+
+// WithVars extracts gorilla/mux vars and makes them available on the returned
+// context. Variables are available at keys with the prefix "vars.". For
+// example, if looking for the variable "name", it can be accessed as
+// "vars.name". Implementations that are accessing values need not know that
+// the underlying context is implemented with gorilla/mux vars.
+func WithVars(ctx context.Context, r *http.Request) context.Context {
+	return &muxVarsContext{
+		Context: ctx,
+		vars:    getVarsFromRequest(r),
+	}
+}
+
+// GetRequestLogger returns a logger that contains fields from the request in
+// the current context. If the request is not available in the context, no
+// fields will display. Request loggers can safely be pushed onto the context.
+func GetRequestLogger(ctx context.Context) Logger {
+	return GetLogger(ctx,
+		"http.request.id",
+		"http.request.method",
+		"http.request.host",
+		"http.request.uri",
+		"http.request.referer",
+		"http.request.useragent",
+		"http.request.remoteaddr",
+		"http.request.contenttype")
+}
+
+// GetResponseLogger reads the current response stats and builds a logger.
+// Because the values are read at call time, pushing a logger returned from
+// this function on the context will lead to missing or invalid data. Only
+// call this at the end of a request, after the response has been written.
+func GetResponseLogger(ctx context.Context) Logger {
+	l := getLogrusLogger(ctx,
+		"http.response.written",
+		"http.response.status",
+		"http.response.contenttype")
+
+	duration := Since(ctx, "http.request.startedat")
+
+	if duration > 0 {
+		l = l.WithField("http.response.duration", duration.String())
+	}
+
+	return l
+}
+
+// httpRequestContext makes information about a request available to context.
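+// For example, ctx.Value("http.request.method") resolves to r.Method, and
+// ctx.Value("http.request.startedat") resolves to the time at which the
+// request context was created (see the Value method below).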
+type httpRequestContext struct {
+	context.Context
+
+	startedAt time.Time
+	id        string
+	r         *http.Request
+}
+
+// Value returns a keyed element of the request for use in the context. To get
+// the request itself, query "http.request". For other components, access them
+// as "http.request.<component>". For example, "http.request.uri" resolves to
+// r.RequestURI.
+func (ctx *httpRequestContext) Value(key interface{}) interface{} {
+	if keyStr, ok := key.(string); ok {
+		if keyStr == "http.request" {
+			return ctx.r
+		}
+
+		if !strings.HasPrefix(keyStr, "http.request.") {
+			goto fallback
+		}
+
+		parts := strings.Split(keyStr, ".")
+
+		if len(parts) != 3 {
+			goto fallback
+		}
+
+		switch parts[2] {
+		case "uri":
+			return ctx.r.RequestURI
+		case "remoteaddr":
+			return RemoteAddr(ctx.r)
+		case "method":
+			return ctx.r.Method
+		case "host":
+			return ctx.r.Host
+		case "referer":
+			referer := ctx.r.Referer()
+			if referer != "" {
+				return referer
+			}
+		case "useragent":
+			return ctx.r.UserAgent()
+		case "id":
+			return ctx.id
+		case "startedat":
+			return ctx.startedAt
+		case "contenttype":
+			ct := ctx.r.Header.Get("Content-Type")
+			if ct != "" {
+				return ct
+			}
+		}
+	}
+
+fallback:
+	return ctx.Context.Value(key)
+}
+
+type muxVarsContext struct {
+	context.Context
+	vars map[string]string
+}
+
+func (ctx *muxVarsContext) Value(key interface{}) interface{} {
+	if keyStr, ok := key.(string); ok {
+		if keyStr == "vars" {
+			return ctx.vars
+		}
+
+		if strings.HasPrefix(keyStr, "vars.") {
+			keyStr = strings.TrimPrefix(keyStr, "vars.")
+		}
+
+		if v, ok := ctx.vars[keyStr]; ok {
+			return v
+		}
+	}
+
+	return ctx.Context.Value(key)
+}
+
+// instrumentedResponseWriterCN provides response writer information in a
+// context. It implements http.CloseNotifier so that users can detect
+// early disconnects.
+type instrumentedResponseWriterCN struct {
+	instrumentedResponseWriter
+	http.CloseNotifier
+}
+
+// instrumentedResponseWriter provides response writer information in a
+// context. This variant is only used in the case where CloseNotifier is not
+// implemented by the parent ResponseWriter.
+type instrumentedResponseWriter struct {
+	http.ResponseWriter
+	context.Context
+
+	mu      sync.Mutex
+	status  int
+	written int64
+}
+
+func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
+	n, err = irw.ResponseWriter.Write(p)
+
+	irw.mu.Lock()
+	irw.written += int64(n)
+
+	// Guess the likely status if not set.
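+	// (A handler that writes without calling WriteHeader gets an implicit
+	// 200 OK from net/http, so recording StatusOK here mirrors what was
+	// actually sent on the wire.)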
+ if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} + +func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + } + + return irw.instrumentedResponseWriter.Value(key) +} diff --git a/context/http_test.go b/context/http_test.go new file mode 100644 index 00000000..3d4b3c8e --- /dev/null +++ b/context/http_test.go @@ -0,0 +1,285 @@ +package context + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "reflect" + "testing" + "time" +) + +func TestWithRequest(t *testing.T) { + var req http.Request + + start := time.Now() + req.Method = "GET" + req.Host = "example.com" + req.RequestURI = "/test-test" + req.Header = make(http.Header) + req.Header.Set("Referer", "foo.com/referer") + req.Header.Set("User-Agent", "test/0.1") + + ctx := WithRequest(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "http.request", + expected: &req, + }, + { + key: "http.request.id", + }, + { + key: "http.request.method", + expected: req.Method, + }, + { + key: "http.request.host", + expected: req.Host, + }, + { + key: "http.request.uri", + expected: req.RequestURI, + }, + { + key: "http.request.referer", + expected: req.Referer(), + }, + { + key: "http.request.useragent", + expected: req.UserAgent(), + }, + { + key: "http.request.remoteaddr", + expected: req.RemoteAddr, + }, + { + key: "http.request.startedat", + }, + } { + v := ctx.Value(testcase.key) + + if v == nil { + t.Fatalf("value not found for %q", testcase.key) + } + + if testcase.expected != nil && v != testcase.expected { + t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) + } + + // Key specific checks! 
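+		// Generated fields such as "http.request.id" and
+		// "http.request.startedat" differ on every run, so the cases below
+		// only check their types and plausible ranges, not exact values.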
+		switch testcase.key {
+		case "http.request.id":
+			if _, ok := v.(string); !ok {
+				t.Fatalf("request id not a string: %v", v)
+			}
+		case "http.request.startedat":
+			vt, ok := v.(time.Time)
+			if !ok {
+				t.Fatalf("value not a time: %v", v)
+			}
+
+			now := time.Now()
+			if vt.After(now) {
+				t.Fatalf("time generated too late: %v > %v", vt, now)
+			}
+
+			if vt.Before(start) {
+				t.Fatalf("time generated too early: %v < %v", vt, start)
+			}
+		}
+	}
+}
+
+type testResponseWriter struct {
+	flushed bool
+	status  int
+	written int64
+	header  http.Header
+}
+
+func (trw *testResponseWriter) Header() http.Header {
+	if trw.header == nil {
+		trw.header = make(http.Header)
+	}
+
+	return trw.header
+}
+
+func (trw *testResponseWriter) Write(p []byte) (n int, err error) {
+	if trw.status == 0 {
+		trw.status = http.StatusOK
+	}
+
+	n = len(p)
+	trw.written += int64(n)
+	return
+}
+
+func (trw *testResponseWriter) WriteHeader(status int) {
+	trw.status = status
+}
+
+func (trw *testResponseWriter) Flush() {
+	trw.flushed = true
+}
+
+func TestWithResponseWriter(t *testing.T) {
+	trw := testResponseWriter{}
+	ctx, rw := WithResponseWriter(Background(), &trw)
+
+	if ctx.Value("http.response") != rw {
+		t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw)
+	}
+
+	grw, err := GetResponseWriter(ctx)
+	if err != nil {
+		t.Fatalf("error getting response writer: %v", err)
+	}
+
+	if grw != rw {
+		t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw)
+	}
+
+	if ctx.Value("http.response.status") != 0 {
+		t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status"))
+	}
+
+	if n, err := rw.Write(make([]byte, 1024)); err != nil {
+		t.Fatalf("unexpected error writing: %v", err)
+	} else if n != 1024 {
+		t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024)
+	}
+
+	if ctx.Value("http.response.status") != http.StatusOK {
+		t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK)
+	}
+
+	if ctx.Value("http.response.written") != int64(1024) {
+		t.Fatalf("unexpected number of reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024)
+	}
+
+	// Make sure flush propagates
+	rw.(http.Flusher).Flush()
+
+	if !trw.flushed {
+		t.Fatalf("response writer not flushed")
+	}
+
+	// Write another status and make sure context is correct. This normally
+	// wouldn't work except in this contrived testcase.
+	rw.WriteHeader(http.StatusBadRequest)
+
+	if ctx.Value("http.response.status") != http.StatusBadRequest {
+		t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest)
+	}
+}
+
+func TestWithVars(t *testing.T) {
+	var req http.Request
+	vars := map[string]string{
+		"foo": "asdf",
+		"bar": "qwer",
+	}
+
+	getVarsFromRequest = func(r *http.Request) map[string]string {
+		if r != &req {
+			t.Fatalf("unexpected request: %v != %v", r, req)
+		}
+
+		return vars
+	}
+
+	ctx := WithVars(Background(), &req)
+	for _, testcase := range []struct {
+		key      string
+		expected interface{}
+	}{
+		{
+			key:      "vars",
+			expected: vars,
+		},
+		{
+			key:      "vars.foo",
+			expected: "asdf",
+		},
+		{
+			key:      "vars.bar",
+			expected: "qwer",
+		},
+	} {
+		v := ctx.Value(testcase.key)
+
+		if !reflect.DeepEqual(v, testcase.expected) {
+			t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected)
+		}
+	}
+}
+
+// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test
+// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten
+// at the transport layer to 127.0.0.1:<port>. However, as the X-Forwarded-For header
+// just contains the IP address, it is different enough for testing.
+func TestRemoteAddr(t *testing.T) {
+	var expectedRemote string
+	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		if r.RemoteAddr == expectedRemote {
+			t.Errorf("Unexpected matching remote addresses")
+		}
+
+		actualRemote := RemoteAddr(r)
+		if expectedRemote != actualRemote {
+			t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote)
+		}
+
+		w.WriteHeader(200)
+	}))
+
+	defer backend.Close()
+	backendURL, err := url.Parse(backend.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	proxy := httputil.NewSingleHostReverseProxy(backendURL)
+	frontend := httptest.NewServer(proxy)
+	defer frontend.Close()
+
+	// X-Forwarded-For set by proxy
+	expectedRemote = "127.0.0.1"
+	proxyReq, err := http.NewRequest("GET", frontend.URL, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = http.DefaultClient.Do(proxyReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// RemoteAddr in X-Real-Ip
+	getReq, err := http.NewRequest("GET", backend.URL, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedRemote = "1.2.3.4"
+	getReq.Header["X-Real-ip"] = []string{expectedRemote}
+	_, err = http.DefaultClient.Do(getReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Valid X-Real-Ip and invalid X-Forwarded-For
+	getReq.Header["X-forwarded-for"] = []string{"1.2.3"}
+	_, err = http.DefaultClient.Do(getReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/context/logger.go b/context/logger.go
new file mode 100644
index 00000000..3e5b81bb
--- /dev/null
+++ b/context/logger.go
@@ -0,0 +1,121 @@
+package context
+
+import (
+	"context"
+	"fmt"
+	"runtime"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Logger provides a leveled-logging interface.
+type Logger interface {
+	// standard logger methods
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+
+	Panic(args ...interface{})
+	Panicf(format string, args ...interface{})
+	Panicln(args ...interface{})
+
+	// Leveled methods, from logrus
+	Debug(args ...interface{})
+	Debugf(format string, args ...interface{})
+	Debugln(args ...interface{})
+
+	Error(args ...interface{})
+	Errorf(format string, args ...interface{})
+	Errorln(args ...interface{})
+
+	Info(args ...interface{})
+	Infof(format string, args ...interface{})
+	Infoln(args ...interface{})
+
+	Warn(args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warnln(args ...interface{})
+
+	WithError(err error) *logrus.Entry
+}
+
+type loggerKey struct{}
+
+// WithLogger creates a new context with provided logger.
+func WithLogger(ctx context.Context, logger Logger) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// GetLoggerWithField returns a logger instance with the specified field key
+// and value without affecting the context. Extra specified keys will be
+// resolved from the context.
+func GetLoggerWithField(ctx context.Context, key, value interface{}, keys ...interface{}) Logger {
+	return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
+}
+
+// GetLoggerWithFields returns a logger instance with the specified fields
+// without affecting the context. Extra specified keys will be resolved from
+// the context.
+func GetLoggerWithFields(ctx context.Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
+	// must convert from interface{} -> interface{} to string -> interface{} for logrus.
+	lfields := make(logrus.Fields, len(fields))
+	for key, value := range fields {
+		lfields[fmt.Sprint(key)] = value
+	}
+
+	return getLogrusLogger(ctx, keys...).WithFields(lfields)
+}
+
+// GetLogger returns the logger from the current context, if present. If one
+// or more keys are provided, they will be resolved on the context and
+// included in the logger. While context.Value takes an interface, any key
+// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
+// a logging key field. If context keys are integer constants, for example,
+// it's recommended that a String method is implemented.
+func GetLogger(ctx context.Context, keys ...interface{}) Logger {
+	return getLogrusLogger(ctx, keys...)
+}
+
+// getLogrusLogger returns the logrus logger for the context. If one or more
+// keys are provided, they will be resolved on the context and included in the
+// logger. Only use this function if specific logrus functionality is
+// required.
+func getLogrusLogger(ctx context.Context, keys ...interface{}) *logrus.Entry {
+	var logger *logrus.Entry
+
+	// Get a logger, if it is present.
+	loggerInterface := ctx.Value(loggerKey{})
+	if loggerInterface != nil {
+		if lgr, ok := loggerInterface.(*logrus.Entry); ok {
+			logger = lgr
+		}
+	}
+
+	if logger == nil {
+		fields := logrus.Fields{}
+
+		// Fill in the instance id, if we have it.
+		instanceID := ctx.Value("instance.id")
+		if instanceID != nil {
+			fields["instance.id"] = instanceID
+		}
+
+		fields["go.version"] = runtime.Version()
+		// If no logger is found, just return the standard logger.
+		logger = logrus.StandardLogger().WithFields(fields)
+	}
+
+	fields := logrus.Fields{}
+	for _, key := range keys {
+		v := ctx.Value(key)
+		if v != nil {
+			fields[fmt.Sprint(key)] = v
+		}
+	}
+
+	return logger.WithFields(fields)
+}
diff --git a/context/trace.go b/context/trace.go
new file mode 100644
index 00000000..5b88ddaf
--- /dev/null
+++ b/context/trace.go
@@ -0,0 +1,105 @@
+package context
+
+import (
+	"context"
+	"runtime"
+	"time"
+
+	"github.com/docker/distribution/uuid"
+)
+
+// WithTrace allocates a traced timing span in a new context. This allows a
+// caller to track the time between calling WithTrace and the returned done
+// function. When the done function is called, a log message is emitted with a
+// "trace.duration" field, corresponding to the elapsed time and a
+// "trace.func" field, corresponding to the function that called WithTrace.
+//
+// The logging keys "trace.id" and "trace.parent.id" are provided to implement
+// Dapper-like tracing. This function should be complemented with a WithSpan
+// method that could be used for tracing distributed RPC calls.
+//
+// The main benefit of this function is to post-process log messages or
+// intercept them in a hook to provide timing data. Trace ids and parent ids
+// can also be linked to provide call tracing, if so required.
+//
+// Here is an example of the usage:
+//
+//	func timedOperation(ctx Context) {
+//		ctx, done := WithTrace(ctx)
+//		defer done("this will be the log message")
+//		// ... function body ...
+// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... +// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx context.Context) (context.Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + context.Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/context/trace_test.go b/context/trace_test.go new file mode 100644 index 00000000..f973f9a4 --- /dev/null +++ b/context/trace_test.go @@ -0,0 +1,85 @@ +package context + +import ( + "context" + "runtime" + "testing" + "time" +) + +// TestWithTrace ensures that tracing has the expected values in the context. +func TestWithTrace(t *testing.T) { + pc, file, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + + base := []valueTestCase{ + { + key: "trace.id", + notnilorempty: true, + }, + + { + key: "trace.file", + expected: file, + notnilorempty: true, + }, + { + key: "trace.line", + notnilorempty: true, + }, + { + key: "trace.start", + notnilorempty: true, + }, + } + + ctx, done := WithTrace(Background()) + defer done("this will be emitted at end of test") + + checkContextForValues(ctx, t, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + })) + + traced := func() { + parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. + + pc, _, _, _ := runtime.Caller(0) // get current caller. 
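+		// Resolve this nested function's name; the subordinate trace
+		// created below should report it via the "trace.func" key.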
+		f := runtime.FuncForPC(pc)
+		ctx, done := WithTrace(ctx)
+		defer done("this should be subordinate to the other trace")
+		time.Sleep(time.Second)
+		checkContextForValues(ctx, t, append(base, valueTestCase{
+			key:      "trace.func",
+			expected: f.Name(),
+		}, valueTestCase{
+			key:      "trace.parent.id",
+			expected: parentID,
+		}))
+	}
+	traced()
+
+	time.Sleep(time.Second)
+}
+
+type valueTestCase struct {
+	key           string
+	expected      interface{}
+	notnilorempty bool // just check not empty/not nil
+}
+
+func checkContextForValues(ctx context.Context, t *testing.T, values []valueTestCase) {
+	for _, testcase := range values {
+		v := ctx.Value(testcase.key)
+		if testcase.notnilorempty {
+			if v == nil || v == "" {
+				t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v)
+			}
+			continue
+		}
+
+		if v != testcase.expected {
+			t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected)
+		}
+	}
+}
diff --git a/context/util.go b/context/util.go
new file mode 100644
index 00000000..c462e756
--- /dev/null
+++ b/context/util.go
@@ -0,0 +1,25 @@
+package context
+
+import (
+	"context"
+	"time"
+)
+
+// Since looks up key, which should be a time.Time, and returns the duration
+// since that time. If the key is not found, the value returned will be zero.
+// This is helpful when inferring metrics related to context execution times.
+func Since(ctx context.Context, key interface{}) time.Duration {
+	if startedAt, ok := ctx.Value(key).(time.Time); ok {
+		return time.Since(startedAt)
+	}
+	return 0
+}
+
+// GetStringValue returns a string value from the context. The empty string
+// will be returned if not found.
+func GetStringValue(ctx context.Context, key interface{}) (value string) {
+	if valuev, ok := ctx.Value(key).(string); ok {
+		value = valuev
+	}
+	return value
+}
diff --git a/context/version.go b/context/version.go
new file mode 100644
index 00000000..97cf9d66
--- /dev/null
+++ b/context/version.go
@@ -0,0 +1,22 @@
+package context
+
+import "context"
+
+type versionKey struct{}
+
+func (versionKey) String() string { return "version" }
+
+// WithVersion stores the application version in the context. The new context
+// gets a logger to ensure log messages are marked with the application
+// version.
+func WithVersion(ctx context.Context, version string) context.Context {
+	ctx = context.WithValue(ctx, versionKey{}, version)
+	// push a new logger onto the stack
+	return WithLogger(ctx, GetLogger(ctx, versionKey{}))
+}
+
+// GetVersion returns the application version from the context. An empty
+// string may be returned if the version was not set on the context.
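+//
+// For example (a sketch; the version string is arbitrary):
+//
+//	ctx := WithVersion(Background(), "2.7.0")
+//	GetVersion(ctx) // returns "2.7.0"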
+func GetVersion(ctx context.Context) string {
+	return GetStringValue(ctx, versionKey{})
+}
diff --git a/context/version_test.go b/context/version_test.go
new file mode 100644
index 00000000..b8165269
--- /dev/null
+++ b/context/version_test.go
@@ -0,0 +1,19 @@
+package context
+
+import "testing"
+
+func TestVersionContext(t *testing.T) {
+	ctx := Background()
+
+	if GetVersion(ctx) != "" {
+		t.Fatalf("context should not yet have a version")
+	}
+
+	expected := "2.1-whatever"
+	ctx = WithVersion(ctx, expected)
+	version := GetVersion(ctx)
+
+	if version != expected {
+		t.Fatalf("version was not set: %q != %q", version, expected)
+	}
+}
diff --git a/contrib/apache/README.MD b/contrib/apache/README.MD
new file mode 100644
index 00000000..29f6bae1
--- /dev/null
+++ b/contrib/apache/README.MD
@@ -0,0 +1,36 @@
+# Apache HTTPd sample for Registry v1, v2 and mirror
+
+3 containers are involved:
+
+* Docker Registry v1 (registry 0.9.1)
+* Docker Registry v2 (registry 2.0.0)
+* Docker Registry v1 in mirror mode
+
+HTTP for the mirror and HTTPS for v1 & v2
+
+* http://registry.example.com proxies Docker Registry 1.0 in Mirror mode
+* https://registry.example.com proxies Docker Registry 1.0 or 2.0 in Hosting mode
+
+## 3 Docker containers should be started
+
+* Docker Registry 1.0 in Mirror mode : port 5001
+* Docker Registry 1.0 in Hosting mode : port 5000
+* Docker Registry 2.0 in Hosting mode : port 5002
+
+### Registry v1
+
+    docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1
+
+### Mirror
+
+    docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \
+     -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1
+
+### Registry v2
+
+    docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2
+
+# For Hosting mode access
+
+* users should have an account (valid-user) to be able to fetch images
+* only users using the docker-deployer account will be allowed to push images
diff --git a/contrib/apache/apache.conf b/contrib/apache/apache.conf
new file mode 100644
index 00000000..3300a7c0
--- /dev/null
+++ b/contrib/apache/apache.conf
@@ -0,0 +1,127 @@
+#
+# Sample Apache 2.x configuration where :
+#
+
+<VirtualHost *:80>
+
+  ServerName registry.example.com
+  ServerAlias www.registry.example.com
+
+  ProxyRequests     off
+  ProxyPreserveHost on
+
+  # no proxy for /error/ (Apache HTTPd errors messages)
+  ProxyPass /error/ !
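+  # (the "!" excludes /error/ from proxying so Apache can serve its own
+  # error documents)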
+
+  ProxyPass        /_ping http://localhost:5001/_ping
+  ProxyPassReverse /_ping http://localhost:5001/_ping
+
+  ProxyPass        /v1 http://localhost:5001/v1
+  ProxyPassReverse /v1 http://localhost:5001/v1
+
+  # Logs
+  ErrorLog ${APACHE_LOG_DIR}/mirror_error_log
+  CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog
+
+</VirtualHost>
+
+<VirtualHost *:443>
+
+  ServerName registry.example.com
+  ServerAlias www.registry.example.com
+
+  SSLEngine on
+  SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt
+  SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key
+
+  # Higher Strength SSL Ciphers
+  SSLProtocol all -SSLv2 -SSLv3 -TLSv1
+  SSLCipherSuite RC4-SHA:HIGH
+  SSLHonorCipherOrder on
+
+  # Logs
+  ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log
+  CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog
+
+  Header always set "Docker-Distribution-Api-Version" "registry/2.0"
+  Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0"
+  RequestHeader set X-Forwarded-Proto "https"
+
+  ProxyRequests     off
+  ProxyPreserveHost on
+
+  # no proxy for /error/ (Apache HTTPd errors messages)
+  ProxyPass /error/ !
+
+  #
+  # Registry v1
+  #
+
+  ProxyPass        /v1 http://localhost:5000/v1
+  ProxyPassReverse /v1 http://localhost:5000/v1
+
+  ProxyPass        /_ping http://localhost:5000/_ping
+  ProxyPassReverse /_ping http://localhost:5000/_ping
+
+  # Authentication required for push
+  <Location /v1>
+    Order deny,allow
+    Allow from all
+    AuthName "Registry Authentication"
+    AuthType basic
+    AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd"
+
+    # Read access to authenticated users
+    <Limit GET HEAD>
+      Require valid-user
+    </Limit>
+
+    # Write access to docker-deployer account only
+    <Limit POST PUT DELETE>
+      Require user docker-deployer
+    </Limit>
+
+  </Location>
+
+  # Allow ping to run unauthenticated.
+  <Location /v1/_ping>
+    Satisfy any
+    Allow from all
+  </Location>
+
+  # Allow ping to run unauthenticated.
+  <Location /v1/ping>
+    Satisfy any
+    Allow from all
+  </Location>
+
+  #
+  # Registry v2
+  #
+
+  ProxyPass        /v2 http://localhost:5002/v2
+  ProxyPassReverse /v2 http://localhost:5002/v2
+
+  <Location /v2>
+    Order deny,allow
+    Allow from all
+    AuthName "Registry Authentication"
+    AuthType basic
+    AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd"
+
+    # Read access to authenticated users
+    <Limit GET HEAD>
+      Require valid-user
+    </Limit>
+
+    # Write access to docker-deployer only
+    <Limit POST PUT DELETE>
+      Require user docker-deployer
+    </Limit>
+
+  </Location>
+
+</VirtualHost>
diff --git a/contrib/compose/README.md b/contrib/compose/README.md
new file mode 100644
index 00000000..45050b70
--- /dev/null
+++ b/contrib/compose/README.md
@@ -0,0 +1,147 @@
+# Docker Compose V1 + V2 registry
+
+This compose configuration configures a `v1` and `v2` registry behind an `nginx`
+proxy. By default, you can access the combined registry at `localhost:5000`.
+
+The configuration does not support pushing images to `v2` and pulling from `v1`.
+If a `docker` client has a version less than 1.6, Nginx will route its requests
+to the 1.0 registry. Requests from newer clients will route to the 2.0 registry.
+
+### Install Docker Compose
+
+1. Open a new terminal on the host with your `distribution` source.
+
+2. Get the `docker-compose` binary.
+
+        $ sudo wget https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` -O /usr/local/bin/docker-compose
+
+    This command installs the binary in the `/usr/local/bin` directory.
+
+3. Add executable permissions to the binary.
+
+        $ sudo chmod +x /usr/local/bin/docker-compose
+
+## Build and run with Compose
+
+1. In your terminal, navigate to the `distribution/contrib/compose` directory
+
+    This directory includes a single `docker-compose.yml` configuration.
+
+        nginx:
+          build: "nginx"
+          ports:
+            - "5000:5000"
+          links:
+            - registryv1:registryv1
+            - registryv2:registryv2
+        registryv1:
+          image: registry
+          ports:
+            - "5000"
+        registryv2:
+          build: "../../"
+          ports:
+            - "5000"
+
+    This configuration builds a new `nginx` image as specified by the
+    `nginx/Dockerfile` file. The 1.0 registry comes from Docker's official
+    public image. Finally, the registry 2.0 image is built from the
+    `distribution/Dockerfile` you've used previously.
+
+2. Get a registry 1.0 image.
+
+        $ docker pull registry:0.9.1
+
+    The Compose configuration looks for this image locally. If you don't do this
+    step, later steps can fail.
+
+3. Build the `nginx` and registry 2.0 images.
+
+        $ docker-compose build
+        registryv1 uses an image, skipping
+        Building registryv2...
+        Step 0 : FROM golang:1.4
+
+        ...
+
+        Removing intermediate container 9f5f5068c3f3
+        Step 4 : COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf
+         ---> 74acc70fa106
+        Removing intermediate container edb84c2b40cb
+        Successfully built 74acc70fa106
+
+    The command outputs its progress until it completes.
+
+4. Start your configuration with compose.
+
+        $ docker-compose up
+        Recreating compose_registryv1_1...
+        Recreating compose_registryv2_1...
+        Recreating compose_nginx_1...
+        Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1
+        ...
+
+5. In another terminal, display the running configuration.
+
+        $ docker ps
+        CONTAINER ID   IMAGE                       COMMAND                CREATED         STATUS         PORTS                                     NAMES
+        a81ad2557702   compose_nginx:latest        "nginx -g 'daemon of   8 minutes ago   Up 8 minutes   80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp   compose_nginx_1
+        0618437450dd   compose_registryv2:latest   "registry cmd/regist   8 minutes ago   Up 8 minutes   0.0.0.0:32777->5000/tcp                   compose_registryv2_1
+        aa82b1ed8e61   registry:latest             "docker-registry"      8 minutes ago   Up 8 minutes   0.0.0.0:32776->5000/tcp                   compose_registryv1_1
+
+### Explore a bit
+
+1. Check for TLS on your `nginx` server.
+
+        $ curl -v https://localhost:5000
+        * Rebuilt URL to: https://localhost:5000/
+        * Hostname was NOT found in DNS cache
+        *   Trying 127.0.0.1...
+        * Connected to localhost (127.0.0.1) port 5000 (#0)
+        * successfully set certificate verify locations:
+        *   CAfile: none
+          CApath: /etc/ssl/certs
+        * SSLv3, TLS handshake, Client hello (1):
+        * SSLv3, TLS handshake, Server hello (2):
+        * SSLv3, TLS handshake, CERT (11):
+        * SSLv3, TLS alert, Server hello (2):
+        * SSL certificate problem: self signed certificate
+        * Closing connection 0
+        curl: (60) SSL certificate problem: self signed certificate
+        More details here: http://curl.haxx.se/docs/sslcerts.html
+
+2. Tag the `v1` registry image.
+
+        $ docker tag registry:latest localhost:5000/registry_one:latest
+
+3. Push it to the localhost.
+
+        $ docker push localhost:5000/registry_one:latest
+
+    If you are using the 1.6 Docker client, this pushes the image to the `v2` registry.
+
+4. Use `curl` to list the image in the registry.
+
+        $ curl -v -X GET http://localhost:5000/v2/registry_one/tags/list
+        * Hostname was NOT found in DNS cache
+        *   Trying 127.0.0.1...
+        * Connected to localhost (127.0.0.1) port 32777 (#0)
+        > GET /v2/registry_one/tags/list HTTP/1.1
+        > User-Agent: curl/7.36.0
+        > Host: localhost:5000
+        > Accept: */*
+        >
+        < HTTP/1.1 200 OK
+        < Content-Type: application/json; charset=utf-8
+        < Docker-Distribution-Api-Version: registry/2.0
+        < Date: Tue, 14 Apr 2015 22:34:13 GMT
+        < Content-Length: 39
+        <
+        {"name":"registry_one","tags":["latest"]}
+        * Connection #0 to host localhost left intact
+
+    This example refers to the specific port assigned to the 2.0 registry. You saw
+    this port earlier, when you used `docker ps` to show your running containers.
diff --git a/contrib/compose/docker-compose.yml b/contrib/compose/docker-compose.yml
new file mode 100644
index 00000000..5cd04858
--- /dev/null
+++ b/contrib/compose/docker-compose.yml
@@ -0,0 +1,15 @@
+nginx:
+  build: "nginx"
+  ports:
+    - "5000:5000"
+  links:
+    - registryv1:registryv1
+    - registryv2:registryv2
+registryv1:
+  image: registry
+  ports:
+    - "5000"
+registryv2:
+  build: "../../"
+  ports:
+    - "5000"
diff --git a/contrib/compose/nginx/Dockerfile b/contrib/compose/nginx/Dockerfile
new file mode 100644
index 00000000..2b252ec7
--- /dev/null
+++ b/contrib/compose/nginx/Dockerfile
@@ -0,0 +1,6 @@
+FROM nginx:1.7
+
+COPY nginx.conf /etc/nginx/nginx.conf
+COPY registry.conf /etc/nginx/conf.d/registry.conf
+COPY docker-registry.conf /etc/nginx/docker-registry.conf
+COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf
diff --git a/contrib/compose/nginx/docker-registry-v2.conf b/contrib/compose/nginx/docker-registry-v2.conf
new file mode 100644
index 00000000..65c4d776
--- /dev/null
+++ b/contrib/compose/nginx/docker-registry-v2.conf
@@ -0,0 +1,6 @@
+proxy_pass http://docker-registry-v2;
+proxy_set_header Host $http_host; # required for docker client's sake
+proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
+proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+proxy_set_header X-Forwarded-Proto $scheme;
+proxy_read_timeout 900;
diff --git a/contrib/compose/nginx/docker-registry.conf b/contrib/compose/nginx/docker-registry.conf
new file mode 100644
index 00000000..7b039a54
--- /dev/null
+++ b/contrib/compose/nginx/docker-registry.conf
@@ -0,0 +1,7 @@
+proxy_pass http://docker-registry;
+proxy_set_header Host $http_host; # required for docker client's sake
+proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
+proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+proxy_set_header X-Forwarded-Proto $scheme;
+proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line
+proxy_read_timeout 900;
diff --git a/contrib/compose/nginx/nginx.conf b/contrib/compose/nginx/nginx.conf
new file mode 100644
index 00000000..63cd180d
--- /dev/null
+++ b/contrib/compose/nginx/nginx.conf
@@ -0,0 +1,27 @@
+user nginx;
+worker_processes 1;
+
+error_log /var/log/nginx/error.log warn;
+pid /var/run/nginx.pid;
+
+events {
+    worker_connections 1024;
+}
+
+http {
+    include /etc/nginx/mime.types;
+    default_type application/octet-stream;
+
+    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+                    '$status $body_bytes_sent "$http_referer" '
+                    '"$http_user_agent" "$http_x_forwarded_for"';
+
+    access_log /var/log/nginx/access.log main;
+
+    sendfile on;
+
+    keepalive_timeout 65;
+
+    include /etc/nginx/conf.d/*.conf;
+}
+
diff --git a/contrib/compose/nginx/registry.conf b/contrib/compose/nginx/registry.conf
new file mode 100644
index 00000000..47ffd237
--- /dev/null
+++ b/contrib/compose/nginx/registry.conf
@@ -0,0 +1,41 @@
+# Docker registry proxy for api versions 1 and 2
+
+upstream docker-registry {
+  server registryv1:5000;
+}
+
+upstream docker-registry-v2 {
+  server registryv2:5000;
+}
+
+# No client auth or TLS
+server {
+  listen 5000;
+  server_name localhost;
+
+  # disable any limits to avoid HTTP 413 for large image uploads
+  client_max_body_size 0;
+
+  # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
+  chunked_transfer_encoding on;
+
+  location /v2/ {
+    # Do not allow connections from docker 1.5 and earlier
+    # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
+    if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
+      return 404;
+    }
+
+    # To add basic authentication to v2 use auth_basic setting plus add_header
+    # auth_basic "registry.localhost";
+    # auth_basic_user_file test.password;
+    # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always;
+
+    include docker-registry-v2.conf;
+  }
+
+  location / {
+    include docker-registry.conf;
+  }
+}
+
diff --git a/contrib/docker-integration/Dockerfile b/contrib/docker-integration/Dockerfile
new file mode 100644
index 00000000..7a047a68
--- /dev/null
+++ b/contrib/docker-integration/Dockerfile
@@ -0,0 +1,9 @@
+FROM distribution/golem:0.1
+
+MAINTAINER Docker Distribution Team
+
+RUN apk add --no-cache git
+
+ENV TMPDIR /var/lib/docker/tmp
+
+WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration
diff --git a/contrib/docker-integration/README.md b/contrib/docker-integration/README.md
new file mode 100644
index 00000000..bc5be9d9
--- /dev/null
+++ b/contrib/docker-integration/README.md
@@ -0,0 +1,63 @@
+# Docker Registry Integration Testing
+
+These integration tests cover interactions between registry clients such as
+the docker daemon and the registry server. All tests can be run using the
+[golem integration test runner](https://github.com/docker/golem).
+
+The integration tests configure components using docker compose
+(see docker-compose.yaml) and the runner can be configured using the golem
+configuration file (see golem.conf).
+
+## Running integration tests
+
+### Run using multiversion script
+
+The integration tests in the `contrib/docker-integration` directory can simply
+be run by executing the run script `./run_multiversion.sh`. If there is no running
+daemon to connect to, run as `./run_multiversion.sh -d`.
+
+This command will build the distribution image from the locally checked out
+version and run against multiple versions of docker defined in the script. To
+run a specific version of the registry or docker, Golem will need to be
+executed manually.
+
+### Run manually using Golem
+
+Using the golem tool directly allows running against multiple versions of
+the registry and docker. Running against multiple versions of the registry
+can be useful for testing changes in the docker daemon which are not
+covered by the default run script.
+
+#### Installing Golem
+
+Golem is distributed as an executable binary which can be installed from
+the [release page](https://github.com/docker/golem/releases/tag/v0.1).
+
+#### Running golem with docker
+
+Additionally golem can be run as a docker image requiring no additional
+installation.
+
+`docker run --privileged -v "$GOPATH/src/github.com/docker/distribution/contrib/docker-integration:/test" -w /test distribution/golem golem -rundaemon .`
+
+#### Golem custom images
+
+Golem tests a given version of software by defining the docker image to test.
+
+Run with registry 2.2.1 and docker 1.10.3:
+
+`golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .`
+
+
+#### Use golem caching for developing tests
+
+Golem allows caching image configuration to reduce test start up time.
+Using this cache will allow tests with the same set of images to start
+up quickly. This can be useful when developing tests and needing the
+test to run quickly. If there are changes which affect the image (such as
+building a new registry image), then startup time will be slower.
+
+Run this command multiple times; after the first time, test runs
+should start much quicker.
+`golem -cache ~/.cache/docker/golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .`
+
diff --git a/contrib/docker-integration/docker-compose.yml b/contrib/docker-integration/docker-compose.yml
new file mode 100644
index 00000000..374197ac
--- /dev/null
+++ b/contrib/docker-integration/docker-compose.yml
@@ -0,0 +1,91 @@
+nginx:
+  build: "nginx"
+  ports:
+    - "5000:5000"
+    - "5002:5002"
+    - "5440:5440"
+    - "5441:5441"
+    - "5442:5442"
+    - "5443:5443"
+    - "5444:5444"
+    - "5445:5445"
+    - "5446:5446"
+    - "5447:5447"
+    - "5448:5448"
+    - "5554:5554"
+    - "5555:5555"
+    - "5556:5556"
+    - "5557:5557"
+    - "5558:5558"
+    - "5559:5559"
+    - "5600:5600"
+    - "6666:6666"
+  links:
+    - registryv2:registryv2
+    - malevolent:malevolent
+    - registryv2token:registryv2token
+    - tokenserver:tokenserver
+    - registryv2tokenoauth:registryv2tokenoauth
+    - registryv2tokenoauthnotls:registryv2tokenoauthnotls
+    - tokenserveroauth:tokenserveroauth
+registryv2:
+  image: golem-distribution:latest
+  ports:
+    - "5000"
+registryv2token:
+  image: golem-distribution:latest
+  ports:
+    - "5000"
+  volumes:
+    - ./tokenserver/registry-config.yml:/etc/docker/registry/config.yml
+    - ./tokenserver/certs/localregistry.cert:/etc/docker/registry/localregistry.cert
+    - ./tokenserver/certs/localregistry.key:/etc/docker/registry/localregistry.key
+    - ./tokenserver/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
+tokenserver:
+  build: "tokenserver"
+  command: "--debug -addr 0.0.0.0:5556 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5556"
+  ports:
+    - "5556"
+registryv2tokenoauth:
+  image: golem-distribution:latest
+  ports:
+    - "5000"
+  volumes:
+    - ./tokenserver-oauth/registry-config.yml:/etc/docker/registry/config.yml
+    - ./tokenserver-oauth/certs/localregistry.cert:/etc/docker/registry/localregistry.cert
+    - ./tokenserver-oauth/certs/localregistry.key:/etc/docker/registry/localregistry.key
+    - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
+registryv2tokenoauthnotls:
+  image: golem-distribution:latest
+  ports:
+    - "5000"
+  volumes:
+    - ./tokenserver-oauth/registry-config-notls.yml:/etc/docker/registry/config.yml
+    - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
+tokenserveroauth:
+  build: "tokenserver-oauth"
+  command: "--debug -addr 0.0.0.0:5559 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5559 -enforce-class"
+  ports:
+    - "5559"
+malevolent:
+  image: "dmcgowan/malevolent:0.1.0"
+  command: "-l 0.0.0.0:6666 -r http://registryv2:5000 -c /certs/localregistry.cert -k /certs/localregistry.key"
"-l 0.0.0.0:6666 -r http://registryv2:5000 -c /certs/localregistry.cert -k /certs/localregistry.key" + links: + - registryv2:registryv2 + volumes: + - ./malevolent-certs:/certs:ro + ports: + - "6666" +docker: + image: golem-dind:latest + container_name: dockerdaemon + command: "docker daemon --debug -s $DOCKER_GRAPHDRIVER" + privileged: true + environment: + DOCKER_GRAPHDRIVER: + volumes: + - /etc/generated_certs.d:/etc/docker/certs.d + - /var/lib/docker + links: + - nginx:localregistry + - nginx:auth.localregistry diff --git a/contrib/docker-integration/golem.conf b/contrib/docker-integration/golem.conf new file mode 100644 index 00000000..eb175707 --- /dev/null +++ b/contrib/docker-integration/golem.conf @@ -0,0 +1,18 @@ +[[suite]] + dind=true + images=[ "nginx:1.9", "dmcgowan/token-server:simple", "dmcgowan/token-server:oauth", "dmcgowan/malevolent:0.1.0", "dmcgowan/ncat:latest" ] + + [[suite.pretest]] + command="sh ./install_certs.sh /etc/generated_certs.d" + [[suite.testrunner]] + command="bats -t ." + format="tap" + env=["TEST_REPO=hello-world", "TEST_TAG=latest", "TEST_USER=testuser", "TEST_PASSWORD=passpassword", "TEST_REGISTRY=localregistry", "TEST_SKIP_PULL=true"] + [[suite.customimage]] + tag="golem-distribution:latest" + default="registry:2.2.1" + [[suite.customimage]] + tag="golem-dind:latest" + default="docker:1.10.1-dind" + version="1.10.1" + diff --git a/contrib/docker-integration/helpers.bash b/contrib/docker-integration/helpers.bash new file mode 100644 index 00000000..8760f9cf --- /dev/null +++ b/contrib/docker-integration/helpers.bash @@ -0,0 +1,127 @@ +# has_digest enforces the last output line is "Digest: sha256:..." +# the input is the output from a docker push cli command +function has_digest() { + filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') + [ "$filtered" != "" ] + # See http://wiki.alpinelinux.org/wiki/Regex#BREs before making changes to regex + digest=$(expr "$filtered" : ".*\(sha[0-9]\{3,3\}:[a-z0-9]*\)") +} + +# tempImage creates a new image using the provided name +# requires bats +function tempImage() { + dir=$(mktemp -d) + run dd if=/dev/urandom of="$dir/f" bs=1024 count=512 + cat < "$dir/Dockerfile" +FROM scratch +COPY f /f + +CMD [] +DockerFileContent + + cp_t $dir "/tmpbuild/" + exec_t "cd /tmpbuild/; docker build --no-cache -t $1 .; rm -rf /tmpbuild/" +} + +# skip basic auth tests with Docker 1.6, where they don't pass due to +# certificate issues, requires bats +function basic_auth_version_check() { + run sh -c 'docker version | fgrep -q "Client version: 1.6."' + if [ "$status" -eq 0 ]; then + skip "Basic auth tests don't support 1.6.x" + fi +} + +email="a@nowhere.com" + +# docker_t_login calls login with email depending on version +function docker_t_login() { + # Only pass email field pre 1.11, no deprecation warning + parse_version "$GOLEM_DIND_VERSION" + v=$version + parse_version "1.11.0" + if [ "$v" -lt "$version" ]; then + run docker_t login -e $email $@ + else + run docker_t login $@ + fi +} + +# login issues a login to docker to the provided server +# uses user, password, and email variables set outside of function +# requies bats +function login() { + rm -f /root/.docker/config.json + + docker_t_login -u $user -p $password $1 + if [ "$status" -ne 0 ]; then + echo $output + fi + [ "$status" -eq 0 ] + + # Handle different deprecation warnings + parse_version "$GOLEM_DIND_VERSION" + v=$version + parse_version "1.11.0" + if [ "$v" -lt "$version" ]; then + # First line is WARNING about credential save or email deprecation 
+		[ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ]
+	else
+		[ "${lines[0]}" = "Login Succeeded" ]
+	fi
+
+}
+
+function login_oauth() {
+	login $@
+
+	tmpFile=$(mktemp)
+	get_file_t /root/.docker/config.json $tmpFile
+	run awk -v RS="" "/\"$1\": \\{[[:space:]]+\"auth\": \"[[:alnum:]]+\",[[:space:]]+\"identitytoken\"/ {exit 3}" $tmpFile
+	[ "$status" -eq 3 ]
+}
+
+function parse_version() {
+	version=$(echo "$1" | cut -d '-' -f1) # Strip anything after '-'
+	major=$(echo "$version" | cut -d . -f1)
+	minor=$(echo "$version" | cut -d . -f2)
+	rev=$(echo "$version" | cut -d . -f3)
+
+	version=$((major * 1000 * 1000 + minor * 1000 + rev))
+}
+
+function version_check() {
+	name=$1
+	checkv=$2
+	minv=$3
+	parse_version "$checkv"
+	v=$version
+	parse_version "$minv"
+	if [ "$v" -lt "$version" ]; then
+		skip "$name version \"$checkv\" does not meet required version \"$minv\""
+	fi
+}
+
+function get_file_t() {
+	docker cp dockerdaemon:$1 $2
+}
+
+function cp_t() {
+	docker cp $1 dockerdaemon:$2
+}
+
+function exec_t() {
+	docker exec dockerdaemon sh -c "$@"
+}
+
+function docker_t() {
+	docker exec dockerdaemon docker $@
+}
+
+# build creates a new docker image id from another image
+# (a minimal sketch, assuming a heredoc that rebuilds FROM the base image $2;
+# the MAINTAINER line adds a layer so the resulting image id differs)
+function build() {
+	docker exec -i dockerdaemon docker build --no-cache -t $1 - <<DOCKERFILE
+FROM $2
+MAINTAINER distribution@docker.com
+DOCKERFILE
+}
diff --git a/contrib/docker-integration/install_certs.sh b/contrib/docker-integration/install_certs.sh
new file mode 100644
--- /dev/null
+++ b/contrib/docker-integration/install_certs.sh
+function append_ca_file() {
+	cat $1 >> $2/ca.crt
+}
+
+install_test_certs $installdir
+
+# Malevolent server
+install_ca_file ./malevolent-certs/ca.pem $installdir/$hostname:6666
+
+# Token server
+install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5554
+install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5555
+install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5557
+install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5558
+append_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5600
+
diff --git a/contrib/docker-integration/malevolent-certs/ca.pem b/contrib/docker-integration/malevolent-certs/ca.pem
new file mode 100644
index 00000000..93325986
--- /dev/null
+++ b/contrib/docker-integration/malevolent-certs/ca.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC9TCCAd+gAwIBAgIQKQTGjKpSVBW78ef0fOcxRTALBgkqhkiG9w0BAQswJjER
+MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz
+MjE0OVoXDTE4MDgwNDIzMjE0OVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
+BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwoPM
+xiDZK6Fwy5r3waRkfJHhyZZH828Jyj+nz5UVkMyOM/xN6MgJ2w911hTj1wSXG2n3
+AohF3gTFNrDYh4j2qRZnixDrOM5GBm2/KJbyfBIYkrR45yLfjidO7MRnhaPZ5Fov
+l+RKwNBXP4Q2mUe7q9FM457Rm8hAcqXP04AJT20m1QSYQivDgxsDxuAQte3VEy1E
+0j0CwUKoFHT6MHOnDPEZbc4r1+ba34WBM1Sc5KXyV2JlbtU07J4hACYWVsD7vQCl
+VFlZNE4E35ahMDZ+ODLal9PAT8ARLdAtjvRWrT+h8qZ4Yfwt/sGF1K4CAkTP3H5p
+uMkJG56zmqIEYeHMuwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/
+BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALpieTckiPEeb3rTAWl7waDPLPOIhS5C
+XHVfOm7cPmRn3pT2VuR8y74U7a1uOkYMgJnCWb8lSXhbqC89FatLnAhKqo4I9oD8
+2BXgYeIpP5/OWBcjzmsMnowrvokc0chAmAR0Ux6AP0eX9amC0lGMuTHdw3+is0AR
+lhoImOUPXvgMH7W2RimpSgnX0R5wKqfuGwMfbGa0xhWBZ+wekAKcU8b+pIHDyX0c
+EQcir2y8/lVjECXSAIlV6iasPQ3hm1sd0xq1hx4yrwYFvQb7yEhOXbK24HLr/20D
+RRmEOuS8gg2XtUFv66z/VOw/nUleIg9GAuWDJaiu9frmIma4/tIY4qY=
+-----END CERTIFICATE-----
diff --git a/contrib/docker-integration/malevolent-certs/localregistry.cert b/contrib/docker-integration/malevolent-certs/localregistry.cert
new file mode 100644
index 00000000..071e7a2b
--- /dev/null
+++ b/contrib/docker-integration/malevolent-certs/localregistry.cert
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDETCCAfugAwIBAgIQZRKt7OeG+TlC2riszYwQQTALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz +MjE0OVoXDTE4MDgwNDIzMjE0OVowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDPdsUBStNMz4coXfQVIJIafG85VkngM4fV7hrg7AbiGLCWvq8cWOrYM50G9Wmo +twK1WeQ6bigYOjINgSfTxcy3adciVZIIJyXqboz6n2V0yRPWpakof939bvuAurAP +tSqQ2V5fGN0ZZn4J4IbXMSovKwo7sG3X6i4q/8DYHZ/mKjvCRMPC3MGWqunknpkm +dzyKbIFHaDKlAqIOwTsDhHvGzm/9n3D+h4sl5ZPBobuBEV2u5GR0H5ujak4+Kczt +thCWtRkzCfnjW0TEanheSYJGu8OgCGoFjQnHotgqvOO6iHZCsrB3gf8WQeou+y9e ++OyLZv3FmqdC9SXr3b0LGQTFAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQC/PP2Y9QVhO8t4BXML1QpNRWqXG8Gg0P1XIh6M6FoxcGIodLdbzui828YB +wm9ZlyKars+nDdgLdQWawdV7hSd6s2NeQlHYQSGLsdTAVkgIxiD7D2Tw3kAZ6Zrj +dPikoVAc+rBMm/BXQLzy95IAbBVOHOpBkOOgF+TYxeLnOc3GzbUqBi1Pq97DMaxr +DaDuywH55P/6v7qt610UIsZ6+RZ78iiRx4Q+oRxEqGT0rXI76gVxOFabbJuFr1n1 +kEWa3u/BssJzX3KVAm7oUtaBnj2SH5fokFmvZ5lBXA4QO/5doOa8yZiFFvvQs7EY +SWDxLrvS33UCtsCcpPggjehnxKaC +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/malevolent-certs/localregistry.key b/contrib/docker-integration/malevolent-certs/localregistry.key new file mode 100644 index 00000000..c5bf7ac1 --- /dev/null +++ b/contrib/docker-integration/malevolent-certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAz3bFAUrTTM+HKF30FSCSGnxvOVZJ4DOH1e4a4OwG4hiwlr6v +HFjq2DOdBvVpqLcCtVnkOm4oGDoyDYEn08XMt2nXIlWSCCcl6m6M+p9ldMkT1qWp +KH/d/W77gLqwD7UqkNleXxjdGWZ+CeCG1zEqLysKO7Bt1+ouKv/A2B2f5io7wkTD +wtzBlqrp5J6ZJnc8imyBR2gypQKiDsE7A4R7xs5v/Z9w/oeLJeWTwaG7gRFdruRk +dB+bo2pOPinM7bYQlrUZMwn541tExGp4XkmCRrvDoAhqBY0Jx6LYKrzjuoh2QrKw +d4H/FkHqLvsvXvjsi2b9xZqnQvUl6929CxkExQIDAQABAoIBAQCZjCUI7NFwwxQc +m1UAogeglMJZJHUu+9SoUD8Sg34grvdbyqueBm1iMOkiclaOKU1W3b4eRNNmAwRy +nEnW4km+4hX48m5PnHHijYnIIFsd0YjeT+Pf9qtdXFvGjeWq6oIjjM3dAnD50LKu +KsCB2oCHQoqjXNQfftJGvt2C1oI2/WvdOR4prnGXElVfASswX4PkP5LCfLhIx+Fr +7ErfaRIKigLSaAWLKaw3IlL12Q/KkuGcnzYIzIRwY4VJ64ENN6M3+KknfGovQItL +sCxceSe61THDP9AAI3Mequm8z3H0CImOWhJCge5l7ttLLMXZXqGxDCVx+3zvqlCa +X0cgGSVBAoGBAOvTN3oJJx1vnh1mRj8+hqzFq1bjm4T/Wp314QWLeo++43II4uMM +5hxUlO5ViY1sKxQrGwK+9c9ddxAvm5OAFFkzgW9EhDCu0tXUb2/vAJQ93SgqbcRu +coXWJpk0eNW/ouk2s1X8dzs+sCs3a4H64fEEj8yhwoyovjfucspsn7t1AoGBAOE2 +ayLKx7CcWCiD/VGNvP7714MDst2isyq8reg8LEMmAaXR2IWWj5eGwKrImTQCsrjW +P37aBp1lcWuuYRKl/WEGBy6JLNdATyUoYc1Yo+8YdenekkOtOHHJerlK3OKi3ZVp +q4HJY9wzKg/wYLcbTmjjzKj+OBIZWwig73XUHwoRAoGBAJnuIrYbp1aFdvXFvnCl +xY6c8DwlEWx8qY+V4S2XX4bYmOnkdwSxdLplU1lGqCSRyIS/pj/imdyjK4Z7LNfY +sG+RORmB5a9JTgGZSqwLm5snzmXbXA7t8P7/S+6Q25baIeKMe/7SbplTT/bFk/0h +371MtvhhVfYuZwtnL7KFuLXJAoGBAMQ3UHKYsBC8tsZd8Pf8AL07mFHKiC04Etfa +Wb5rpri+RVM+mGITgnmnavehHHHHJAWMjPetZ3P8rSv/Ww4PVsoQoXM3Cr1jh1E9 +dLCfWPz4l8syIscaBYKF4wnLItXGxj3mOgoy93EjlrMaYHlILjGOv4JBM4L5WmoT +JW7IaF6xAoGAZ4K8MwU/cAah8VinMmLGxvWWuBSgTTebuY5zN603MvFLKv5necuc +BZfTTxD+gOnxRT6QAh++tOsbBmsgR9HmTSlQSSgw1L7cwGyXzLCDYw+5K/03KXSU +DaFdgtfcDDJO8WtjOgjyTRzEAOsqFta1ige4pIu5fTilNVMQlhts5Iw= +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/malevolent.bats b/contrib/docker-integration/malevolent.bats new file mode 100644 index 00000000..36cfe360 --- /dev/null +++ b/contrib/docker-integration/malevolent.bats @@ -0,0 +1,192 @@ +#!/usr/bin/env bats + +# This tests various expected error scenarios when pulling bad content + +load helpers + +host="localregistry:6666" +base="malevolent-test" + +function setup() { + tempImage $base:latest +} + +@test 
"Test malevolent proxy pass through" { + docker_t tag $base:latest $host/$base/nochange:latest + run docker_t push $host/$base/nochange:latest + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + + run docker_t pull $host/$base/nochange:latest + echo "$output" + [ "$status" -eq 0 ] +} + +@test "Test malevolent image name change" { + imagename="$host/$base/rename" + image="$imagename:lastest" + docker_t tag $base:latest $image + run docker_t push $image + [ "$status" -eq 0 ] + has_digest "$output" + + # Pull attempt should fail to verify manifest digest + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent altered layer" { + image="$host/$base/addfile:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull $image + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent altered layer (by digest)" { + imagename="$host/$base/addfile" + image="$imagename:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent poisoned images" { + truncid="777cf9284131" + poison="${truncid}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa32" + image1="$host/$base/image1/poison:$poison" + tempImage $image1 + run docker_t push $image1 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + image2="$host/$base/image2/poison:$poison" + tempImage $image2 + run docker_t push $image2 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + run docker_t pull $image1 + echo "$output" + [ "$status" -eq 0 ] + run docker_t pull $image2 + echo "$output" + [ "$status" -eq 0 ] + + # Test if there are multiple images + run docker_t images + echo "$output" + [ "$status" -eq 0 ] + + # Test images have same ID and not the poison + id1=$(docker_t inspect --format="{{.Id}}" $image1) + id2=$(docker_t inspect --format="{{.Id}}" $image2) + + # Remove old images + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + [ "$id1" != "$id2" ] + + [ "$id1" != "$truncid" ] + + [ "$id2" != "$truncid" ] +} + +@test "Test malevolent altered identical images" { + truncid1="777cf9284131" + poison1="${truncid1}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa32" + truncid2="888cf9284131" + poison2="${truncid2}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa64" + + image1="$host/$base/image1/alteredid:$poison1" + tempImage $image1 + run docker_t push $image1 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + image2="$host/$base/image2/alteredid:$poison2" + docker_t tag $image1 $image2 + run docker_t push $image2 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + run docker_t pull $image1 + echo "$output" + [ "$status" -eq 0 ] + run docker_t pull $image2 + echo "$output" + [ "$status" -eq 0 ] + + # Test if there are multiple images + run docker_t images + echo "$output" + [ "$status" -eq 0 ] + + # Test images have same ID and not the poison + id1=$(docker_t inspect 
--format="{{.Id}}" $image1) + id2=$(docker_t inspect --format="{{.Id}}" $image2) + + # Remove old images + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + [ "$id1" == "$id2" ] + + [ "$id1" != "$truncid1" ] + + [ "$id2" != "$truncid2" ] +} + +@test "Test malevolent resumeable pull" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + version_check registry "$GOLEM_DISTRIBUTION_VERSION" "2.3.0" + + imagename="$host/$base/resumeable" + image="$imagename:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -eq 0 ] +} diff --git a/contrib/docker-integration/nginx/Dockerfile b/contrib/docker-integration/nginx/Dockerfile new file mode 100644 index 00000000..17f999d2 --- /dev/null +++ b/contrib/docker-integration/nginx/Dockerfile @@ -0,0 +1,10 @@ +FROM nginx:1.9 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf +COPY registry-noauth.conf /etc/nginx/registry-noauth.conf +COPY registry-basic.conf /etc/nginx/registry-basic.conf +COPY test.passwd /etc/nginx/test.passwd +COPY ssl /etc/nginx/ssl +COPY v1 /var/www/html/v1 diff --git a/contrib/docker-integration/nginx/docker-registry-v2.conf b/contrib/docker-integration/nginx/docker-registry-v2.conf new file mode 100644 index 00000000..65c4d776 --- /dev/null +++ b/contrib/docker-integration/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/contrib/docker-integration/nginx/nginx.conf b/contrib/docker-integration/nginx/nginx.conf new file mode 100644 index 00000000..543eab69 --- /dev/null +++ b/contrib/docker-integration/nginx/nginx.conf @@ -0,0 +1,61 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + +# Setup TCP proxies +stream { + # Malevolent proxy + server { + listen 6666; + proxy_pass malevolent:6666; + } + + # Registry configured for token server + server { + listen 5554; + listen 5555; + proxy_pass registryv2token:5000; + } + + # Token server + server { + listen 5556; + proxy_pass tokenserver:5556; + } + + # Registry configured for token server with oauth + server { + listen 5557; + listen 5558; + proxy_pass registryv2tokenoauth:5000; + } + + # Token server with oauth + server { + listen 5559; + proxy_pass tokenserveroauth:5559; + } +} diff --git a/contrib/docker-integration/nginx/registry-basic.conf b/contrib/docker-integration/nginx/registry-basic.conf new file mode 100644 index 00000000..117ea584 --- /dev/null +++ b/contrib/docker-integration/nginx/registry-basic.conf @@ -0,0 +1,8 @@ +client_max_body_size 0; 
+chunked_transfer_encoding on; +location /v2/ { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + include docker-registry-v2.conf; +} diff --git a/contrib/docker-integration/nginx/registry-noauth.conf b/contrib/docker-integration/nginx/registry-noauth.conf new file mode 100644 index 00000000..6e182d44 --- /dev/null +++ b/contrib/docker-integration/nginx/registry-noauth.conf @@ -0,0 +1,5 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + include docker-registry-v2.conf; +} diff --git a/contrib/docker-integration/nginx/registry.conf b/contrib/docker-integration/nginx/registry.conf new file mode 100644 index 00000000..e693d569 --- /dev/null +++ b/contrib/docker-integration/nginx/registry.conf @@ -0,0 +1,260 @@ +# Docker registry proxy for API version 2 + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + include docker-registry-v2.conf; + } +} + +# No client auth or TLS (V2 only) +server { + listen 5002; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry-v2.conf; + } +} + +# TLS Configuration chart +# Username/Password: testuser/passpassword +# port | ca | client | basic | notes +# 5440 | yes | no | no | Tests CA certificate +# 5441 | yes | no | yes | Tests basic auth over TLS +# 5442 | yes | yes | no | Tests client auth with client CA +# 5443 | yes | yes | no | Tests client auth without client CA +# 5444 | yes | yes | yes | Tests using basic auth + TLS auth +# 5445 | no | no | no | Tests insecure using TLS +# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS +# 5447 | no | yes | no | Tests client auth to insecure +# 5448 | yes | no | no | Bad SSL version + +server { + listen 5440; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate
/etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + +# Add configuration for the localregistry server_name. +# Requires an /etc/hosts entry mapping localregistry to an external IP, +# not 127.0.0.1, in order to test Docker's secure/insecure +# registry features +server { + listen 5440; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_protocols SSLv3; + include
registry-noauth.conf; +} + + +# V1 search test +# Registry configured with token auth and no tls +# TLS termination done by nginx, search results +# served by nginx + +upstream docker-registry-v2-oauth { + server registryv2tokenoauthnotls:5000; +} + +server { + listen 5600; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + + root /var/www/html; + + client_max_body_size 0; + chunked_transfer_encoding on; + location /v2/ { + proxy_buffering off; + proxy_pass http://docker-registry-v2-oauth; + proxy_set_header Host $http_host; # required for docker client's sake + proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 900; + } + + location /v1/search { + if ($http_authorization !~ "Bearer [a-zA-Z0-9\._-]+") { + return 401; + } + try_files /v1/search.json =404; + add_header Content-Type application/json; + } +} diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem b/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem new file mode 100644 index 00000000..8c9b1bca --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV +BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX +GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx +BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT +wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO +cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY +AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR +sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE +CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p +y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk +o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN +NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G +Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/ +BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z +PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU +lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU +GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna +XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi +QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK +UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w +Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH +sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF +AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib +KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA +KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w== +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem b/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem new file mode 100644 index 00000000..a239939d --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- 
+MIIE9TCCAt+gAwIBAgIRAKbgxG1zgQI81ISaHxqLfpcwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNTA1MjYy +MDU0MjJaFw0xODA1MTAyMDU0MjJaMBMxETAPBgNVBAoTCFF1aWNrVExTMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/ +oARHbx59G+GOeGkrwG6ZWSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8M +WpLxp5U9LyYkv0AiSPfT2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/ +MgJbdTylEq1UcZSLMuky+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7II +hGlhziLVTKV9W1RP8Aop8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4 +nFwmuhOo8gvw/HhzYcxyMHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCai +gwUNzfe4/dHeCk/r3pteWOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru +5QqKMrbSlOcd6yHT6NM1ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/ +Vlp5N+WRjDpsBscR8kt2Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoO +nhRqhl2PSphcWdimk8Bwf5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3j +NLQ8EmHWaZlJSeW4BiDYsXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeB +twZJXIXR6Jc8hgsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoG +CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQCl0cTLbLIn +XFuxreei+y6TlG2Z5XcxJ84mr8VLAaQMlJOLZV0O/suFBu9KqBuvPaHhGRnKE2uw +Vxdj9qaDdvmvuzi4jYyUA/sQuqq1+wHwGTadOi9r0IsL8OxzsG16OlhuXzhoQVdw +C9z1jad4HC7uihQ5yhl2ltAA+h5G0Sr1b9El2mx4p6BV+okmTvrqrmjshQb1GZwx +jG6SJ/uvjGf7rn09ZyYafF9ZDTMNodNXjW8orqGlFdXZLPFJ9agUFfwWfqD2lrtm +Fu+Ei0ZvKOtyzmh06eO2aGAHJCBTfcDM4tBKBKp0MOMoZkcQQDNpSyI12j6s1wtx +/1dC8QDyfFpZFXTbKn3q+6MpR+u5zqVquYjwP5DqGTvX0e1sLSthv7LRiOi0qHv1 +bZ8JoWhRMNumui9mzwar5t20ExcWxGxizZY+t+OIj4kaAeRoKK6r6FrYBnTjM+iR ++xtML5UHPOSmYfNcai0Wn4T7hwpgnCJ+K7qGYjFUCarsINppQEwkxHAvuX+asc38 +nA0wd7ByulkMJph0gP6j6LuJf28JODi6EQ7FcQItMeTuPrc+mpqJ4jP7vTTSJG7Q +wvqXLMgFQFR+2PG0s10hbY/Y/nwZAROfAs7ADED+EcDPTl/+XjVyo/aYIeOb/07W +SpS/cacZYUsSLgB4cWbxElcc/p7CW1PbOA== +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem b/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem new file mode 100644 index 00000000..acfc9a48 --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/oARHbx59G+GOeGkrwG6Z +WSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8MWpLxp5U9LyYkv0AiSPfT +2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/MgJbdTylEq1UcZSLMuky ++RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7IIhGlhziLVTKV9W1RP8Aop +8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4nFwmuhOo8gvw/HhzYcxy +MHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCaigwUNzfe4/dHeCk/r3pte +WOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru5QqKMrbSlOcd6yHT6NM1 +ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/Vlp5N+WRjDpsBscR8kt2 +Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoOnhRqhl2PSphcWdimk8Bw +f5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3jNLQ8EmHWaZlJSeW4BiDY +sXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeBtwZJXIXR6Jc8hgsCAwEA +AQKCAgBJcL1iR5ROMtr0ZNIp4gciALfjQVV3gb48GR/e/9b/LWI0j3i0sOzeLN3h +SLda1fjzOn1Td1ma0dZwmdMUOF+hvhPDYZfzkwWLLkThXgLt/At3rMYstGWa8pN2 +wVUSH7sri7IHmYedP3baQdrHP/9pUsGQc+m8ASTE3i+PFcKbPe5+818HTtRrhVgN +X3oNmPKUNCmSom7ZcKer5P1+Ruum0NuDgomCdkoZgfhjeKeLrVjl/wXDSQL/AhWA +02c4/sML7xx19nl8uf7z+Gj0ir1pvRouhRJTwnRc4KdWu+Yn7WLU8j2ZKf5St/as +zjnpYVEdCp0KSHccgXtobUZDEG2NCHmM6gR2j3qgoUAYjHyqPYlph2r5C47q+p4c +dDWkpwZwGiuYq9qpZj24X6BfppxExcX6AwOgFLZLp80IynwrMVxFsDd2J+KpKRQ1 ++ZtYPcULwInF9MNi/dv84pxGOmmOaIUyjN8Sw4eqANU4T5uvTjUj7Ou6KYyfmxgG +y++vjpRN7tN1t1Hwde8SVWobvmhU+5SJVHV8INoJD7uciaevPo9pt833SQTtDXeY +PVBhOKO7thAxdUiqlU/1nGTXnf1VO6wAjaVYoTnP4tJ97WuTptwd2F5znVWHFGVh 
+lzJAzmFOuyCnRnInsf4n5EmWJnT7XF2CofQqAJ8NIddrU8GnQQKCAQEAyqWAiPMK +I/dMzlS7oJGlhbKZ5R4buc+EoZqtW7/8/S+0L6IaQvpEUilD+aDQyaxXjoKiQQL+ +0UeeSmF/zU5BsOTpB8AuJUfYoUe0N+x7hO5eIcoCB/QWYX+iC3tCN4j1Iwt6VliV +PBYEiLUYPngSIHob/nK8UtgxrWQ3Fik9XJtWhePHrvMvDBalgCKdnyhuucGxKUjc +TtPcyMFdi0z4Kt/FAm+5u/v4ZkO909Ish0FrAqQ9t5ETfvTTTYKBmzny6/LSPTK9 +0XIsHltuC1xG4vGQsES/Ph++Yj3Vn011FqvFZeBUHbfcQuB4h5wcb+90d4GU1kux +eabsHPIZKrlN4QKCAQEA2Fs8NAN5K9i7qbxZCJPi6DJV6XMznk6JVGb+qkkChCyq +IOXb95+c9CIpe6w2d3res3zvML3zbdz2Lyp9G0ve6tSlOaSnHeyIxZ5SRB+yQrcF +GXtsx370bOGjCi1/NH85kwKlMuROFJKleJQv8rKpIEo5aPSPV9Cc/VsUqBpvR+O0 +U1HMv57P4yJA/ddw6imHJBl3jTmWBpK4B+LBsCbdypxdVoO8t32Lb2BqDTaPJfYU +RJUpjn/efLLoP6CWxYtqpUlY5tc7NJGAokl8Fo1mPn02klydvs09uiXE80Li2Hoc +/meMH07Lbt2VTw6iGNRX6VpIHEUZGZeS6rbAvO4ZawKCAQEAjOtGVPXdyWEB0kHu +MBzYY/7tMf0b/rymWNL9Vt5NiauQu8cYSBdNR21WzdLdHkFwqbOCLX9twA7zrnna +q+SNnfuxaShlbptls9HvKyySQMCaSRj3DJzaq3ZcM2vFgmUFQxeKPV1geeY9xOta +LqbExDzmFq2m9F1PPmqAPDL1bt6+7mCVzb1irB9be52WysUNKrPdBP6b5V1DHYAK +EwK1WOs/TxBusqDn/gWBjjmLqYr+ZVndaTfDvPd3sWDdzBoiKZ40QUZ15Z5lu76M +6e2DhfHCUjGcZBEjDaI+WYc9s0REAzJajEf9Lax3ZKZUyCpWbXx5CgSdKCHB8+cP +RTyTQQKCAQEAsxx8r5a8hocLfQ43Kvm7HH0nUHeVoRXlbOFDLNf6ZE/RnCCOxOX3 +esiZTRAZmzo2CaOBJPnr/+SwTgW/woxCBGh8TEc6LnS2GdviwRD4c3CuoRTjzhgU +49q8Ld3SdDRrBoBnIMWOuktY/4S2WRZ9GwU3l+L2lD1Y6gmwBSa1P2+Lxnpupagk +9CVUZpEnokM05LbMmTa2M8Tc43Je5KSYcnaWctvmrIUbnN3VjhC/2y5oQwq1d4n2 +N4eo65vXlbzAUgtxtNEz62YVdsSdHNJ8dXkVZ3+S+/VPh75i2PxjbdFSFW7Futlx +YtvAEs3LdgC8squSDQ1LJTutXfBjiUUX9wKCAQBiCMre86tLyJu6Qb6X1cRAwO7m +4kyGzIUtijXko6mWxb4X/usVvzhSaNVYbHbMZXjX+J5vhBOul+RmQ3EY6nw0H2z8 +9D4z/rnQVqeb0uvIeUhBPni+s4fS4bA92M6Ie5bhiOSF2JjjJr38BFnTZARE7C+7 +ZII7z2c0eQz/wAAt9fWWroAB2mIm6wxq0LNij2NoE0iq6k2xJE1/k8qhXpsN0zAv +bjG72Q7WryBeK/eIDK9e5wGlfLVDOx2Evlcaj70oJxuoRh57e8fCYy8huJQT+Wlx +Qw4zhxiyzAMq8SEqFsm8dVO4Bu2FwzmmehA80ieSb+si7JZU92xGDT394Im2 +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem b/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem new file mode 100644 index 00000000..3ca47f45 --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIFCTCCAvOgAwIBAgIQdcXDOHrLsd2ENSfj5h8ZmjALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQwM1oXDTE4MDUxMDIwNTQwM1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV +BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2K +saEVcHq0eldu5kABbWtZsf9keK7lz8beVIowzOqp5IHpGlggtH7xDVeigA/sLdds +WTgKEOq3zsJzdgfEti5TNAjjmPqjMKkolqv3LXDJG0dZ2GZ8W/eBB6X1wB0LKr3i +ye3/5jb/wCZYVGGMQXj0VQxY8Qq+OHEp0effeheJqA0OYOj+RaZwi20OR/KmJRgY +wXU33bZyapuyT4krhFlFbtzXeKsKQPrT2ePWxPAceqUGUTIqyJySYIw6vb72YxjX +FNRw6Jg7B7RqVJaVCfBrVxtAv+rCLOhUOVYmWhgWEIODPXiqOGwB0VUApAVAYqfi +TYnJIZ7QYLlQx5VPNlzZuSJTUzKmHQLtLcTqdO5HmLxfxc0WuS/ftK916wy/jpSc +m2DiHjIy6aAEaHKGQrNgT+no68kp30xkYAVsIs0BFpl6Q2iNr5e0uKta82A0xU1Q +we7swSHOHCevuDZfFA/CqnBptOjvNUuVytcroCeCrV/ftp75w/Fd9zOcb6LGLxM2 +2UzhkSXl3II250xj74Q3q8T9TDxCLty7oiawhaYKI+8SDYc510EQ7MH46WMO+3Uq +JkpmmELd9POgnnZ1JrCFmf0flUKTi2CqU3wrBPpPMwFBxoFipp5iL87npACHc3DY +6uaoF4Pf9Et1Fd7HRon8RMsKkrSF92NFiBx5UvhZAgMBAAGjNjA0MA4GA1UdDwEB +/wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq +hkiG9w0BAQsDggIBAC0F4ci1nqZ9KUhEEAmWmy8g89DovNNIGSC51r2WJ/COmYUX +X70TONscsBL/kx5MK4xoAmb+EN6Yy8i+z9NkNJd0B+2MjXPMFBpgGb0UiPv2wEmZ +5PAKyjwTxNIm6L/nFhkmVqfsQHfjHukXES4C0ff6fj6fuDpBfl5nTlVmc9LpP+hT +5RAwW10qumucGxAWGNBWW+K66cf8O7n/0nQykxJxYjBx16ZB80H2uvqFDKDVFqze 
+co5M4euXQq9KiXPRlcC9rab2a7FGLHd0TyPkq6TvfsqpxcryyKS4rIAz3sQh/tl/ +/qm1tBcZW2bce3UlF2Wb2dW9HqvIu1O84f6ptLqwgKcIdTbwgQZ0kbFoWE2kWJSV +w+eAFb7tz1LDTpF3NRlz+1K27pBQWRQgcqoIRoQXpC0LfQY9Mp70QIfUQdUh6tnO +8hmq5y623tfxiDwCxb/EOpwCmwK1Cp9cloZTDefVE1r6NkEJWeeHG79VljUGF1KT +NKzXWrrsFtge/hU9Pj+frcZO9qExxPCcsrdZcoK7Ll8s+pjulRvbnCnJkNpeOI3P +iz6+sdGmzKSKg2daRM67Zmy5tmlBEX/eV7kFqt+b3HsdUiLo3Ng2lyPLNNDfwUtB +EukgYGjVJoyqLjLXgsCxLJlk7X/ogVwf8SlAnQ7p6KuxGWm02vlUpEmJp+Hq +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem b/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem new file mode 100644 index 00000000..baccc9eb --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEArYqxoRVwerR6V27mQAFta1mx/2R4ruXPxt5UijDM6qnkgeka +WCC0fvENV6KAD+wt12xZOAoQ6rfOwnN2B8S2LlM0COOY+qMwqSiWq/ctcMkbR1nY +Znxb94EHpfXAHQsqveLJ7f/mNv/AJlhUYYxBePRVDFjxCr44cSnR5996F4moDQ5g +6P5FpnCLbQ5H8qYlGBjBdTfdtnJqm7JPiSuEWUVu3Nd4qwpA+tPZ49bE8Bx6pQZR +MirInJJgjDq9vvZjGNcU1HDomDsHtGpUlpUJ8GtXG0C/6sIs6FQ5ViZaGBYQg4M9 +eKo4bAHRVQCkBUBip+JNickhntBguVDHlU82XNm5IlNTMqYdAu0txOp07keYvF/F +zRa5L9+0r3XrDL+OlJybYOIeMjLpoARocoZCs2BP6ejrySnfTGRgBWwizQEWmXpD +aI2vl7S4q1rzYDTFTVDB7uzBIc4cJ6+4Nl8UD8KqcGm06O81S5XK1yugJ4KtX9+2 +nvnD8V33M5xvosYvEzbZTOGRJeXcgjbnTGPvhDerxP1MPEIu3LuiJrCFpgoj7xIN +hznXQRDswfjpYw77dSomSmaYQt3086CednUmsIWZ/R+VQpOLYKpTfCsE+k8zAUHG +gWKmnmIvzuekAIdzcNjq5qgXg9/0S3UV3sdGifxEywqStIX3Y0WIHHlS+FkCAwEA +AQKCAgAtZw3V8P/+el1PpqoCsNzpqwvQn36bc3CKvPwtM1tJQa2Q92V3DQdr9rDg +7pjGkankpGorKScH4ZLseLy2h5aKRCZm9PS/DhbbCs1wrDhtO5AxeKYPGhYNiOpx +VvwuHQ/Pohfmdn7KgNrKrW1WIBW5CWN+2X4mq2Gk6aYLHgKZSeB3mf1st6mNRACW +RZg5OZKW3VMv0a/l3cVaeqooXwQ/PtUkXhMp3ILnnKly3Gulzi2gIyj3EQ5vODSe +O3gND/UZOJwwgGG6Aief4fnDc7an+c1OSgBr8OVC21Ys3dfQWWV0os9gVFhymX8k +2AgRf6jP93sFw2NSY34KvcGZpKG59oMDxWF1vPo8sOt17Ey0+qp3eUtB3FfE7Wtf +BaLaD/x4U91izIqOEMzQ6QiZAyvmUoBkUSo125CYuIkt8C8Q1lA1KjihETWF37QR +mr8LUk0A0x3SErtm4wVfeDEqVSfI9gKpk6i6rlUzuCjv58Rc0yyqoghXwBWM4CKj +5ZHYpBKAxj4bM6IrKnodAOcsyVk2c2zVTaMxPhoUj0fF7IE5Hy6YAQ/yBheZEM1v +fhsdBFyS6OqSCnN6UinhH268QPam82lfKTFjW5lOgsSDQZ9rhiWoyamhonJTq65I +nb08f4mzT6OGMwV13zq8dXio6WnUIQAhXdEYWrMBmxp5b6CxAQKCAQEA4kmwV3Nb +n3ZIzVAp2l+yGZwdg4YWzN2kcfdNkL8I+Pn8pWrOwv/uGQYmM0786ys9kB5lu4FR +TMcoEo3AaK/z8N49ro2Kl6HcTmxZgTMr+cl6iwetzqYdkRK7klxyCv5uVloDQDtc +AulDH6RkW9BfRERpi6XtlgiFdJj5jMvXMpwGHX69JVsXb83ZSQESjI2JfO9Y8+4M +a7hNKWW/W0ZBrGCcQQPbgpysfJ+PFKUF/yF1h8SSCdetW2Kv2ix16wL5uHKINYmZ +Y/Om+/AFnUOQlANycgThtgBI5mvg9Khq6W2i/RNcIL7bvwAzq1p+o6cGnImXo4bY +hC4fs2/aeX17UQKCAQEAxFQHSLBYDLal5CQYbHbNZ2sLjwRUraEd/+BA8XoERVVQ +JPihgEvTPEaHnWrFTw0qaGKgMZ5SZCZSWUIfXjYvQIUcEMhNUOHweXhJJhifO5sd +sTuvU7bWg76F69bRKfp8KM266m7qMYv+tNlQ6Kbz/1ImsW00xb86vCK2hPfhldtN +d/iBb4HVDu1uoATHUNuqsSGj/UvttKudQdg7MapzM4N+D4m6rPZUjQmtoMWOXt7R +LYrqEOHWfkxXKlVHw1cL9uzUpArvnR0VcYvGfXiYJFbXWsEB07VxIoLMPEtPbpH9 +YLY37KugrthEVnsbySmZIWCRDEqQuuAaa5o8S1naiQKCAQAiU/dybMebe0A0FVMk +E5xbEjnP+AmBbqZBu7iCmthrnNDc70UKg/TEyxAEfJkVu+uM72+TcFy6/wNvPR3R +Q9AH3E8TKdm6gw1+wCUb2n1zWUND0Bhn3v9hQKw/2dJbJJnsc59GoTqmHmjWZgPr +gcLSAmbYjoVqW0STmZlR6KJuxQiQdOeQwS7fASVTU9xSgi43S7/80UIFHWJnQ04y +NIhF9CoAGuuz9ryb80CraxVrzNGdlQ5qe9OKp3/x4wjIbB0iBA3xwTwJ066jTZgs +cVF/gr5b2a28BHMKsZbgxqPhYYZ2SfeR6CJB6W/tML9BaFcybBUa85vpAW5BtFg6 +UfThAoIBAAp1/71byBVFVimF0tdUrTUpewAv1uM5hoOvy0YSnk+jcBXIObLAV40K +pQc6PTEtHmlZd/es2+8CK7kd0NYQRQxHC2vJgHUi1NFkG2GwRivC5B4hdAId5+g1 +KqWaWKLH+f2imKcNKeVh9Dxmp+z9mFquYelqTDmNKvADWX5URuzZNpOB5kOuw098 
+TzyvhH9GdR3jEP3aIdxSmJp9jwnibyj7hKgHSq8UoQSy01GRtThQ3wxyLm6f2fH4 +11wmFyDNbpHFpL7o5kOU3SOjsvvUhSbKiccIKbTCIjkYhxFfYegeV0Xj767opjMq +ytlgzeY2FTa2EoR5JKUQc9fv6+6H5yECggEAVVfnywPm8QXn+ByFDdUndZg3uEje +DGyvt1M3mIz5geyRZO8ECzgsZVzKgZC8jDB4lPKz3AGgNlUl/vyGHk6CtW6V6ePA +EXcmOkkMKJQMdopY2/cE6YlSpBGMCcnfothgL0HXxYoop4xVjb74k7tFcNrIDoRx +zp9dSalgxx9aMeaURRbMWf8AhWLZUAjJ/359M1SmcNW619SL3p8Q95Nptvdiltww +lWOCkBdgkjW0mel+Mi2+gY8UPmgNBMPrJ1z9b7b7529YCv5Oci8ABn/N202nhjCp +LupADooNknOMLDyqwRorEv4g6wRjuPIYTIhI9fO5ranu089x+mmGU2tCBw== +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem b/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem new file mode 100644 index 00000000..ee72c0c6 --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFETCCAvugAwIBAgIQJ+iLgsp9gA0DmROqW+tHFzALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQxNloXDTE4MDUxMDIwNTQxNlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDHR/A6uiQ9X/Xh5ivmdjRr5XVr1D7+fU9Qu6ohArqtBuJsLr6t2RBTS9w6PIAf +xjQSMSFlrm/CY+hbfBMSgm9NeH23o3kYCgoEPhP/634A45W5xwUFno388U8/NHK7 +qwzSP1ezKXfXNvzuo1mZhT08aVdGMOrZUcZZZl8R3RPcIRw9XDSfXKVkMluH6egk +8iLdOxdIdRS58DeSI09FskWe3cIZ5kJmMqnKoIbYSJCVVeYPO0RFlIBi+zpdVyI/ +r9LG0r0plRdz/HJevbOitU2y93S1s9NWMNEkOFU1PFJmsF3ZzNqJFCySj00y/Hcs +jPULYwIxYdqcv16cTNmd3P6FegvuzLJLjNuGaLJGc1antv+p62P7ZdE3DyprFuxs +MJgDL9+NjDaIzoamFf0Uv7K3F7hxrrAHfvm1CMUOyQLg9J6Wl4mLsOy2ZhCbdNFs +T6dobAUGvz4Muj9V8V5pR+nFehjmsPENSsTcs5j0e8zTWtvMFISdS+NZAkpiz0s4 +PV8DLgk5Rp1ZG2V5OnRPLMOTgK0nngc5GVaxf7OYCrFHbBJ8tL93MXNQptNFeBpV +FhjUGqVFcz+6nbFX2NsFLZnghQRs9lej4TTG33NSAYusKqhVwpYFf8CsXCcvYuU6 +RlkCYjr3PB+nX1UDa0eUGm0zOabf9O3D1VzHQBpDuzSHQwIDAQABozowODAOBgNV +HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz +dHJ5MAsGCSqGSIb3DQEBCwOCAgEAaPfAs6saij4FZIPbzAb5M6ZVvfXBg+AfH52t +p3tFsnWUJCiOh9ywsc2NcmJdleKDc4/spElFMUarHqcE1ua6EH15O5GEnHWKj8EY +PVQFrPvf30UkRGNPl8eC7afZtCNk9MLllIATAzBr5Z1i+psV7MmgBKpbZ4B0TnhR +GXNT60QaCJ9RfUuc2z7RHJNo9XTn3Q44X7TFj+P3jHOWzTf8y6Mz6saTy2bugIUy +AfRgRgq/bB8hRjrazg55FIlrMv7dr3J0cIuqmaHfsw7Q2ECMCXW8oQXMBzfuIT0n +sG4u0oVxdNx4OdHsAubGjjwNDhxJvN5j8+YFqZMu03i8LbyamTwsrZg2C3QrRUq8 +SujQEEB+AmO0lpuJ24FsOOYVSYCpLy2ugrKOr2NUqbiBKZs8uBh6RGACfunMZlEw +4BntohiO7oZ5gjvhGZNUEqzMChw7knvVjZ+DkhFk9yE4qIL7VsJSUNI2ZJym/Xeq +jr/oT8CpP8/mFZspa6DFciPfhGLQqKcaZZohL7461pOYWY5C2vsJNR2ucBZzTFvD +BiN/rMnIGFrxUscCCje6RLmrsZ3Lb7bfhB3W6kwzLRfr/XEygAzx6S2mlOM34kqF +HFpKrg9TtLIpYLAKAIfuNbrLaNP1UKh7iLarhDz/qDcvRka/qJTzLD3eLeGXefAP +KjJ1S7s= +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem b/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem new file mode 100644 index 00000000..d20bda0c --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAx0fwOrokPV/14eYr5nY0a+V1a9Q+/n1PULuqIQK6rQbibC6+ +rdkQU0vcOjyAH8Y0EjEhZa5vwmPoW3wTEoJvTXh9t6N5GAoKBD4T/+t+AOOVuccF +BZ6N/PFPPzRyu6sM0j9Xsyl31zb87qNZmYU9PGlXRjDq2VHGWWZfEd0T3CEcPVw0 +n1ylZDJbh+noJPIi3TsXSHUUufA3kiNPRbJFnt3CGeZCZjKpyqCG2EiQlVXmDztE +RZSAYvs6XVciP6/SxtK9KZUXc/xyXr2zorVNsvd0tbPTVjDRJDhVNTxSZrBd2cza +iRQsko9NMvx3LIz1C2MCMWHanL9enEzZndz+hXoL7syyS4zbhmiyRnNWp7b/qetj ++2XRNw8qaxbsbDCYAy/fjYw2iM6GphX9FL+ytxe4ca6wB375tQjFDskC4PSelpeJ 
+i7DstmYQm3TRbE+naGwFBr8+DLo/VfFeaUfpxXoY5rDxDUrE3LOY9HvM01rbzBSE +nUvjWQJKYs9LOD1fAy4JOUadWRtleTp0TyzDk4CtJ54HORlWsX+zmAqxR2wSfLS/ +dzFzUKbTRXgaVRYY1BqlRXM/up2xV9jbBS2Z4IUEbPZXo+E0xt9zUgGLrCqoVcKW +BX/ArFwnL2LlOkZZAmI69zwfp19VA2tHlBptMzmm3/Ttw9Vcx0AaQ7s0h0MCAwEA +AQKCAgBd61qd4vKHdn1kzNztzdHg9BDGFA7oU9iYvQlua2HdgDwgLluxhXa7Oyp8 +y9y6nOgXls4dpPuJCxsMWsqGU7DvOxVNAh9lI/4ah8NXPv5wntIG73Q/dL2Ic5Yc +vLRCHFh7klzb1HRlmsXUFmp4/yGgIil+rDlS2MZ5hdTSj3X3ricoCBfI75oHQfB/ +es7s8q1ZxKqxfHSbOUqHdlq7B0zmla8QE8RBdCkvlT5YGsMBjq1RimYfwOBNRgf4 +y8MZbt0Q1WtPeLPH9zdTzWYnDfmjmhqINEsq+PDoeCA4aciQGxjwOCrapgZnwF/q +4q+r8HbgufXjnjGw5ERLt7BsRSYynoJiTWQ3p/wZ2VLpjFtxYxoJ5/qpQvbZMgGS +Yu3FZNC6cnbOs+JWbdm7Kg93N24cBrGdk/KdEE6lz6uQq07FTSqLtPEQWePzBiuA +1wfP78b2AH6vyJKq36EfMCJK2i7rpwtNz7d9NI5kiLRDB7gesqC94WJ+psEu+ErO +w9DbTV3xdOPs4FGGrR41Hbo8emrk6smhb8+VK2odggi8i2CLAkYupMsuobBlX3CL +hyJPfWDv1aREJ1w7zWVQlJkvp5zR0oXZXpfFxjpj7Ypbp7BKxmh5+WYj8msFDfaD +8VQ+pqgPpdl6zElEq9m5koHjsHH57fMeJQ59HiWpWFur+kQx4QKCAQEA0Jnvbm7R +WypbPDInkIoPDIhyP9Pqv+wMzNfYEnVEG0GhEU/H5aE20a+Dm6u0bsmPm5lCSQsu +EvylTSL3yumQZMincNIUXcPYb2Qye/ZzJnMIibCqwMKQqi4HxCXprWhiEoGPum8A +fN0bTGgMYfM6JZ/Dh1eGsEvemeW+5tn5xZF4Lfp/vkT8v4FuHDydUF/lIx7F5MMi +VteS0hHnR1DuvxHqtysf0wy2l61LFr7mQCMYTNEyFB3ZfXqpxJmFmCqPbr4PQsIm +2rqIDw+13eeoyDpJJkdi+yzHkAYDOdAsur0vOQvK/Zj1QKz9qmC1O6L4BN5yp265 +vjSE4Orvo7btEQKCAQEA9I/afLw6lHUJ4FVL0p7dH15JSFjt7nmGHocE7Wf6Yp3G +vMp+PdGyoJ2KEQB2unnQZK1gZqUuRQLannjNl7fsIiIhHgHxMBCIiylwSUVnP868 +u9/fpJV/cSGze2zF0WAttIgXKNtXG7xMntcY2k+SAe0qjqX494KT0NGnznySt2nU +A1YlkXm6u3KCOJrBKfbtiHXFoH39sA+ihuPiV7xcETS2ZrFdAX9M422p4yDHqe/0 +dTe18wIxJNiEX4xp/HRE//cuQ5dw/Z/QmNrzgWxHbOmXVR5C90vIJRuYY9xz0tDP +LMnifSKfnG16l2gqg7zb8xsxYqSGndXWKPAeiq3/EwKCAQEAhCWQbWgcjmFFzNuE +/ubG48yoe9DW/OAft8Dg68iH7bBkxd/BpbG8VZeXiw16T1i29f5f5IAFnxeX7EbD +rTLLO1113V3ocwH3YZGa/bbBedETzo4xjc1z8asZVmQiJa1ju4+CKrvZFkDH415i +wcZgxqbwKhQDijl1+g52Ii5iMYuXE6GGPVXcu8DVrWOk0N7+/IGpIeOQJG2KYDPh +TOdzZ22FQKY8EeoS3gF0+SLUIDtbUIaR7/Z86iXD2HzdCemkVaZnaoYuMRBL0ybD +sqDn5nguEObWSII0pgN5Fa3QODhS6xOSc5brfx5X0BBVn0L9VbBJ99GIL3t71jRe +vVrL0QKCAQB+jUYZT+ncUqgWruy6g7yW89pmFqagxb/SYjn5g9m8WDq0DPDAmped +p4f/fkbx/gEJZ/I/i3BjA7QPVyHERcdqblDGz2h4X8XYhUv2jnR8P0XIznNTHo1B +BJh04PeIfgWIqveZC8+KqajYdSQGLDC40Ho6MMahha9p2mPEZRAi2x97zoNIQT6Q +qxOZqPMV/RIzkAYBI9E33w9ST/AbSHw35xgQEe23zaEC+wdzYc4QMPxF/9smcdbu +YyA0tVtO6PefoNAO5/nvNFjkEED7kwVu5X2K7Urn3w4lrZ7w5e4FhEoAukN6T4Va +lAhg+uUtIHiM12B50/tZB4N30bFsP9eDAoIBAHc7ppfpo1aDK3bDr6zTSOU4Mn1l +XrfhBJHDy2Wt9WkvWtcCtXr3sDpthaChueV+mGoKvfgWyzUoauO6HDDsRYriqaQB +cXclVjyy+3atY32Opz9rnWefQkbgTOQ+oQgOzEFhxNS+11Omc6ZZ9s31N6TZi/Yz +rgXzhGrr73DkV6uwiiwkvP8vJxg8AMWKorDIm1myr9wwlK5ogDKSku1DM/y1gvlt +4EA39fqURyqxN9o5Yq+8K1+a/smjGx95M+P8Nke4bMs1+lb7bBXbMaVpC6DLqj8B +eleOZ7adY2mS0CBuf0PNkJRNDwF1B5VDmGBJLubUtGLuUUoEyUbv66WfnUw= +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem b/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem new file mode 100644 index 00000000..ef030f74 --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE9DCCAt6gAwIBAgIQb58oJ+9SvWUCcYWA+L1oiTALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTUwMFoXDTE4MDUxMDIwNTUwMFowEzERMA8GA1UEChMIUXVpY2tUTFMwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDDmOL3EhBm4So3agPMmF0z1+/nPlrE +xoG7x0HYPk5CP3PF3TNVk3ArBPkMzge0/895a4ZEb9j+LUQEjOZa/ZwuLmSjfJSt +9xTXI1ldp8KasyzQZjC33/bUj7FGxGzgbHyJrGGBoH2W5HdswH4WzhCnGTslyiDo 
+VN4hklJ7gr+Geq3TPf8Eji+1L71MOrUyoNp7BaQBQT/gKxK0nV+ZuSk6eaiu+om7 +slp3x4bc21o7eIMmNXggJP6p9fMDctnioKhAPcm+5ADiFYSjivLeUQ85VkMTpmdU +yvq6ziK3Ls6erD+S3xLvcHYAaeu84qLd7qdPwkHMTQsDpO4vPMIwL8piMzZV+kwL +Bq+5xk5//FwnQH0pSo2Nr4vRn+DITZc3GKyGUJQoOUgAdfGNskTt8GXa4IsHn5iw +zr12vGaxb//GDm0RLHnh7NVbD8xxDHIJq+fJNFb7MdXa8v31PYebkWuaPhYt6HQC +I/D81zwcJIOGfzNITS2ifM5tvMaUXireo4pLC2v2aSY6RrPq1owlB6jGFwGwZSAF +O6rxSqWO1gLfhJLzqcw/NjWnO7nCZEs/iKgAa22K2CtTt3dDMTvSBYKdkRe/FYQC +MCa7MFJSaH85pYRzoDN4IuVpvROrtuQmlI47oZzb64uCPoA4A8AN+k8iysqITsgK +1m8ePPXhbu4YlwIDAQABozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYI +KwYBBQUHAwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQsDggIBALSgrCdEQd3I +vb/FNkNZkAwdjfBD6j7ZtPBwvjEiiyNTx9hOLBGvbey7kr0HtW0KkLWsdRmCc+3z +ev9I5VjDOtpiqrvuAA1wRBaL3UzGyj/eFjPJpvkfJi8zjkIZ2y18QG3yJ6Eqy6dD +0aIQAHl9hkXMOVrf364gf0p7EoOGtSlfQ56yIGDPTFKKiy+Al0S42p17lhI4coz9 +zGXE1/SiNeZgdsk4zHDqhzzBp8foZuSL1sGcIXHkG8RtqZ1WvCyIPYRyIjIKZcXd +JCEM//EbgDzQ7VE/jm+hIlYfPjM7fmUzsfii+bIrp/0HGEU3HN++LsA6eQOwWPa/ +PrxKPP36EVXb72QK8C3lmz6y+CHhuuAm0C1b1qmYVEs4eRE21S8eB2l0KUlfOecf +xZ1LWp1agKt6fGqRgcsR3/qO27l8W7hlbFNPeOTgr6NQQkEMRW5OxbnZ58ULXqr3 +gWh8Na3D4+3j53035UBBQUMmeeFfWCvtr5n0+6BTAi62Cwwu9QQQBM/2f9/9K+B7 +cW0xPYtczm+VwJL6/rDtNN9xPWitxab1dkZp2XcHG3VWtYvE2R2EtEoKvvCLPggx +zcafsZfcD1wlvtQF7YjykGJnMa0SB0GBl9SQtvGc8PkP39yXHqXZhIoo3fp4qm9v +RfbdpOr8p/Ks34ZqQPukFwpM1s/6aicF +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem b/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem new file mode 100644 index 00000000..5aee3ea5 --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAw5ji9xIQZuEqN2oDzJhdM9fv5z5axMaBu8dB2D5OQj9zxd0z +VZNwKwT5DM4HtP/PeWuGRG/Y/i1EBIzmWv2cLi5ko3yUrfcU1yNZXafCmrMs0GYw +t9/21I+xRsRs4Gx8iaxhgaB9luR3bMB+Fs4Qpxk7Jcog6FTeIZJSe4K/hnqt0z3/ +BI4vtS+9TDq1MqDaewWkAUE/4CsStJ1fmbkpOnmorvqJu7Jad8eG3NtaO3iDJjV4 +ICT+qfXzA3LZ4qCoQD3JvuQA4hWEo4ry3lEPOVZDE6ZnVMr6us4ity7Onqw/kt8S +73B2AGnrvOKi3e6nT8JBzE0LA6TuLzzCMC/KYjM2VfpMCwavucZOf/xcJ0B9KUqN +ja+L0Z/gyE2XNxishlCUKDlIAHXxjbJE7fBl2uCLB5+YsM69drxmsW//xg5tESx5 +4ezVWw/McQxyCavnyTRW+zHV2vL99T2Hm5Frmj4WLeh0AiPw/Nc8HCSDhn8zSE0t +onzObbzGlF4q3qOKSwtr9mkmOkaz6taMJQeoxhcBsGUgBTuq8UqljtYC34SS86nM +PzY1pzu5wmRLP4ioAGttitgrU7d3QzE70gWCnZEXvxWEAjAmuzBSUmh/OaWEc6Az +eCLlab0Tq7bkJpSOO6Gc2+uLgj6AOAPADfpPIsrKiE7ICtZvHjz14W7uGJcCAwEA +AQKCAgBmIvmxpp8l+cH/ub5OIenZXpMJn4fqZPXtxjjd4HshIN0ln0JlF15lOG2M +gDGKFGKUts8gAX/ACocQETtgnDnn65XlwPIqfXFGflD2FNoLyjBGinY6LhtIF9is +aXmpHz1Q7tDjzZiHKLor8cBlzCjp+MToEMpqR5bO1Qd5M2cro/gM7Lyz9kN3S3x/ +x9BCpbgwsVtYxGfEePmFkwAO159tx4WMCYvOlW2kSm5j+a7+iwmA9D7MGkVZHvNN +A7Y/H0F8ekdVBN5pMG9Yrv/vk0ht2lugcS5YGr4eufFq0mhWdv+jhBTxLzqPMMBG +m9oMJcj8XyXYtwpfVsqBpCqK2wnEnv4Kf0rZzBU706nI2mjPXx3dL+5qo8uQJKNp +mxoS7vmHV5RIJgtdvyzGFHjdfu1leowhV+Jy9jWzMw4wlnmlxsfDECf5RoSf2XGt +SMGJb0dbJKae+W4MfNUFsgAWMZk3h3KF8AHHe44OpDbQeoh3JLnkWSG0oS3CR0ch +68TzCy0SZZEZ9IS+I6o5WVpwWfReCQ5NjaKipWcpiJvxg+Dc3GG3QcVXVz2gGrJh +g9v0v6eyeOJ32QGvvP7THFBjpWeeHlXT8Yz6hFcPrvErEZ029TEmhg8aLWBGfsR5 +F1bazdbqvOSEB9vBAAaddNnEDG9Rl8EmC4WdsnVgYUw1J7gfQQKCAQEA9DKjD9eN +CrUl/2YfSm2WaFhYci74XcHDVeAXN2SbOyKbMIqk3aOFQNRAsLRnwPkdiLtuqeDK +BafrfLTCORHfFdYKnUzmuekESNLckN9VyLztgqOqNAv3LD6GmSHBaJEnUyniLxOL +k0wMEBIsEQw7Fb4blM2REYJ3ZzMFmgpRGnIX8KcxhW9XgSrnqMLO0w6mVxjo7xzd +813nCcNrGhySM/EzKYtTNHy2JZmMH5QFHaIj67KklO7VeEZX5U+TKveBEt4rmHqs +Ndqf/djSs8vu1xse82pVRxMXX2mhDLmwjUjPgWYxUL92jTiyJhE7GxpVB/yHgF1J 
+Ecb47MDahoNKkQKCAQEAzQzvCOA77IQpGO117GcMqcjzwEUhTytojFBT+s5mHfzk +dYr5TyN86LQ7/GktNoJ5oRvD9UGRSul1OGneivqtWj6mv6/Zvfzacx8NXY4MYFs1 +nEr3Gr7orVFIzD2x7nMPG2G6+J6hZ1rhpnZ9Hprf5G41sHIJxHJ9wTYSUAmFh8bv +FiJqF90bSq/E5hgjphtX6wZWeZYspzc/5+IrJ/I0nqoxV3rjUy234zlzKJAV10sV +5oVgxLLQsUujkHp/Da+ij2aTv1Za8y3PTJ7MAHYgdpa5l/4U9MnPUEB2REBCI1NN +TqxnViwD0xgsvxfb79UzruLJIYOCKvfOumlutXM0pwKCAQBUIMXQhWAP2kyW6mXJ +TGvO0vDVlZz3H/Pdt/AHo19fRhLU7E7UFKupo/YNanl8H9au7nO3jrvKqwkT02o+ +IwwKB81sV7v9PGu/cvWN64MwPvZMVXojqCOlWH0icGCjV66Glh1YPpGNU1ushbYs +wVvxp6b04sUhlSLxqMA7S2aZh8j7nX4QDEXHODLLDyIV0Cw6QViuV/GXEDiyQmK5 +gjJUNrp7i4ZExNozpeyCTIpepSde4hKVRJrCbumFFJ8M5GvRRj0asNh3TTRlTbd5 +Pb6w2KUXEwECFW+t7UQQkEBkzDrAx6YhvXRoPqoRN0p3keDNeZBtBrZPq47CccZX +JRAhAoIBAQCJ/DgnGu54XP9i/PksGrSU1Nvi+SJPKoDyW2QIFTj22SXMS7c1oEYA +OrlbRFPeqLK8zfhyZKsnZC8zxVqy37okTqDbwbSfezZt3emamWqOtRJAmNnsr6fY +aii4+JNySQ9Td9LgV69549iRso7EN6iPCfMrR7J29izWBlMQdTfchOyDUqleYbZp +7hpsVLY4o5HoYJ10uLBX3oAsxTARc5YhZ5pIqjOr18o1KIXsN/napXaZaAwUkdiK +VsI9CZHSXezg30Bxs+UEXEFx6DKT5Oo3o3pFZAAqMlxGPvrXNv7K0tXlKXNos7nn +Jg+GkMG6hRiAibCb0umXjKcbHrQXeu1lAoIBAQDcRBsy6cSQXMSu6+PyroH+2DvR +4fuiMfSrUNjv+9K8gtjYLetrZUvRuFT3A/KzDrALKyTFTGJk3YlpTaC5iNKd+QK8 +6RBJRYeYV16fpX/2ak/8MgfB2gdW//pE0eFjw+qakcUXmo957m7dUXbOrw1VNAET +LVBeVnml+2FUj0sTXGwHKcINPR78PWZ8i1ka9DptnKLBNeA+x+OMkCA88RJJegSk +/rgDDV52z4fJHQJh9TZ7zLAXxGgDFYLGPTrdeT+D/owuPXF+SCP4pMtVnwbQgH9G +dfQ9bb7G14vAeu/kEkFdGFEreS09BOTRbTfzFjFdDvSV4JyOXe9i/sUDxf9R +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem b/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem new file mode 100644 index 00000000..53ce0742 --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIFCTCCAvOgAwIBAgIQPjclBRGzhznCybQzYRQTyjALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQ1NloXDTE4MDUxMDIwNTQ1NlowJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV +BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALBe +C9O6es+mStDowUd1kiM59VkinzzdHgE24LvKmGxQ6fDnnT8S9L7iyzoxcJWlvSHu +pfyZWvij0ZIyRZ288XemTEFYq25RK0IBGGdvYz9OqT2R3lblBQrXDjSi9WG16sGx +60MGhM2egGMqFQ5DBfT16IKw00+RjFgCVzJ8T64Lzw82E0e7d6hl39SPybY+uvrt +SID60hYGmXoOdaiC9qquivks67BZprGNfORrvyJNrCFI6oKUFWHrQ1PpGd2tOwJN +1P3gkkS8pVlAif6ZQkAf+zuKu+l4j5tKxGlJAkJsafVJDLOxBKutUj5msha0g6uJ +gFXUe0+G8hkNcEjd8XqUUCwIOY3pdv4WsydKBk3uH9zMnYolw53k1q0ObvoY1NXf +beMxHQAtDi7nfQGlae9cuuOSymy95WuvzfhZFKdPWUe8lKN9QXFIWVoCFnOm8T3P ++FNCUE+p8DIWkal6Ul9THi/Kz4p7twyrUp1LwT5EtSaJ3iGAmB9I+8/1vmZT3lPi +nX8P+iVGM5yOUnptrsFm0bUcJWRD6iaTK1KxpH+Is4h2kiUiSz1tC/9bKaJYN2o9 +oy7q7+ZVfHSmIxLo8ZFYsaZBcXi96cKuuPMR3X4ISPwKDqP5irxU/QbI+YQBMshg +G4b0BNoMZ50g30r3Hcsifw4pzPQF0RDMOBeCiOi3AgMBAAGjNjA0MA4GA1UdDwEB +/wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq +hkiG9w0BAQsDggIBAFuS/VrMNUwEMyUIktDyna5ExYh/FDOE+YEYf8tsX7dSMhRK +wE560/AcVZcbKKAZOnZ/262a++8tparsQt+bXBJ2so6YUqsFDNdOLCI2aShjWDRe +TNhqmLIO3FNsLRKp96WHVz+jFoiECsoYfKn0jgqTqxx+7nWFqgBaNSlF5cbCgLCH +jQV1uQhzsw/Mh/32hXAidkv/nLeLf7FbKq08hgthtoP+XstlzZ5BxkPodjb8XWXG +DSS49SWX971GHa1apwMKfxVGSppxn18ZwEmW1BUfQBNxtMytqA9DK3+xuoUdXkB0 +iJbm3Jc10JSRju8iyL121Xt6f8O33paVz/ndDJIWztUOjnItc89rxHsINPt5+cUt +jix8ohwmHGDrK7ZooXBvotvmGT/xhPr2eHUAG8JuSJ/Cr09UUOwUEigz4CfgJOHm +XukdzjOkb4r7lhNmVeGqrjRol1W0Wsc1NGH++J6xdkIeQ+i23kHwFHfQWV/J69tm +rOn2N+qijtmbIy9YfVcrFDtUtEAzXylZ2StCVQNofd0M7tXNdrUL8yAFwlrhWGJV +wsSP++1xH2Ie6Diupy8z6rbP383HmnmVPU/UecgLrlX2lEpt/UZkkX1Xm+6PhrrT 
+HDeeULvqtUP3PD8wS0C873Pl9GXOKISqf0HKEIDUAVZhQOsGFqiZH0388M4L +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem b/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem new file mode 100644 index 00000000..636dc828 --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEAsF4L07p6z6ZK0OjBR3WSIzn1WSKfPN0eATbgu8qYbFDp8Oed +PxL0vuLLOjFwlaW9Ie6l/Jla+KPRkjJFnbzxd6ZMQVirblErQgEYZ29jP06pPZHe +VuUFCtcONKL1YbXqwbHrQwaEzZ6AYyoVDkMF9PXogrDTT5GMWAJXMnxPrgvPDzYT +R7t3qGXf1I/Jtj66+u1IgPrSFgaZeg51qIL2qq6K+SzrsFmmsY185Gu/Ik2sIUjq +gpQVYetDU+kZ3a07Ak3U/eCSRLylWUCJ/plCQB/7O4q76XiPm0rEaUkCQmxp9UkM +s7EEq61SPmayFrSDq4mAVdR7T4byGQ1wSN3xepRQLAg5jel2/hazJ0oGTe4f3Myd +iiXDneTWrQ5u+hjU1d9t4zEdAC0OLud9AaVp71y645LKbL3la6/N+FkUp09ZR7yU +o31BcUhZWgIWc6bxPc/4U0JQT6nwMhaRqXpSX1MeL8rPinu3DKtSnUvBPkS1Jone +IYCYH0j7z/W+ZlPeU+Kdfw/6JUYznI5Sem2uwWbRtRwlZEPqJpMrUrGkf4iziHaS +JSJLPW0L/1spolg3aj2jLurv5lV8dKYjEujxkVixpkFxeL3pwq648xHdfghI/AoO +o/mKvFT9Bsj5hAEyyGAbhvQE2gxnnSDfSvcdyyJ/DinM9AXREMw4F4KI6LcCAwEA +AQKCAgEAnrHg/oD7ZMEC7PuifoRCHMRYCf5nPkLQbtNMYG2pvT0JY6VlDo4l/2Te +7NvzrBPYHSI55RKwkq4FMwFdNtP+imTulJYOm1MaE2gc52WI7jv/eNE6OQIWCWz8 +8Uv4dBVWyTcos8S31rTaXWBOVejlAUgMERy+5wfWOpLQlzLYF4m0pMFJk/AReUtB +nmhLXlsPsB22cag/RWZmzzcXk6tT/LzVe+R5ptLkdTsUuAxjjaBKVCDiMuDAZL1m +dah3h8oKIMab8l0SABumxKqYAKkyvbSJQUhSUYAT5+3c0cfJ6q7WoMk8TqvnwfpQ +2Klbcaa4G6+79H8e/a41RWmcMVTTpLKmwzx/iMLPswLnTFbWYCsLSsml3OpmXPhG +CKdbIWMvNMBfahZmnCP2pNcZBVY1/k/lEw25ehtnWqA7HplawT6V3gk/Bzz+3e3R +XEpioZF70ipdW5Pb3OG/tKSNDvRRjqLPk9UWlQzmedzu7XN28V/blw/CBVcMAcc0 +njwAledTuqv/wQ67dtbXdcxSPZbV/Rq7y3OmpgK6RWLIFzzpOPW5gULqUZfrnxtv +StxVnlZXhFoymodFobTi7AYibsLaXLkunZWXEwFwdtLfFHznfHq/rHfBmna1lcKW +MgWRqsbaoCsqHC1nc0E4llFkn3zqGYgMQNBeqNfX6cIPI/eQzPECggEBAOk0TP8N +edIFENOrzUtpH1fB3k15heeA84SeBhj8t/xrphR3o+IVO/GtMtq9hVLeYFVPwWCi +Mmy4KhwNUOtFeCSX4MbpiXvoPEjL3QF+Sv95HsEWsT1iBQIN4aoV0ZSv48YsRczs +tLjr96hADLTMfpCwyRq9r8XVF/hnx7vqOoOC/J1kteRhjOWRnutFpdAMfkFgzUa9 +1unmDHsDifcT+vpxief9Q9zK9xMYvYmwFkBUjOlhC7WchZC20nrwvM+A2mMBpeLB +WSRWsYeOqW8zcQNGdWuXXMKxsYHwv9tXbANVWxs1gz4x7BxcFoN5poIFrnT+eImY +EwhGrKR6jZsKF00CggEBAMGbdZU0+yvxL2tAul5RGAqv9xhdUV4eg8warTQ8/RWt +8Vef2wllBYnP48rXNDovb7ZNOjMBdjIWZ2zq2McMtHqpzP+zWQWaNT8/7Zi24JTL +y4G75kZdGgTPG2Y71seZoZGxfOu4gf7cLKOqxiHYrNDHEDl5Pi13tJD/8qf6hYm6 +K3yALSv+QlM3mk+5oueKQ7Lj9rV81YomYSV5+K+WhszhvLmuxv0necOLKapeBWvL +GQ5038yAq3PFdu0HXzyA6L8YdusP1d3sqwQvLbi8KAMXJCeT6WZXGYgX2Rjfbuih +ZHUaE7Ac0EsJfMuOowSkS7oXuT81k64ngCoq5KZC5hMCggEBAKYkt9JiZG8HYuSb +GsjmHQllup5RvN+hVF0gRFHbAq2YeBtO3Xg+DpXxAjErIuhWPCWri6bwB6LDVmTj +68milaTke6TbTzLy0rg+Xbcppf766LlCFIYZ5l1/TE3j+4vGAC347sW/wkWY/7lj +4GmS43zsJmqhx6/XUJuOPJOZnZSCZr0vuhL6mOoZZDJUTXy62dx0PetvZsT/O9cM +P2fDWWTCLTEVlBqik4KMdsS4qjGsyzOeCzyZReNDDRO/nZTsRSqSSwARJhQom5Rr +RDVQXeyqbw93KAQhmshroBSB5Rc+4YiyCE3wPTo7NWL38XPi3lbF0VSd/rk/uNH5 +6hcSCmUCggEAIPHjQFCTrRaNiyKolAQYozjuQyceAXYP11tyvcDjEB1ZRB/flemq +15iYmpukN4J67/qUPLmy8zL8xnvwB28SBw195MUQEPP8u5aVR7dW3/sN1jWzKaYO +F2Nmti7YjX6HD9Oz/iiXdlbhAbi9nmTQg3ZcPGt1OSd1gncLQ6pNrvIPFFB7X1EU +2DRN/eMI5X2Rp49DG/7yF2AQh+AJgVeL+LEw/CfRlKJzBeNYY7U8Fuuoh907eAEt +K7YeVpc6jYEiGeJ/2eAH9IuhTkT48saRyHTXoiR5QwDvR0lHmAPtS4irH4Igd4dv +qlUi90B+XPvYJwKCc08aojf2hzZlUiVwIQKCAQEAraCoWea8hLFchxmAiBt7joIg +nNK7a3LOHYxT1gB9H+PoVqTmzGVTeZpD8Jnis/UHmDhRYuUGqvFIefjAWbz0jJAN +t6RMAozENCG1PoeXHf1gt2wspv14kza+8jSdpzNrzZgPZdb7Wh1UEqUkiRYwn87f +C7DHknqCj9S2qq0DFXYz15JNPVrbvD+ZLBFJhTAjppS9TuYQVLf8JPYHpLRio/9A 
+dMsyOz1VA2RRYN0u/u4ccxiN45K3PbVMCeDPbWXNm8G75YKQ7LnIuehMB1qkZy6N +MOnNGp3l/ZkFK0JsW/pZqTQ2FqSkb0+ttTFApFI3qB04sc4s0uKPI9fa0OQtUw== +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem b/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem new file mode 100644 index 00000000..08c393d1 --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFETCCAvugAwIBAgIQCnqSQalw9ytL5bHLgHZe+jALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw +NTQ1OFoXDTE4MDUxMDIwNTQ1OFowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQC9gvT3cwz0Ih9+7Ilv5lc15HsEiSmEMh4nOMZrSaamKgf/ydCiGo3DQapr/XDK +FHMLKq68AxwfOlzmEFQ4d9umpPMQ2+4GBr0VG23ppGtQApIPHgD06S0/CeHmDIXN +FXcKybPX/9KbgNkXBWbbJkJy0EcsdP8VJD50Q2WH89nvgEYJNFuKEELD3iGY6bBF +jeDTle5jYA7CgBKvD2avn31g24Qhxn8n8/BdYO/U0kw0qmoy1veLOjCAW0os0jkM +NlKrFpyHEWNj5B3X6UgSn8EGQaVbDq17PrQwlHJYU4nih0TnD1OwvBnFnd27pXjr +68eGA6Zc2BbUnhNGhppWHZ46LpPxpIbafSOH3ES3N/MZAfcUKIUntLlWE2xCQgFV +TW95WeVtP/r1aWgIHu0E2Jb2eHCE+qXYqJxSU7S4DcknmmcTS69hzyHs+92Ec+7Q +m0aQFZ0dyPoYPwXMgZpTAIuXEGg/FKC1fiS/deTW37DyvB2jppehKW3RJY3uso7R +o9vs6DJx1OdU5XEq9R3n7op61N7PK8Wxmn7TVYHEZHkITVvtucZZd1FNTOrOJaNJ +UnE+FuPK1Mrff+jz666Ru4zQL0CondOamX3QR5tuNK6MTqFs87wKY25qsqz7cS27 +kHW+r7UNWbJY3/UQhaPZM78zCZa2IL1nBFUjsFvEA4rtYwIDAQABozowODAOBgNV +HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz +dHJ5MAsGCSqGSIb3DQEBCwOCAgEAHVGMyoyX4lRzWCDkUjrXkrDZzuv03M2ojW2Q +UL61ejMkTWQW8R4gKrcPHAOJAPKVfGEVOrQH3ZMyxV2HnWrJ7egrn65zOzmLbWSh +O7gdpL6YYjBr218fqJn/8HadXZa4k70JyympYOLojeWSLy3KP03U+y7AMcdE1uG6 +6HJI54ZjBoW/nEyWmMh/mfMz8EN+Mgek48Z9AVaOswbtHtDIXN7XO0jbB3DbY5Yh +prVqVLYAz4sCchGTadj+aEChF5sJkKREDvAew/njC0WGS2TmMJ+V1uVhXV6354mr +edk79YvdwzwDgeYArkprahMtn9eu1aSTfUXsmM5OP5tR4gyFV1kUmTPY1yUd/yO+ +638wV0mWtGbbf6j8dUKeUBCyt2qGg8J80OUeFdvdHMswtaUq951NApX44BinPkbK +moBVQByZ5OEcmMidFC9SqYSUwTQ7uNyWeguhCXav+l3x900YlKnUQgRUZntPwXjs +yc7MXv0j0E86Gme6G1O02zamwkRgr3qOTHu2oQOow/a24fM4HASayLR0Kegt0sh3 +rzk0HRF1mGonf1Ecyyj/3LpHVsgYSckwtJoZLOqtDMn+CKtOCEByssQfD+E9Qe07 +qMyvcwpXUpfqe3ZERbJ10m98Z88VeK/XGt9ptq7HY47n1KL6lx3oyXwZIw8pq928 +89dcqL0= +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem b/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem new file mode 100644 index 00000000..205511af --- /dev/null +++ b/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAvYL093MM9CIffuyJb+ZXNeR7BIkphDIeJzjGa0mmpioH/8nQ +ohqNw0Gqa/1wyhRzCyquvAMcHzpc5hBUOHfbpqTzENvuBga9FRtt6aRrUAKSDx4A +9OktPwnh5gyFzRV3Csmz1//Sm4DZFwVm2yZCctBHLHT/FSQ+dENlh/PZ74BGCTRb +ihBCw94hmOmwRY3g05XuY2AOwoASrw9mr599YNuEIcZ/J/PwXWDv1NJMNKpqMtb3 +izowgFtKLNI5DDZSqxachxFjY+Qd1+lIEp/BBkGlWw6tez60MJRyWFOJ4odE5w9T +sLwZxZ3du6V46+vHhgOmXNgW1J4TRoaaVh2eOi6T8aSG2n0jh9xEtzfzGQH3FCiF +J7S5VhNsQkIBVU1veVnlbT/69WloCB7tBNiW9nhwhPql2KicUlO0uA3JJ5pnE0uv +Yc8h7PvdhHPu0JtGkBWdHcj6GD8FzIGaUwCLlxBoPxSgtX4kv3Xk1t+w8rwdo6aX +oSlt0SWN7rKO0aPb7OgycdTnVOVxKvUd5+6KetTezyvFsZp+01WBxGR5CE1b7bnG +WXdRTUzqziWjSVJxPhbjytTK33/o8+uukbuM0C9AqJ3Tmpl90EebbjSujE6hbPO8 +CmNuarKs+3Etu5B1vq+1DVmyWN/1EIWj2TO/MwmWtiC9ZwRVI7BbxAOK7WMCAwEA +AQKCAgEArwqno2uEGnbuKnjmVRInmWKpcb4TN8Rm74lUVEKaB76o1s0cxK3MJP6h +H8/e/vg2bqkE7indLsbkiaepcuLaYijXTcomJzDQMw+7zOOOLz/Aku/+qDg8D47c 
+NXV5nLzn0HIPiEIF0JYJbmcR4veKxqu0Ic8K0QdCHHcn75P/x2Tuy4+twW9Vi76/ +v5KRuxzZ/fTtVKKj32kWWNXb3fltgCoh+GR0jH2XlVh1DVkVBEwnfT/rM5ESvWwU +riOah7ohT1+6QlOAPwKzwfr6FCG000eNKPb8q+p12q0ylHzMzgxtSxJwFb0X/Nzc +snaboyWLjDAQ2I7LP6WmXizznvkKbE9PjW6UGYQ+2XApqp+Hn8tSC5I/gIDlBOOa +psJ4fkRjr8n5+CbHbGmQG736hZcZY/z10TtOQbxeeeuri6oDQ62D4Z07GpWCG2EG +sUakaytZnJkIN79PpfthPZwtStlG0KVs0i5wggH/iP2h0yAmvJ64ZRIqdvuE/aBn +sdfRRlYUqmFOJsVQgtUWGKGS4WIxrGaclzT1TNxCKdiAk0glXe3sDtvBni6qDW07 +iJzEXxrsLw6MiCDhHfDeae5JYeJXK0HlCfYHXgRmEnDFTGw8rBzwz3eXvPqZ5YNt +j+31uHSwQjgOgEgSrXeTmRfLZsytKqndhBB/yBFmzZNrswXGackCggEBAMN5RSdW +t+WWl8ghDGz/CN1oRjnk298/6L7ijluKGRgG+igwBEy+5m1EGPJT+Y5LEH4TiQJe +Oc2XjQuM7zABX7JWWk1cL8Zlv3kcmR0lg4BWs7wDkoU1HYRkMP57vubtxFzFOsNa +momivEniZ/eonHm3yv0VHeenH9j3mhJ3mVDIpkH+7uhn3++c0zYh96NkjfQi1/jF +P35eSAt7FgHDOt37fWXwtGeYFRN4P19ZUNiIvZwT6Q1gmegRO8BYoW6cSbLWe5Cp +abaULds46+mjM4zJhCZRFkdWHbzP4bZHocSmwGsqcpABJ6SASTVim02GGhBIt1nj +fkqa10X1c5Sqis0CggEBAPgxFKSHccfIJ6yht2HJjysRLN/IHlO9hDcpCWUrISN/ +hxu1uxfNGmUkd0H8zDO/O+QAJXLE8PPPB77pJniIJ8kK4swwsfufN6bNV9XJldjA +o4vXnYt9Mpuky9cugD8LocUgWQzzKY5Y875TC4s3ldzyKQVm0NO+Wz1U3gfjogEC +d7PhTk7Ba/ZjVGtL7HuZxlL+/TgZklMks2ulSTW2y8aqVJxaZXv0H0NX/+fpDHYw +iljr+iqbiqZvjrzySryb0XWMtzP9oyDEXTXrWnG+kOIZW3BZ9FLxT+Te7zZ2PUbK +vTkObsKxc8WVHIYgkt/OwWSwbYLre5nvFPvgEFbQuO8CggEAeZTlUXmbul63m5AK +xYS/w88G1x2lMK/0mT4bY4562zoDwJlVI1MdydqwVZGryDiiUnjeIC3xcBISdZu8 +bjR8jFUvp6xuPs2ska0bA0kBCQNkmc3zBY2rBVy4KKFZdRNwrm8yhK3HL1KcIKyF +FEK4yPBrfozy49JMecxP9aqUHu4eky/4828gl04JBUONXwC9VpuRj7dILdaAozt0 +zbXb2JSDQ7O60jCC83A4oprQMU6j+P9dVqe+Mtz9OD8ocb8eC/FiO/FTwm9aMl+u +RMzw1GHHI3oODGLg7j6y2oilcsZxKnblePJu8N+mKWFizY5aicRg3rUkKU00Ftx7 +fn2xBQKCAQB7w7Xgie5SStyF+KrC58kuF8WB3oBJEAOjoiIeQhCnbAvK5KfkqZHV +CAc0b8TAtUc/XldOUSk6222oZQmbJ4J3fac1Xb8TlAUjd9iqMnk3+nBT5vSYP5mC +Bf7kUjr/tWQ5MfVWQNfjNTZvHWhvRwvDfzq3h9rxDEbhYbXKx1fdGwboO51aJpgY +6NWLH/RQepFsh91sIUxXi8CxGF5Wm84oRn4k7esXkdgZNAPX+N4O/guvZhV9M81D +S/QpAsYEIcuky8P7+Cplx6YXokKa4AXNyglQEHuG9PD7V7SAOxw5dhZAIpNXIThz +OfVcaVf0pVzJQjWKCLW9QHz9UXG0aScfAoIBACdr3exVMUaMOtrAnf2NXj3hecgg +WsWRBOOaSW5wXGt1JNlfYS4zwViafIwy31DNuMg22rj5Mq0TYMtuNYto5RoLSXeB +uupUrENEBnt7JFrwI/NyWG0uYMM3G2MtGHGYooaT9+++wT96QxJZr5fwFYF1ddf6 +5tFeKtNt5VM0wWBHO1voUhQ0TCaooatJjMuAB0+WbvwniKxmdbqQDzY+6myBBUVo +gBJ0JxhxakLm1XGFHDtPCsAAHX/uZ4CvH2uyWqAlx6iwGXd0wwEGrbIRB/BundxR +oaJWswU4FIPAgOpy2LEJKnvzhcmVFtZWD5sFXA1/83QvpceLTFTD5uioBPU= +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/test.passwd b/contrib/docker-integration/nginx/test.passwd new file mode 100644 index 00000000..4e55de81 --- /dev/null +++ b/contrib/docker-integration/nginx/test.passwd @@ -0,0 +1 @@ +testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. 
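The test.passwd entry above is an APR1 (Apache MD5) hash of the testuser/passpassword credentials that tls.bats logs in with on the basic-auth ports. As a sketch, assuming the standard Apache htpasswd utility is available, an equivalent file can be regenerated as follows (the salt is random, so the resulting hash will not match the checked-in one byte for byte):

    # Emit an APR1-hashed basic-auth entry suitable for nginx's auth_basic_user_file.
    # -n prints to stdout, -b takes the password on the command line, -m selects MD5 (apr1).
    htpasswd -nbm testuser passpassword > test.passwd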
diff --git a/contrib/docker-integration/nginx/v1/search.json b/contrib/docker-integration/nginx/v1/search.json new file mode 100644 index 00000000..3da8f1ad --- /dev/null +++ b/contrib/docker-integration/nginx/v1/search.json @@ -0,0 +1 @@ +{"num_pages":1,"num_results":2,"page":1,"page_size": 25,"query":"testsearch","results":[{"description":"","is_automated":false,"is_official":false,"is_trusted":false, "name":"dmcgowan/testsearch-1","star_count":1000},{"description":"Some automated build","is_automated":true,"is_official":false,"is_trusted":false,"name":"dmcgowan/testsearch-2","star_count":10}]} diff --git a/contrib/docker-integration/plugins.bats b/contrib/docker-integration/plugins.bats new file mode 100644 index 00000000..faeae0a7 --- /dev/null +++ b/contrib/docker-integration/plugins.bats @@ -0,0 +1,103 @@ +#!/usr/bin/env bats + +# This tests pushing and pulling plugins + +load helpers + +user="testuser" +password="testpassword" +base="hello-world" + +# TODO: Create plugin image +function create_plugin() { + plugindir=$(mktemp -d) + + cat - > $plugindir/config.json < /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 + done + + trap "kill $DOCKER_PID" EXIT +fi + +distimage=$(docker build -q $DIR/../..) +fullversion=$(git describe --match 'v[0-9]*' --dirty='.m' --always) +distversion=${fullversion:1} + +echo "Testing image $distimage with distribution version $distversion" + +# Pull needed images before invoking golem to get pull time +# These images are defined in golem.conf +time docker pull nginx:1.9 +time docker pull golang:1.6 +time docker pull dmcgowan/token-server:simple +time docker pull dmcgowan/token-server:oauth +time docker pull distribution/golem-runner:0.1-bats + +time docker pull docker:1.9.1-dind +time docker pull docker:1.10.3-dind +time docker pull docker:1.11.1-dind +time docker pull docker:1.12.3-dind +time docker pull docker:1.13.0-rc5-dind + +golem -cache $cachedir \ + -i "golem-distribution:latest,$distimage,$distversion" \ + -i "golem-dind:latest,docker:1.9.1-dind,1.9.1" \ + -i "golem-dind:latest,docker:1.10.3-dind,1.10.3" \ + -i "golem-dind:latest,docker:1.11.1-dind,1.11.1" \ + -i "golem-dind:latest,docker:1.12.3-dind,1.12.3" \ + -i "golem-dind:latest,docker:1.13.0-rc5-dind,1.13.0" \ + $DIR + diff --git a/contrib/docker-integration/tls.bats b/contrib/docker-integration/tls.bats new file mode 100644 index 00000000..fdd6c176 --- /dev/null +++ b/contrib/docker-integration/tls.bats @@ -0,0 +1,108 @@ +#!/usr/bin/env bats + +# Registry host name; should be set to a non-localhost address and match +# the DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d + +load helpers + +hostname="localregistry" +base="hello-world" +image="${base}:latest" + +# Login information; should match values in nginx/test.passwd +user=${TEST_USER:-"testuser"} +password=${TEST_PASSWORD:-"passpassword"} + +function setup() { + tempImage $image +} + +@test "Test valid certificates" { + docker_t tag $image $hostname:5440/$image + run docker_t push $hostname:5440/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test basic auth" { + basic_auth_version_check + login $hostname:5441 + docker_t tag $image $hostname:5441/$image + run docker_t push $hostname:5441/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test basic auth with build" { + basic_auth_version_check + login $hostname:5441 + + image1=$hostname:5441/$image-build + image2=$hostname:5441/$image-build-2 + + tempImage
$image1 + + run docker_t push $image1 + [ "$status" -eq 0 ] + has_digest "$output" + + docker_t rmi $image1 + + run build $image2 $image1 + echo $output + [ "$status" -eq 0 ] + + run docker_t push $image2 + echo $output + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client auth" { + docker_t tag $image $hostname:5442/$image + run docker_t push $hostname:5442/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client with invalid certificate authority fails" { + docker_t tag $image $hostname:5443/$image + run docker_t push $hostname:5443/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with TLS client auth" { + basic_auth_version_check + login $hostname:5444 + docker_t tag $image $hostname:5444/$image + run docker_t push $hostname:5444/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test unknown certificate authority fails" { + docker_t tag $image $hostname:5445/$image + run docker_t push $hostname:5445/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with unknown certificate authority fails" { + run login $hostname:5446 + [ "$status" -ne 0 ] + docker_t tag $image $hostname:5446/$image + run docker_t push $hostname:5446/$image + [ "$status" -ne 0 ] +} + +@test "Test TLS client auth to server with unknown certificate authority fails" { + docker_t tag $image $hostname:5447/$image + run docker_t push $hostname:5447/$image + [ "$status" -ne 0 ] +} + +@test "Test connection fails rather than falling back to SSLv3" { + docker_t tag $image $hostname:5448/$image + run docker_t push $hostname:5448/$image + [ "$status" -ne 0 ] +} + diff --git a/contrib/docker-integration/token.bats b/contrib/docker-integration/token.bats new file mode 100644 index 00000000..fb7adc74 --- /dev/null +++ b/contrib/docker-integration/token.bats @@ -0,0 +1,129 @@ +#!/usr/bin/env bats + +# This tests contacting a registry using a token server + +load helpers + +user="testuser" +password="testpassword" +base="hello-world" + +@test "Test token server login" { + login localregistry:5554 +} + +@test "Test token server bad login" { + docker_t_login -u "testuser" -p "badpassword" localregistry:5554 + [ "$status" -ne 0 ] + + docker_t_login -u "baduser" -p "testpassword" localregistry:5554 + [ "$status" -ne 0 ] +} + +@test "Test push and pull with token auth" { + login localregistry:5555 + image="localregistry:5555/testuser/token" + build $image "$base:latest" + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + + docker_t rmi $image + + docker_t pull $image +} + +@test "Test push and pull with token auth wrong namespace" { + login localregistry:5555 + image="localregistry:5555/notuser/token" + build $image "$base:latest" + + run docker_t push $image + [ "$status" -ne 0 ] +} + +@test "Test oauth token server login" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5557 +} + +@test "Test oauth token server bad login" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + docker_t_login -u "testuser" -p "badpassword" -e $email localregistry:5557 + [ "$status" -ne 0 ] + + docker_t_login -u "baduser" -p "testpassword" -e $email localregistry:5557 + [ "$status" -ne 0 ] +} + +@test "Test oauth push and pull with token auth" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/testuser/token" + build $image "$base:latest" + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + + docker_t rmi $image + + docker_t pull $image +}
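The token tests above exercise the standard Docker bearer-token handshake entirely through the docker CLI. For orientation, the same flow can be sketched by hand; in this sketch the /token path, the service name, and the use of jq are illustrative assumptions rather than values taken from this configuration — the authoritative realm and service are whatever the registry returns in its WWW-Authenticate challenge:

    # 1. Probe the registry; the 401 challenge advertises the token realm and service.
    curl -sik https://localregistry:5555/v2/ | grep -i www-authenticate

    # 2. Ask the token server for a scoped bearer token (the endpoint path and
    #    service name below are assumptions for illustration).
    token=$(curl -sk -u testuser:testpassword \
        "https://localregistry:5556/token/?service=registry&scope=repository:testuser/token:pull" \
        | jq -r .token)

    # 3. Present the bearer token to the registry API.
    curl -sk -H "Authorization: Bearer $token" \
        "https://localregistry:5555/v2/testuser/token/tags/list"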
+ +@test "Test oauth push and build with token auth" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/testuser/token-build" + tempImage $image + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + + docker_t rmi $image + + image2="localregistry:5558/testuser/token-build-2" + run build $image2 $image + echo $output + [ "$status" -eq 0 ] + + run docker_t push $image2 + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + +} + +@test "Test oauth push and pull with token auth wrong namespace" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/notuser/token" + build $image "$base:latest" + + run docker_t push $image + [ "$status" -ne 0 ] +} + +@test "Test oauth with v1 search" { + version_check docker "$GOLEM_DIND_VERSION" "1.12.0" + + run docker_t search localregistry:5600/testsearch + [ "$status" -ne 0 ] + + login_oauth localregistry:5600 + + run docker_t search localregistry:5600/testsearch + echo $output + [ "$status" -eq 0 ] + + echo $output | grep "testsearch-1" + echo $output | grep "testsearch-2" +} diff --git a/contrib/docker-integration/tokenserver-oauth/.htpasswd b/contrib/docker-integration/tokenserver-oauth/.htpasswd new file mode 100644 index 00000000..0bbf5740 --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/.htpasswd @@ -0,0 +1 @@ +testuser:$2y$05$T2MlBvkN1R/yICNnLuf1leOlOfAY0DvybctbbWUFKlojfkShVgn4m diff --git a/contrib/docker-integration/tokenserver-oauth/Dockerfile b/contrib/docker-integration/tokenserver-oauth/Dockerfile new file mode 100644 index 00000000..5b607132 --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/Dockerfile @@ -0,0 +1,8 @@ +FROM dmcgowan/token-server@sha256:5a6f76d3086cdf63249c77b521108387b49d85a30c5e1c4fe82fdf5ae3b76ba7 + +WORKDIR / + +COPY ./.htpasswd /.htpasswd +COPY ./certs/auth.localregistry.cert /tls.cert +COPY ./certs/auth.localregistry.key /tls.key +COPY ./certs/signing.key /sign.key diff --git a/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert b/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert new file mode 100644 index 00000000..4144ca16 --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgagAwIBAgIRAKhhQMnqZx+hkOmoUYgPb+kwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzFaFw0xOTAxMTIwMDQyMzFaMDAxETAPBgNVBAoTCFF1aWNrVExTMRswGQYD +VQQDExJhdXRoLmxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD1tUf1EghBlIRrE83yF4zDgRu7vH2Jo0kygKJUWtQQe+DfXyjjE/fg +FdKnnoEjsIeF9hxNbTt0ldDz7/n97pbMhoiXULi9iq4jlgSzVL2XEAgrON0YSY/c +Lmmd1KSa/pOUZr2WMAYPZ+FdQfE1W7SMNbErPefBqYdFzpZ+esAtvbajYwIjl8Vy +9c4bidx4vgnNrR9GcFYibjC5sj8syh/OtbzzqiVGT8YcPpmMG6KNRkausa4gqpon +NKYG8C3WDaiPCLYKcvFrFfdEWF/m2oj14eXACXT9iwp8r4bsLgXrZwqcpKOWfVRu +qHC8aV476EYgxWCAOANExUdUaRt5wL/jAgMBAAGjPzA9MA4GA1UdDwEB/wQEAwIA +oDAMBgNVHRMBAf8EAjAAMB0GA1UdEQQWMBSCEmF1dGgubG9jYWxyZWdpc3RyeTAL +BgkqhkiG9w0BAQsDggEBABxPGK9FdGDxcLowNsExKnnZvmQT3H0u+Dux1gkp0AhH +KOrmx3LUENUKLSgotzx133tgOgR5lzAWVFy7bhLwlPhOslxf2oEfztsAMd/tY8rW +PrG2ZqYqlzEQQ9INbAc3woo5A3slN07uhP3F16jNqoMM4zRmw6Ba70CluGKT7x5+ +xVjKoWITLjWDXT5m35PnsN8CpBaFzXYcod/5p9XwCFp0s+aNxfpZECCV/3yqIr+J +ALzroPh43FAlG96o4NyYZ2Msp63newN19R2+TgpV4nXuw2mLVDpvetP7RRqnpvj/ +qwRgt5j4hFjJWb61M0ELL7A9fA71h1ImdGCvnArdBQs= +-----END CERTIFICATE----- 
diff --git a/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key b/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key new file mode 100644 index 00000000..4c499bb2 --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA9bVH9RIIQZSEaxPN8heMw4Ebu7x9iaNJMoCiVFrUEHvg318o +4xP34BXSp56BI7CHhfYcTW07dJXQ8+/5/e6WzIaIl1C4vYquI5YEs1S9lxAIKzjd +GEmP3C5pndSkmv6TlGa9ljAGD2fhXUHxNVu0jDWxKz3nwamHRc6WfnrALb22o2MC +I5fFcvXOG4nceL4Jza0fRnBWIm4wubI/LMofzrW886olRk/GHD6ZjBuijUZGrrGu +IKqaJzSmBvAt1g2ojwi2CnLxaxX3RFhf5tqI9eHlwAl0/YsKfK+G7C4F62cKnKSj +ln1UbqhwvGleO+hGIMVggDgDRMVHVGkbecC/4wIDAQABAoIBAQCrsjXKRwOF8CZo +PLqZBWPT6hBbK+f9miC4LbNBhwbRTf9hl7mWlImOCTHe95/+NIk/Ty+P21jEqzwM +ehETJPoziX9BXaL6sEHnlBlMx1aEjStoKKA3LJBeqAAdzk4IEQVHmlO4824IreqJ +pF7Njnunzo0zTlr4tWJVoXsAfv5z9tNtdkxYBbIa0fjfGtlqXU3gLq58FCON3mB/ +NGc0AyA1UFGp0FzpdEcwTGD4InsXbcmsl2l/VPBJuZbryITRqWs6BbK++80DRhNt +afMhP+IzKrWSCp0rBYrqqz6AevtlKdEfQK1yXPEjN/63QLMevt8mF/1JCp//TQnf +Z6bIQbAhAoGBAP7vFA0PcvoXt9MXvvAwrKY1s6pNw4nWPG27qY1/m+DkBwP8IQms +4AWGv1wscZzXJYTvaLO5/qjmGUj50ohcVEvyZJioh1pKXA8Chxvd6rBA/O/Lj5E0 +3MOSA5Q0gxJ0Mhv0zGbbyN5fY8D8zhxoqQP4LoW+UdZG2Oi6JxsQ9c9dAoGBAPa8 +U3bGuM5OGA9EWP7mkB/VnjDTL1aEIN3cOHbHIKwH/loxdYcNMBE7vwxV1CzgIzXT +wsL0iE15fQdK938u0+um8aH5QtbWNI8tdk1XVjEC/i3C7N6WVUutneCKUDb4QxiB +9OvWCbNNiN+xTKBBM93YlwO3GYfrW9Pmm9q1+hg/AoGBALJlUS22gun50PxaIJZq +KVcCO2DQnCYHki/j48mN4+HjD/m85M2lePrFCYIR48syTyIQer9SR5+frVAA6k/b +9G1VCQo+3MDVSkiCp1Nb3tBKGfYgB65ARMBinDiI6rPuNeaUTrkn0g+yxtaU0hLV +Nnj9omia/x+oYj+xjI4HN0xNAoGARy92dSJIV104m88ATip/EnAzP6ruUWu1f8z1 +jW9OAdQckjEK03f+kjpGmGx61qekAPejjVO3r4KJi/0ZAtyjz61OsYiUvB748wYO +x6mW+HUAmHtQk7eTzE2+6vV8xx9BXGTCIPiTu+N2xfMFRIcLS8odZ7j/6LMCv1Qd +SzCNg0kCgYBaNlEs4pK1VxZZpEWwVmFpgIxfEfxLIaGrek6wBTcCn/VA2M0oHuez +mlMio8VY0yWPBJz30JflDiTmYIvteLPMHT0N0J6isiXLhzJSFI4+cAMLE2Q5v8rz +W+W5/L8YZeierW0qJat1BrgStaf5ZLpiOc9pKBSwycydPH5BfVdK/A== +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/tokenserver-oauth/certs/ca.pem b/contrib/docker-integration/tokenserver-oauth/certs/ca.pem new file mode 100644 index 00000000..0b585b3f --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/certs/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9TCCAd+gAwIBAgIQNS9SaFSFBN7Zvwjalrf2DDALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw +NDIzMFoXDTE5MDExMjAwNDIzMFowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV +BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu/Pf +fQ7VUTSXs12PRyrLDVDz7kPDbGNTt0vF7FYDmTTGOU3i62xZNOGuxBezAiVSV5A3 +lopwsv4OH7DRtSaPn+XCt1JDALna2WrjT0MshypMd5o2c3jmGUfAKf5gjizgIoEl +d4e5aqEBuOQP+QCEde+8p8N1buQW+zMy9srM2O/7BFMIaQ07CWLlj3hIiF+L5rKD +L6dWtKT7INRmRwpuZZnThEWnBSNgayrWek6G0i3y8QYTfVA1SwA+H3grJxy5NrLp +GYXSmu2509mu0QAHhx05t1rJhwhFz/4sG7j8AggYeDXEqfQ/VIb/bvnW9bD+vrQ2 +ZnICvxnzNMYBx23BkQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/ +BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALvTi6E44Fltu83dFLVEj0kLtusI/TTH +Tw6upoB5pRG+7A75w0Ii8bvvd2tNpBOg+L+80xyIFqaNkXhLKTN4lgtd7WiCuyb/ +w1BEuF/+RjCXhu6wQ/63ab46d6ctaQ1zjxlU2rQLQXQFALI8ntyn/TELc01HYkr2 +x3NHlbnBNlgI2CKXPeUBzvBylTCcdYGwoa+2ZPdIsFjle2aCIBoZ+WNZlIbFwgLh +XCHwcbviC+thjqOneJpJZmRW9AxQ638ki6iGItdrJewCN/1dcL2KKjxnC5VHbpne +SOjEPNXihY08Brl8myhFNtRRKZ55MJIYzDtVQSkCaT91Q3XX9tSZadY= +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert 
b/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert new file mode 100644 index 00000000..105acc4f --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfugAwIBAgIQN7rT95eAy75c4n6/AsDJODALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw +NDIzMloXDTE5MDExMjAwNDIzMlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDLi75QEkl/qekcoOJlNv9y1IXvrbU2ssl4ViJiZRjWx+/CkyCCOyf9YUpAgRLr +Pskqde2mwhuNP8yBlOBb17Sapz7N3+hJi5j9vLBAFcamPeF3PqxjFv7j5TKkRmSI +dFYQclREwMUd3qEH322KkqOnsEEfdmCgFqWORe+QR5AxzxQP3Pnd4OYH1yZCh0MQ +P2pJgrxxf2I5I/m1AUgoHV1cdBbCv9LGohJPpMtwPC0dJpgMFcnf6hT37At236AY +V437HiRruY7iPWkYFrSPWpwdslJ32MZvRN5RS163jZXjiZ7qWnQOiiDJfXe4evB/ +yQLN4m0qVQxsMz7rkY7OsqaXAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQAyUb3EuMaOylBeV8+4KeBiE4lxykDOwLLSk3jXRsVVtfJpX3v8l5vwo/Jf +iG8tzzz+7uiskI96u3TsekUtVkUxujfKevMP+369K/59s7NRmwwlFMyB2fvL14B2 +oweVjWvM/8fZl6irtFdbJFXXRm7paKso5cmfImxhojAwohgcd4XTVLE/7juYa582 +AaBdRuIiyL71MU9qa1mC5+57AaSLPYaPKpahemgYYkV1Z403Kd6rXchxdQ8JIAL8 ++0oYTSC+svnz1tUU/V5E5id9LQaTmDN5iIVFhNpqAaZmR45UI86woWvnkMb8Ants +4aknwTwY3300PuTqBdQufvOFDRN5 +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key b/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key new file mode 100644 index 00000000..cb69a0f3 --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAy4u+UBJJf6npHKDiZTb/ctSF7621NrLJeFYiYmUY1sfvwpMg +gjsn/WFKQIES6z7JKnXtpsIbjT/MgZTgW9e0mqc+zd/oSYuY/bywQBXGpj3hdz6s +Yxb+4+UypEZkiHRWEHJURMDFHd6hB99tipKjp7BBH3ZgoBaljkXvkEeQMc8UD9z5 +3eDmB9cmQodDED9qSYK8cX9iOSP5tQFIKB1dXHQWwr/SxqIST6TLcDwtHSaYDBXJ +3+oU9+wLdt+gGFeN+x4ka7mO4j1pGBa0j1qcHbJSd9jGb0TeUUtet42V44me6lp0 +DoogyX13uHrwf8kCzeJtKlUMbDM+65GOzrKmlwIDAQABAoIBAF6vFMp+lz4RteSh +Wm8m1FGAVwWVUpStOlcGClynFpTi0L88XYT3K7UMStQSttBDlqRv0ysdZF+ia+lj +bbKLdvHyFp8CJzX/AB4YZgyJlKzEYFtuBhbaHZu5hIMyU5W+OELSTCznV0p7w4C8 +CGLLr+FTdhfCo1QU9NJn6fa9s2/XRdSClBBalAHYs0ZS7ZckaF/sPiC/VapfBMet +qjJXNYiO6pXYriGWKF9zdAMfk2CM0BVWbnwQZkMSEQirrTcJwm3ezyloXCv2nywK +/VzbUT1HJVyzo5oAwTd0MwDc2oEMiFzlfO028zY4LDltpia+SyWvFi5NaIqzFESc +yLgJacECgYEA3jvH+ZQHQf42Md8TCciokaYvwWIKJdk4WRjbvE5cBZekyXAm7/3b +/1VFDKsy2RPlfmfHP3wy9rlnjzsRveB5qaclgS8aI67AYsWd/yRgfRatl7Ve9bHl +LY6VM5L/DZTxykcqivwjc77XoDuBfUKs6tyuSLQku+FOTbLtNYlUCHECgYEA6nkR +lkXufyLmDhNb3093RsYvPcs1kGaIIGTnz3cxWNh485DgsyLBuYQ5ugupQkzM8YSt +ohDTmVpggqjlXQxCg0Zw8gkEV0v8KsLGjn1CuTJg/mBArXlelq1FEeRAYC9/YfOz +ocXegHV7wDKKtcraNZFsEc7Z0LwbC9wtzSFG44cCgYASkMX1CLPOhJE8e1lY0OWc +PVjx++HDJbF6aAQ7aARyBygiF/d4xylw3EvHcinuTqY2eC8CE7siN3z6T0H9Ldqc +HLWaZDf30SqLVd0MKprQ+GsKKIHFXtY5hxbZ1ybtmIrWjjl0oPnJOqFC5pW7xC0z +9bmtozcKZxkmjpMYjN9zUQKBgQCqV6KLRerqunPgLfhE1/qTlE+l2QflDFhBEI3I +j5NuNHZKnSphehK7sHAv1WD2Jc2OeRGb+BWCB8Ktqf5YBxwbOwW7EQnyUeW1OyP9 +SMs8uHj21P6oCNDLLr5LLUQHnPoyM1aBZLstICzziMR1JhY5bJjSpzBfEQmlKCSu +LkrN6QKBgQCRXrBJRUxeJj7wCnCSq0Clf9NhCpQnwo4bEx8sKlj8K8ku8MvwQwoM +3KfWc7bOl6A2/mM/k4yoHtBMM9X9xqYtsgeFhxuiWBcfTmTxWh73LQ48Kgbrgodt +6yTccnjr7OtBidD85c6lgjAUgcL43QY8mlw0OhzXAZ2R5HWFp4ht+w== +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/tokenserver-oauth/certs/signing.cert b/contrib/docker-integration/tokenserver-oauth/certs/signing.cert new file mode 100644 index 
00000000..45166f2d --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/certs/signing.cert @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9TCCAd+gAwIBAgIRAJ6IIisIZxL86oe3oeoAgWUwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzNaFw0xOTAxMTIwMDQyMzNaMBMxETAPBgNVBAoTCFF1aWNrVExTMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVnt +QCQQWjkWVpOz8L2A29BRvv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40r +Xd/MNKAn0kFsSb6BIKmUwPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH3 +9mAmV+x0kTzFR/78ZDD5CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+K +IY8FQ6yN6l27MK56wlj4hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTA +NwpsIBfdoUVbI+j2ivn+ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQAB +ozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADALBgkqhkiG9w0BAQsDggEBAJq3JzTLrIWCF8rHLTTm1icE9PjOO0sV +a1wrmdJ6NwRbJ66dLZ/4G/NZjVOnce9WFHYLFSEG+wx5YVUPuJXpJaSdy0h8F0Uw +hiJwgeVsGg7vcf4G6mWHrsauDOhylnD31UtYPX1Ao/jcntyyf+gCQpY1J/B8l1yU +LNOwvWLVLpZwZ4ehbKA/UnDXgA+3uHvpzl//cPe0cnt+Mhrgzk5mIMwVR6zCZw1G +oVutAHpv2PXxRwTMu51J+QtSL2b2w3mGHxDLpmz8UdXOtkxdpmDT8kIOtX0T5yGL +29F3fa81iZPs02GWjSGOfOzmCCvaA4C5KJvY/WulF7OOgwvrBpQwqTI= +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/tokenserver-oauth/certs/signing.key b/contrib/docker-integration/tokenserver-oauth/certs/signing.key new file mode 100644 index 00000000..47562540 --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/certs/signing.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVntQCQQWjkWVpOz8L2A29BR +vv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40rXd/MNKAn0kFsSb6BIKmU +wPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH39mAmV+x0kTzFR/78ZDD5 +CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+KIY8FQ6yN6l27MK56wlj4 +hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTANwpsIBfdoUVbI+j2ivn+ +ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQABAoIBAD2tiNZv6DImSXo+ +sq0qQomEf/OBvWPFMnWppd/NK/TXa+UPHO4I0MjoDJqIEC6zCU+fC4d2St1MmlrT +/X85vPFRw8mGwGxfHeRSLxEVj04I5GDYTWy0JQUrJUk/cTKp2/Bwm/RaylTyFAM0 +caYrSpvD69vjuTDFr7PDxM6iaqM53zK/vD8kCe81z+wN0UbAKsLlUOKztjH6SzL9 +uVOkekIT/j3L2xxyQhjmhfA3TuCP4uNK/+6/4ovl9Nj4pQsFomsCk4phgqy9SOm1 +4yufmVd8k7J3cppMlMPNc+7tqe2Xn593Y8QT95y3yhtkFECF70yBw64HMDDpA22p +5b/JV9ECgYEA9H4RBXOwbdjcpCa9H3mFjHqUQCqNme1vOSGiflZh9KBCDKgdqugm +KHpvAECADie0p6XRHpxRvufKnGFkJwedfeiKz51T+0dqgPxWncYT1TC+cAjOSzfM +wBpUOcAyvTTviwGbg4bLanHo4remzCbcnRvHQX4YfPFCjT9GhsU+XEUCgYEA5ubz +IlSu1wwFJpoO24ZykGUyqGUQXzR0NrXiLrpF0764qjmHyF8SPJPv1XegSxP/nUTz +SjVfJ7wye/X9qlOpBY8mzy9qQMMKc1cQBV1yVW8IRZ7pMYQZO7qmrZD/DWTa5qWt +pqSbIH2FKedELsKJA/SBtczKjspOdDKyh0UelsMCgYA7DyTfc0XAEy2hPXZb3wgC +mi2rnlvcPf2rCFPvPsCkzf2GfynDehaVmpWrsuj8Al1iTezI/yvD+Mv5oJEH2JAT +tROq+S8rOOIiTFJEBHAQBJlMCOSESPNdyD5mQOZAzEO9CWNejzYd/WwrL//Luut5 +zBcC3AngTIsuAYXw0j6xHQKBgQDamkAJep7k3W5q82OplgoUhpqFLtlnKSP1QBFZ +J+U/6Mqv7jONEeUUEQL42H6bVd2kqUikMw9ZcSVikquLfBUDPFoDwOIZWg4k0IJM +cgHyvGHad+5SgLva/oUawbGWnqtXvfc/U4vCINPXrimxE1/grLW4xp/mu8W24OCA +jIG/PQKBgD/Apl+sfqiB/6ONBjjIswA4yFkEXHSZNpAgcPwhA+cO5D0afEWz2HIx +VeOh5NjN1EL0hX8clFW4bfkK1Vr0kjvbMUXnBWaibUgpiVQl9O9WjaKQLZrp4sRu +x2kJ07Qn6ri7f/lsqOELZwBy95iHWRdePptaAKkRGxJstHI7dgUt +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml b/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml new file mode 100644 index 00000000..ed6b3ea5 --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml @@ -0,0 +1,15 @@ +version: 0.1 +loglevel: debug 
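+# Plain-HTTP variant of the oauth registry config: the registry listens on
+# :5000 without TLS and delegates auth to the token server realm on :5559.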
+storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 +auth: + token: + realm: "https://auth.localregistry:5559/token/" + issuer: "registry-test" + service: "registry-test" + rootcertbundle: "/etc/docker/registry/tokenbundle.pem" diff --git a/contrib/docker-integration/tokenserver-oauth/registry-config.yml b/contrib/docker-integration/tokenserver-oauth/registry-config.yml new file mode 100644 index 00000000..630ef057 --- /dev/null +++ b/contrib/docker-integration/tokenserver-oauth/registry-config.yml @@ -0,0 +1,18 @@ +version: 0.1 +loglevel: debug +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 + tls: + certificate: "/etc/docker/registry/localregistry.cert" + key: "/etc/docker/registry/localregistry.key" +auth: + token: + realm: "https://auth.localregistry:5559/token/" + issuer: "registry-test" + service: "registry-test" + rootcertbundle: "/etc/docker/registry/tokenbundle.pem" diff --git a/contrib/docker-integration/tokenserver/.htpasswd b/contrib/docker-integration/tokenserver/.htpasswd new file mode 100644 index 00000000..0bbf5740 --- /dev/null +++ b/contrib/docker-integration/tokenserver/.htpasswd @@ -0,0 +1 @@ +testuser:$2y$05$T2MlBvkN1R/yICNnLuf1leOlOfAY0DvybctbbWUFKlojfkShVgn4m diff --git a/contrib/docker-integration/tokenserver/Dockerfile b/contrib/docker-integration/tokenserver/Dockerfile new file mode 100644 index 00000000..762330cd --- /dev/null +++ b/contrib/docker-integration/tokenserver/Dockerfile @@ -0,0 +1,8 @@ +FROM dmcgowan/token-server@sha256:0eab50ebdff5b6b95b3addf4edbd8bd2f5b940f27b41b43c94afdf05863a81af + +WORKDIR / + +COPY ./.htpasswd /.htpasswd +COPY ./certs/auth.localregistry.cert /tls.cert +COPY ./certs/auth.localregistry.key /tls.key +COPY ./certs/signing.key /sign.key diff --git a/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert b/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert new file mode 100644 index 00000000..4144ca16 --- /dev/null +++ b/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgagAwIBAgIRAKhhQMnqZx+hkOmoUYgPb+kwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzFaFw0xOTAxMTIwMDQyMzFaMDAxETAPBgNVBAoTCFF1aWNrVExTMRswGQYD +VQQDExJhdXRoLmxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD1tUf1EghBlIRrE83yF4zDgRu7vH2Jo0kygKJUWtQQe+DfXyjjE/fg +FdKnnoEjsIeF9hxNbTt0ldDz7/n97pbMhoiXULi9iq4jlgSzVL2XEAgrON0YSY/c +Lmmd1KSa/pOUZr2WMAYPZ+FdQfE1W7SMNbErPefBqYdFzpZ+esAtvbajYwIjl8Vy +9c4bidx4vgnNrR9GcFYibjC5sj8syh/OtbzzqiVGT8YcPpmMG6KNRkausa4gqpon +NKYG8C3WDaiPCLYKcvFrFfdEWF/m2oj14eXACXT9iwp8r4bsLgXrZwqcpKOWfVRu +qHC8aV476EYgxWCAOANExUdUaRt5wL/jAgMBAAGjPzA9MA4GA1UdDwEB/wQEAwIA +oDAMBgNVHRMBAf8EAjAAMB0GA1UdEQQWMBSCEmF1dGgubG9jYWxyZWdpc3RyeTAL +BgkqhkiG9w0BAQsDggEBABxPGK9FdGDxcLowNsExKnnZvmQT3H0u+Dux1gkp0AhH +KOrmx3LUENUKLSgotzx133tgOgR5lzAWVFy7bhLwlPhOslxf2oEfztsAMd/tY8rW +PrG2ZqYqlzEQQ9INbAc3woo5A3slN07uhP3F16jNqoMM4zRmw6Ba70CluGKT7x5+ +xVjKoWITLjWDXT5m35PnsN8CpBaFzXYcod/5p9XwCFp0s+aNxfpZECCV/3yqIr+J +ALzroPh43FAlG96o4NyYZ2Msp63newN19R2+TgpV4nXuw2mLVDpvetP7RRqnpvj/ +qwRgt5j4hFjJWb61M0ELL7A9fA71h1ImdGCvnArdBQs= +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/tokenserver/certs/auth.localregistry.key b/contrib/docker-integration/tokenserver/certs/auth.localregistry.key new file mode 100644 index 00000000..4c499bb2 --- /dev/null +++ 
b/contrib/docker-integration/tokenserver/certs/auth.localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA9bVH9RIIQZSEaxPN8heMw4Ebu7x9iaNJMoCiVFrUEHvg318o +4xP34BXSp56BI7CHhfYcTW07dJXQ8+/5/e6WzIaIl1C4vYquI5YEs1S9lxAIKzjd +GEmP3C5pndSkmv6TlGa9ljAGD2fhXUHxNVu0jDWxKz3nwamHRc6WfnrALb22o2MC +I5fFcvXOG4nceL4Jza0fRnBWIm4wubI/LMofzrW886olRk/GHD6ZjBuijUZGrrGu +IKqaJzSmBvAt1g2ojwi2CnLxaxX3RFhf5tqI9eHlwAl0/YsKfK+G7C4F62cKnKSj +ln1UbqhwvGleO+hGIMVggDgDRMVHVGkbecC/4wIDAQABAoIBAQCrsjXKRwOF8CZo +PLqZBWPT6hBbK+f9miC4LbNBhwbRTf9hl7mWlImOCTHe95/+NIk/Ty+P21jEqzwM +ehETJPoziX9BXaL6sEHnlBlMx1aEjStoKKA3LJBeqAAdzk4IEQVHmlO4824IreqJ +pF7Njnunzo0zTlr4tWJVoXsAfv5z9tNtdkxYBbIa0fjfGtlqXU3gLq58FCON3mB/ +NGc0AyA1UFGp0FzpdEcwTGD4InsXbcmsl2l/VPBJuZbryITRqWs6BbK++80DRhNt +afMhP+IzKrWSCp0rBYrqqz6AevtlKdEfQK1yXPEjN/63QLMevt8mF/1JCp//TQnf +Z6bIQbAhAoGBAP7vFA0PcvoXt9MXvvAwrKY1s6pNw4nWPG27qY1/m+DkBwP8IQms +4AWGv1wscZzXJYTvaLO5/qjmGUj50ohcVEvyZJioh1pKXA8Chxvd6rBA/O/Lj5E0 +3MOSA5Q0gxJ0Mhv0zGbbyN5fY8D8zhxoqQP4LoW+UdZG2Oi6JxsQ9c9dAoGBAPa8 +U3bGuM5OGA9EWP7mkB/VnjDTL1aEIN3cOHbHIKwH/loxdYcNMBE7vwxV1CzgIzXT +wsL0iE15fQdK938u0+um8aH5QtbWNI8tdk1XVjEC/i3C7N6WVUutneCKUDb4QxiB +9OvWCbNNiN+xTKBBM93YlwO3GYfrW9Pmm9q1+hg/AoGBALJlUS22gun50PxaIJZq +KVcCO2DQnCYHki/j48mN4+HjD/m85M2lePrFCYIR48syTyIQer9SR5+frVAA6k/b +9G1VCQo+3MDVSkiCp1Nb3tBKGfYgB65ARMBinDiI6rPuNeaUTrkn0g+yxtaU0hLV +Nnj9omia/x+oYj+xjI4HN0xNAoGARy92dSJIV104m88ATip/EnAzP6ruUWu1f8z1 +jW9OAdQckjEK03f+kjpGmGx61qekAPejjVO3r4KJi/0ZAtyjz61OsYiUvB748wYO +x6mW+HUAmHtQk7eTzE2+6vV8xx9BXGTCIPiTu+N2xfMFRIcLS8odZ7j/6LMCv1Qd +SzCNg0kCgYBaNlEs4pK1VxZZpEWwVmFpgIxfEfxLIaGrek6wBTcCn/VA2M0oHuez +mlMio8VY0yWPBJz30JflDiTmYIvteLPMHT0N0J6isiXLhzJSFI4+cAMLE2Q5v8rz +W+W5/L8YZeierW0qJat1BrgStaf5ZLpiOc9pKBSwycydPH5BfVdK/A== +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/tokenserver/certs/ca.pem b/contrib/docker-integration/tokenserver/certs/ca.pem new file mode 100644 index 00000000..0b585b3f --- /dev/null +++ b/contrib/docker-integration/tokenserver/certs/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9TCCAd+gAwIBAgIQNS9SaFSFBN7Zvwjalrf2DDALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw +NDIzMFoXDTE5MDExMjAwNDIzMFowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV +BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu/Pf +fQ7VUTSXs12PRyrLDVDz7kPDbGNTt0vF7FYDmTTGOU3i62xZNOGuxBezAiVSV5A3 +lopwsv4OH7DRtSaPn+XCt1JDALna2WrjT0MshypMd5o2c3jmGUfAKf5gjizgIoEl +d4e5aqEBuOQP+QCEde+8p8N1buQW+zMy9srM2O/7BFMIaQ07CWLlj3hIiF+L5rKD +L6dWtKT7INRmRwpuZZnThEWnBSNgayrWek6G0i3y8QYTfVA1SwA+H3grJxy5NrLp +GYXSmu2509mu0QAHhx05t1rJhwhFz/4sG7j8AggYeDXEqfQ/VIb/bvnW9bD+vrQ2 +ZnICvxnzNMYBx23BkQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/ +BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALvTi6E44Fltu83dFLVEj0kLtusI/TTH +Tw6upoB5pRG+7A75w0Ii8bvvd2tNpBOg+L+80xyIFqaNkXhLKTN4lgtd7WiCuyb/ +w1BEuF/+RjCXhu6wQ/63ab46d6ctaQ1zjxlU2rQLQXQFALI8ntyn/TELc01HYkr2 +x3NHlbnBNlgI2CKXPeUBzvBylTCcdYGwoa+2ZPdIsFjle2aCIBoZ+WNZlIbFwgLh +XCHwcbviC+thjqOneJpJZmRW9AxQ638ki6iGItdrJewCN/1dcL2KKjxnC5VHbpne +SOjEPNXihY08Brl8myhFNtRRKZ55MJIYzDtVQSkCaT91Q3XX9tSZadY= +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/tokenserver/certs/localregistry.cert b/contrib/docker-integration/tokenserver/certs/localregistry.cert new file mode 100644 index 00000000..105acc4f --- /dev/null +++ b/contrib/docker-integration/tokenserver/certs/localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfugAwIBAgIQN7rT95eAy75c4n6/AsDJODALBgkqhkiG9w0BAQswJjER 
+MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw +NDIzMloXDTE5MDExMjAwNDIzMlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDLi75QEkl/qekcoOJlNv9y1IXvrbU2ssl4ViJiZRjWx+/CkyCCOyf9YUpAgRLr +Pskqde2mwhuNP8yBlOBb17Sapz7N3+hJi5j9vLBAFcamPeF3PqxjFv7j5TKkRmSI +dFYQclREwMUd3qEH322KkqOnsEEfdmCgFqWORe+QR5AxzxQP3Pnd4OYH1yZCh0MQ +P2pJgrxxf2I5I/m1AUgoHV1cdBbCv9LGohJPpMtwPC0dJpgMFcnf6hT37At236AY +V437HiRruY7iPWkYFrSPWpwdslJ32MZvRN5RS163jZXjiZ7qWnQOiiDJfXe4evB/ +yQLN4m0qVQxsMz7rkY7OsqaXAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQAyUb3EuMaOylBeV8+4KeBiE4lxykDOwLLSk3jXRsVVtfJpX3v8l5vwo/Jf +iG8tzzz+7uiskI96u3TsekUtVkUxujfKevMP+369K/59s7NRmwwlFMyB2fvL14B2 +oweVjWvM/8fZl6irtFdbJFXXRm7paKso5cmfImxhojAwohgcd4XTVLE/7juYa582 +AaBdRuIiyL71MU9qa1mC5+57AaSLPYaPKpahemgYYkV1Z403Kd6rXchxdQ8JIAL8 ++0oYTSC+svnz1tUU/V5E5id9LQaTmDN5iIVFhNpqAaZmR45UI86woWvnkMb8Ants +4aknwTwY3300PuTqBdQufvOFDRN5 +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/tokenserver/certs/localregistry.key b/contrib/docker-integration/tokenserver/certs/localregistry.key new file mode 100644 index 00000000..cb69a0f3 --- /dev/null +++ b/contrib/docker-integration/tokenserver/certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAy4u+UBJJf6npHKDiZTb/ctSF7621NrLJeFYiYmUY1sfvwpMg +gjsn/WFKQIES6z7JKnXtpsIbjT/MgZTgW9e0mqc+zd/oSYuY/bywQBXGpj3hdz6s +Yxb+4+UypEZkiHRWEHJURMDFHd6hB99tipKjp7BBH3ZgoBaljkXvkEeQMc8UD9z5 +3eDmB9cmQodDED9qSYK8cX9iOSP5tQFIKB1dXHQWwr/SxqIST6TLcDwtHSaYDBXJ +3+oU9+wLdt+gGFeN+x4ka7mO4j1pGBa0j1qcHbJSd9jGb0TeUUtet42V44me6lp0 +DoogyX13uHrwf8kCzeJtKlUMbDM+65GOzrKmlwIDAQABAoIBAF6vFMp+lz4RteSh +Wm8m1FGAVwWVUpStOlcGClynFpTi0L88XYT3K7UMStQSttBDlqRv0ysdZF+ia+lj +bbKLdvHyFp8CJzX/AB4YZgyJlKzEYFtuBhbaHZu5hIMyU5W+OELSTCznV0p7w4C8 +CGLLr+FTdhfCo1QU9NJn6fa9s2/XRdSClBBalAHYs0ZS7ZckaF/sPiC/VapfBMet +qjJXNYiO6pXYriGWKF9zdAMfk2CM0BVWbnwQZkMSEQirrTcJwm3ezyloXCv2nywK +/VzbUT1HJVyzo5oAwTd0MwDc2oEMiFzlfO028zY4LDltpia+SyWvFi5NaIqzFESc +yLgJacECgYEA3jvH+ZQHQf42Md8TCciokaYvwWIKJdk4WRjbvE5cBZekyXAm7/3b +/1VFDKsy2RPlfmfHP3wy9rlnjzsRveB5qaclgS8aI67AYsWd/yRgfRatl7Ve9bHl +LY6VM5L/DZTxykcqivwjc77XoDuBfUKs6tyuSLQku+FOTbLtNYlUCHECgYEA6nkR +lkXufyLmDhNb3093RsYvPcs1kGaIIGTnz3cxWNh485DgsyLBuYQ5ugupQkzM8YSt +ohDTmVpggqjlXQxCg0Zw8gkEV0v8KsLGjn1CuTJg/mBArXlelq1FEeRAYC9/YfOz +ocXegHV7wDKKtcraNZFsEc7Z0LwbC9wtzSFG44cCgYASkMX1CLPOhJE8e1lY0OWc +PVjx++HDJbF6aAQ7aARyBygiF/d4xylw3EvHcinuTqY2eC8CE7siN3z6T0H9Ldqc +HLWaZDf30SqLVd0MKprQ+GsKKIHFXtY5hxbZ1ybtmIrWjjl0oPnJOqFC5pW7xC0z +9bmtozcKZxkmjpMYjN9zUQKBgQCqV6KLRerqunPgLfhE1/qTlE+l2QflDFhBEI3I +j5NuNHZKnSphehK7sHAv1WD2Jc2OeRGb+BWCB8Ktqf5YBxwbOwW7EQnyUeW1OyP9 +SMs8uHj21P6oCNDLLr5LLUQHnPoyM1aBZLstICzziMR1JhY5bJjSpzBfEQmlKCSu +LkrN6QKBgQCRXrBJRUxeJj7wCnCSq0Clf9NhCpQnwo4bEx8sKlj8K8ku8MvwQwoM +3KfWc7bOl6A2/mM/k4yoHtBMM9X9xqYtsgeFhxuiWBcfTmTxWh73LQ48Kgbrgodt +6yTccnjr7OtBidD85c6lgjAUgcL43QY8mlw0OhzXAZ2R5HWFp4ht+w== +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/tokenserver/certs/signing.cert b/contrib/docker-integration/tokenserver/certs/signing.cert new file mode 100644 index 00000000..45166f2d --- /dev/null +++ b/contrib/docker-integration/tokenserver/certs/signing.cert @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9TCCAd+gAwIBAgIRAJ6IIisIZxL86oe3oeoAgWUwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzNaFw0xOTAxMTIwMDQyMzNaMBMxETAPBgNVBAoTCFF1aWNrVExTMIIBIjAN 
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVnt +QCQQWjkWVpOz8L2A29BRvv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40r +Xd/MNKAn0kFsSb6BIKmUwPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH3 +9mAmV+x0kTzFR/78ZDD5CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+K +IY8FQ6yN6l27MK56wlj4hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTA +NwpsIBfdoUVbI+j2ivn+ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQAB +ozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADALBgkqhkiG9w0BAQsDggEBAJq3JzTLrIWCF8rHLTTm1icE9PjOO0sV +a1wrmdJ6NwRbJ66dLZ/4G/NZjVOnce9WFHYLFSEG+wx5YVUPuJXpJaSdy0h8F0Uw +hiJwgeVsGg7vcf4G6mWHrsauDOhylnD31UtYPX1Ao/jcntyyf+gCQpY1J/B8l1yU +LNOwvWLVLpZwZ4ehbKA/UnDXgA+3uHvpzl//cPe0cnt+Mhrgzk5mIMwVR6zCZw1G +oVutAHpv2PXxRwTMu51J+QtSL2b2w3mGHxDLpmz8UdXOtkxdpmDT8kIOtX0T5yGL +29F3fa81iZPs02GWjSGOfOzmCCvaA4C5KJvY/WulF7OOgwvrBpQwqTI= +-----END CERTIFICATE----- diff --git a/contrib/docker-integration/tokenserver/certs/signing.key b/contrib/docker-integration/tokenserver/certs/signing.key new file mode 100644 index 00000000..47562540 --- /dev/null +++ b/contrib/docker-integration/tokenserver/certs/signing.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVntQCQQWjkWVpOz8L2A29BR +vv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40rXd/MNKAn0kFsSb6BIKmU +wPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH39mAmV+x0kTzFR/78ZDD5 +CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+KIY8FQ6yN6l27MK56wlj4 +hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTANwpsIBfdoUVbI+j2ivn+ +ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQABAoIBAD2tiNZv6DImSXo+ +sq0qQomEf/OBvWPFMnWppd/NK/TXa+UPHO4I0MjoDJqIEC6zCU+fC4d2St1MmlrT +/X85vPFRw8mGwGxfHeRSLxEVj04I5GDYTWy0JQUrJUk/cTKp2/Bwm/RaylTyFAM0 +caYrSpvD69vjuTDFr7PDxM6iaqM53zK/vD8kCe81z+wN0UbAKsLlUOKztjH6SzL9 +uVOkekIT/j3L2xxyQhjmhfA3TuCP4uNK/+6/4ovl9Nj4pQsFomsCk4phgqy9SOm1 +4yufmVd8k7J3cppMlMPNc+7tqe2Xn593Y8QT95y3yhtkFECF70yBw64HMDDpA22p +5b/JV9ECgYEA9H4RBXOwbdjcpCa9H3mFjHqUQCqNme1vOSGiflZh9KBCDKgdqugm +KHpvAECADie0p6XRHpxRvufKnGFkJwedfeiKz51T+0dqgPxWncYT1TC+cAjOSzfM +wBpUOcAyvTTviwGbg4bLanHo4remzCbcnRvHQX4YfPFCjT9GhsU+XEUCgYEA5ubz +IlSu1wwFJpoO24ZykGUyqGUQXzR0NrXiLrpF0764qjmHyF8SPJPv1XegSxP/nUTz +SjVfJ7wye/X9qlOpBY8mzy9qQMMKc1cQBV1yVW8IRZ7pMYQZO7qmrZD/DWTa5qWt +pqSbIH2FKedELsKJA/SBtczKjspOdDKyh0UelsMCgYA7DyTfc0XAEy2hPXZb3wgC +mi2rnlvcPf2rCFPvPsCkzf2GfynDehaVmpWrsuj8Al1iTezI/yvD+Mv5oJEH2JAT +tROq+S8rOOIiTFJEBHAQBJlMCOSESPNdyD5mQOZAzEO9CWNejzYd/WwrL//Luut5 +zBcC3AngTIsuAYXw0j6xHQKBgQDamkAJep7k3W5q82OplgoUhpqFLtlnKSP1QBFZ +J+U/6Mqv7jONEeUUEQL42H6bVd2kqUikMw9ZcSVikquLfBUDPFoDwOIZWg4k0IJM +cgHyvGHad+5SgLva/oUawbGWnqtXvfc/U4vCINPXrimxE1/grLW4xp/mu8W24OCA +jIG/PQKBgD/Apl+sfqiB/6ONBjjIswA4yFkEXHSZNpAgcPwhA+cO5D0afEWz2HIx +VeOh5NjN1EL0hX8clFW4bfkK1Vr0kjvbMUXnBWaibUgpiVQl9O9WjaKQLZrp4sRu +x2kJ07Qn6ri7f/lsqOELZwBy95iHWRdePptaAKkRGxJstHI7dgUt +-----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/tokenserver/registry-config.yml b/contrib/docker-integration/tokenserver/registry-config.yml new file mode 100644 index 00000000..bc269056 --- /dev/null +++ b/contrib/docker-integration/tokenserver/registry-config.yml @@ -0,0 +1,18 @@ +version: 0.1 +loglevel: debug +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 + tls: + certificate: "/etc/docker/registry/localregistry.cert" + key: "/etc/docker/registry/localregistry.key" +auth: + token: + realm: "https://auth.localregistry:5556/token/" + issuer: "registry-test" + service: "registry-test" + rootcertbundle: 
"/etc/docker/registry/tokenbundle.pem" diff --git a/contrib/token-server/errors.go b/contrib/token-server/errors.go new file mode 100644 index 00000000..bcac8ee3 --- /dev/null +++ b/contrib/token-server/errors.go @@ -0,0 +1,38 @@ +package main + +import ( + "net/http" + + "github.com/docker/distribution/registry/api/errcode" +) + +var ( + errGroup = "tokenserver" + + // ErrorBadTokenOption is returned when a token parameter is invalid + ErrorBadTokenOption = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BAD_TOKEN_OPTION", + Message: "bad token option", + Description: `This error may be returned when a request for a + token contains an option which is not valid`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorMissingRequiredField is returned when a required form field is missing + ErrorMissingRequiredField = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MISSING_REQUIRED_FIELD", + Message: "missing required field", + Description: `This error may be returned when a request for a + token does not contain a required form field`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorUnsupportedValue is returned when a form field has an unsupported value + ErrorUnsupportedValue = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "UNSUPPORTED_VALUE", + Message: "unsupported value", + Description: `This error may be returned when a request for a + token contains a form field with an unsupported value`, + HTTPStatusCode: http.StatusBadRequest, + }) +) diff --git a/contrib/token-server/main.go b/contrib/token-server/main.go new file mode 100644 index 00000000..138793c7 --- /dev/null +++ b/contrib/token-server/main.go @@ -0,0 +1,426 @@ +package main + +import ( + "context" + "encoding/json" + "flag" + "math/rand" + "net/http" + "strconv" + "strings" + "time" + + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/auth" + _ "github.com/docker/distribution/registry/auth/htpasswd" + "github.com/docker/libtrust" + "github.com/gorilla/mux" + "github.com/sirupsen/logrus" +) + +var ( + enforceRepoClass bool +) + +func main() { + var ( + issuer = &TokenIssuer{} + pkFile string + addr string + debug bool + err error + + passwdFile string + realm string + + cert string + certKey string + ) + + flag.StringVar(&issuer.Issuer, "issuer", "distribution-token-server", "Issuer string for token") + flag.StringVar(&pkFile, "key", "", "Private key file") + flag.StringVar(&addr, "addr", "localhost:8080", "Address to listen on") + flag.BoolVar(&debug, "debug", false, "Debug mode") + + flag.StringVar(&passwdFile, "passwd", ".htpasswd", "Passwd file") + flag.StringVar(&realm, "realm", "", "Authentication realm") + + flag.StringVar(&cert, "tlscert", "", "Certificate file for TLS") + flag.StringVar(&certKey, "tlskey", "", "Certificate key for TLS") + + flag.BoolVar(&enforceRepoClass, "enforce-class", false, "Enforce policy for single repository class") + + flag.Parse() + + if debug { + logrus.SetLevel(logrus.DebugLevel) + } + + if pkFile == "" { + issuer.SigningKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + logrus.Fatalf("Error generating private key: %v", err) + } + logrus.Debugf("Using newly generated key with id %s", issuer.SigningKey.KeyID()) + } else { + issuer.SigningKey, err = libtrust.LoadKeyFile(pkFile) + if err != nil { + logrus.Fatalf("Error loading key file %s: %v", pkFile, err) + } + logrus.Debugf("Loaded private key with id %s", 
issuer.SigningKey.KeyID()) + } + + if realm == "" { + logrus.Fatalf("Must provide realm") + } + + ac, err := auth.GetAccessController("htpasswd", map[string]interface{}{ + "realm": realm, + "path": passwdFile, + }) + if err != nil { + logrus.Fatalf("Error initializing access controller: %v", err) + } + + // TODO: Make configurable + issuer.Expiration = 15 * time.Minute + + ctx := dcontext.Background() + + ts := &tokenServer{ + issuer: issuer, + accessController: ac, + refreshCache: map[string]refreshToken{}, + } + + router := mux.NewRouter() + router.Path("/token/").Methods("GET").Handler(handlerWithContext(ctx, ts.getToken)) + router.Path("/token/").Methods("POST").Handler(handlerWithContext(ctx, ts.postToken)) + + if cert == "" { + err = http.ListenAndServe(addr, router) + } else if certKey == "" { + logrus.Fatalf("Must provide certificate (-tlscert) and key (-tlskey)") + } else { + err = http.ListenAndServeTLS(addr, cert, certKey, router) + } + + if err != nil { + logrus.Infof("Error serving: %v", err) + } + +} + +// handlerWithContext wraps the given context-aware handler by setting up the +// request context from a base context. +func handlerWithContext(ctx context.Context, handler func(context.Context, http.ResponseWriter, *http.Request)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := dcontext.WithRequest(ctx, r) + logger := dcontext.GetRequestLogger(ctx) + ctx = dcontext.WithLogger(ctx, logger) + + handler(ctx, w, r) + }) +} + +func handleError(ctx context.Context, err error, w http.ResponseWriter) { + ctx, w = dcontext.WithResponseWriter(ctx, w) + + if serveErr := errcode.ServeJSON(w, err); serveErr != nil { + dcontext.GetResponseLogger(ctx).Errorf("error sending error response: %v", serveErr) + return + } + + dcontext.GetResponseLogger(ctx).Info("application error") +} + +var refreshCharacters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +const refreshTokenLength = 15 + +func newRefreshToken() string { + s := make([]rune, refreshTokenLength) + for i := range s { + s[i] = refreshCharacters[rand.Intn(len(refreshCharacters))] + } + return string(s) +} + +type refreshToken struct { + subject string + service string +} + +type tokenServer struct { + issuer *TokenIssuer + accessController auth.AccessController + refreshCache map[string]refreshToken +} + +type tokenResponse struct { + Token string `json:"access_token"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn int `json:"expires_in,omitempty"` +} + +var repositoryClassCache = map[string]string{} + +func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access { + if !strings.HasSuffix(scope, "/") { + scope = scope + "/" + } + grantedAccessList := make([]auth.Access, 0, len(requestedAccessList)) + for _, access := range requestedAccessList { + if access.Type == "repository" { + if !strings.HasPrefix(access.Name, scope) { + dcontext.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name) + continue + } + if enforceRepoClass { + if class, ok := repositoryClassCache[access.Name]; ok { + if class != access.Class { + dcontext.GetLogger(ctx).Debugf("Different repository class: %q, previously %q", access.Class, class) + continue + } + } else if strings.EqualFold(access.Action, "push") { + repositoryClassCache[access.Name] = access.Class + } + } + } else if access.Type == "registry" { + if access.Name != "catalog" { + dcontext.GetLogger(ctx).Debugf("Unknown registry resource: %s",
access.Name) + continue + } + // TODO: Limit some actions to "admin" users + } else { + dcontext.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type) + continue + } + grantedAccessList = append(grantedAccessList, access) + } + return grantedAccessList +} + +type acctSubject struct{} + +func (acctSubject) String() string { return "acctSubject" } + +type requestedAccess struct{} + +func (requestedAccess) String() string { return "requestedAccess" } + +type grantedAccess struct{} + +func (grantedAccess) String() string { return "grantedAccess" } + +// getToken handles authenticating the request and authorizing access to the +// requested scopes. +func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *http.Request) { + dcontext.GetLogger(ctx).Info("getToken") + + params := r.URL.Query() + service := params.Get("service") + scopeSpecifiers := params["scope"] + var offline bool + if offlineStr := params.Get("offline_token"); offlineStr != "" { + var err error + offline, err = strconv.ParseBool(offlineStr) + if err != nil { + handleError(ctx, ErrorBadTokenOption.WithDetail(err), w) + return + } + } + + requestedAccessList := ResolveScopeSpecifiers(ctx, scopeSpecifiers) + + authorizedCtx, err := ts.accessController.Authorized(ctx, requestedAccessList...) + if err != nil { + challenge, ok := err.(auth.Challenge) + if !ok { + handleError(ctx, err, w) + return + } + + // Get response context. + ctx, w = dcontext.WithResponseWriter(ctx, w) + + challenge.SetHeaders(w) + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail(challenge.Error()), w) + + dcontext.GetResponseLogger(ctx).Info("get token authentication challenge") + + return + } + ctx = authorizedCtx + + username := dcontext.GetStringValue(ctx, "auth.user.name") + + ctx = context.WithValue(ctx, acctSubject{}, username) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, acctSubject{})) + + dcontext.GetLogger(ctx).Info("authenticated client") + + ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, requestedAccess{})) + + grantedAccessList := filterAccessList(ctx, username, requestedAccessList) + ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, grantedAccess{})) + + token, err := ts.issuer.CreateJWT(username, service, grantedAccessList) + if err != nil { + handleError(ctx, err, w) + return + } + + dcontext.GetLogger(ctx).Info("authorized client") + + response := tokenResponse{ + Token: token, + ExpiresIn: int(ts.issuer.Expiration.Seconds()), + } + + if offline { + response.RefreshToken = newRefreshToken() + ts.refreshCache[response.RefreshToken] = refreshToken{ + subject: username, + service: service, + } + } + + ctx, w = dcontext.WithResponseWriter(ctx, w) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + + dcontext.GetResponseLogger(ctx).Info("get token complete") +} + +type postTokenResponse struct { + Token string `json:"access_token"` + Scope string `json:"scope,omitempty"` + ExpiresIn int `json:"expires_in,omitempty"` + IssuedAt string `json:"issued_at,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` +} + +// postToken handles authenticating the request and authorizing access to the +// requested scopes. 
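+// It accepts an OAuth2-style form POST: grant_type ("password" or
+// "refresh_token"), service, and client_id are required; access_type may be
+// "online" (the default) or "offline", and scope is a space-separated scope
+// list. A successful response carries a signed JWT and, for offline access,
+// a refresh token held in the server's in-memory cache.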
+func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r *http.Request) { + grantType := r.PostFormValue("grant_type") + if grantType == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing grant_type value"), w) + return + } + + service := r.PostFormValue("service") + if service == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing service value"), w) + return + } + + clientID := r.PostFormValue("client_id") + if clientID == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing client_id value"), w) + return + } + + var offline bool + switch r.PostFormValue("access_type") { + case "", "online": + case "offline": + offline = true + default: + handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown access_type value"), w) + return + } + + requestedAccessList := ResolveScopeList(ctx, r.PostFormValue("scope")) + + var subject string + var rToken string + switch grantType { + case "refresh_token": + rToken = r.PostFormValue("refresh_token") + if rToken == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing refresh_token value"), w) + return + } + rt, ok := ts.refreshCache[rToken] + if !ok || rt.service != service { + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid refresh token"), w) + return + } + subject = rt.subject + case "password": + ca, ok := ts.accessController.(auth.CredentialAuthenticator) + if !ok { + handleError(ctx, ErrorUnsupportedValue.WithDetail("password grant type not supported"), w) + return + } + subject = r.PostFormValue("username") + if subject == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing username value"), w) + return + } + password := r.PostFormValue("password") + if password == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing password value"), w) + return + } + if err := ca.AuthenticateUser(subject, password); err != nil { + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid credentials"), w) + return + } + default: + handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown grant_type value"), w) + return + } + + ctx = context.WithValue(ctx, acctSubject{}, subject) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, acctSubject{})) + + dcontext.GetLogger(ctx).Info("authenticated client") + + ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, requestedAccess{})) + + grantedAccessList := filterAccessList(ctx, subject, requestedAccessList) + ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, grantedAccess{})) + + token, err := ts.issuer.CreateJWT(subject, service, grantedAccessList) + if err != nil { + handleError(ctx, err, w) + return + } + + dcontext.GetLogger(ctx).Info("authorized client") + + response := postTokenResponse{ + Token: token, + ExpiresIn: int(ts.issuer.Expiration.Seconds()), + IssuedAt: time.Now().UTC().Format(time.RFC3339), + Scope: ToScopeList(grantedAccessList), + } + + if offline { + rToken = newRefreshToken() + ts.refreshCache[rToken] = refreshToken{ + subject: subject, + service: service, + } + } + + if rToken != "" { + response.RefreshToken = rToken + } + + ctx, w = dcontext.WithResponseWriter(ctx, w) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + + dcontext.GetResponseLogger(ctx).Info("post token complete") +} diff --git a/contrib/token-server/token.go b/contrib/token-server/token.go 
new file mode 100644 index 00000000..8df5b6a8 --- /dev/null +++ b/contrib/token-server/token.go @@ -0,0 +1,220 @@ +package main + +import ( + "context" + "crypto" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "regexp" + "strings" + "time" + + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/distribution/registry/auth/token" + "github.com/docker/libtrust" +) + +// ResolveScopeSpecifiers converts a list of scope specifiers from a token +// request's `scope` query parameters into a list of standard access objects. +func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Access { + requestedAccessSet := make(map[auth.Access]struct{}, 2*len(scopeSpecs)) + + for _, scopeSpecifier := range scopeSpecs { + // There should be 3 parts, separated by a `:` character. + parts := strings.SplitN(scopeSpecifier, ":", 3) + + if len(parts) != 3 { + dcontext.GetLogger(ctx).Infof("ignoring unsupported scope format %s", scopeSpecifier) + continue + } + + resourceType, resourceName, actions := parts[0], parts[1], parts[2] + + resourceType, resourceClass := splitResourceClass(resourceType) + if resourceType == "" { + continue + } + + // Actions should be a comma-separated list of actions. + for _, action := range strings.Split(actions, ",") { + requestedAccess := auth.Access{ + Resource: auth.Resource{ + Type: resourceType, + Class: resourceClass, + Name: resourceName, + }, + Action: action, + } + + // Add this access to the requested access set. + requestedAccessSet[requestedAccess] = struct{}{} + } + } + + requestedAccessList := make([]auth.Access, 0, len(requestedAccessSet)) + for requestedAccess := range requestedAccessSet { + requestedAccessList = append(requestedAccessList, requestedAccess) + } + + return requestedAccessList +} + +var typeRegexp = regexp.MustCompile(`^([a-z0-9]+)(\([a-z0-9]+\))?$`) + +func splitResourceClass(t string) (string, string) { + matches := typeRegexp.FindStringSubmatch(t) + if len(matches) < 2 { + return "", "" + } + if len(matches) == 2 || len(matches[2]) < 2 { + return matches[1], "" + } + return matches[1], matches[2][1 : len(matches[2])-1] +} + +// ResolveScopeList converts a scope list from a token request's +// `scope` parameter into a list of standard access objects. +func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access { + scopes := strings.Split(scopeList, " ") + return ResolveScopeSpecifiers(ctx, scopes) +} + +func scopeString(a auth.Access) string { + if a.Class != "" { + return fmt.Sprintf("%s(%s):%s:%s", a.Type, a.Class, a.Name, a.Action) + } + return fmt.Sprintf("%s:%s:%s", a.Type, a.Name, a.Action) +} + +// ToScopeList converts a list of access to a +// scope list string +func ToScopeList(access []auth.Access) string { + var s []string + for _, a := range access { + s = append(s, scopeString(a)) + } + return strings.Join(s, ",") +} + +// TokenIssuer represents an issuer capable of generating JWT tokens +type TokenIssuer struct { + Issuer string + SigningKey libtrust.PrivateKey + Expiration time.Duration +} + +// CreateJWT creates and signs a JSON Web Token for the given subject and +// audience with the granted access. +func (issuer *TokenIssuer) CreateJWT(subject string, audience string, grantedAccessList []auth.Access) (string, error) { + // Make a set of access entries to put in the token's claimset. 
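+ // Duplicate grants collapse here: for example (illustrative),
+ // repository:foo/bar:push and repository:foo/bar:pull become a single
+ // ResourceActions entry whose Actions contain both "push" and "pull",
+ // in no guaranteed order, since the actions are collected from a map.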
+ resourceActionSets := make(map[auth.Resource]map[string]struct{}, len(grantedAccessList)) + for _, access := range grantedAccessList { + actionSet, exists := resourceActionSets[access.Resource] + if !exists { + actionSet = map[string]struct{}{} + resourceActionSets[access.Resource] = actionSet + } + actionSet[access.Action] = struct{}{} + } + + accessEntries := make([]*token.ResourceActions, 0, len(resourceActionSets)) + for resource, actionSet := range resourceActionSets { + actions := make([]string, 0, len(actionSet)) + for action := range actionSet { + actions = append(actions, action) + } + + accessEntries = append(accessEntries, &token.ResourceActions{ + Type: resource.Type, + Class: resource.Class, + Name: resource.Name, + Actions: actions, + }) + } + + randomBytes := make([]byte, 15) + _, err := io.ReadFull(rand.Reader, randomBytes) + if err != nil { + return "", err + } + randomID := base64.URLEncoding.EncodeToString(randomBytes) + + now := time.Now() + + signingHash := crypto.SHA256 + var alg string + switch issuer.SigningKey.KeyType() { + case "RSA": + alg = "RS256" + case "EC": + alg = "ES256" + default: + panic(fmt.Errorf("unsupported signing key type %q", issuer.SigningKey.KeyType())) + } + + joseHeader := token.Header{ + Type: "JWT", + SigningAlg: alg, + } + + if x5c := issuer.SigningKey.GetExtendedField("x5c"); x5c != nil { + joseHeader.X5c = x5c.([]string) + } else { + var jwkMessage json.RawMessage + jwkMessage, err = issuer.SigningKey.PublicKey().MarshalJSON() + if err != nil { + return "", err + } + joseHeader.RawJWK = &jwkMessage + } + + exp := issuer.Expiration + if exp == 0 { + exp = 5 * time.Minute + } + + claimSet := token.ClaimSet{ + Issuer: issuer.Issuer, + Subject: subject, + Audience: audience, + Expiration: now.Add(exp).Unix(), + NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: randomID, + + Access: accessEntries, + } + + var ( + joseHeaderBytes []byte + claimSetBytes []byte + ) + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return "", fmt.Errorf("unable to encode jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return "", fmt.Errorf("unable to encode claim set: %s", err) + } + + encodedJoseHeader := joseBase64Encode(joseHeaderBytes) + encodedClaimSet := joseBase64Encode(claimSetBytes) + encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = issuer.SigningKey.Sign(strings.NewReader(encodingToSign), signingHash); err != nil { + return "", fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature := joseBase64Encode(signatureBytes) + + return fmt.Sprintf("%s.%s", encodingToSign, signature), nil +} + +func joseBase64Encode(data []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=") +} diff --git a/coverpkg.sh b/coverpkg.sh new file mode 100755 index 00000000..25d419ae --- /dev/null +++ b/coverpkg.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# Given a subpackage and the containing package, figures out which packages +# need to be passed to `go test -coverpkg`: this includes all of the +# subpackage's dependencies within the containing package, as well as the +# subpackage itself. 
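+# Usage (illustrative):
+#   ./coverpkg.sh github.com/docker/distribution/registry/storage github.com/docker/distribution
+# which prints a comma-separated package list for `go test -coverpkg=...`.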
+DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" +echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/digestset/set.go b/digestset/set.go new file mode 100644 index 00000000..71327dca --- /dev/null +++ b/digestset/set.go @@ -0,0 +1,247 @@ +package digestset + +import ( + "errors" + "sort" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by a string +// representation of the digest as well as a short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value.
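+// For example (illustrative), if the only entry whose hex begins with "54"
+// is sha256:5432..., then Lookup("54") and Lookup("sha256:54") both return
+// that digest, while a prefix shared by several entries yields
+// ErrDigestAmbiguous.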
+func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An error will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique.
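+// For example (illustrative), with length 2 an entry such as sha256:5432...
+// that shares no two-character prefix with its neighbors shortens to "54",
+// while entries sharing the prefix "1234" are extended until they become
+// unique ("12341", "12345", "12346").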
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/digestset/set_test.go b/digestset/set_test.go new file mode 100644 index 00000000..67d46c62 --- /dev/null +++ b/digestset/set_test.go @@ -0,0 +1,371 @@ +package digestset + +import ( + "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "math/rand" + "testing" + + digest "github.com/opencontainers/go-digest" +) + +func assertEqualDigests(t *testing.T, d1, d2 digest.Digest) { + if d1 != d2 { + t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) + } +} + +func TestLookup(t *testing.T) { + digests := []digest.Digest{ + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup("54") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[3]) + + dgst, err = dset.Lookup("1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("9876") + if err == nil { + t.Fatal("Expected not found error looking up: 9876") + } + if err != ErrDigestNotFound { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: sha256:1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) + + dgst, err = dset.Lookup("sha256:12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, 
digests[0]) +} + +func TestAddDuplication(t *testing.T) { + digests := []digest.Digest{ + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + if len(dset.entries) != 8 { + t.Fatal("Invalid dset size") + } + + if err := dset.Add(digest.Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 8 { + t.Fatal("Duplicate digest insert should not increase entries size") + } + + if err := dset.Add(digest.Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 9 { + t.Fatal("Insert with different algorithm should be allowed") + } +} + +func TestRemove(t *testing.T) { + digests, err := createDigests(10) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup(digests[0].String()) + if err != nil { + t.Fatal(err) + } + if dgst != digests[0] { + t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) + } + + if err := dset.Remove(digests[0]); err != nil { + t.Fatal(err) + } + + if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { + t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) + } +} + +func TestAll(t *testing.T) { + digests, err := createDigests(100) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + all := map[digest.Digest]struct{}{} + for _, dgst := range dset.All() { + all[dgst] = struct{}{} + } + + if len(all) != len(digests) { + t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) + } + + for i, dgst := range digests { + if _, ok := all[dgst]; !ok { + t.Fatalf("Missing element at position %d: %s", i, dgst) + } + } + +} + +func assertEqualShort(t *testing.T, actual, expected string) { + if actual != expected { + t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) + } +} + +func TestShortCodeTable(t *testing.T) { + digests := []digest.Digest{ + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + 
"sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dump := ShortCodeTable(dset, 2) + + if len(dump) < len(digests) { + t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) + } + assertEqualShort(t, dump[digests[0]], "12341") + assertEqualShort(t, dump[digests[1]], "12345") + assertEqualShort(t, dump[digests[2]], "12346") + assertEqualShort(t, dump[digests[3]], "54") + assertEqualShort(t, dump[digests[4]], "6543") + assertEqualShort(t, dump[digests[5]], "64") + assertEqualShort(t, dump[digests[6]], "6542") + assertEqualShort(t, dump[digests[7]], "653") +} + +func createDigests(count int) ([]digest.Digest, error) { + r := rand.New(rand.NewSource(25823)) + digests := make([]digest.Digest, count) + for i := range digests { + h := sha256.New() + if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { + return nil, err + } + digests[i] = digest.NewDigest("sha256", h) + } + return digests, nil +} + +func benchAddNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchLookupNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + shorts := make([]string, 0, n) + for _, short := range ShortCodeTable(dset, shortLen) { + shorts = append(shorts, short) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err = dset.Lookup(shorts[i%n]); err != nil { + b.Fatal(err) + } + } +} + +func benchRemoveNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + b.StopTimer() + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for j := range digests { + if err = dset.Remove(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchShortCodeNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ShortCodeTable(dset, shortLen) + } +} + +func BenchmarkAdd10(b *testing.B) { + benchAddNTable(b, 10) +} + +func BenchmarkAdd100(b *testing.B) { + benchAddNTable(b, 100) +} + +func BenchmarkAdd1000(b *testing.B) { + benchAddNTable(b, 1000) +} + +func BenchmarkRemove10(b *testing.B) { + benchRemoveNTable(b, 10) +} + +func BenchmarkRemove100(b *testing.B) { + benchRemoveNTable(b, 100) +} + +func BenchmarkRemove1000(b *testing.B) { + benchRemoveNTable(b, 1000) +} + +func BenchmarkLookup10(b *testing.B) { + 
benchLookupNTable(b, 10, 12) +} + +func BenchmarkLookup100(b *testing.B) { + benchLookupNTable(b, 100, 12) +} + +func BenchmarkLookup1000(b *testing.B) { + benchLookupNTable(b, 1000, 12) +} + +func BenchmarkShortCode10(b *testing.B) { + benchShortCodeNTable(b, 10, 12) +} +func BenchmarkShortCode100(b *testing.B) { + benchShortCodeNTable(b, 100, 12) +} +func BenchmarkShortCode1000(b *testing.B) { + benchShortCodeNTable(b, 1000, 12) +} diff --git a/doc.go b/doc.go new file mode 100644 index 00000000..bdd8cb70 --- /dev/null +++ b/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. +package distribution diff --git a/errors.go b/errors.go new file mode 100644 index 00000000..020d3325 --- /dev/null +++ b/errors.go @@ -0,0 +1,115 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +// ErrAccessDenied is returned when access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + +// ErrManifestNotModified is returned when a conditional manifest GetByTag +// returns nil because the client indicated that it already has the latest +// version. +var ErrManifestNotModified = errors.New("manifest not modified") + +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed. +var ErrUnsupported = errors.New("operation unsupported") + +// ErrTagUnknown is returned if the given tag is not known by the tag service. +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + +// ErrRepositoryUnknown is returned if the named repository is not known by +// the registry. +type ErrRepositoryUnknown struct { + Name string +} + +func (err ErrRepositoryUnknown) Error() string { + return fmt.Sprintf("unknown repository name=%s", err.Name) +} + +// ErrRepositoryNameInvalid should be used to denote an invalid repository +// name. Reason may be set, indicating the cause of invalidity. +type ErrRepositoryNameInvalid struct { + Name string + Reason error +} + +func (err ErrRepositoryNameInvalid) Error() string { + return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) +} + +// ErrManifestUnknown is returned if the manifest is not known by the +// registry. +type ErrManifestUnknown struct { + Name string + Tag string +} + +func (err ErrManifestUnknown) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ErrManifestUnknownRevision is returned when a manifest cannot be found by +// revision within a repository. +type ErrManifestUnknownRevision struct { + Name string + Revision digest.Digest +} + +func (err ErrManifestUnknownRevision) Error() string { + return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) +} + +// ErrManifestUnverified is returned when the registry is unable to verify +// the manifest. +type ErrManifestUnverified struct{} + +func (ErrManifestUnverified) Error() string { + return "unverified manifest" +} + +// ErrManifestVerification provides a type to collect errors encountered +// during manifest verification. Currently, it accepts errors of all types, +// but it may be narrowed to those involving manifest verification.
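+// +// A hypothetical usage sketch (not part of this package's API): a verifier +// could accumulate individual failures and return them as one error, e.g.: +// +// var errs ErrManifestVerification +// for _, check := range checks { +// if err := check(mnfst); err != nil { +// errs = append(errs, err) +// } +// } +// if len(errs) > 0 { +// return errs +// }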
+type ErrManifestVerification []error + +func (errs ErrManifestVerification) Error() string { + var parts []string + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} + +// ErrManifestBlobUnknown is returned when a referenced blob cannot be found. +type ErrManifestBlobUnknown struct { + Digest digest.Digest +} + +func (err ErrManifestBlobUnknown) Error() string { + return fmt.Sprintf("unknown blob %v on manifest", err.Digest) +} + +// ErrManifestNameInvalid should be used to denote an invalid manifest +// name. Reason may be set, indicating the cause of invalidity. +type ErrManifestNameInvalid struct { + Name string + Reason error +} + +func (err ErrManifestNameInvalid) Error() string { + return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) +} diff --git a/health/api/api.go b/health/api/api.go new file mode 100644 index 00000000..73fcc453 --- /dev/null +++ b/health/api/api.go @@ -0,0 +1,37 @@ +package api + +import ( + "errors" + "net/http" + + "github.com/docker/distribution/health" +) + +var ( + updater = health.NewStatusUpdater() +) + +// DownHandler sets the manual_http_status check to an error state on POST, +// taking the service out of rotation; other methods get a 404. +func DownHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + updater.Update(errors.New("Manual Check")) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +// UpHandler resets the manual_http_status check to nil (healthy) on POST, +// bringing the service back into rotation; other methods get a 404. +func UpHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + updater.Update(nil) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +// init sets up the two endpoints to bring the service up and down +func init() { + health.Register("manual_http_status", updater) + http.HandleFunc("/debug/health/down", DownHandler) + http.HandleFunc("/debug/health/up", UpHandler) +} diff --git a/health/api/api_test.go b/health/api/api_test.go new file mode 100644 index 00000000..ec82154f --- /dev/null +++ b/health/api/api_test.go @@ -0,0 +1,86 @@ +package api + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/health" +) + +// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint +// /debug/health/down with method GET returns a 404 +func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + DownHandler(recorder, req) + + if recorder.Code != 404 { + t.Errorf("Did not get a 404.") + } +} + +// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint +// /debug/health/up with method GET returns a 404 +func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + UpHandler(recorder, req) + + if recorder.Code != 404 { + t.Errorf("Did not get a 404.") + } +} + +// TestPOSTDownHandlerChangeStatus ensures the endpoint /debug/health/down changes +// the status code of the response to 503 +// This test is order-dependent and should come before TestPOSTUpHandlerChangeStatus +func TestPOSTDownHandlerChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil) + if err != nil { +
t.Errorf("Failed to create request.") + } + + DownHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } + + if len(health.CheckStatus()) != 1 { + t.Errorf("DownHandler didn't add an error check.") + } +} + +// TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes +// the status code of the response to 200 +func TestPOSTUpHandlerChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + UpHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } + + if len(health.CheckStatus()) != 0 { + t.Errorf("UpHandler didn't remove the error check.") + } +} diff --git a/health/checks/checks.go b/health/checks/checks.go new file mode 100644 index 00000000..7760f610 --- /dev/null +++ b/health/checks/checks.go @@ -0,0 +1,73 @@ +package checks + +import ( + "errors" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "strconv" + "time" + + "github.com/docker/distribution/health" +) + +// FileChecker checks the existence of a file and returns an error +// if the file exists. +func FileChecker(f string) health.Checker { + return health.CheckFunc(func() error { + absoluteFilePath, err := filepath.Abs(f) + if err != nil { + return fmt.Errorf("failed to get absolute path for %q: %v", f, err) + } + + _, err = os.Stat(absoluteFilePath) + if err == nil { + return errors.New("file exists") + } else if os.IsNotExist(err) { + return nil + } + + return err + }) +} + +// HTTPChecker does a HEAD request and verifies that the HTTP status code +// returned matches statusCode. +func HTTPChecker(r string, statusCode int, timeout time.Duration, headers http.Header) health.Checker { + return health.CheckFunc(func() error { + client := http.Client{ + Timeout: timeout, + } + req, err := http.NewRequest("HEAD", r, nil) + if err != nil { + return errors.New("error creating request: " + r) + } + for headerName, headerValues := range headers { + for _, headerValue := range headerValues { + req.Header.Add(headerName, headerValue) + } + } + response, err := client.Do(req) + if err != nil { + return errors.New("error while checking: " + r) + } + // Close the response body so the underlying connection can be reused. + defer response.Body.Close() + if response.StatusCode != statusCode { + return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode)) + } + return nil + }) +} + +// TCPChecker attempts to open a TCP connection.
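+// +// A sketch of wiring this into the health registry (the "db_tcp" name and +// address are illustrative only): +// +// health.Register("db_tcp", health.PeriodicChecker( +// checks.TCPChecker("db.internal:5432", time.Second*5), time.Second*10))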
+func TCPChecker(addr string, timeout time.Duration) health.Checker { + return health.CheckFunc(func() error { + conn, err := net.DialTimeout("tcp", addr, timeout) + if err != nil { + return errors.New("connection to " + addr + " failed") + } + conn.Close() + return nil + }) +} diff --git a/health/checks/checks_test.go b/health/checks/checks_test.go new file mode 100644 index 00000000..6b6dd14f --- /dev/null +++ b/health/checks/checks_test.go @@ -0,0 +1,25 @@ +package checks + +import ( + "testing" +) + +func TestFileChecker(t *testing.T) { + if err := FileChecker("/tmp").Check(); err == nil { + t.Errorf("expected an error for existing path /tmp") + } + + if err := FileChecker("NoSuchFileFromMoon").Check(); err != nil { + t.Errorf("expected no error for nonexistent file NoSuchFileFromMoon, got: %v", err) + } +} + +func TestHTTPChecker(t *testing.T) { + if err := HTTPChecker("https://www.google.cybertron", 200, 0, nil).Check(); err == nil { + t.Errorf("expected an error for unresolvable host google.cybertron") + } + + if err := HTTPChecker("https://www.google.pt", 200, 0, nil).Check(); err != nil { + t.Errorf("expected no error for reachable host google.pt, got: %v", err) + } +} diff --git a/health/doc.go b/health/doc.go new file mode 100644 index 00000000..877f4dac --- /dev/null +++ b/health/doc.go @@ -0,0 +1,136 @@ +// Package health provides a generic health checking framework. +// The health package works expvar-style: by importing the package, the debug +// server gains a "/debug/health" endpoint that returns the current +// status of the application. +// If there are no errors, "/debug/health" will return an HTTP 200 status, +// together with an empty JSON reply "{}". If there are any checks +// with errors, the JSON reply will include all the failed checks, and the +// response will have an HTTP 503 status. +// +// A Check can be run either synchronously or asynchronously. We recommend +// registering most checks asynchronously, so a call to the +// "/debug/health" endpoint always returns immediately. This pattern is +// particularly useful for checks that verify upstream connectivity or +// database status, since they might take a long time to return/timeout. +// +// Installing +// +// To install health, just import it in your application: +// +// import "github.com/docker/distribution/health" +// +// You can also (optionally) import "health/api", which will add two +// convenience endpoints: "/debug/health/down" and "/debug/health/up". These +// endpoints add "manual" checks that allow the service to quickly be brought +// in/out of rotation. +// +// import _ "github.com/docker/distribution/health/api" +// +// # curl localhost:5001/debug/health +// {} +// # curl -X POST localhost:5001/debug/health/down +// # curl localhost:5001/debug/health +// {"manual_http_status":"Manual Check"} +// +// After importing these packages into your main application, you can start +// registering checks. +// +// Registering Checks +// +// The recommended way of registering checks is using a periodic Check. +// PeriodicChecks run on a certain schedule and asynchronously update the +// status of the check. This allows CheckStatus to return without blocking +// on an expensive check.
+// +// A trivial example of a check that runs every 5 seconds and shuts down our +// server if the current minute is even could be added as follows: +// +// func currentMinuteEvenCheck() error { +// m := time.Now().Minute() +// if m%2 == 0 { +// return errors.New("Current minute is even!") +// } +// return nil +// } +// +// health.RegisterPeriodicFunc("minute_even", time.Second*5, currentMinuteEvenCheck) +// +// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to +// implement the exact same check, but add a threshold of failures after which +// the check will be unhealthy. This is particularly useful for flaky Checks, +// ensuring some stability of the service when handling them. +// +// health.RegisterPeriodicThresholdFunc("minute_even", time.Second*5, 4, currentMinuteEvenCheck) +// +// The lowest-level way to interact with the health package is calling +// "Register" directly. Register allows you to pass in an arbitrary string and +// something that implements "Checker" and runs your check. If your method +// returns nil, the check is considered healthy; otherwise it will make the +// health check endpoint "/debug/health" start returning a 503 and list the +// specific check that failed. +// +// Assuming you wish to register a method called "currentMinuteEvenCheck() +// error" you could do that as follows: +// +// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck)) +// +// CheckFunc is a convenience type that implements Checker. +// +// Another way of registering a check could be by using an anonymous function +// and the convenience method RegisterFunc. An example that makes the status +// endpoint always return an error: +// +// health.RegisterFunc("my_check", func() error { +// return errors.New("This is an error!") +// }) +// +// Examples +// +// You could also use the health checker mechanism to ensure your application +// only comes up if certain conditions are met, or to allow the developer to +// take the service out of rotation immediately. An example that checks +// database connectivity and immediately takes the server out of rotation on +// error: +// +// updater = health.NewStatusUpdater() +// health.RegisterFunc("database_check", func() error { +// return updater.Check() +// }) +// +// conn, err := Connect(...) // database call here +// if err != nil { +// updater.Update(errors.New("Error connecting to the database: " + err.Error())) +// } +// +// You can also use the predefined Checkers that come included with the health +// package. First, import the checks: +// +// import "github.com/docker/distribution/health/checks" +// +// After that you can make use of any of the provided checks. An example of +// using a `FileChecker` to take the application out of rotation if a certain +// file exists can be done as follows: +// +// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5)) +// +// After registering the check, it is trivial to take an application out of +// rotation from the console: +// +// # curl localhost:5001/debug/health +// {} +// # touch /tmp/disable +// # curl localhost:5001/debug/health +// {"fileChecker":"file exists"} +// +// FileChecker only accepts an absolute or a relative file path; it does not +// expand a tilde (~). You should also make sure that the application has the +// proper permissions (read and execute permission for every directory +// along the specified file path).
Otherwise, FileChecker will report an error +// and the file health check will fail. +// +// You could also test the connectivity to a downstream service by using a +// "HTTPChecker", but ensure that you only mark the test unhealthy if there +// are a minimum of two failures in a row: +// +// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt", 200, time.Second*5, nil), time.Second*5, 2)) +package health diff --git a/health/health.go b/health/health.go new file mode 100644 index 00000000..220282dc --- /dev/null +++ b/health/health.go @@ -0,0 +1,306 @@ +package health + +import ( + "encoding/json" + "fmt" + "net/http" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" +) + +// A Registry is a collection of checks. Most applications will use the global +// registry defined in DefaultRegistry. However, unit tests may need to create +// separate registries to isolate themselves from other tests. +type Registry struct { + mu sync.RWMutex + registeredChecks map[string]Checker +} + +// NewRegistry creates a new registry. This isn't necessary for normal use of +// the package, but may be useful for unit tests so individual tests have their +// own set of checks. +func NewRegistry() *Registry { + return &Registry{ + registeredChecks: make(map[string]Checker), + } +} + +// DefaultRegistry is the default registry where checks are registered. It is +// the registry used by the HTTP handler. +var DefaultRegistry *Registry + +// Checker is the interface for a Health Checker +type Checker interface { + // Check returns nil if the service is okay. + Check() error +} + +// CheckFunc is a convenience type to create functions that implement +// the Checker interface. +type CheckFunc func() error + +// Check implements the Checker interface, allowing any func() error method +// to be passed as a Checker. +func (cf CheckFunc) Check() error { + return cf() +} + +// Updater implements a health check that is explicitly set. +type Updater interface { + Checker + + // Update updates the current status of the health check. + Update(status error) +} + +// updater implements Checker and Updater, providing an asynchronous Update +// method. +// This allows us to have a Checker that returns from the Check() call +// immediately, without blocking on a potentially expensive check. +type updater struct { + mu sync.Mutex + status error +} + +// Check implements the Checker interface +func (u *updater) Check() error { + u.mu.Lock() + defer u.mu.Unlock() + + return u.status +} + +// Update implements the Updater interface, allowing asynchronous access to +// the status of a Checker. +func (u *updater) Update(status error) { + u.mu.Lock() + defer u.mu.Unlock() + + u.status = status +} + +// NewStatusUpdater returns a new updater +func NewStatusUpdater() Updater { + return &updater{} +} + +// thresholdUpdater implements Checker and Updater, providing an asynchronous Update +// method. +// This allows us to have a Checker that returns from the Check() call +// immediately, without blocking on a potentially expensive check. +type thresholdUpdater struct { + mu sync.Mutex + status error + threshold int + count int +} + +// Check implements the Checker interface +func (tu *thresholdUpdater) Check() error { + tu.mu.Lock() + defer tu.mu.Unlock() + + if tu.count >= tu.threshold { + return tu.status + } + + return nil +} + +// Update implements the Updater interface, allowing asynchronous +// access to the status of a Checker.
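+// +// For example, with a threshold of 3, Check only starts reporting a failure +// after three consecutive non-nil updates; a single nil update resets the +// counter and clears the status.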
+func (tu *thresholdUpdater) Update(status error) { + tu.mu.Lock() + defer tu.mu.Unlock() + + if status == nil { + tu.count = 0 + } else if tu.count < tu.threshold { + tu.count++ + } + + tu.status = status +} + +// NewThresholdStatusUpdater returns a new thresholdUpdater +func NewThresholdStatusUpdater(t int) Updater { + return &thresholdUpdater{threshold: t} +} + +// PeriodicChecker wraps an updater to provide a periodic checker +func PeriodicChecker(check Checker, period time.Duration) Checker { + u := NewStatusUpdater() + go func() { + t := time.NewTicker(period) + for { + <-t.C + u.Update(check.Check()) + } + }() + + return u +} + +// PeriodicThresholdChecker wraps an updater to provide a periodic checker that +// uses a threshold before it changes status +func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker { + tu := NewThresholdStatusUpdater(threshold) + go func() { + t := time.NewTicker(period) + for { + <-t.C + tu.Update(check.Check()) + } + }() + + return tu +} + +// CheckStatus returns a map with all the current health check errors +func (registry *Registry) CheckStatus() map[string]string { // TODO(stevvooe) this needs a proper type + registry.mu.RLock() + defer registry.mu.RUnlock() + statusKeys := make(map[string]string) + for k, v := range registry.registeredChecks { + err := v.Check() + if err != nil { + statusKeys[k] = err.Error() + } + } + + return statusKeys +} + +// CheckStatus returns a map with all the current health check errors from the +// default registry. +func CheckStatus() map[string]string { + return DefaultRegistry.CheckStatus() +} + +// Register associates the checker with the provided name. +func (registry *Registry) Register(name string, check Checker) { + if registry == nil { + registry = DefaultRegistry + } + registry.mu.Lock() + defer registry.mu.Unlock() + _, ok := registry.registeredChecks[name] + if ok { + panic("Check already exists: " + name) + } + registry.registeredChecks[name] = check +} + +// Register associates the checker with the provided name in the default +// registry. +func Register(name string, check Checker) { + DefaultRegistry.Register(name, check) +} + +// RegisterFunc allows the convenience of registering a checker directly from +// an arbitrary func() error. +func (registry *Registry) RegisterFunc(name string, check func() error) { + registry.Register(name, CheckFunc(check)) +} + +// RegisterFunc allows the convenience of registering a checker in the default +// registry directly from an arbitrary func() error. +func RegisterFunc(name string, check func() error) { + DefaultRegistry.RegisterFunc(name, check) +} + +// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker +// from an arbitrary func() error. +func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { + registry.Register(name, PeriodicChecker(CheckFunc(check), period)) +} + +// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker +// in the default registry from an arbitrary func() error. +func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { + DefaultRegistry.RegisterPeriodicFunc(name, period, check) +} + +// RegisterPeriodicThresholdFunc allows the convenience of registering a +// PeriodicChecker from an arbitrary func() error. 
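+// +// For instance (a sketch; the "remote_ping" name and pingRemote helper are +// illustrative only): +// +// registry.RegisterPeriodicThresholdFunc("remote_ping", time.Second*10, 3, +// func() error { return pingRemote() })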
+func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { + registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) +} + +// RegisterPeriodicThresholdFunc allows the convenience of registering a +// PeriodicChecker in the default registry from an arbitrary func() error. +func RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { + DefaultRegistry.RegisterPeriodicThresholdFunc(name, period, threshold, check) +} + +// StatusHandler returns a JSON blob with all the currently registered Health Checks +// and their corresponding status. +// Returns 503 if any check reports an error, 200 otherwise. +func StatusHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + checks := CheckStatus() + status := http.StatusOK + + // If there is an error, return 503 + if len(checks) != 0 { + status = http.StatusServiceUnavailable + } + + statusResponse(w, r, status, checks) + } else { + http.NotFound(w, r) + } +} + +// Handler returns a handler that will return 503 response code if the health +// checks have failed. If everything is okay with the health checks, the +// handler will pass through to the provided handler. Use this handler to +// disable a web application when the health checks fail. +func Handler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + checks := CheckStatus() + if len(checks) != 0 { + errcode.ServeJSON(w, errcode.ErrorCodeUnavailable. + WithDetail("health check failed: please see /debug/health")) + return + } + + handler.ServeHTTP(w, r) // pass through + }) +} + +// statusResponse completes the request with a response describing the health +// of the service. +func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) { + p, err := json.Marshal(checks) + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status: %v", err) + p, err = json.Marshal(struct { + ServerError string `json:"server_error"` + }{ + ServerError: "Could not parse error message", + }) + status = http.StatusInternalServerError + + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status failure message: %v", err) + return + } + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.WriteHeader(status) + if _, err := w.Write(p); err != nil { + context.GetLogger(context.Background()).Errorf("error writing health status response body: %v", err) + } +} + +// init registers the global /debug/health API endpoint and creates the +// default registry. +func init() { + DefaultRegistry = NewRegistry() + http.HandleFunc("/debug/health", StatusHandler) +} diff --git a/health/health_test.go b/health/health_test.go new file mode 100644 index 00000000..8d1a028b --- /dev/null +++ b/health/health_test.go @@ -0,0 +1,107 @@ +package health + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +// TestReturns200IfThereAreNoChecks ensures that the result code of the health +// endpoint is 200 when no checks are currently registered.
+func TestReturns200IfThereAreNoChecks(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + StatusHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } +} + +// TestReturns503IfThereAreErrorChecks ensures that the result code of the +// health endpoint is 503 if there are health checks with errors. +func TestReturns503IfThereAreErrorChecks(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + // Create a manual error + Register("some_check", CheckFunc(func() error { + return errors.New("This Check did not succeed") + })) + + StatusHandler(recorder, req) + + if recorder.Code != 503 { + t.Errorf("Did not get a 503.") + } +} + +// TestHealthHandler ensures that our handler implementation correctly protects +// the web application when things aren't so healthy. +func TestHealthHandler(t *testing.T) { + // clear out existing checks. + DefaultRegistry = NewRegistry() + + // protect an http server + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + + // wrap it in our health handler + handler = Handler(handler) + + // use this updater to swap the check status + updater := NewStatusUpdater() + Register("test_check", updater) + + // now, create a test server + server := httptest.NewServer(handler) + + checkUp := func(t *testing.T, message string) { + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("error getting success status: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusNoContent) + } + // NOTE(stevvooe): we really don't care about the body -- the format is + // not standardized or supported, yet. + } + + checkDown := func(t *testing.T, message string) { + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("error getting down status: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusServiceUnavailable { + t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusServiceUnavailable) + } + } + + // server should be up + checkUp(t, "initial health check") + + // now, we fail the health check + updater.Update(fmt.Errorf("the server is now out of commission")) + checkDown(t, "server should be down") // should be down + + // bring server back up + updater.Update(nil) + checkUp(t, "when server is back up") // now we should be back up. +} diff --git a/manifest/doc.go b/manifest/doc.go new file mode 100644 index 00000000..88367b0a --- /dev/null +++ b/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go new file mode 100644 index 00000000..3aa0662d --- /dev/null +++ b/manifest/manifestlist/manifestlist.go @@ -0,0 +1,155 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists.
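+// +// Clients typically send this value in the HTTP Accept header to indicate +// that they can handle a manifest list, e.g.: +// +// Accept: application/vnd.docker.distribution.manifest.list.v2+json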
+const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// package's version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Manifests lists the descriptors of the platform-specific manifests + // referenced by this manifest list. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation.
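+// +// A minimal usage sketch (the digest, size and platform values are +// illustrative): +// +// ml, err := FromDescriptors([]ManifestDescriptor{{ +// Descriptor: distribution.Descriptor{ +// Digest: dgst, +// Size: 985, +// MediaType: "application/vnd.docker.distribution.manifest.v2+json", +// }, +// Platform: PlatformSpec{Architecture: "amd64", OS: "linux"}, +// }})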
+func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: SchemaVersion, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. +func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/manifest/manifestlist/manifestlist_test.go b/manifest/manifestlist/manifestlist_test.go new file mode 100644 index 00000000..09e6ed1f --- /dev/null +++ b/manifest/manifestlist/manifestlist_test.go @@ -0,0 +1,111 @@ +package manifestlist + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution" +) + +var expectedManifestListSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "platform": { + "architecture": "amd64", + "os": "linux", + "features": [ + "sse4" + ] + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 2392, + "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608", + "platform": { + "architecture": "sun4m", + "os": "sunos" + } + } + ] +}`) + +func TestManifestList(t *testing.T) { + manifestDescriptors := []ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + }, + Platform: PlatformSpec{ + Architecture: "amd64", + OS: "linux", + Features: []string{"sse4"}, + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", + Size: 2392, + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + }, + Platform: PlatformSpec{ + Architecture: "sun4m", + OS: "sunos", + }, + }, + } + + deserialized, err := FromDescriptors(manifestDescriptors) + if err != nil { + t.Fatalf("error creating DeserializedManifestList: %v", err) + } + + mediaType, canonical, err := deserialized.Payload() + + if mediaType != MediaTypeManifestList { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // 
Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest list: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that the canonical field has the expected value. + if !bytes.Equal(expectedManifestListSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestListSerialization)) + } + + var unmarshalled DeserializedManifestList + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + references := deserialized.References() + if len(references) != 2 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + for i := range references { + if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) { + t.Fatalf("unexpected value %d returned by References: %v", i, references[i]) + } + } +} diff --git a/manifest/schema1/config_builder.go b/manifest/schema1/config_builder.go new file mode 100644 index 00000000..a96dc3d2 --- /dev/null +++ b/manifest/schema1/config_builder.go @@ -0,0 +1,287 @@ +package schema1 + +import ( + "context" + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
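+// +// The typical flow, mirroring this package's tests (bs, pk, ref and +// configJSON are supplied by the caller): +// +// builder := NewConfigManifestBuilder(bs, pk, ref, configJSON) +// for _, d := range layerDescriptors { +// if err := builder.AppendReference(d); err != nil { +// return err +// } +// } +// signedManifest, err := builder.Build(ctx)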
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors) + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + Author: h.Author, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if 
len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + tag := "" + if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.ref.Name(), + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + descriptor := d.Descriptor() + + if err := descriptor.Digest.Validate(); err != nil { + return err + } + + mb.descriptors = append(mb.descriptors, descriptor) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. 
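+ // For illustration: given a config containing "rootfs", "history" and + // "os" keys, the result keeps "os", gains "id", "parent" and (optionally) + // "throwaway", and drops "rootfs" and "history".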
+ var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/manifest/schema1/config_builder_test.go b/manifest/schema1/config_builder_test.go new file mode 100644 index 00000000..360febd7 --- /dev/null +++ b/manifest/schema1/config_builder_test.go @@ -0,0 +1,277 @@ +package schema1 + +import ( + "bytes" + "compress/gzip" + "context" + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: mediaType, + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestEmptyTar(t *testing.T) { + // Confirm that gzippedEmptyTar expands to 1024 NULL bytes. + var decompressed [2048]byte + gzipReader, err := gzip.NewReader(bytes.NewReader(gzippedEmptyTar)) + if err != nil { + t.Fatalf("NewReader returned error: %v", err) + } + n, err := gzipReader.Read(decompressed[:]) + if n != 1024 { + t.Fatalf("read returned %d bytes; expected 1024", n) + } + n, err = gzipReader.Read(decompressed[1024:]) + if n != 0 { + t.Fatalf("read returned %d bytes; expected 0", n) + } + if err != io.EOF { + t.Fatal("read did not return io.EOF") + } + gzipReader.Close() + for _, b := range decompressed[:1024] { + if b != 0 { + t.Fatal("nonzero byte in decompressed tar") + } + } + + // Confirm that digestSHA256EmptyTar is the digest of gzippedEmptyTar. 
+ dgst := digest.FromBytes(gzippedEmptyTar) + if dgst != digestSHA256GzippedEmptyTar { + t.Fatalf("digest mismatch for empty tar: expected %s got %s", digestSHA256GzippedEmptyTar, dgst) + } +} + +func TestConfigBuilder(t *testing.T) { + imgJSON := `{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "author": "Alyssa P. 
Hacker \u003calyspdev@example.com\u003e", + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}` + + descriptors := []distribution.Descriptor{ + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("could not generate key for testing: %v", err) + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + + ref, err := reference.WithName("testrepo") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + ref, err = reference.WithTag(ref, "testtag") + if err != nil { + t.Fatalf("could not add tag: %v", err) + } + + builder := NewConfigManifestBuilder(bs, pk, ref, []byte(imgJSON)) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + signed, err := builder.Build(dcontext.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the gzipped empty layer tar was put in the blob store + _, err = bs.Stat(dcontext.Background(), digestSHA256GzippedEmptyTar) + if err != nil { + t.Fatal("gzipped empty tar was not put in the blob store") + } + + manifest := signed.(*SignedManifest).Manifest + + if manifest.Versioned.SchemaVersion != 1 { + t.Fatal("SchemaVersion != 1") + } + if manifest.Name != "testrepo" { + t.Fatal("incorrect name in manifest") + } + if manifest.Tag != "testtag" { + t.Fatal("incorrect tag in manifest") + } + if manifest.Architecture != "amd64" { + t.Fatal("incorrect arch in manifest") + } + + expectedFSLayers := []FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if len(manifest.FSLayers) != len(expectedFSLayers) { + t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers)) + } + if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) { + t.Fatal("wrong FSLayers list") + } + + expectedV1Compatibility := []string{ + `{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo 
hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"69e5c1bfadad697fdb6db59f6326648fa119e0c031a0eda33b8cfadcab54ba7f","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`, + `{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]},"author":"Alyssa P. Hacker \u003calyspdev@example.com\u003e"}`, + `{"id":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","parent":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`, + `{"id":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`, + `{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`, + `{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`, + } + + if len(manifest.History) != len(expectedV1Compatibility) { + t.Fatalf("wrong number of history entries: %d", len(manifest.History)) + } + for i := range expectedV1Compatibility { + if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] { + t.Errorf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility) + } + } +} diff --git a/manifest/schema1/manifest.go b/manifest/schema1/manifest.go new file mode 100644 index 00000000..65042a75 --- /dev/null +++ b/manifest/schema1/manifest.go @@ -0,0 +1,184 @@ +package schema1 + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. 
Note
+	// that for schema version 1, the media type is optionally "application/json".
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
+	// MediaTypeSignedManifest specifies the media type for the current SignedManifest version
+	MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	// MediaTypeManifestLayer specifies the media type for manifest layers
+	MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
+)
+
+var (
+	// SchemaVersion provides a pre-initialized version structure for this
+	// package's version of the manifest.
+	SchemaVersion = manifest.Versioned{
+		SchemaVersion: 1,
+	}
+)
+
+func init() {
+	schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		sm := new(SignedManifest)
+		err := sm.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		desc := distribution.Descriptor{
+			Digest:    digest.FromBytes(sm.Canonical),
+			Size:      int64(len(sm.Canonical)),
+			MediaType: MediaTypeSignedManifest,
+		}
+		return sm, desc, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+	err = distribution.RegisterManifestSchema("", schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+	err = distribution.RegisterManifestSchema("application/json", schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// FSLayer is a container struct for BlobSums defined in an image manifest
+type FSLayer struct {
+	// BlobSum is the tarsum of the referenced filesystem image layer
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+// History stores unstructured v1 compatibility information
+type History struct {
+	// V1Compatibility is the raw v1 compatibility information
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Manifest provides the base accessible fields for working with V2 image
+// format in the registry.
+type Manifest struct {
+	manifest.Versioned
+
+	// Name is the name of the image's repository
+	Name string `json:"name"`
+
+	// Tag is the tag of the image specified by this manifest
+	Tag string `json:"tag"`
+
+	// Architecture is the host architecture on which this image is intended to
+	// run
+	Architecture string `json:"architecture"`
+
+	// FSLayers is a list of filesystem layer blobSums contained in this image
+	FSLayers []FSLayer `json:"fsLayers"`
+
+	// History is a list of unstructured historical data for v1 compatibility
+	History []History `json:"history"`
+}
+
+// SignedManifest provides an envelope for a signed image manifest, including
+// the format-sensitive raw bytes.
+type SignedManifest struct {
+	Manifest
+
+	// Canonical is the canonical byte representation of the ImageManifest,
+	// without any attached signatures. The manifest byte
+	// representation cannot change or it will have to be re-signed.
+	Canonical []byte `json:"-"`
+
+	// all contains the byte representation of the Manifest including signatures
+	// and is returned by Payload()
+	all []byte
+}
+
+// UnmarshalJSON populates a new SignedManifest struct from JSON data.
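+// For illustration, a decode round-trip might look like the following sketch
+// (raw is assumed to hold a pretty-signed manifest; error handling elided):
+//
+//	var sm SignedManifest
+//	err := json.Unmarshal(raw, &sm)
+//	// sm.Canonical now holds the payload bytes covered by the signatures,
+//	// while the full signed representation is retained for Payload().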
+func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
+	sm.all = make([]byte, len(b))
+	// store manifest and signatures in all
+	copy(sm.all, b)
+
+	jsig, err := libtrust.ParsePrettySignature(b, "signatures")
+	if err != nil {
+		return err
+	}
+
+	// Resolve the payload in the manifest.
+	bytes, err := jsig.Payload()
+	if err != nil {
+		return err
+	}
+
+	// sm.Canonical stores the canonical manifest JSON
+	sm.Canonical = make([]byte, len(bytes))
+	copy(sm.Canonical, bytes)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
+		return err
+	}
+
+	sm.Manifest = manifest
+
+	return nil
+}
+
+// References returns the descriptors of this manifest's references
+func (sm SignedManifest) References() []distribution.Descriptor {
+	dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
+	for i, fsLayer := range sm.FSLayers {
+		dependencies[i] = distribution.Descriptor{
+			MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
+			Digest:    fsLayer.BlobSum,
+		}
+	}
+
+	return dependencies
+}
+
+// MarshalJSON returns the contents of all. If all is empty, marshals the
+// inner contents. Applications requiring a marshaled signed manifest should
+// use Payload directly, since the content produced by json.Marshal will be
+// compacted and will fail signature checks.
+func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
+	if len(sm.all) > 0 {
+		return sm.all, nil
+	}
+
+	// If the raw data is not available, just dump the inner content.
+	return json.Marshal(&sm.Manifest)
+}
+
+// Payload returns the signed content of the signed manifest.
+func (sm SignedManifest) Payload() (string, []byte, error) {
+	return MediaTypeSignedManifest, sm.all, nil
+}
+
+// Signatures returns the signatures as provided by
+// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
+// signatures.
+func (sm *SignedManifest) Signatures() ([][]byte, error) {
+	jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	// Resolve the signatures in the manifest.
+	return jsig.Signatures()
+}
diff --git a/manifest/schema1/manifest_test.go b/manifest/schema1/manifest_test.go
new file mode 100644
index 00000000..05bb8ec5
--- /dev/null
+++ b/manifest/schema1/manifest_test.go
@@ -0,0 +1,136 @@
+package schema1
+
+import (
+	"bytes"
+	"encoding/json"
+	"reflect"
+	"testing"
+
+	"github.com/docker/libtrust"
+)
+
+type testEnv struct {
+	name, tag     string
+	invalidSigned *SignedManifest
+	signed        *SignedManifest
+	pk            libtrust.PrivateKey
+}
+
+func TestManifestMarshaling(t *testing.T) {
+	env := genEnv(t)
+
+	// Check that the all field is the same as json.MarshalIndent with these
+	// parameters.
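+	// (env.signed implements json.Marshaler and returns sm.all verbatim, and
+	// sm.all is already formatted with the same prefix/indent that libtrust's
+	// PrettySignature produced, so MarshalIndent should leave it unchanged.)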
+	p, err := json.MarshalIndent(env.signed, "", "   ")
+	if err != nil {
+		t.Fatalf("error marshaling manifest: %v", err)
+	}
+
+	if !bytes.Equal(p, env.signed.all) {
+		t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.all), string(p))
+	}
+}
+
+func TestManifestUnmarshaling(t *testing.T) {
+	env := genEnv(t)
+
+	var signed SignedManifest
+	if err := json.Unmarshal(env.signed.all, &signed); err != nil {
+		t.Fatalf("error unmarshaling signed manifest: %v", err)
+	}
+
+	if !reflect.DeepEqual(&signed, env.signed) {
+		t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed)
+	}
+
+}
+
+func TestManifestVerification(t *testing.T) {
+	env := genEnv(t)
+
+	publicKeys, err := Verify(env.signed)
+	if err != nil {
+		t.Fatalf("error verifying manifest: %v", err)
+	}
+
+	if len(publicKeys) == 0 {
+		t.Fatalf("no public keys found in signature")
+	}
+
+	var found bool
+	publicKey := env.pk.PublicKey()
+	// ensure that one of the extracted public keys matches the private key.
+	for _, candidate := range publicKeys {
+		if candidate.KeyID() == publicKey.KeyID() {
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		t.Fatalf("expected public key, %v, not found in verified keys: %v", publicKey, publicKeys)
+	}
+
+	// The "invalid" manifest is still correctly signed, so signature
+	// verification should succeed even though its structure is bogus.
+	_, err = Verify(env.invalidSigned)
+	if err != nil {
+		t.Fatalf("unexpected error verifying manifest with invalid structure: %v", err)
+	}
+}
+
+func genEnv(t *testing.T) *testEnv {
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("error generating test key: %v", err)
+	}
+
+	name, tag := "foo/bar", "test"
+
+	invalid := Manifest{
+		Versioned: SchemaVersion,
+		Name:      name,
+		Tag:       tag,
+		FSLayers: []FSLayer{
+			{
+				BlobSum: "asdf",
+			},
+			{
+				BlobSum: "qwer",
+			},
+		},
+	}
+
+	valid := Manifest{
+		Versioned: SchemaVersion,
+		Name:      name,
+		Tag:       tag,
+		FSLayers: []FSLayer{
+			{
+				BlobSum: "asdf",
+			},
+		},
+		History: []History{
+			{
+				V1Compatibility: "",
+			},
+		},
+	}
+
+	sm, err := Sign(&valid, pk)
+	if err != nil {
+		t.Fatalf("error signing manifest: %v", err)
+	}
+
+	invalidSigned, err := Sign(&invalid, pk)
+	if err != nil {
+		t.Fatalf("error signing manifest: %v", err)
+	}
+
+	return &testEnv{
+		name:          name,
+		tag:           tag,
+		invalidSigned: invalidSigned,
+		signed:        sm,
+		pk:            pk,
+	}
+}
diff --git a/manifest/schema1/reference_builder.go b/manifest/schema1/reference_builder.go
new file mode 100644
index 00000000..a4f6032c
--- /dev/null
+++ b/manifest/schema1/reference_builder.go
@@ -0,0 +1,98 @@
+package schema1
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
+)
+
+// referenceManifestBuilder is a type for constructing manifests from schema1
+// dependencies.
+type referenceManifestBuilder struct {
+	Manifest
+	pk libtrust.PrivateKey
+}
+
+// NewReferenceManifestBuilder is used to build new manifests for the current
+// schema version using schema1 dependencies.
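+//
+// A minimal usage sketch (pk, ref, ctx and dgst are assumed to exist; error
+// handling elided):
+//
+//	b := NewReferenceManifestBuilder(pk, ref, "amd64")
+//	_ = b.AppendReference(Reference{Digest: dgst, Size: 1234, History: History{V1Compatibility: "{}"}})
+//	m, _ := b.Build(ctx)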
+func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { + tag := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: ref.Name(), + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("Unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. 
+	History History
+}
+
+// Descriptor describes a reference
+func (r Reference) Descriptor() distribution.Descriptor {
+	return distribution.Descriptor{
+		MediaType: MediaTypeManifestLayer,
+		Digest:    r.Digest,
+		Size:      r.Size,
+	}
+}
diff --git a/manifest/schema1/reference_builder_test.go b/manifest/schema1/reference_builder_test.go
new file mode 100644
index 00000000..9eaa666c
--- /dev/null
+++ b/manifest/schema1/reference_builder_test.go
@@ -0,0 +1,108 @@
+package schema1
+
+import (
+	"testing"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
+)
+
+func makeSignedManifest(t *testing.T, pk libtrust.PrivateKey, refs []Reference) *SignedManifest {
+	u := &Manifest{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 1,
+		},
+		Name:         "foo/bar",
+		Tag:          "latest",
+		Architecture: "amd64",
+	}
+
+	for i := len(refs) - 1; i >= 0; i-- {
+		u.FSLayers = append(u.FSLayers, FSLayer{
+			BlobSum: refs[i].Digest,
+		})
+		u.History = append(u.History, History{
+			V1Compatibility: refs[i].History.V1Compatibility,
+		})
+	}
+
+	signedManifest, err := Sign(u, pk)
+	if err != nil {
+		t.Fatalf("unexpected error signing manifest: %v", err)
+	}
+	return signedManifest
+}
+
+func TestReferenceBuilder(t *testing.T) {
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	r1 := Reference{
+		Digest:  "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+		Size:    1,
+		History: History{V1Compatibility: "{\"a\" : 1 }"},
+	}
+	r2 := Reference{
+		Digest:  "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+		Size:    2,
+		History: History{V1Compatibility: "{\"b\" : 2 }"},
+	}
+
+	handCrafted := makeSignedManifest(t, pk, []Reference{r1, r2})
+
+	ref, err := reference.WithName(handCrafted.Manifest.Name)
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	ref, err = reference.WithTag(ref, handCrafted.Manifest.Tag)
+	if err != nil {
+		t.Fatalf("could not add tag: %v", err)
+	}
+
+	b := NewReferenceManifestBuilder(pk, ref, handCrafted.Manifest.Architecture)
+	_, err = b.Build(context.Background())
+	if err == nil {
+		t.Fatal("Expected error building zero length manifest")
+	}
+
+	err = b.AppendReference(r1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = b.AppendReference(r2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	refs := b.References()
+	if len(refs) != 2 {
+		t.Fatalf("Unexpected reference count: %d != %d", 2, len(refs))
+	}
+
+	// Ensure ordering
+	if refs[0].Digest != r2.Digest {
+		t.Fatalf("Unexpected reference: %v", refs[0])
+	}
+
+	m, err := b.Build(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	built, ok := m.(*SignedManifest)
+	if !ok {
+		t.Fatalf("unexpected type from Build(): %T", built)
+	}
+
+	d1 := digest.FromBytes(built.Canonical)
+	d2 := digest.FromBytes(handCrafted.Canonical)
+	if d1 != d2 {
+		t.Errorf("mismatching canonical JSON")
+	}
+}
diff --git a/manifest/schema1/sign.go b/manifest/schema1/sign.go
new file mode 100644
index 00000000..c862dd81
--- /dev/null
+++ b/manifest/schema1/sign.go
@@ -0,0 +1,68 @@
+package schema1
+
+import (
+	"crypto/x509"
+	"encoding/json"
+
+	"github.com/docker/libtrust"
+)
+
+// Sign signs the manifest with the provided private key, returning a
+// SignedManifest. This typically won't be used within the registry, except
+// for testing.
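+//
+// A minimal sketch, using a throwaway key as the tests do:
+//
+//	pk, _ := libtrust.GenerateECP256PrivateKey()
+//	sm, err := Sign(&m, pk)
+//	// sm.Canonical is the payload that was signed; sm.Payload() returns
+//	// the pretty-signed bytes.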
+func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
+	p, err := json.MarshalIndent(m, "", "   ")
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := libtrust.NewJSONSignature(p)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := js.Sign(pk); err != nil {
+		return nil, err
+	}
+
+	pretty, err := js.PrettySignature("signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	return &SignedManifest{
+		Manifest:  *m,
+		all:       pretty,
+		Canonical: p,
+	}, nil
+}
+
+// SignWithChain signs the manifest with the given private key and x509 chain.
+// The public key of the first element in the chain must be the public key
+// corresponding with the sign key.
+func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
+	p, err := json.MarshalIndent(m, "", "   ")
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := libtrust.NewJSONSignature(p)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := js.SignWithChain(key, chain); err != nil {
+		return nil, err
+	}
+
+	pretty, err := js.PrettySignature("signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	return &SignedManifest{
+		Manifest:  *m,
+		all:       pretty,
+		Canonical: p,
+	}, nil
+}
diff --git a/manifest/schema1/verify.go b/manifest/schema1/verify.go
new file mode 100644
index 00000000..ef59065c
--- /dev/null
+++ b/manifest/schema1/verify.go
@@ -0,0 +1,32 @@
+package schema1
+
+import (
+	"crypto/x509"
+
+	"github.com/docker/libtrust"
+	"github.com/sirupsen/logrus"
+)
+
+// Verify verifies the signature of the signed manifest returning the public
+// keys used during signing.
+func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
+	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
+		return nil, err
+	}
+
+	return js.Verify()
+}
+
+// VerifyChains verifies the signature of the signed manifest against the
+// certificate pool returning the list of verified chains. Signatures without
+// an x509 chain are not checked.
+func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
+	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	return js.VerifyChains(ca)
+}
diff --git a/manifest/schema2/builder.go b/manifest/schema2/builder.go
new file mode 100644
index 00000000..3facaae6
--- /dev/null
+++ b/manifest/schema2/builder.go
@@ -0,0 +1,85 @@
+package schema2
+
+import (
+	"context"
+
+	"github.com/docker/distribution"
+	"github.com/opencontainers/go-digest"
+)
+
+// builder is a type for constructing manifests.
+type builder struct {
+	// bs is a BlobService used to publish the configuration blob.
+	bs distribution.BlobService
+
+	// configMediaType is the media type used to describe the configuration.
+	configMediaType string
+
+	// configJSON is the raw bytes of the configuration blob.
+	configJSON []byte
+
+	// dependencies is a list of descriptors that gets built by successive
+	// calls to AppendReference. In case of image configuration these are layers.
+	dependencies []distribution.Descriptor
+}
+
+// NewManifestBuilder is used to build new manifests for the current schema
+// version. It takes a BlobService so it can publish the configuration blob
+// as part of the Build process.
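+//
+// A minimal sketch, assuming bs is a configured distribution.BlobService and
+// configJSON holds the image configuration:
+//
+//	b := NewManifestBuilder(bs, MediaTypeImageConfig, configJSON)
+//	_ = b.AppendReference(layerDescriptor)
+//	m, err := b.Build(ctx) // writes the config blob to bs if not present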
+func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configMediaType: configMediaType, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references. +func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.dependencies)), + } + copy(m.Layers, mb.dependencies) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = mb.configMediaType + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = mb.configMediaType + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *builder) AppendReference(d distribution.Describable) error { + mb.dependencies = append(mb.dependencies, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *builder) References() []distribution.Descriptor { + return mb.dependencies +} diff --git a/manifest/schema2/builder_test.go b/manifest/schema2/builder_test.go new file mode 100644 index 00000000..cde73242 --- /dev/null +++ b/manifest/schema2/builder_test.go @@ -0,0 +1,210 @@ +package schema2 + +import ( + "context" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/opencontainers/go-digest" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: "application/octet-stream", + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestBuilder(t *testing.T) { + imgJSON := []byte(`{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + 
"Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}`) + configDigest := digest.FromBytes(imgJSON) + + descriptors := []distribution.Descriptor{ + { + Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 5312, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + Size: 235231, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 639152, + MediaType: MediaTypeLayer, + }, + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + builder := NewManifestBuilder(bs, MediaTypeImageConfig, imgJSON) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + built, err := builder.Build(context.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the config was put in the blob store + _, err = bs.Stat(context.Background(), configDigest) + if err != nil { + t.Fatal("config was not put in the blob store") + } + + manifest := built.(*DeserializedManifest).Manifest + + if 
manifest.Versioned.SchemaVersion != 2 {
+		t.Fatal("SchemaVersion != 2")
+	}
+
+	target := manifest.Target()
+	if target.Digest != configDigest {
+		t.Fatalf("unexpected digest in target: %s", target.Digest.String())
+	}
+	if target.MediaType != MediaTypeImageConfig {
+		t.Fatalf("unexpected media type in target: %s", target.MediaType)
+	}
+	if target.Size != 3153 {
+		t.Fatalf("unexpected size in target: %d", target.Size)
+	}
+
+	references := manifest.References()
+	expected := append([]distribution.Descriptor{manifest.Target()}, descriptors...)
+	if !reflect.DeepEqual(references, expected) {
+		t.Fatal("References() does not match the descriptors added")
+	}
+}
diff --git a/manifest/schema2/manifest.go b/manifest/schema2/manifest.go
new file mode 100644
index 00000000..a2708c75
--- /dev/null
+++ b/manifest/schema2/manifest.go
@@ -0,0 +1,138 @@
+package schema2
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest"
+	"github.com/opencontainers/go-digest"
+)
+
+const (
+	// MediaTypeManifest specifies the mediaType for the current version.
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
+
+	// MediaTypeImageConfig specifies the mediaType for the image configuration.
+	MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
+
+	// MediaTypePluginConfig specifies the mediaType for plugin configuration.
+	MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
+
+	// MediaTypeLayer is the mediaType used for layers referenced by the
+	// manifest.
+	MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+
+	// MediaTypeForeignLayer is the mediaType used for layers that must be
+	// downloaded from foreign URLs.
+	MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+
+	// MediaTypeUncompressedLayer is the mediaType used for layers which
+	// are not compressed.
+	MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
+)
+
+var (
+	// SchemaVersion provides a pre-initialized version structure for this
+	// package's version of the manifest.
+	SchemaVersion = manifest.Versioned{
+		SchemaVersion: 2,
+		MediaType:     MediaTypeManifest,
+	}
+)
+
+func init() {
+	schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		m := new(DeserializedManifest)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// Manifest defines a schema2 manifest.
+type Manifest struct {
+	manifest.Versioned
+
+	// Config references the image configuration as a blob.
+	Config distribution.Descriptor `json:"config"`
+
+	// Layers lists descriptors for the layers referenced by the
+	// configuration.
+	Layers []distribution.Descriptor `json:"layers"`
+}
+
+// References returns the descriptors of this manifest's references.
+func (m Manifest) References() []distribution.Descriptor {
+	references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
+	references = append(references, m.Config)
+	references = append(references, m.Layers...)
+	return references
+}
+
+// Target returns the target of this manifest.
+func (m Manifest) Target() distribution.Descriptor {
+	return m.Config
+}
+
+// DeserializedManifest wraps Manifest with a copy of the original JSON.
+// It satisfies the distribution.Manifest interface.
+type DeserializedManifest struct {
+	Manifest
+
+	// canonical is the canonical byte representation of the Manifest.
+	canonical []byte
+}
+
+// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
+// DeserializedManifest which contains the manifest and its JSON representation.
+func FromStruct(m Manifest) (*DeserializedManifest, error) {
+	var deserialized DeserializedManifest
+	deserialized.Manifest = m
+
+	var err error
+	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
+	return &deserialized, err
+}
+
+// UnmarshalJSON populates a new Manifest struct from JSON data.
+func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
+	m.canonical = make([]byte, len(b))
+	// store manifest in canonical
+	copy(m.canonical, b)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(m.canonical, &manifest); err != nil {
+		return err
+	}
+
+	m.Manifest = manifest
+
+	return nil
+}
+
+// MarshalJSON returns the contents of canonical. If canonical is empty,
+// an error is returned.
+func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
+	if len(m.canonical) > 0 {
+		return m.canonical, nil
+	}
+
+	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
+}
+
+// Payload returns the raw content of the manifest. The contents can be used to
+// calculate the content identifier.
+func (m DeserializedManifest) Payload() (string, []byte, error) {
+	return m.MediaType, m.canonical, nil
+}
diff --git a/manifest/schema2/manifest_test.go b/manifest/schema2/manifest_test.go
new file mode 100644
index 00000000..86226606
--- /dev/null
+++ b/manifest/schema2/manifest_test.go
@@ -0,0 +1,111 @@
+package schema2
+
+import (
+	"bytes"
+	"encoding/json"
+	"reflect"
+	"testing"
+
+	"github.com/docker/distribution"
+)
+
+var expectedManifestSerialization = []byte(`{
+   "schemaVersion": 2,
+   "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+   "config": {
+      "mediaType": "application/vnd.docker.container.image.v1+json",
+      "size": 985,
+      "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b"
+   },
+   "layers": [
+      {
+         "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+         "size": 153263,
+         "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b"
+      }
+   ]
+}`)
+
+func TestManifest(t *testing.T) {
+	manifest := Manifest{
+		Versioned: SchemaVersion,
+		Config: distribution.Descriptor{
+			Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
+			Size:      985,
+			MediaType: MediaTypeImageConfig,
+		},
+		Layers: []distribution.Descriptor{
+			{
+				Digest:    "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
+				Size:      153263,
+				MediaType: MediaTypeLayer,
+			},
+		},
+	}
+
+	deserialized, err := FromStruct(manifest)
+	if err != nil {
+		t.Fatalf("error creating DeserializedManifest: %v", err)
+	}
+
+	mediaType, canonical, err := deserialized.Payload()
+	if err != nil {
+		t.Fatalf("error getting payload: %v", err)
+	}
+
+	if mediaType != MediaTypeManifest {
+		t.Fatalf("unexpected media type: %s", mediaType)
+	}
+
+	// Check that the canonical field is the same as json.MarshalIndent
+	// with these parameters.
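+	// (FromStruct serializes the struct with json.MarshalIndent(&m, "", "   "),
+	// so marshaling the source struct with the same parameters should
+	// reproduce the canonical bytes exactly.)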
+	p, err := json.MarshalIndent(&manifest, "", "   ")
+	if err != nil {
+		t.Fatalf("error marshaling manifest: %v", err)
+	}
+	if !bytes.Equal(p, canonical) {
+		t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p))
+	}
+
+	// Check that canonical field matches expected value.
+	if !bytes.Equal(expectedManifestSerialization, canonical) {
+		t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestSerialization))
+	}
+
+	var unmarshalled DeserializedManifest
+	if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil {
+		t.Fatalf("error unmarshaling manifest: %v", err)
+	}
+
+	if !reflect.DeepEqual(&unmarshalled, deserialized) {
+		t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized)
+	}
+
+	target := deserialized.Target()
+	if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" {
+		t.Fatalf("unexpected digest in target: %s", target.Digest.String())
+	}
+	if target.MediaType != MediaTypeImageConfig {
+		t.Fatalf("unexpected media type in target: %s", target.MediaType)
+	}
+	if target.Size != 985 {
+		t.Fatalf("unexpected size in target: %d", target.Size)
+	}
+
+	references := deserialized.References()
+	if len(references) != 2 {
+		t.Fatalf("unexpected number of references: %d", len(references))
+	}
+
+	if !reflect.DeepEqual(references[0], target) {
+		t.Fatalf("first reference should be target: %v != %v", references[0], target)
+	}
+
+	// Test the second reference
+	if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" {
+		t.Fatalf("unexpected digest in reference: %s", references[1].Digest.String())
+	}
+	if references[1].MediaType != MediaTypeLayer {
+		t.Fatalf("unexpected media type in reference: %s", references[1].MediaType)
+	}
+	if references[1].Size != 153263 {
+		t.Fatalf("unexpected size in reference: %d", references[1].Size)
+	}
+}
diff --git a/manifest/versioned.go b/manifest/versioned.go
new file mode 100644
index 00000000..caa6b14e
--- /dev/null
+++ b/manifest/versioned.go
@@ -0,0 +1,12 @@
+package manifest
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type Versioned struct {
+	// SchemaVersion is the image manifest schema that this image follows
+	SchemaVersion int `json:"schemaVersion"`
+
+	// MediaType is the media type of this schema.
+	MediaType string `json:"mediaType,omitempty"`
+}
diff --git a/manifests.go b/manifests.go
new file mode 100644
index 00000000..1816baea
--- /dev/null
+++ b/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+	"context"
+	"fmt"
+	"mime"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target
+type Manifest interface {
+	// References returns a list of objects which make up this manifest.
+	// A reference is anything which can be represented by a
+	// distribution.Descriptor. These can consist of layers, resources or other
+	// manifests.
+	//
+	// While no particular order is required, implementations should return
+	// them from highest to lowest priority. For example, one might want to
+	// return the base layer before the top layer.
+	References() []Descriptor
+
+	// Payload provides the serialized format of the manifest, in addition to
+	// the media type.
+	Payload() (mediaType string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package. Manifest
+// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+	// Build creates the manifest from this builder.
+	Build(ctx context.Context) (Manifest, error)
+
+	// References returns a list of objects which have been added to this
+	// builder. The dependencies are returned in the order they were added,
+	// which should be from base to head.
+	References() []Descriptor
+
+	// AppendReference includes the given object in the manifest after any
+	// existing dependencies. If the add fails, such as when adding an
+	// unsupported dependency, an error may be returned.
+	//
+	// The destination of the reference is dependent on the manifest type and
+	// the dependency type.
+	AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+	// Exists returns true if the manifest exists.
+	Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+	// Get retrieves the manifest specified by the given digest
+	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+	// Put creates or updates the given manifest returning the manifest digest
+	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+	// Delete removes the manifest specified by the given digest. Deleting
+	// a manifest that doesn't exist will return ErrManifestNotFound
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+	// Enumerate calls ingester for each manifest.
+	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+	Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+	for t := range mappings {
+		if t != "" {
+			mediaTypes = append(mediaTypes, t)
+		}
+	}
+	return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+	// Need to look up by the actual media type, not the raw contents of
+	// the header. Strip semicolons and anything following them.
+	var mediaType string
+	if ctHeader != "" {
+		var err error
+		mediaType, _, err = mime.ParseMediaType(ctHeader)
+		if err != nil {
+			return nil, Descriptor{}, err
+		}
+	}
+
+	unmarshalFunc, ok := mappings[mediaType]
+	if !ok {
+		unmarshalFunc, ok = mappings[""]
+		if !ok {
+			return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
+		}
+	}
+
+	return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
This
+// should be called from specific manifest packages' init functions.
+func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
+	if _, ok := mappings[mediaType]; ok {
+		return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
+	}
+	mappings[mediaType] = u
+	return nil
+}
diff --git a/metrics/prometheus.go b/metrics/prometheus.go
new file mode 100644
index 00000000..b5a53214
--- /dev/null
+++ b/metrics/prometheus.go
@@ -0,0 +1,13 @@
+package metrics
+
+import "github.com/docker/go-metrics"
+
+const (
+	// NamespacePrefix is the namespace of prometheus metrics
+	NamespacePrefix = "registry"
+)
+
+var (
+	// StorageNamespace is the prometheus namespace of blob/cache related operations
+	StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
+)
diff --git a/notifications/bridge.go b/notifications/bridge.go
new file mode 100644
index 00000000..8f6386d3
--- /dev/null
+++ b/notifications/bridge.go
@@ -0,0 +1,214 @@
+package notifications
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/uuid"
+	"github.com/opencontainers/go-digest"
+)
+
+type bridge struct {
+	ub      URLBuilder
+	actor   ActorRecord
+	source  SourceRecord
+	request RequestRecord
+	sink    Sink
+}
+
+var _ Listener = &bridge{}
+
+// URLBuilder defines a subset of url builder to be used by the event listener.
+type URLBuilder interface {
+	BuildManifestURL(name reference.Named) (string, error)
+	BuildBlobURL(ref reference.Canonical) (string, error)
+}
+
+// NewBridge returns a notification listener that writes records to sink,
+// using the actor and source. Any urls populated in the events created by
+// this bridge will be created using the URLBuilder.
+// TODO(stevvooe): Update this to simply take a context.Context object.
+func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener {
+	return &bridge{
+		ub:      ub,
+		actor:   actor,
+		source:  source,
+		request: request,
+		sink:    sink,
+	}
+}
+
+// NewRequestRecord builds a RequestRecord for use in NewBridge from an
+// http.Request, associating it with a request id.
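+//
+// Typical wiring, sketched (the request id, sink and repo construction are
+// elided; names are illustrative):
+//
+//	listener := NewBridge(ub, source, actor, NewRequestRecord(id, r), sink)
+//	err := listener.ManifestPushed(repo, m)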
+func NewRequestRecord(id string, r *http.Request) RequestRecord { + return RequestRecord{ + ID: id, + Addr: context.RemoteAddr(r), + Host: r.Host, + Method: r.Method, + UserAgent: r.UserAgent(), + } +} + +func (b *bridge) ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPush, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPull, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) ManifestDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createManifestDeleteEventAndWrite(EventActionDelete, repo, dgst) +} + +func (b *bridge) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPush, repo, desc) +} + +func (b *bridge) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPull, repo, desc) +} + +func (b *bridge) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { + event, err := b.createBlobEvent(EventActionMount, repo, desc) + if err != nil { + return err + } + event.Target.FromRepository = fromRepo.Name() + return b.sink.Write(*event) +} + +func (b *bridge) BlobDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createBlobDeleteEventAndWrite(EventActionDelete, repo, dgst) +} + +func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { + manifestEvent, err := b.createManifestEvent(action, repo, sm) + if err != nil { + return err + } + + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) createManifestDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + event.Target.Digest = dgst + + return b.sink.Write(*event) +} + +func (b *bridge) createManifestEvent(action string, repo reference.Named, sm distribution.Manifest) (*Event, error) { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + + mt, p, err := sm.Payload() + if err != nil { + return nil, err + } + + // Ensure we have the canonical manifest descriptor here + _, desc, err := distribution.UnmarshalManifest(mt, p) + if err != nil { + return nil, err + } + + event.Target.MediaType = mt + event.Target.Length = desc.Size + event.Target.Size = desc.Size + event.Target.Digest = desc.Digest + + ref, err := reference.WithDigest(repo, event.Target.Digest) + if err != nil { + return nil, err + } + + event.Target.URL, err = b.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + return event, nil +} + +func (b *bridge) createBlobDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Digest = dgst + event.Target.Repository = repo.Name() + + return b.sink.Write(*event) 
+}
+
+func (b *bridge) createBlobEventAndWrite(action string, repo reference.Named, desc distribution.Descriptor) error {
+	event, err := b.createBlobEvent(action, repo, desc)
+	if err != nil {
+		return err
+	}
+
+	return b.sink.Write(*event)
+}
+
+func (b *bridge) createBlobEvent(action string, repo reference.Named, desc distribution.Descriptor) (*Event, error) {
+	event := b.createEvent(action)
+	event.Target.Descriptor = desc
+	event.Target.Length = desc.Size
+	event.Target.Repository = repo.Name()
+
+	ref, err := reference.WithDigest(repo, desc.Digest)
+	if err != nil {
+		return nil, err
+	}
+
+	event.Target.URL, err = b.ub.BuildBlobURL(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	return event, nil
+}
+
+// createEvent creates an event with actor and source populated.
+func (b *bridge) createEvent(action string) *Event {
+	event := createEvent(action)
+	event.Source = b.source
+	event.Actor = b.actor
+	event.Request = b.request
+
+	return event
+}
+
+// createEvent returns a new event, timestamped, with the specified action.
+func createEvent(action string) *Event {
+	return &Event{
+		ID:        uuid.Generate().String(),
+		Timestamp: time.Now(),
+		Action:    action,
+	}
+}
diff --git a/notifications/bridge_test.go b/notifications/bridge_test.go
new file mode 100644
index 00000000..86350993
--- /dev/null
+++ b/notifications/bridge_test.go
@@ -0,0 +1,222 @@
+package notifications
+
+import (
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/uuid"
+	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	// common environment for expected manifest events.
+
+	repo   = "test/repo"
+	source = SourceRecord{
+		Addr:       "remote.test",
+		InstanceID: uuid.Generate().String(),
+	}
+	ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/", false))
+
+	actor = ActorRecord{
+		Name: "test",
+	}
+	request = RequestRecord{}
+	m       = schema1.Manifest{
+		Name: repo,
+		Tag:  "latest",
+	}
+
+	sm      *schema1.SignedManifest
+	payload []byte
+	dgst    digest.Digest
+)
+
+func TestEventBridgeManifestPulled(t *testing.T) {
+	l := createTestEnv(t, testSinkFn(func(events ...Event) error {
+		checkCommonManifest(t, EventActionPull, events...)
+
+		return nil
+	}))
+
+	repoRef, _ := reference.WithName(repo)
+	if err := l.ManifestPulled(repoRef, sm); err != nil {
+		t.Fatalf("unexpected error notifying manifest pull: %v", err)
+	}
+}
+
+func TestEventBridgeManifestPushed(t *testing.T) {
+	l := createTestEnv(t, testSinkFn(func(events ...Event) error {
+		checkCommonManifest(t, EventActionPush, events...)
+
+		return nil
+	}))
+
+	repoRef, _ := reference.WithName(repo)
+	if err := l.ManifestPushed(repoRef, sm); err != nil {
+		t.Fatalf("unexpected error notifying manifest push: %v", err)
+	}
+}
+
+func TestEventBridgeManifestPushedWithTag(t *testing.T) {
+	l := createTestEnv(t, testSinkFn(func(events ...Event) error {
+		checkCommonManifest(t, EventActionPush, events...)
+		if events[0].Target.Tag != "latest" {
+			t.Fatalf("missing or unexpected tag: %#v", events[0].Target)
+		}
+
+		return nil
+	}))
+
+	repoRef, _ := reference.WithName(repo)
+	if err := l.ManifestPushed(repoRef, sm, distribution.WithTag(m.Tag)); err != nil {
+		t.Fatalf("unexpected error notifying manifest push: %v", err)
+	}
+}
+
+func TestEventBridgeManifestPulledWithTag(t *testing.T) {
+	l := createTestEnv(t, testSinkFn(func(events ...Event) error {
+		checkCommonManifest(t, EventActionPull, events...)
+		if events[0].Target.Tag != "latest" {
+			t.Fatalf("missing or unexpected tag: %#v", events[0].Target)
+		}
+
+		return nil
+	}))
+
+	repoRef, _ := reference.WithName(repo)
+	if err := l.ManifestPulled(repoRef, sm, distribution.WithTag(m.Tag)); err != nil {
+		t.Fatalf("unexpected error notifying manifest pull: %v", err)
+	}
+}
+
+func TestEventBridgeManifestDeleted(t *testing.T) {
+	l := createTestEnv(t, testSinkFn(func(events ...Event) error {
+		checkDeleted(t, EventActionDelete, events...)
+		return nil
+	}))
+
+	repoRef, _ := reference.WithName(repo)
+	if err := l.ManifestDeleted(repoRef, dgst); err != nil {
+		t.Fatalf("unexpected error notifying manifest delete: %v", err)
+	}
+}
+
+func createTestEnv(t *testing.T, fn testSinkFn) Listener {
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("error generating private key: %v", err)
+	}
+
+	sm, err = schema1.Sign(&m, pk)
+	if err != nil {
+		t.Fatalf("error signing manifest: %v", err)
+	}
+
+	payload = sm.Canonical
+	dgst = digest.FromBytes(payload)
+
+	return NewBridge(ub, source, actor, request, fn)
+}
+
+func checkDeleted(t *testing.T, action string, events ...Event) {
+	if len(events) != 1 {
+		t.Fatalf("unexpected number of events: %v != 1", len(events))
+	}
+
+	event := events[0]
+
+	if event.Source != source {
+		t.Fatalf("source not equal: %#v != %#v", event.Source, source)
+	}
+
+	if event.Request != request {
+		t.Fatalf("request not equal: %#v != %#v", event.Request, request)
+	}
+
+	if event.Actor != actor {
+		t.Fatalf("actor not equal: %#v != %#v", event.Actor, actor)
+	}
+
+	if event.Target.Digest != dgst {
+		t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst)
+	}
+
+	if event.Target.Repository != repo {
+		t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo)
+	}
+
+}
+
+func checkCommonManifest(t *testing.T, action string, events ...Event) {
+	checkCommon(t, events...)
+
+	event := events[0]
+	if event.Action != action {
+		t.Fatalf("unexpected event action: %q != %q", event.Action, action)
+	}
+
+	repoRef, _ := reference.WithName(repo)
+	ref, _ := reference.WithDigest(repoRef, dgst)
+	u, err := ub.BuildManifestURL(ref)
+	if err != nil {
+		t.Fatalf("error building expected url: %v", err)
+	}
+
+	if event.Target.URL != u {
+		t.Fatalf("incorrect url passed: \n%q != \n%q", event.Target.URL, u)
+	}
+}
+
+func checkCommon(t *testing.T, events ...Event) {
+	if len(events) != 1 {
+		t.Fatalf("unexpected number of events: %v != 1", len(events))
+	}
+
+	event := events[0]
+
+	if event.Source != source {
+		t.Fatalf("source not equal: %#v != %#v", event.Source, source)
+	}
+
+	if event.Request != request {
+		t.Fatalf("request not equal: %#v != %#v", event.Request, request)
+	}
+
+	if event.Actor != actor {
+		t.Fatalf("actor not equal: %#v != %#v", event.Actor, actor)
+	}
+
+	if event.Target.Digest != dgst {
+		t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst)
+	}
+
+	if event.Target.Length != int64(len(payload)) {
+		t.Fatalf("unexpected target length: %v != %v", event.Target.Length, len(payload))
+	}
+
+	if event.Target.Repository != repo {
+		t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo)
+	}
+
+}
+
+type testSinkFn func(events ...Event) error
+
+func (tsf testSinkFn) Write(events ...Event) error {
+	return tsf(events...)
+}
+
+func (tsf testSinkFn) Close() error { return nil }
+
+func mustUB(ub *v2.URLBuilder, err error) *v2.URLBuilder {
+	if err != nil {
+		panic(err)
+	}
+
+	return ub
+}
diff --git a/notifications/endpoint.go b/notifications/endpoint.go
new file mode 100644
index 00000000..285995ec
--- /dev/null
+++ b/notifications/endpoint.go
@@ -0,0 +1,96 @@
+package notifications
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution/configuration"
+)
+
+// EndpointConfig covers the optional configuration parameters for an active
+// endpoint.
+type EndpointConfig struct {
+	Headers           http.Header
+	Timeout           time.Duration
+	Threshold         int
+	Backoff           time.Duration
+	IgnoredMediaTypes []string
+	Transport         *http.Transport `json:"-"`
+	Ignore            configuration.Ignore
+}
+
+// defaults set any zero-valued fields to a reasonable default.
+func (ec *EndpointConfig) defaults() {
+	if ec.Timeout <= 0 {
+		ec.Timeout = time.Second
+	}
+
+	if ec.Threshold <= 0 {
+		ec.Threshold = 10
+	}
+
+	if ec.Backoff <= 0 {
+		ec.Backoff = time.Second
+	}
+
+	if ec.Transport == nil {
+		ec.Transport = http.DefaultTransport.(*http.Transport)
+	}
+}
+
+// Endpoint is a reliable, queued, thread-safe sink that notifies external http
+// services when events are written. Writes are non-blocking and always
+// succeed for callers but events may be queued internally.
+type Endpoint struct {
+	Sink
+	url  string
+	name string
+
+	EndpointConfig
+
+	metrics *safeMetrics
+}
+
+// NewEndpoint returns a running endpoint, ready to receive events.
+func NewEndpoint(name, url string, config EndpointConfig) *Endpoint {
+	var endpoint Endpoint
+	endpoint.name = name
+	endpoint.url = url
+	endpoint.EndpointConfig = config
+	endpoint.defaults()
+	endpoint.metrics = newSafeMetrics()
+
+	// Configures the inmemory queue, retry, http pipeline.
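+	// The sinks are layered inside out: the raw HTTP sink is wrapped by a
+	// retrying sink (threshold/backoff), which is fed by an in-memory event
+	// queue, which in turn sits behind a filter for ignored media types and
+	// actions. Writes enter at the outermost (ignored) sink.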
+	endpoint.Sink = newHTTPSink(
+		endpoint.url, endpoint.Timeout, endpoint.Headers,
+		endpoint.Transport, endpoint.metrics.httpStatusListener())
+	endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff)
+	endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener())
+	mediaTypes := append(config.Ignore.MediaTypes, config.IgnoredMediaTypes...)
+	endpoint.Sink = newIgnoredSink(endpoint.Sink, mediaTypes, config.Ignore.Actions)
+
+	register(&endpoint)
+	return &endpoint
+}
+
+// Name returns the name of the endpoint, generally used for debugging.
+func (e *Endpoint) Name() string {
+	return e.name
+}
+
+// URL returns the url of the endpoint.
+func (e *Endpoint) URL() string {
+	return e.url
+}
+
+// ReadMetrics populates em with metrics from the endpoint.
+func (e *Endpoint) ReadMetrics(em *EndpointMetrics) {
+	e.metrics.Lock()
+	defer e.metrics.Unlock()
+
+	*em = e.metrics.EndpointMetrics
+	// Maps still need to be copied in a threadsafe manner.
+	em.Statuses = make(map[string]int)
+	for k, v := range e.metrics.Statuses {
+		em.Statuses[k] = v
+	}
+}
diff --git a/notifications/event.go b/notifications/event.go
new file mode 100644
index 00000000..9651cd1b
--- /dev/null
+++ b/notifications/event.go
@@ -0,0 +1,160 @@
+package notifications
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/docker/distribution"
+)
+
+// EventAction constants used in action field of Event.
+const (
+	EventActionPull   = "pull"
+	EventActionPush   = "push"
+	EventActionMount  = "mount"
+	EventActionDelete = "delete"
+)
+
+const (
+	// EventsMediaType is the mediatype for the json event envelope. If the
+	// Event, ActorRecord, SourceRecord or Envelope structs change, the version
+	// number should be incremented.
+	EventsMediaType = "application/vnd.docker.distribution.events.v1+json"
+	// layerMediaType is the media type for image rootfs diffs (aka "layers")
+	// used by Docker. We don't expect this to change for quite a while.
+	layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
+)
+
+// Envelope defines the fields of a json event envelope message that can hold
+// one or more events.
+type Envelope struct {
+	// Events make up the contents of the envelope. Events present in a single
+	// envelope are not necessarily related.
+	Events []Event `json:"events,omitempty"`
+}
+
+// TODO(stevvooe): The event type should be separate from the json format. It
+// should be defined as an interface. Leaving as is for now since we don't
+// need that at this time. If we make this change, the struct below would be
+// called "EventRecord".
+
+// Event provides the fields required to describe a registry event.
+type Event struct {
+	// ID provides a unique identifier for the event.
+	ID string `json:"id,omitempty"`
+
+	// Timestamp is the time at which the event occurred.
+	Timestamp time.Time `json:"timestamp,omitempty"`
+
+	// Action indicates what action encompasses the provided event.
+	Action string `json:"action,omitempty"`
+
+	// Target uniquely describes the target of the event.
+	Target struct {
+		// TODO(stevvooe): Use http.DetectContentType for layers, maybe.
+
+		distribution.Descriptor
+
+		// Length in bytes of content. Same as Size field in Descriptor.
+		// Provided for backwards compatibility.
+		Length int64 `json:"length,omitempty"`
+
+		// Repository identifies the named repository.
+		Repository string `json:"repository,omitempty"`
+
+		// FromRepository identifies the named repository which a blob was mounted
+		// from if appropriate.
+ FromRepository string `json:"fromRepository,omitempty"` + + // URL provides a direct link to the content. + URL string `json:"url,omitempty"` + + // Tag provides the tag + Tag string `json:"tag,omitempty"` + } `json:"target,omitempty"` + + // Request covers the request that generated the event. + Request RequestRecord `json:"request,omitempty"` + + // Actor specifies the agent that initiated the event. For most + // situations, this could be from the authorization context of the request. + Actor ActorRecord `json:"actor,omitempty"` + + // Source identifies the registry node that generated the event. Put + // differently, while the actor "initiates" the event, the source + // "generates" it. + Source SourceRecord `json:"source,omitempty"` +} + +// ActorRecord specifies the agent that initiated the event. For most +// situations, this could be from the authorization context of the request. +// Data in this record can refer to both the initiating client and the +// generating request. +type ActorRecord struct { + // Name corresponds to the subject or username associated with the + // request context that generated the event. + Name string `json:"name,omitempty"` + + // TODO(stevvooe): Look into setting a session cookie to get this + // without docker daemon. + // SessionID + + // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and + // get the actual command. + // Command +} + +// RequestRecord covers the request that generated the event. +type RequestRecord struct { + // ID uniquely identifies the request that initiated the event. + ID string `json:"id"` + + // Addr contains the ip or hostname and possibly port of the client + // connection that initiated the event. This is the RemoteAddr from + // the standard http request. + Addr string `json:"addr,omitempty"` + + // Host is the externally accessible host name of the registry instance, + // as specified by the http host header on incoming requests. + Host string `json:"host,omitempty"` + + // Method has the request method that generated the event. + Method string `json:"method"` + + // UserAgent contains the user agent header of the request. + UserAgent string `json:"useragent"` +} + +// SourceRecord identifies the registry node that generated the event. Put +// differently, while the actor "initiates" the event, the source "generates" +// it. +type SourceRecord struct { + // Addr contains the ip or hostname and the port of the registry node + // that generated the event. Generally, this will be resolved by + // os.Hostname() along with the running port. + Addr string `json:"addr,omitempty"` + + // InstanceID identifies a running instance of an application. Changes + // after each restart. + InstanceID string `json:"instanceID,omitempty"` +} + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("sink: closed") +) + +// Sink accepts and sends events. +type Sink interface { + // Write writes one or more events to the sink. If no error is returned, + // the caller will assume that all events have been committed and will not + // try to send them again. If an error is received, the caller may retry + // sending the event. The caller should cede the slice of memory to the + // sink and not modify it after calling this method. + Write(events ...Event) error + + // Close the sink, possibly waiting for pending events to flush. 
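+	// Implementations in this package return an error if Close is called a
+	// second time, so callers should close a sink at most once.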
+ Close() error +} diff --git a/notifications/event_test.go b/notifications/event_test.go new file mode 100644 index 00000000..0981a7ad --- /dev/null +++ b/notifications/event_test.go @@ -0,0 +1,157 @@ +package notifications + +import ( + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/distribution/manifest/schema1" +) + +// TestEventJSONFormat provides silly test to detect if the event format or +// envelope has changed. If this code fails, the revision of the protocol may +// need to be incremented. +func TestEventEnvelopeJSONFormat(t *testing.T) { + var expected = strings.TrimSpace(` +{ + "events": [ + { + "id": "asdf-asdf-asdf-asdf-0", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.distribution.manifest.v1+prettyjws", + "size": 1, + "digest": "sha256:0123456789abcdef0", + "length": 1, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-1", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "size": 2, + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + "length": 2, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-2", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "size": 3, + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6", + "length": 3, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + } + ] +} + `) + + tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) + if err != nil { + t.Fatalf("error creating time: %v", err) + } + + var prototype Event + prototype.Action = EventActionPush + prototype.Timestamp = tm + prototype.Actor.Name = "test-actor" + prototype.Request.ID = "asdfasdf" + prototype.Request.Addr = "client.local" + prototype.Request.Host = "registrycluster.local" + prototype.Request.Method = "PUT" + prototype.Request.UserAgent = "test/0.1" + prototype.Source.Addr = "hostname.local:port" + + var manifestPush Event + manifestPush = prototype + manifestPush.ID = "asdf-asdf-asdf-asdf-0" + manifestPush.Target.Digest = "sha256:0123456789abcdef0" + manifestPush.Target.Length = 1 + manifestPush.Target.Size = 1 + manifestPush.Target.MediaType = schema1.MediaTypeSignedManifest + manifestPush.Target.Repository = "library/test" + manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush0 Event + layerPush0 = prototype + layerPush0.ID = 
"asdf-asdf-asdf-asdf-1" + layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" + layerPush0.Target.Length = 2 + layerPush0.Target.Size = 2 + layerPush0.Target.MediaType = layerMediaType + layerPush0.Target.Repository = "library/test" + layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush1 Event + layerPush1 = prototype + layerPush1.ID = "asdf-asdf-asdf-asdf-2" + layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6" + layerPush1.Target.Length = 3 + layerPush1.Target.Size = 3 + layerPush1.Target.MediaType = layerMediaType + layerPush1.Target.Repository = "library/test" + layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var envelope Envelope + envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1) + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling envelope: %v", err) + } + if string(p) != expected { + t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected) + } +} diff --git a/notifications/http.go b/notifications/http.go new file mode 100644 index 00000000..15751619 --- /dev/null +++ b/notifications/http.go @@ -0,0 +1,150 @@ +package notifications + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" +) + +// httpSink implements a single-flight, http notification endpoint. This is +// very lightweight in that it only makes an attempt at an http request. +// Reliability should be provided by the caller. +type httpSink struct { + url string + + mu sync.Mutex + closed bool + client *http.Client + listeners []httpStatusListener + + // TODO(stevvooe): Allow one to configure the media type accepted by this + // sink and choose the serialization based on that. +} + +// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other +// sinks for increased reliability. +func newHTTPSink(u string, timeout time.Duration, headers http.Header, transport *http.Transport, listeners ...httpStatusListener) *httpSink { + if transport == nil { + transport = http.DefaultTransport.(*http.Transport) + } + return &httpSink{ + url: u, + listeners: listeners, + client: &http.Client{ + Transport: &headerRoundTripper{ + Transport: transport, + headers: headers, + }, + Timeout: timeout, + }, + } +} + +// httpStatusListener is called on various outcomes of sending notifications. +type httpStatusListener interface { + success(status int, events ...Event) + failure(status int, events ...Event) + err(err error, events ...Event) +} + +// Accept makes an attempt to notify the endpoint, returning an error if it +// fails. It is the caller's responsibility to retry on error. The events are +// accepted or rejected as a group. +func (hs *httpSink) Write(events ...Event) error { + hs.mu.Lock() + defer hs.mu.Unlock() + defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections() + + if hs.closed { + return ErrSinkClosed + } + + envelope := Envelope{ + Events: events, + } + + // TODO(stevvooe): It is not ideal to keep re-encoding the request body on + // retry but we are going to do it to keep the code simple. It is likely + // we could change the event struct to manage its own buffer. + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) 
+ } + return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) + } + + body := bytes.NewReader(p) + resp, err := hs.client.Post(hs.url, EventsMediaType, body) + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + + return fmt.Errorf("%v: error posting: %v", hs, err) + } + defer resp.Body.Close() + + // The notifier will treat any 2xx or 3xx response as accepted by the + // endpoint. + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + for _, listener := range hs.listeners { + listener.success(resp.StatusCode, events...) + } + + // TODO(stevvooe): This is a little accepting: we may want to support + // unsupported media type responses with retries using the correct + // media type. There may also be cases that will never work. + + return nil + default: + for _, listener := range hs.listeners { + listener.failure(resp.StatusCode, events...) + } + return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) + } +} + +// Close the endpoint +func (hs *httpSink) Close() error { + hs.mu.Lock() + defer hs.mu.Unlock() + + if hs.closed { + return fmt.Errorf("httpsink: already closed") + } + + hs.closed = true + return nil +} + +func (hs *httpSink) String() string { + return fmt.Sprintf("httpSink{%s}", hs.url) +} + +type headerRoundTripper struct { + *http.Transport // must be transport to support CancelRequest + headers http.Header +} + +func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var nreq http.Request + nreq = *req + nreq.Header = make(http.Header) + + merge := func(headers http.Header) { + for k, v := range headers { + nreq.Header[k] = append(nreq.Header[k], v...) + } + } + + merge(req.Header) + merge(hrt.headers) + + return hrt.Transport.RoundTrip(&nreq) +} diff --git a/notifications/http_test.go b/notifications/http_test.go new file mode 100644 index 00000000..de47f789 --- /dev/null +++ b/notifications/http_test.go @@ -0,0 +1,201 @@ +package notifications + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "mime" + "net" + "net/http" + "net/http/httptest" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution/manifest/schema1" +) + +// TestHTTPSink mocks out an http endpoint and notifies it under a couple of +// conditions, ensuring correct behavior. 
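+// It exercises a TLS failure with the default transport, a custom transport
+// that skips verification, status-code driven success and failure, and a
+// server that closes connections immediately.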
+func TestHTTPSink(t *testing.T) { + serverHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + if r.Method != "POST" { + w.WriteHeader(http.StatusMethodNotAllowed) + t.Fatalf("unexpected request method: %v", r.Method) + return + } + + // Extract the content type and make sure it matches + contentType := r.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) + return + } + + if mediaType != EventsMediaType { + w.WriteHeader(http.StatusUnsupportedMediaType) + t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) + return + } + + var envelope Envelope + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&envelope); err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error decoding request body: %v", err) + return + } + + // Let caller choose the status + status, err := strconv.Atoi(r.FormValue("status")) + if err != nil { + t.Logf("error parsing status: %v", err) + + // May just be empty, set status to 200 + status = http.StatusOK + } + + w.WriteHeader(status) + }) + server := httptest.NewTLSServer(serverHandler) + + metrics := newSafeMetrics() + sink := newHTTPSink(server.URL, 0, nil, nil, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + + // first make sure that the default transport gives x509 untrusted cert error + events := []Event{} + err := sink.Write(events...) + if !strings.Contains(err.Error(), "x509") { + t.Fatal("TLS server with default transport should give unknown CA error") + } + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing http sink: %v", err) + } + + // make sure that passing in the transport no longer gives this error + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + sink = newHTTPSink(server.URL, 0, nil, tr, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + err = sink.Write(events...) + if err != nil { + t.Fatalf("unexpected error writing events: %v", err) + } + + // reset server to standard http server and sink to a basic sink + server = httptest.NewServer(serverHandler) + sink = newHTTPSink(server.URL, 0, nil, nil, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + var expectedMetrics EndpointMetrics + expectedMetrics.Statuses = make(map[string]int) + + closeL, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("unexpected error creating listener: %v", err) + } + defer closeL.Close() + go func() { + for { + c, err := closeL.Accept() + if err != nil { + return + } + c.Close() + } + }() + + for _, tc := range []struct { + events []Event // events to send + url string + failure bool // true if there should be a failure. + statusCode int // if not set, no status code should be incremented. 
+	}{
+		{
+			statusCode: http.StatusOK,
+			events: []Event{
+				createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest)},
+		},
+		{
+			statusCode: http.StatusOK,
+			events: []Event{
+				createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest),
+				createTestEvent("push", "library/test", layerMediaType),
+				createTestEvent("push", "library/test", layerMediaType),
+			},
+		},
+		{
+			statusCode: http.StatusTemporaryRedirect,
+		},
+		{
+			statusCode: http.StatusBadRequest,
+			failure:    true,
+		},
+		{
+			// Case where connection is immediately closed
+			url:     closeL.Addr().String(),
+			failure: true,
+		},
+	} {
+
+		if tc.failure {
+			expectedMetrics.Failures += len(tc.events)
+		} else {
+			expectedMetrics.Successes += len(tc.events)
+		}
+
+		if tc.statusCode > 0 {
+			expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events)
+		}
+
+		url := tc.url
+		if url == "" {
+			url = server.URL + "/"
+		}
+		// setup endpoint to respond with expected status code.
+		url += fmt.Sprintf("?status=%v", tc.statusCode)
+		sink.url = url
+
+		t.Logf("testcase: %v, fail=%v", url, tc.failure)
+		// Try a simple event emission.
+		err := sink.Write(tc.events...)
+
+		if !tc.failure {
+			if err != nil {
+				t.Fatalf("unexpected error sending event: %v", err)
+			}
+		} else {
+			if err == nil {
+				t.Fatalf("the endpoint should have rejected the request")
+			}
+		}
+
+		if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) {
+			t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics)
+		}
+	}
+
+	if err := sink.Close(); err != nil {
+		t.Fatalf("unexpected error closing http sink: %v", err)
+	}
+
+	// double close returns error
+	if err := sink.Close(); err == nil {
+		t.Fatalf("second close should have returned an error")
+	}
+}
+
+func createTestEvent(action, repo, typ string) Event {
+	event := createEvent(action)
+
+	event.Target.MediaType = typ
+	event.Target.Repository = repo
+
+	return *event
+}
diff --git a/notifications/listener.go b/notifications/listener.go
new file mode 100644
index 00000000..52ec0ee7
--- /dev/null
+++ b/notifications/listener.go
@@ -0,0 +1,216 @@
+package notifications
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/docker/distribution"
+	dcontext "github.com/docker/distribution/context"
+	"github.com/docker/distribution/reference"
+	"github.com/opencontainers/go-digest"
+)
+
+// ManifestListener describes a set of methods for listening to events related to manifests.
+type ManifestListener interface {
+	ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error
+	ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error
+	ManifestDeleted(repo reference.Named, dgst digest.Digest) error
+}
+
+// BlobListener describes a listener that can respond to layer related events.
+type BlobListener interface {
+	BlobPushed(repo reference.Named, desc distribution.Descriptor) error
+	BlobPulled(repo reference.Named, desc distribution.Descriptor) error
+	BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error
+	BlobDeleted(repo reference.Named, desc digest.Digest) error
+}
+
+// Listener combines all repository events into a single interface.
+type Listener interface {
+	ManifestListener
+	BlobListener
+}
+
+type repositoryListener struct {
+	distribution.Repository
+	listener Listener
+}
+
+// Listen dispatches events on the repository to the listener.
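+//
+// A minimal usage sketch (tl is a hypothetical Listener implementation):
+//
+//	repo, _ := registry.Repository(ctx, ref)
+//	repo = Listen(repo, tl)
+//	// Subsequent Manifests/Blobs operations now dispatch events to tl.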
+func Listen(repo distribution.Repository, listener Listener) distribution.Repository { + return &repositoryListener{ + Repository: repo, + listener: listener, + } +} + +func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifests, err := rl.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return &manifestServiceListener{ + ManifestService: manifests, + parent: rl, + }, nil +} + +func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { + return &blobServiceListener{ + BlobStore: rl.Repository.Blobs(ctx), + parent: rl, + } +} + +type manifestServiceListener struct { + distribution.ManifestService + parent *repositoryListener +} + +func (msl *manifestServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := msl.ManifestService.Delete(ctx, dgst) + if err == nil { + if err := msl.parent.listener.ManifestDeleted(msl.parent.Repository.Named(), dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err) + } + } + + return err +} + +func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm, err := msl.ManifestService.Get(ctx, dgst, options...) + if err == nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Named(), sm, options...); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err) + } + } + + return sm, err +} + +func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + dgst, err := msl.ManifestService.Put(ctx, sm, options...) 
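+	// Dispatch to the listener only if the underlying Put succeeded; listener
+	// errors are logged rather than returned to the caller.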
+ + if err == nil { + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Named(), sm, options...); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err) + } + } + + return dgst, err +} + +type blobServiceListener struct { + distribution.BlobStore + parent *repositoryListener +} + +var _ distribution.BlobStore = &blobServiceListener{} + +func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + p, err := bsl.BlobStore.Get(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return p, err +} + +func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rc, err := bsl.BlobStore.Open(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return rc, err +} + +func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return err +} + +func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + desc, err := bsl.BlobStore.Put(ctx, mediaType, p) + if err == nil { + if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Named(), desc); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) + } + } + + return desc, err +} + +func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Create(ctx, options...) 
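+	// A cross-repository mount surfaces as distribution.ErrBlobMounted,
+	// carrying the descriptor and source repository; it is dispatched as a
+	// mount event rather than a push.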
+ switch err := err.(type) { + case distribution.ErrBlobMounted: + if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Named(), err.Descriptor, err.From); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) + } + return nil, err + } + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := bsl.BlobStore.Delete(ctx, dgst) + if err == nil { + if err := bsl.parent.listener.BlobDeleted(bsl.parent.Repository.Named(), dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching layer delete to listener: %v", err) + } + } + + return err +} + +func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Resume(ctx, id) + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter { + return &blobWriterListener{ + BlobWriter: wr, + parent: bsl, + } +} + +type blobWriterListener struct { + distribution.BlobWriter + parent *blobServiceListener +} + +func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + committed, err := bwl.BlobWriter.Commit(ctx, desc) + if err == nil { + if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Named(), committed); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) + } + } + + return committed, err +} diff --git a/notifications/listener_test.go b/notifications/listener_test.go new file mode 100644 index 00000000..a5849807 --- /dev/null +++ b/notifications/listener_test.go @@ -0,0 +1,205 @@ +package notifications + +import ( + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +func TestListener(t *testing.T) { + ctx := context.Background() + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + registry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect, storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + tl := &testListener{ + ops: make(map[string]int), + } + + repoRef, _ := reference.WithName("foo/bar") + repository, err := registry.Repository(ctx, repoRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + repository = Listen(repository, tl) + + // Now take the registry through a number of operations + checkExerciseRepository(t, repository) + + expectedOps := map[string]int{ + "manifest:push": 1, + "manifest:pull": 1, + "manifest:delete": 1, + "layer:push": 2, + "layer:pull": 2, + "layer:delete": 2, + } + + if !reflect.DeepEqual(tl.ops, expectedOps) { + t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) + } + +} + +type testListener struct { + ops map[string]int +} + +func (tl 
*testListener) ManifestPushed(repo reference.Named, m distribution.Manifest, options ...distribution.ManifestServiceOption) error {
+	tl.ops["manifest:push"]++
+
+	return nil
+}
+
+func (tl *testListener) ManifestPulled(repo reference.Named, m distribution.Manifest, options ...distribution.ManifestServiceOption) error {
+	tl.ops["manifest:pull"]++
+	return nil
+}
+
+func (tl *testListener) ManifestDeleted(repo reference.Named, d digest.Digest) error {
+	tl.ops["manifest:delete"]++
+	return nil
+}
+
+func (tl *testListener) BlobPushed(repo reference.Named, desc distribution.Descriptor) error {
+	tl.ops["layer:push"]++
+	return nil
+}
+
+func (tl *testListener) BlobPulled(repo reference.Named, desc distribution.Descriptor) error {
+	tl.ops["layer:pull"]++
+	return nil
+}
+
+func (tl *testListener) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error {
+	tl.ops["layer:mount"]++
+	return nil
+}
+
+func (tl *testListener) BlobDeleted(repo reference.Named, d digest.Digest) error {
+	tl.ops["layer:delete"]++
+	return nil
+}
+
+// checkExerciseRepository takes the repository through all of its operations,
+// carrying out generic checks.
+func checkExerciseRepository(t *testing.T, repository distribution.Repository) {
+	// TODO(stevvooe): This would be a nice testutil function. Basically, it
+	// takes the registry through a common set of operations. This could be
+	// used to make cross-cutting updates by changing internals that affect
+	// update counts. Basically, it would make writing tests a lot easier.
+
+	ctx := context.Background()
+	tag := "thetag"
+	// todo: change this to use Builder
+
+	m := schema1.Manifest{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 1,
+		},
+		Name: repository.Named().Name(),
+		Tag:  tag,
+	}
+
+	var blobDigests []digest.Digest
+	blobs := repository.Blobs(ctx)
+	for i := 0; i < 2; i++ {
+		rs, ds, err := testutil.CreateRandomTarFile()
+		if err != nil {
+			t.Fatalf("error creating test layer: %v", err)
+		}
+		dgst := digest.Digest(ds)
+		blobDigests = append(blobDigests, dgst)
+
+		wr, err := blobs.Create(ctx)
+		if err != nil {
+			t.Fatalf("error creating layer upload: %v", err)
+		}
+
+		// Use the resumes, as well!
+		wr, err = blobs.Resume(ctx, wr.ID())
+		if err != nil {
+			t.Fatalf("error resuming layer upload: %v", err)
+		}
+
+		io.Copy(wr, rs)
+
+		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
+			t.Fatalf("unexpected error finishing upload: %v", err)
+		}
+
+		m.FSLayers = append(m.FSLayers, schema1.FSLayer{
+			BlobSum: dgst,
+		})
+		m.History = append(m.History, schema1.History{
+			V1Compatibility: "",
+		})
+
+		// Then fetch the blobs
+		if rc, err := blobs.Open(ctx, dgst); err != nil {
+			t.Fatalf("error fetching layer: %v", err)
+		} else {
+			defer rc.Close()
+		}
+	}
+
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating key: %v", err)
+	}
+
+	sm, err := schema1.Sign(&m, pk)
+	if err != nil {
+		t.Fatalf("unexpected error signing manifest: %v", err)
+	}
+
+	manifests, err := repository.Manifests(ctx)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	var digestPut digest.Digest
+	if digestPut, err = manifests.Put(ctx, sm); err != nil {
+		t.Fatalf("unexpected error putting the manifest: %v", err)
+	}
+
+	dgst := digest.FromBytes(sm.Canonical)
+	if dgst != digestPut {
+		t.Fatalf("mismatching digest from payload and put")
+	}
+
+	_, err = manifests.Get(ctx, dgst)
+	if err != nil {
+		t.Fatalf("unexpected error fetching manifest: %v", err)
+	}
+
+	err = manifests.Delete(ctx, dgst)
+	if err != nil {
+		t.Fatalf("unexpected error deleting manifest: %v", err)
+	}
+
+	for _, d := range blobDigests {
+		err = blobs.Delete(ctx, d)
+		if err != nil {
+			t.Fatalf("unexpected error deleting blob: %v", err)
+		}
+	}
+}
diff --git a/notifications/metrics.go b/notifications/metrics.go
new file mode 100644
index 00000000..a20af168
--- /dev/null
+++ b/notifications/metrics.go
@@ -0,0 +1,152 @@
+package notifications
+
+import (
+	"expvar"
+	"fmt"
+	"net/http"
+	"sync"
+)
+
+// EndpointMetrics track various actions taken by the endpoint, typically by
+// number of events. The goal of this is to export it via expvar, but we may
+// find some other future solution to be better.
+type EndpointMetrics struct {
+	Pending   int            // events pending in queue
+	Events    int            // total events incoming
+	Successes int            // total events written successfully
+	Failures  int            // total events failed
+	Errors    int            // total events errored
+	Statuses  map[string]int // status code histogram, per call event
+}
+
+// safeMetrics guards the metrics implementation with a lock and provides a
+// safe update function.
+type safeMetrics struct {
+	EndpointMetrics
+	sync.Mutex // protects statuses map
+}
+
+// newSafeMetrics returns safeMetrics with map allocated.
+func newSafeMetrics() *safeMetrics {
+	var sm safeMetrics
+	sm.Statuses = make(map[string]int)
+	return &sm
+}
+
+// httpStatusListener returns the listener for the http sink that updates the
+// relevant counters.
+func (sm *safeMetrics) httpStatusListener() httpStatusListener {
+	return &endpointMetricsHTTPStatusListener{
+		safeMetrics: sm,
+	}
+}
+
+// eventQueueListener returns a listener that maintains queue related counters.
+func (sm *safeMetrics) eventQueueListener() eventQueueListener {
+	return &endpointMetricsEventQueueListener{
+		safeMetrics: sm,
+	}
+}
+
+// endpointMetricsHTTPStatusListener increments counters related to http sinks
+// for the relevant events.
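+// Each method acquires the safeMetrics lock itself, so callers need no
+// additional synchronization.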
+type endpointMetricsHTTPStatusListener struct { + *safeMetrics +} + +var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} + +func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Successes += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Failures += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Errors += len(events) +} + +// endpointMetricsEventQueueListener maintains the incoming events counter and +// the queues pending count. +type endpointMetricsEventQueueListener struct { + *safeMetrics +} + +func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Events += len(events) + eqc.Pending += len(events) +} + +func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Pending -= len(events) +} + +// endpoints is global registry of endpoints used to report metrics to expvar +var endpoints struct { + registered []*Endpoint + mu sync.Mutex +} + +// register places the endpoint into expvar so that stats are tracked. +func register(e *Endpoint) { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + endpoints.registered = append(endpoints.registered, e) +} + +func init() { + // NOTE(stevvooe): Setup registry metrics structure to report to expvar. + // Ideally, we do more metrics through logging but we need some nice + // realtime metrics for queue state for now. 
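+	//
+	// The resulting expvar tree looks roughly like:
+	//
+	//	registry: {
+	//		notifications: {
+	//			endpoints: [{"name": ..., "url": ..., "Metrics": {...}}]
+	//		}
+	//	}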
+
+	registry := expvar.Get("registry")
+
+	if registry == nil {
+		registry = expvar.NewMap("registry")
+	}
+
+	var notifications expvar.Map
+	notifications.Init()
+	notifications.Set("endpoints", expvar.Func(func() interface{} {
+		endpoints.mu.Lock()
+		defer endpoints.mu.Unlock()
+
+		var names []interface{}
+		for _, v := range endpoints.registered {
+			var epjson struct {
+				Name string `json:"name"`
+				URL  string `json:"url"`
+				EndpointConfig
+
+				Metrics EndpointMetrics
+			}
+
+			epjson.Name = v.Name()
+			epjson.URL = v.URL()
+			epjson.EndpointConfig = v.EndpointConfig
+
+			v.ReadMetrics(&epjson.Metrics)
+
+			names = append(names, epjson)
+		}
+
+		return names
+	}))
+
+	registry.(*expvar.Map).Set("notifications", &notifications)
+}
diff --git a/notifications/metrics_test.go b/notifications/metrics_test.go
new file mode 100644
index 00000000..03a08e2c
--- /dev/null
+++ b/notifications/metrics_test.go
@@ -0,0 +1,28 @@
+package notifications
+
+import (
+	"encoding/json"
+	"expvar"
+	"testing"
+)
+
+func TestMetricsExpvar(t *testing.T) {
+	endpointsVar := expvar.Get("registry").(*expvar.Map).Get("notifications").(*expvar.Map).Get("endpoints")
+
+	var v interface{}
+	if err := json.Unmarshal([]byte(endpointsVar.String()), &v); err != nil {
+		t.Fatalf("unexpected error unmarshaling endpoints: %v", err)
+	}
+	if v != nil {
+		t.Fatalf("expected nil, got %#v", v)
+	}
+
+	NewEndpoint("x", "y", EndpointConfig{})
+
+	if err := json.Unmarshal([]byte(endpointsVar.String()), &v); err != nil {
+		t.Fatalf("unexpected error unmarshaling endpoints: %v", err)
+	}
+	if slice, ok := v.([]interface{}); !ok || len(slice) != 1 {
+		t.Fatalf("expected one-element []interface{}, got %#v", v)
+	}
+}
diff --git a/notifications/sinks.go b/notifications/sinks.go
new file mode 100644
index 00000000..14b692a3
--- /dev/null
+++ b/notifications/sinks.go
@@ -0,0 +1,392 @@
+package notifications
+
+import (
+	"container/list"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// NOTE(stevvooe): This file contains definitions for several utility sinks.
+// Typically, the broadcaster is the only sink that should be required
+// externally, but others are suitable for export if the need arises. Albeit,
+// the tight integration with endpoint metrics should be removed.
+
+// Broadcaster sends events to multiple, reliable Sinks. The goal of this
+// component is to dispatch events to configured endpoints. Reliability can be
+// provided by wrapping incoming sinks.
+type Broadcaster struct {
+	sinks  []Sink
+	events chan []Event
+	closed chan chan struct{}
+}
+
+// NewBroadcaster appends one or more sinks to the list of sinks. The
+// broadcaster behavior will be affected by the properties of the sink.
+// Generally, the sink should accept all messages and deal with reliability on
+// its own. EventQueue and RetryingSink are good candidates for wrapping
+// sinks here.
+func NewBroadcaster(sinks ...Sink) *Broadcaster {
+	b := Broadcaster{
+		sinks:  sinks,
+		events: make(chan []Event),
+		closed: make(chan chan struct{}),
+	}
+
+	// Start the broadcaster
+	go b.run()
+
+	return &b
+}
+
+// Write accepts a block of events to be dispatched to all sinks. This method
+// will never fail and should never block (hopefully!). The caller cedes the
+// slice memory to the broadcaster and should not modify it after calling
+// write.
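+//
+// A small sketch of typical use, with sinks constructed elsewhere:
+//
+//	b := NewBroadcaster(endpoint1, endpoint2)
+//	_ = b.Write(events...)
+//	defer b.Close()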
+func (b *Broadcaster) Write(events ...Event) error {
+	select {
+	case b.events <- events:
+	case <-b.closed:
+		return ErrSinkClosed
+	}
+	return nil
+}
+
+// Close the broadcaster, ensuring that all messages are flushed to the
+// underlying sink before returning.
+func (b *Broadcaster) Close() error {
+	logrus.Infof("broadcaster: closing")
+	select {
+	case <-b.closed:
+		// already closed
+		return fmt.Errorf("broadcaster: already closed")
+	default:
+		// do a little chan handoff dance to synchronize closing
+		closed := make(chan struct{})
+		b.closed <- closed
+		close(b.closed)
+		<-closed
+		return nil
+	}
+}
+
+// run is the main broadcast loop, started when the broadcaster is created.
+// Under normal conditions, it waits for events on the event channel. After
+// Close is called, this goroutine will exit.
+func (b *Broadcaster) run() {
+	for {
+		select {
+		case block := <-b.events:
+			for _, sink := range b.sinks {
+				if err := sink.Write(block...); err != nil {
+					logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err)
+				}
+			}
+		case closing := <-b.closed:
+
+			// close all the underlying sinks
+			for _, sink := range b.sinks {
+				if err := sink.Close(); err != nil {
+					logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err)
+				}
+			}
+			closing <- struct{}{}
+
+			logrus.Debugf("broadcaster: closed")
+			return
+		}
+	}
+}
+
+// eventQueue accepts all messages into a queue for asynchronous consumption
+// by a sink. It is unbounded and thread safe but the sink must be reliable or
+// events will be dropped.
+type eventQueue struct {
+	sink      Sink
+	events    *list.List
+	listeners []eventQueueListener
+	cond      *sync.Cond
+	mu        sync.Mutex
+	closed    bool
+}
+
+// eventQueueListener is called when various events happen on the queue.
+type eventQueueListener interface {
+	ingress(events ...Event)
+	egress(events ...Event)
+}
+
+// newEventQueue returns a queue to the provided sink. If any listeners are
+// provided, they will be called to update pending metrics on ingress and
+// egress.
+func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue {
+	eq := eventQueue{
+		sink:      sink,
+		events:    list.New(),
+		listeners: listeners,
+	}
+
+	eq.cond = sync.NewCond(&eq.mu)
+	go eq.run()
+	return &eq
+}
+
+// Write accepts the events into the queue, only failing if the queue has
+// been closed.
+func (eq *eventQueue) Write(events ...Event) error {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	if eq.closed {
+		return ErrSinkClosed
+	}
+
+	for _, listener := range eq.listeners {
+		listener.ingress(events...)
+	}
+	eq.events.PushBack(events)
+	eq.cond.Signal() // signal waiters
+
+	return nil
+}
+
+// Close shuts down the event queue, flushing all pending events before
+// closing the underlying sink.
+func (eq *eventQueue) Close() error {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	if eq.closed {
+		return fmt.Errorf("eventqueue: already closed")
+	}
+
+	// set closed flag
+	eq.closed = true
+	eq.cond.Signal() // signal flushes queue
+	eq.cond.Wait()   // wait for signal from last flush
+
+	return eq.sink.Close()
+}
+
+// run is the main goroutine to flush events to the target sink.
+func (eq *eventQueue) run() {
+	for {
+		block := eq.next()
+
+		if block == nil {
+			return // nil block means event queue is closed.
+		}
+
+		if err := eq.sink.Write(block...); err != nil {
+			logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err)
+		}
+
+		for _, listener := range eq.listeners {
+			listener.egress(block...)
+		}
+	}
+}
+
+// next encompasses the critical section of the run loop. When the queue is
+// empty, it will block on the condition. If new data arrives, it will wake
+// and return a block. When closed, a nil slice will be returned.
+func (eq *eventQueue) next() []Event {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	for eq.events.Len() < 1 {
+		if eq.closed {
+			eq.cond.Broadcast()
+			return nil
+		}
+
+		eq.cond.Wait()
+	}
+
+	front := eq.events.Front()
+	block := front.Value.([]Event)
+	eq.events.Remove(front)
+
+	return block
+}
+
+// ignoredSink discards events with ignored target media types and actions,
+// passing the rest along.
+type ignoredSink struct {
+	Sink
+	ignoreMediaTypes map[string]bool
+	ignoreActions    map[string]bool
+}
+
+func newIgnoredSink(sink Sink, ignored []string, ignoreActions []string) Sink {
+	if len(ignored) == 0 && len(ignoreActions) == 0 {
+		return sink
+	}
+
+	ignoredMap := make(map[string]bool)
+	for _, mediaType := range ignored {
+		ignoredMap[mediaType] = true
+	}
+
+	ignoredActionsMap := make(map[string]bool)
+	for _, action := range ignoreActions {
+		ignoredActionsMap[action] = true
+	}
+
+	return &ignoredSink{
+		Sink:             sink,
+		ignoreMediaTypes: ignoredMap,
+		ignoreActions:    ignoredActionsMap,
+	}
+}
+
+// Write discards events with ignored target media types and actions and
+// passes the rest along.
+func (imts *ignoredSink) Write(events ...Event) error {
+	var kept []Event
+	for _, e := range events {
+		if !imts.ignoreMediaTypes[e.Target.MediaType] {
+			kept = append(kept, e)
+		}
+	}
+	if len(kept) == 0 {
+		return nil
+	}
+
+	var results []Event
+	for _, e := range kept {
+		if !imts.ignoreActions[e.Action] {
+			results = append(results, e)
+		}
+	}
+	if len(results) == 0 {
+		return nil
+	}
+	return imts.Sink.Write(results...)
+}
+
+// retryingSink retries the write until success or an ErrSinkClosed is
+// returned. The underlying sink must have a nonzero probability of succeeding
+// or the sink will block. Internally, a circuit breaker manages the retry and
+// reset behavior. Concurrent calls to a retrying sink are serialized through
+// the sink, meaning that if one is in-flight, another will not proceed.
+type retryingSink struct {
+	mu     sync.Mutex
+	sink   Sink
+	closed bool
+
+	// circuit breaker heuristics
+	failures struct {
+		threshold int
+		recent    int
+		last      time.Time
+		backoff   time.Duration // time after which we retry after failure.
+	}
+}
+
+type retryingSinkListener interface {
+	active(events ...Event)
+	retry(events ...Event)
+}
+
+// TODO(stevvooe): We are using circuit break here, which actually doesn't
+// make a whole lot of sense for this use case, since we always retry. Move
+// this to use bounded exponential backoff.
+
+// newRetryingSink returns a sink that will retry writes to a sink, backing
+// off on failure. Parameters threshold and backoff adjust the behavior of the
+// circuit breaker.
+func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink {
+	rs := &retryingSink{
+		sink: sink,
+	}
+	rs.failures.threshold = threshold
+	rs.failures.backoff = backoff
+
+	return rs
+}
+
+// Write attempts to flush the events to the downstream sink until it succeeds
+// or the sink is closed.
+func (rs *retryingSink) Write(events ...Event) error {
+	rs.mu.Lock()
+	defer rs.mu.Unlock()
+
+retry:
+
+	if rs.closed {
+		return ErrSinkClosed
+	}
+
+	if !rs.proceed() {
+		logrus.Warnf("%v encountered too many errors, backing off", rs.sink)
+		rs.wait(rs.failures.backoff)
+		goto retry
+	}
+
+	if err := rs.write(events...); err != nil {
+		if err == ErrSinkClosed {
+			// terminal!
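+			// ErrSinkClosed is permanent: once the underlying sink is
+			// closed, no retry can succeed, so surface the error.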
+ return err + } + + logrus.Errorf("retryingsink: error writing events: %v, retrying", err) + goto retry + } + + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *retryingSink) Close() error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if rs.closed { + return fmt.Errorf("retryingsink: already closed") + } + + rs.closed = true + return rs.sink.Close() +} + +// write provides a helper that dispatches failure and success properly. Used +// by write as the single-flight write call. +func (rs *retryingSink) write(events ...Event) error { + if err := rs.sink.Write(events...); err != nil { + rs.failure() + return err + } + + rs.reset() + return nil +} + +// wait backoff time against the sink, unlocking so others can proceed. Should +// only be called by methods that currently have the mutex. +func (rs *retryingSink) wait(backoff time.Duration) { + rs.mu.Unlock() + defer rs.mu.Lock() + + // backoff here + time.Sleep(backoff) +} + +// reset marks a successful call. +func (rs *retryingSink) reset() { + rs.failures.recent = 0 + rs.failures.last = time.Time{} +} + +// failure records a failure. +func (rs *retryingSink) failure() { + rs.failures.recent++ + rs.failures.last = time.Now().UTC() +} + +// proceed returns true if the call should proceed based on circuit breaker +// heuristics. +func (rs *retryingSink) proceed() bool { + return rs.failures.recent < rs.failures.threshold || + time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) +} diff --git a/notifications/sinks_test.go b/notifications/sinks_test.go new file mode 100644 index 00000000..9613fe8a --- /dev/null +++ b/notifications/sinks_test.go @@ -0,0 +1,260 @@ +package notifications + +import ( + "fmt" + "math/rand" + "reflect" + "sync" + "time" + + "github.com/sirupsen/logrus" + + "testing" +) + +func TestBroadcaster(t *testing.T) { + const nEvents = 1000 + var sinks []Sink + + for i := 0; i < 10; i++ { + sinks = append(sinks, &testSink{}) + } + + b := NewBroadcaster(sinks...) + + var block []Event + var wg sync.WaitGroup + for i := 1; i <= nEvents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := b.Write(block...); err != nil { + t.Fatalf("error writing block of length %d: %v", len(block), err) + } + wg.Done() + }(block...) + + block = nil + } + } + + wg.Wait() // Wait until writes complete + checkClose(t, b) + + // Iterate through the sinks and check that they all have the expected length. + for _, sink := range sinks { + ts := sink.(*testSink) + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != nEvents { + t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + } + +} + +func TestEventQueue(t *testing.T) { + const nevents = 1000 + var ts testSink + metrics := newSafeMetrics() + eq := newEventQueue( + // delayed sync simulates destination slower than channel comms + &delayedSink{ + Sink: &ts, + delay: time.Millisecond * 1, + }, metrics.eventQueueListener()) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= nevents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := eq.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + wg.Done() + }(block...) 
+
+			block = nil
+		}
+	}
+
+	wg.Wait()
+	checkClose(t, eq)
+
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	metrics.Lock()
+	defer metrics.Unlock()
+
+	if len(ts.events) != nevents {
+		t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), nevents)
+	}
+
+	if !ts.closed {
+		t.Fatalf("sink should have been closed")
+	}
+
+	if metrics.Events != nevents {
+		t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents)
+	}
+
+	if metrics.Pending != 0 {
+		t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0)
+	}
+}
+
+func TestIgnoredSink(t *testing.T) {
+	blob := createTestEvent("push", "library/test", "blob")
+	manifest := createTestEvent("pull", "library/test", "manifest")
+
+	type testcase struct {
+		ignoreMediaTypes []string
+		ignoreActions    []string
+		expected         []Event
+	}
+
+	cases := []testcase{
+		{nil, nil, []Event{blob, manifest}},
+		{[]string{"other"}, []string{"other"}, []Event{blob, manifest}},
+		{[]string{"blob"}, []string{"other"}, []Event{manifest}},
+		{[]string{"blob", "manifest"}, []string{"other"}, nil},
+		{[]string{"other"}, []string{"push"}, []Event{manifest}},
+		{[]string{"other"}, []string{"pull"}, []Event{blob}},
+		{[]string{"other"}, []string{"pull", "push"}, nil},
+	}
+
+	for _, c := range cases {
+		ts := &testSink{}
+		s := newIgnoredSink(ts, c.ignoreMediaTypes, c.ignoreActions)
+
+		if err := s.Write(blob, manifest); err != nil {
+			t.Fatalf("error writing event: %v", err)
+		}
+
+		ts.mu.Lock()
+		if !reflect.DeepEqual(ts.events, c.expected) {
+			t.Fatalf("unexpected events: %#v != %#v", ts.events, c.expected)
+		}
+		ts.mu.Unlock()
+	}
+}
+
+func TestRetryingSink(t *testing.T) {
+	// Make a sink that fails most of the time, ensuring that all the events
+	// still make it through.
+	var ts testSink
+	flaky := &flakySink{
+		rate: 1.0, // start out always failing.
+		Sink: &ts,
+	}
+	s := newRetryingSink(flaky, 3, 10*time.Millisecond)
+
+	var wg sync.WaitGroup
+	var block []Event
+	for i := 1; i <= 100; i++ {
+		block = append(block, createTestEvent("push", "library/test", "blob"))
+
+		// Above 50, set the failure rate lower
+		if i > 50 {
+			s.mu.Lock()
+			flaky.rate = 0.90
+			s.mu.Unlock()
+		}
+
+		if i%10 == 0 && i > 0 {
+			wg.Add(1)
+			go func(block ...Event) {
+				defer wg.Done()
+				if err := s.Write(block...); err != nil {
+					t.Fatalf("error writing event block: %v", err)
+				}
+			}(block...)
+
+			block = nil
+		}
+	}
+
+	wg.Wait()
+	checkClose(t, s)
+
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+
+	if len(ts.events) != 100 {
+		t.Fatalf("events not propagated: %d != %d", len(ts.events), 100)
+	}
+}
+
+type testSink struct {
+	events []Event
+	mu     sync.Mutex
+	closed bool
+}
+
+func (ts *testSink) Write(events ...Event) error {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	ts.events = append(ts.events, events...)
+	return nil
+}
+
+func (ts *testSink) Close() error {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	ts.closed = true
+
+	logrus.Infof("closing testSink")
+	return nil
+}
+
+type delayedSink struct {
+	Sink
+	delay time.Duration
+}
+
+func (ds *delayedSink) Write(events ...Event) error {
+	time.Sleep(ds.delay)
+	return ds.Sink.Write(events...)
+}
+
+type flakySink struct {
+	Sink
+	rate float64
+}
+
+func (fs *flakySink) Write(events ...Event) error {
+	if rand.Float64() < fs.rate {
+		return fmt.Errorf("error writing %d events", len(events))
+	}
+
+	return fs.Sink.Write(events...)
+} + +func checkClose(t *testing.T, sink Sink) { + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing: %v", err) + } + + // second close should not crash but should return an error. + if err := sink.Close(); err == nil { + t.Fatalf("no error on double close") + } + + // Write after closed should be an error + if err := sink.Write([]Event{}...); err == nil { + t.Fatalf("write after closed did not have an error") + } else if err != ErrSinkClosed { + t.Fatalf("error should be ErrSinkClosed") + } +} diff --git a/project/dev-image/Dockerfile b/project/dev-image/Dockerfile new file mode 100644 index 00000000..1e2a8471 --- /dev/null +++ b/project/dev-image/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:14.04 + +ENV GOLANG_VERSION 1.4rc1 +ENV GOPATH /var/cache/drone +ENV GOROOT /usr/local/go +ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin + +ENV LANG C +ENV LC_ALL C + +RUN apt-get update && apt-get install -y \ + wget ca-certificates git mercurial bzr \ + --no-install-recommends \ + && rm -rf /var/lib/apt/lists/* + +RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ + tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ + rm go${GOLANG_VERSION}.linux-amd64.tar.gz + +RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint diff --git a/project/hooks/README.md b/project/hooks/README.md new file mode 100644 index 00000000..eda88696 --- /dev/null +++ b/project/hooks/README.md @@ -0,0 +1,6 @@ +Git Hooks +========= + +To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. + +As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. \ No newline at end of file diff --git a/project/hooks/configure-hooks.sh b/project/hooks/configure-hooks.sh new file mode 100755 index 00000000..6afea8a1 --- /dev/null +++ b/project/hooks/configure-hooks.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +cd $(dirname $0) + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + echo -e "Unable to resolve repository root. Error:\n$REPO_ROOT" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +set -e +set -x + +# Just in case the directory doesn't exist +mkdir -p $REPO_ROOT/.git/hooks + +ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/project/hooks/pre-commit b/project/hooks/pre-commit new file mode 100755 index 00000000..3ee2e913 --- /dev/null +++ b/project/hooks/pre-commit @@ -0,0 +1,29 @@ +#!/bin/sh + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +cd $REPO_ROOT + +GOFMT_ERRORS=$(gofmt -s -l . 2>&1) +if [ -n "$GOFMT_ERRORS" ]; then + printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." 
in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr + exit 1 +fi + +GOLINT_ERRORS=$(golint ./... 2>&1) +if [ -n "$GOLINT_ERRORS" ]; then + printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr + exit 1 +fi + +GOVET_ERRORS=$(go vet ./... 2>&1) +GOVET_STATUS=$? +if [ "$GOVET_STATUS" -ne "0" ]; then + printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr + exit $GOVET_STATUS +fi diff --git a/reference/helpers.go b/reference/helpers.go new file mode 100644 index 00000000..978df7ea --- /dev/null +++ b/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/reference/normalize.go b/reference/normalize.go new file mode 100644 index 00000000..2d71fc5e --- /dev/null +++ b/reference/normalize.go @@ -0,0 +1,170 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. 
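+//
+// For example (illustrative):
+//
+//	named, _ := ParseNormalizedNamed("ubuntu")
+//	// named.Name() == "docker.io/library/ubuntu"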
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-character hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// splitDockerDomain splits a repository name into domain and remote-name
+// strings. If no valid domain is found, the default domain is used. The
+// repository name must already be validated.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and the "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// It returns a familiarized, name-only reference.
+func familiarizeName(named namedRepository) repository {
+	repo := repository{
+		domain: named.Domain(),
+		path:   named.Path(),
+	}
+
+	if repo.domain == defaultDomain {
+		repo.domain = ""
+		// Handle official repositories which have the pattern "library/"
+		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+			repo.path = split[1]
+		}
+	}
+	return repo
+}
+
+func (r reference) Familiar() Named {
+	return reference{
+		namedRepository: familiarizeName(r.namedRepository),
+		tag:             r.tag,
+		digest:          r.digest,
+	}
+}
+
+func (r repository) Familiar() Named {
+	return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+	return taggedReference{
+		namedRepository: familiarizeName(t.namedRepository),
+		tag:             t.tag,
+	}
+}
+
+func (c canonicalReference) Familiar() Named {
+	return canonicalReference{
+		namedRepository: familiarizeName(c.namedRepository),
+		digest:          c.digest,
+	}
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+	if IsNameOnly(ref) {
+		namedTagged, err := WithTag(ref, defaultTag)
+		if err != nil {
+			// The default tag must be valid; to create a NamedTagged
+			// type with non-validated input, the WithTag function
+			// should be used instead.
+			panic(err)
+		}
+		return namedTagged
+	}
+	return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
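+//
+// A short sketch of the behavior (cases drawn from TestParseAnyReference
+// below):
+//
+//	ref, _ := ParseAnyReference("redis")
+//	ref.String() // "docker.io/library/redis"
+//
+// A 64-character hexadecimal string instead parses to a sha256 digest
+// reference.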
+func ParseAnyReference(ref string) (Reference, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+		return digestReference("sha256:" + ref), nil
+	}
+	if dgst, err := digest.Parse(ref); err == nil {
+		return digestReference(dgst), nil
+	}
+
+	return ParseNormalizedNamed(ref)
+}
+
+// ParseAnyReferenceWithSet parses a reference string as a possible short
+// identifier to be matched in a digest set, a full digest, or familiar name.
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
+	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
+		dgst, err := ds.Lookup(ref)
+		if err == nil {
+			return digestReference(dgst), nil
+		}
+	} else {
+		if dgst, err := digest.Parse(ref); err == nil {
+			return digestReference(dgst), nil
+		}
+	}
+
+	return ParseNormalizedNamed(ref)
+}
diff --git a/reference/normalize_test.go b/reference/normalize_test.go
new file mode 100644
index 00000000..a881972a
--- /dev/null
+++ b/reference/normalize_test.go
@@ -0,0 +1,625 @@
+package reference
+
+import (
+	"strconv"
+	"testing"
+
+	"github.com/docker/distribution/digestset"
+	"github.com/opencontainers/go-digest"
+)
+
+func TestValidateReferenceName(t *testing.T) {
+	validRepoNames := []string{
+		"docker/docker",
+		"library/debian",
+		"debian",
+		"docker.io/docker/docker",
+		"docker.io/library/debian",
+		"docker.io/debian",
+		"index.docker.io/docker/docker",
+		"index.docker.io/library/debian",
+		"index.docker.io/debian",
+		"127.0.0.1:5000/docker/docker",
+		"127.0.0.1:5000/library/debian",
+		"127.0.0.1:5000/debian",
+		"thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",
+
+		// This test case was moved from invalid to valid: it is valid input
+		// when specified with a hostname, since the hostname removes the
+		// ambiguity about whether the value is an identifier or a repository name.
+		"docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
+	}
+	invalidRepoNames := []string{
+		"https://github.com/docker/docker",
+		"docker/Docker",
+		"-docker",
+		"-docker/docker",
+		"-docker.io/docker/docker",
+		"docker///docker",
+		"docker.io/docker/Docker",
+		"docker.io/docker///docker",
+		"1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
+	}
+
+	for _, name := range invalidRepoNames {
+		_, err := ParseNormalizedNamed(name)
+		if err == nil {
+			t.Fatalf("Expected invalid repo name for %q", name)
+		}
+	}
+
+	for _, name := range validRepoNames {
+		_, err := ParseNormalizedNamed(name)
+		if err != nil {
+			t.Fatalf("Error parsing repo name %s, got: %q", name, err)
+		}
+	}
+}
+
+func TestValidateRemoteName(t *testing.T) {
+	validRepositoryNames := []string{
+		// Sanity check.
+		"docker/docker",
+
+		// Allow 64-character non-hexadecimal names (hexadecimal names are forbidden).
+		"thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",
+
+		// Allow embedded hyphens.
+		"docker-rules/docker",
+
+		// Allow multiple hyphens as well.
+		"docker---rules/docker",
+
+		// Username "doc" and image name "docker" being tested.
+		"doc/docker",
+
+		// Single-character names are allowed.
+		"d/docker",
+		"jess/t",
+
+		// Consecutive underscores.
+		"dock__er/docker",
+	}
+	for _, repositoryName := range validRepositoryNames {
+		_, err := ParseNormalizedNamed(repositoryName)
+		if err != nil {
+			t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err)
+		}
+	}
+
+	invalidRepositoryNames := []string{
+		// Disallow capital letters.
+		"docker/Docker",
+
+		// Only allow one slash.
+ "docker///docker", + + // Disallow 64-character hexadecimal. + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + + // Disallow leading and trailing hyphens in namespace. + "-docker/docker", + "docker-/docker", + "-docker-/docker", + + // Don't allow underscores everywhere (as opposed to hyphens). + "____/____", + + "_docker/_docker", + + // Disallow consecutive periods. + "dock..er/docker", + "dock_.er/docker", + "dock-.er/docker", + + // No repository. + "docker/", + + //namespace too long + "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", + } + for _, repositoryName := range invalidRepositoryNames { + if _, err := ParseNormalizedNamed(repositoryName); err == nil { + t.Errorf("Repository name should be invalid: %v", repositoryName) + } + } +} + +func TestParseRepositoryInfo(t *testing.T) { + type tcase struct { + RemoteName, FamiliarName, FullName, AmbiguousName, Domain string + } + + tcases := []tcase{ + { + RemoteName: "fooo/bar", + FamiliarName: "fooo/bar", + FullName: "docker.io/fooo/bar", + AmbiguousName: "index.docker.io/fooo/bar", + Domain: "docker.io", + }, + { + RemoteName: "library/ubuntu", + FamiliarName: "ubuntu", + FullName: "docker.io/library/ubuntu", + AmbiguousName: "library/ubuntu", + Domain: "docker.io", + }, + { + RemoteName: "nonlibrary/ubuntu", + FamiliarName: "nonlibrary/ubuntu", + FullName: "docker.io/nonlibrary/ubuntu", + AmbiguousName: "", + Domain: "docker.io", + }, + { + RemoteName: "other/library", + FamiliarName: "other/library", + FullName: "docker.io/other/library", + AmbiguousName: "", + Domain: "docker.io", + }, + { + RemoteName: "private/moonbase", + FamiliarName: "127.0.0.1:8000/private/moonbase", + FullName: "127.0.0.1:8000/private/moonbase", + AmbiguousName: "", + Domain: "127.0.0.1:8000", + }, + { + RemoteName: "privatebase", + FamiliarName: "127.0.0.1:8000/privatebase", + FullName: "127.0.0.1:8000/privatebase", + AmbiguousName: "", + Domain: "127.0.0.1:8000", + }, + { + RemoteName: "private/moonbase", + FamiliarName: "example.com/private/moonbase", + FullName: "example.com/private/moonbase", + AmbiguousName: "", + Domain: "example.com", + }, + { + RemoteName: "privatebase", + FamiliarName: "example.com/privatebase", + FullName: "example.com/privatebase", + AmbiguousName: "", + Domain: "example.com", + }, + { + RemoteName: "private/moonbase", + FamiliarName: "example.com:8000/private/moonbase", + FullName: "example.com:8000/private/moonbase", + AmbiguousName: "", + Domain: "example.com:8000", + }, + { + RemoteName: "privatebasee", + FamiliarName: "example.com:8000/privatebasee", + FullName: "example.com:8000/privatebasee", + AmbiguousName: "", + Domain: "example.com:8000", + }, + { + RemoteName: "library/ubuntu-12.04-base", + FamiliarName: "ubuntu-12.04-base", + FullName: "docker.io/library/ubuntu-12.04-base", + AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", + Domain: "docker.io", + }, + { + RemoteName: "library/foo", + FamiliarName: "foo", + FullName: "docker.io/library/foo", + AmbiguousName: "docker.io/foo", + Domain: "docker.io", + }, + { + RemoteName: "library/foo/bar", + FamiliarName: "library/foo/bar", + FullName: "docker.io/library/foo/bar", + AmbiguousName: "", + Domain: "docker.io", + }, + { + RemoteName: "store/foo/bar", + FamiliarName: "store/foo/bar", + FullName: 
"docker.io/store/foo/bar", + AmbiguousName: "", + Domain: "docker.io", + }, + } + + for _, tcase := range tcases { + refStrings := []string{tcase.FamiliarName, tcase.FullName} + if tcase.AmbiguousName != "" { + refStrings = append(refStrings, tcase.AmbiguousName) + } + + var refs []Named + for _, r := range refStrings { + named, err := ParseNormalizedNamed(r) + if err != nil { + t.Fatal(err) + } + refs = append(refs, named) + } + + for _, r := range refs { + if expected, actual := tcase.FamiliarName, FamiliarName(r); expected != actual { + t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.FullName, r.String(); expected != actual { + t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.Domain, Domain(r); expected != actual { + t.Fatalf("Invalid domain for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.RemoteName, Path(r); expected != actual { + t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) + } + + } + } +} + +func TestParseReferenceWithTagAndDigest(t *testing.T) { + shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa" + ref, err := ParseNormalizedNamed(shortRef) + if err != nil { + t.Fatal(err) + } + if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected { + t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) + } + + if _, isTagged := ref.(NamedTagged); !isTagged { + t.Fatalf("Reference from %q should support tag", ref) + } + if _, isCanonical := ref.(Canonical); !isCanonical { + t.Fatalf("Reference from %q should support digest", ref) + } + if expected, actual := shortRef, FamiliarString(ref); actual != expected { + t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) + } +} + +func TestInvalidReferenceComponents(t *testing.T) { + if _, err := ParseNormalizedNamed("-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid name") + } + ref, err := ParseNormalizedNamed("busybox") + if err != nil { + t.Fatal(err) + } + if _, err := WithTag(ref, "-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid tag") + } + if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { + t.Fatal("Expected WithDigest to detect invalid digest") + } +} + +func equalReference(r1, r2 Reference) bool { + switch v1 := r1.(type) { + case digestReference: + if v2, ok := r2.(digestReference); ok { + return v1 == v2 + } + case repository: + if v2, ok := r2.(repository); ok { + return v1 == v2 + } + case taggedReference: + if v2, ok := r2.(taggedReference); ok { + return v1 == v2 + } + case canonicalReference: + if v2, ok := r2.(canonicalReference); ok { + return v1 == v2 + } + case reference: + if v2, ok := r2.(reference); ok { + return v1 == v2 + } + } + return false +} + +func TestParseAnyReference(t *testing.T) { + tcases := []struct { + Reference string + Equivalent string + Expected Reference + Digests []digest.Digest + }{ + { + Reference: "redis", + Equivalent: "docker.io/library/redis", + }, + { + Reference: "redis:latest", + Equivalent: "docker.io/library/redis:latest", + }, + { + Reference: "docker.io/library/redis:latest", + Equivalent: "docker.io/library/redis:latest", + }, + { + Reference: "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Equivalent: 
"docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "dmcgowan/myapp", + Equivalent: "docker.io/dmcgowan/myapp", + }, + { + Reference: "dmcgowan/myapp:latest", + Equivalent: "docker.io/dmcgowan/myapp:latest", + }, + { + Reference: "docker.io/mcgowan/myapp:latest", + Equivalent: "docker.io/mcgowan/myapp:latest", + }, + { + Reference: "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + }, + { + Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Digests: []digest.Digest{ + digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + { + Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + Digests: []digest.Digest{ + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + { + Reference: "dbcc1c", + Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Digests: []digest.Digest{ + digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + { + Reference: "dbcc1", + Equivalent: "docker.io/library/dbcc1", + Digests: []digest.Digest{ + digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + { + Reference: "dbcc1c", + Equivalent: "docker.io/library/dbcc1c", + Digests: []digest.Digest{ + 
digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + } + + for _, tcase := range tcases { + var ref Reference + var err error + if len(tcase.Digests) == 0 { + ref, err = ParseAnyReference(tcase.Reference) + } else { + ds := digestset.NewSet() + for _, dgst := range tcase.Digests { + if err := ds.Add(dgst); err != nil { + t.Fatalf("Error adding digest %s: %v", dgst.String(), err) + } + } + ref, err = ParseAnyReferenceWithSet(tcase.Reference, ds) + } + if err != nil { + t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err) + } + if ref.String() != tcase.Equivalent { + t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent) + } + + expected := tcase.Expected + if expected == nil { + expected, err = Parse(tcase.Equivalent) + if err != nil { + t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err) + } + } + if !equalReference(ref, expected) { + t.Errorf("Unexpected reference %#v, expected %#v", ref, expected) + } + } +} + +func TestNormalizedSplitHostname(t *testing.T) { + testcases := []struct { + input string + domain string + name string + }{ + { + input: "test.com/foo", + domain: "test.com", + name: "foo", + }, + { + input: "test_com/foo", + domain: "docker.io", + name: "test_com/foo", + }, + { + input: "docker/migrator", + domain: "docker.io", + name: "docker/migrator", + }, + { + input: "test.com:8080/foo", + domain: "test.com:8080", + name: "foo", + }, + { + input: "test-com:8080/foo", + domain: "test-com:8080", + name: "foo", + }, + { + input: "foo", + domain: "docker.io", + name: "library/foo", + }, + { + input: "xn--n3h.com/foo", + domain: "xn--n3h.com", + name: "foo", + }, + { + input: "xn--n3h.com:18080/foo", + domain: "xn--n3h.com:18080", + name: "foo", + }, + { + input: "docker.io/foo", + domain: "docker.io", + name: "library/foo", + }, + { + input: "docker.io/library/foo", + domain: "docker.io", + name: "library/foo", + }, + { + input: "docker.io/library/foo/bar", + domain: "docker.io", + name: "library/foo/bar", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
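+			// Record the failure but keep running the remaining cases;
+			// contrast t.Fatalf, which would stop the test here.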
+ t.Fail() + } + + named, err := ParseNormalizedNamed(testcase.input) + if err != nil { + failf("error parsing name: %s", err) + } + domain, name := SplitHostname(named) + if domain != testcase.domain { + failf("unexpected domain: got %q, expected %q", domain, testcase.domain) + } + if name != testcase.name { + failf("unexpected name: got %q, expected %q", name, testcase.name) + } + } +} + +func TestMatchError(t *testing.T) { + named, err := ParseAnyReference("foo") + if err != nil { + t.Fatal(err) + } + _, err = FamiliarMatch("[-x]", named) + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestMatch(t *testing.T) { + matchCases := []struct { + reference string + pattern string + expected bool + }{ + { + reference: "foo", + pattern: "foo/**/ba[rz]", + expected: false, + }, + { + reference: "foo/any/bat", + pattern: "foo/**/ba[rz]", + expected: false, + }, + { + reference: "foo/a/bar", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/b/baz", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/*/baz:tag", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/c/baz:tag", + expected: true, + }, + { + reference: "example.com/foo/c/baz:tag", + pattern: "*/foo/c/baz", + expected: true, + }, + { + reference: "example.com/foo/c/baz:tag", + pattern: "example.com/foo/c/baz", + expected: true, + }, + } + for _, c := range matchCases { + named, err := ParseAnyReference(c.reference) + if err != nil { + t.Fatal(err) + } + actual, err := FamiliarMatch(c.pattern, named) + if err != nil { + t.Fatal(err) + } + if actual != c.expected { + t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) + } + } +} diff --git a/reference/reference.go b/reference/reference.go new file mode 100644 index 00000000..2f66cca8 --- /dev/null +++ b/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. 
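+	// Parse returns this error for syntactically malformed input such as
+	// ":justtag" (see TestReferenceParse in reference_test.go).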
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+	// ErrNameNotCanonical is returned when a name is not canonical.
+	ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference.
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text, which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by the field.
+func (f *Field) UnmarshalText(p []byte) error {
+	r, err := Parse(string(p))
+	if err != nil {
+		return err
+	}
+
+	f.reference = r
+	return nil
+}
+
+// Named is an object with a full name.
+type Named interface {
+	Reference
+	Name() string
+}
+
+// Tagged is an object which has a tag.
+type Tagged interface {
+	Reference
+	Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced.
+type Digested interface {
+	Reference
+	Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name, including a name with domain and digest.
+type Canonical interface {
+	Named
+	Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
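+// For example, in "docker.io/library/ubuntu" the domain component is
+// "docker.io" and the path component is "library/ubuntu".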
+type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. 
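+//
+// For example (compare TestWithTag in reference_test.go):
+//
+//	named, _ := WithName("test.com/foo")
+//	tagged, _ := WithTag(named, "tag")
+//	tagged.String() // "test.com/foo:tag"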
+func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/reference/reference_test.go b/reference/reference_test.go new file mode 100644 index 00000000..16b871f9 --- /dev/null +++ b/reference/reference_test.go @@ -0,0 +1,659 @@ +package reference + +import ( + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/json" + "strconv" + "strings" + 
"testing" + + "github.com/opencontainers/go-digest" +) + +func TestReferenceParse(t *testing.T) { + // referenceTestcases is a unified set of testcases for + // testing the parsing of references + referenceTestcases := []struct { + // input is the repository name or name component testcase + input string + // err is the error expected from Parse, or nil + err error + // repository is the string representation for the reference + repository string + // domain is the domain expected in the reference + domain string + // tag is the tag for the reference + tag string + // digest is the digest for the reference (enforces digest reference) + digest string + }{ + { + input: "test_com", + repository: "test_com", + }, + { + input: "test.com:tag", + repository: "test.com", + tag: "tag", + }, + { + input: "test.com:5000", + repository: "test.com", + tag: "5000", + }, + { + input: "test.com/repo:tag", + domain: "test.com", + repository: "test.com/repo", + tag: "tag", + }, + { + input: "test:5000/repo", + domain: "test:5000", + repository: "test:5000/repo", + }, + { + input: "test:5000/repo:tag", + domain: "test:5000", + repository: "test:5000/repo", + tag: "tag", + }, + { + input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + domain: "test:5000", + repository: "test:5000/repo", + digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + domain: "test:5000", + repository: "test:5000/repo", + tag: "tag", + digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo", + domain: "test:5000", + repository: "test:5000/repo", + }, + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "repo@sha256:ffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestInvalidLength, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestUnsupported, + }, + { + input: "Uppercase:tag", + err: ErrNameContainsUppercase, + }, + // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. 
+ // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 + //{ + // input: "Uppercase/lowercase:tag", + // err: ErrNameContainsUppercase, + //}, + { + input: "test:5000/Uppercase/lowercase:tag", + err: ErrNameContainsUppercase, + }, + { + input: "lowercase:Uppercase", + repository: "lowercase", + tag: "Uppercase", + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", + domain: "a", + repository: strings.Repeat("a/", 127) + "a", + tag: "tag-puts-this-over-max", + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + domain: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + }, + { + input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", + domain: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + tag: "some-long-tag", + }, + { + input: "b.gcr.io/test.example.com/my-app:test.example.com", + domain: "b.gcr.io", + repository: "b.gcr.io/test.example.com/my-app", + tag: "test.example.com", + }, + { + input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode + domain: "xn--n3h.com", + repository: "xn--n3h.com/myimage", + tag: "xn--n3h.com", + }, + { + input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode + domain: "xn--7o8h.com", + repository: "xn--7o8h.com/myimage", + tag: "xn--7o8h.com", + digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "foo_bar.com:8080", + repository: "foo_bar.com", + tag: "8080", + }, + { + input: "foo/foo_bar.com:8080", + domain: "foo", + repository: "foo/foo_bar.com", + tag: "8080", + }, + } + for _, testcase := range referenceTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + repo, err := Parse(testcase.input) + if testcase.err != nil { + if err == nil { + failf("missing expected error: %v", testcase.err) + } else if testcase.err != err { + failf("mismatched error: got %v, expected %v", err, testcase.err) + } + continue + } else if err != nil { + failf("unexpected parse error: %v", err) + continue + } + if repo.String() != testcase.input { + failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) + } + + if named, ok := repo.(Named); ok { + if named.Name() != testcase.repository { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) + } + domain, _ := SplitHostname(named) + if domain != testcase.domain { + failf("unexpected domain: got %q, expected %q", domain, testcase.domain) + } + } else if testcase.repository != "" || testcase.domain != "" { + failf("expected named type, got %T", repo) + } + + tagged, ok := repo.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", repo) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := repo.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", repo) + } + } else if ok { + failf("unexpected digested type") + } + + } +} + +// TestWithNameFailure tests cases where WithName should fail. Cases where it +// should succeed are covered by TestSplitHostname, below. +func TestWithNameFailure(t *testing.T) { + testcases := []struct { + input string + err error + }{ + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + _, err := WithName(testcase.input) + if err == nil { + failf("no error parsing name. expected: %s", testcase.err) + } + } +} + +func TestSplitHostname(t *testing.T) { + testcases := []struct { + input string + domain string + name string + }{ + { + input: "test.com/foo", + domain: "test.com", + name: "foo", + }, + { + input: "test_com/foo", + domain: "", + name: "test_com/foo", + }, + { + input: "test:8080/foo", + domain: "test:8080", + name: "foo", + }, + { + input: "test.com:8080/foo", + domain: "test.com:8080", + name: "foo", + }, + { + input: "test-com:8080/foo", + domain: "test-com:8080", + name: "foo", + }, + { + input: "xn--n3h.com:18080/foo", + domain: "xn--n3h.com:18080", + name: "foo", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+			t.Fail()
+		}
+
+		named, err := WithName(testcase.input)
+		if err != nil {
+			failf("error parsing name: %s", err)
+		}
+		domain, name := SplitHostname(named)
+		if domain != testcase.domain {
+			failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
+		}
+		if name != testcase.name {
+			failf("unexpected name: got %q, expected %q", name, testcase.name)
+		}
+	}
+}
+
+type serializationType struct {
+	Description string
+	Field       Field
+}
+
+func TestSerialization(t *testing.T) {
+	testcases := []struct {
+		description string
+		input       string
+		name        string
+		tag         string
+		digest      string
+		err         error
+	}{
+		{
+			description: "empty value",
+			err:         ErrNameEmpty,
+		},
+		{
+			description: "just a name",
+			input:       "example.com:8000/named",
+			name:        "example.com:8000/named",
+		},
+		{
+			description: "name with a tag",
+			input:       "example.com:8000/named:tagged",
+			name:        "example.com:8000/named",
+			tag:         "tagged",
+		},
+		{
+			description: "name with digest",
+			input:       "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112",
+			name:        "other.com/named",
+			digest:      "sha256:1234567890098765432112345667890098765432112345667890098765432112",
+		},
+	}
+	for _, testcase := range testcases {
+		failf := func(format string, v ...interface{}) {
+			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
+			t.Fail()
+		}
+
+		m := map[string]string{
+			"Description": testcase.description,
+			"Field":       testcase.input,
+		}
+		b, err := json.Marshal(m)
+		if err != nil {
+			failf("error marshalling: %v", err)
+		}
+		t := serializationType{}
+
+		if err := json.Unmarshal(b, &t); err != nil {
+			if testcase.err == nil {
+				failf("error unmarshalling: %v", err)
+			}
+			if err != testcase.err {
+				failf("wrong error, expected %v, got %v", testcase.err, err)
+			}
+
+			continue
+		} else if testcase.err != nil {
+			failf("expected error unmarshalling: %v", testcase.err)
+		}
+
+		if t.Description != testcase.description {
+			failf("wrong description, expected %q, got %q", testcase.description, t.Description)
+		}
+
+		ref := t.Field.Reference()
+
+		if named, ok := ref.(Named); ok {
+			if named.Name() != testcase.name {
+				failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name)
+			}
+		} else if testcase.name != "" {
+			failf("expected named type, got %T", ref)
+		}
+
+		tagged, ok := ref.(Tagged)
+		if testcase.tag != "" {
+			if ok {
+				if tagged.Tag() != testcase.tag {
+					failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
+				}
+			} else {
+				failf("expected tagged type, got %T", ref)
+			}
+		} else if ok {
+			failf("unexpected tagged type")
+		}
+
+		digested, ok := ref.(Digested)
+		if testcase.digest != "" {
+			if ok {
+				if digested.Digest().String() != testcase.digest {
+					failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
+				}
+			} else {
+				failf("expected digested type, got %T", ref)
+			}
+		} else if ok {
+			failf("unexpected digested type")
+		}
+
+		t = serializationType{
+			Description: testcase.description,
+			Field:       AsField(ref),
+		}
+
+		b2, err := json.Marshal(t)
+		if err != nil {
+			failf("error marshalling serialization type: %v", err)
+		}
+
+		if string(b) != string(b2) {
+			failf("unexpected serialized value: expected %q, got %q", string(b), string(b2))
+		}
+
+		// Ensure t.Field does not implement "Reference" directly, getting
+		// around the Reference type system.
+		var fieldInterface interface{} = t.Field
+		if _, ok := fieldInterface.(Reference); ok {
+			failf("field should not implement Reference interface")
+		}
+
+	}
+}
+
+func TestWithTag(t
*testing.T) { + testcases := []struct { + name string + digest digest.Digest + tag string + combined string + }{ + { + name: "test.com/foo", + tag: "tag", + combined: "test.com/foo:tag", + }, + { + name: "foo", + tag: "tag2", + combined: "foo:tag2", + }, + { + name: "test.com:8000/foo", + tag: "tag4", + combined: "test.com:8000/foo:tag4", + }, + { + name: "test.com:8000/foo", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) + t.Fail() + } + + named, err := WithName(testcase.name) + if err != nil { + failf("error parsing name: %s", err) + } + if testcase.digest != "" { + canonical, err := WithDigest(named, testcase.digest) + if err != nil { + failf("error adding digest") + } + named = canonical + } + + tagged, err := WithTag(named, testcase.tag) + if err != nil { + failf("WithTag failed: %s", err) + } + if tagged.String() != testcase.combined { + failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) + } + } +} + +func TestWithDigest(t *testing.T) { + testcases := []struct { + name string + digest digest.Digest + tag string + combined string + }{ + { + name: "test.com/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "latest", + combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) 
+			t.Fail()
+		}
+
+		named, err := WithName(testcase.name)
+		if err != nil {
+			failf("error parsing name: %s", err)
+		}
+		if testcase.tag != "" {
+			tagged, err := WithTag(named, testcase.tag)
+			if err != nil {
+				failf("error adding tag")
+			}
+			named = tagged
+		}
+		digested, err := WithDigest(named, testcase.digest)
+		if err != nil {
+			failf("WithDigest failed: %s", err)
+		}
+		if digested.String() != testcase.combined {
+			failf("unexpected: got %q, expected %q", digested.String(), testcase.combined)
+		}
+	}
+}
+
+func TestParseNamed(t *testing.T) {
+	testcases := []struct {
+		input  string
+		domain string
+		name   string
+		err    error
+	}{
+		{
+			input:  "test.com/foo",
+			domain: "test.com",
+			name:   "foo",
+		},
+		{
+			input:  "test:8080/foo",
+			domain: "test:8080",
+			name:   "foo",
+		},
+		{
+			input: "test_com/foo",
+			err:   ErrNameNotCanonical,
+		},
+		{
+			input: "test.com",
+			err:   ErrNameNotCanonical,
+		},
+		{
+			input: "foo",
+			err:   ErrNameNotCanonical,
+		},
+		{
+			input: "library/foo",
+			err:   ErrNameNotCanonical,
+		},
+		{
+			input:  "docker.io/library/foo",
+			domain: "docker.io",
+			name:   "library/foo",
+		},
+		// Ambiguous case: the parser will add "library/" to foo.
+		{
+			input: "docker.io/foo",
+			err:   ErrNameNotCanonical,
+		},
+	}
+	for _, testcase := range testcases {
+		failf := func(format string, v ...interface{}) {
+			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
+			t.Fail()
+		}
+
+		named, err := ParseNamed(testcase.input)
+		if err != nil && testcase.err == nil {
+			failf("error parsing name: %s", err)
+			continue
+		} else if err == nil && testcase.err != nil {
+			failf("parsing succeeded: expected error %v", testcase.err)
+			continue
+		} else if err != testcase.err {
+			failf("unexpected error %v, expected %v", err, testcase.err)
+			continue
+		} else if err != nil {
+			continue
+		}
+
+		domain, name := SplitHostname(named)
+		if domain != testcase.domain {
+			failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
+		}
+		if name != testcase.name {
+			failf("unexpected name: got %q, expected %q", name, testcase.name)
+		}
+	}
+}
diff --git a/reference/regexp.go b/reference/regexp.go
new file mode 100644
index 00000000..78603493
--- /dev/null
+++ b/reference/regexp.go
@@ -0,0 +1,143 @@
+package reference
+
+import "regexp"
+
+var (
+	// alphaNumericRegexp defines the alphanumeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
+
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and
+	// multiple dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+	// domainComponentRegexp restricts the registry domain component of a
+	// repository name to start with a component as defined by DomainRegexp
+	// and followed by an optional port.
+	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+	// DomainRegexp defines the structure of potential domain components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
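+	// For example, "test.com", "localhost:8080", and "xn--n3h.com" all
+	// match, while "-ab.com" and "test.com:http" do not (see
+	// TestDomainRegexp in regexp_test.go).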
+ DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. 
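+//
+// For example, nameComponentRegexp above is composed as:
+//
+//	expression(alphaNumericRegexp,
+//		optional(repeated(separatorRegexp, alphaNumericRegexp)))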
+func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/reference/regexp_test.go b/reference/regexp_test.go new file mode 100644 index 00000000..09bc8192 --- /dev/null +++ b/reference/regexp_test.go @@ -0,0 +1,553 @@ +package reference + +import ( + "regexp" + "strings" + "testing" +) + +type regexpMatch struct { + input string + match bool + subs []string +} + +func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { + matches := r.FindStringSubmatch(m.input) + if m.match && matches != nil { + if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { + t.Fatalf("Bad match result %#v for %q", matches, m.input) + } + if len(matches) < (len(m.subs) + 1) { + t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) + } + for i := range m.subs { + if m.subs[i] != matches[i+1] { + t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) + } + } + } else if m.match { + t.Errorf("Expected match for %q", m.input) + } else if matches != nil { + t.Errorf("Unexpected match for %q", m.input) + } +} + +func TestDomainRegexp(t *testing.T) { + hostcases := []regexpMatch{ + { + input: "test.com", + match: true, + }, + { + input: "test.com:10304", + match: true, + }, + { + input: "test.com:http", + match: false, + }, + { + input: "localhost", + match: true, + }, + { + input: "localhost:8080", + match: true, + }, + { + input: "a", + match: true, + }, + { + input: "a.b", + match: true, + }, + { + input: "ab.cd.com", + match: true, + }, + { + input: "a-b.com", + match: true, + }, + { + input: "-ab.com", + match: false, + }, + { + input: "ab-.com", + match: false, + }, + { + input: "ab.c-om", + match: true, + }, + { + input: "ab.-com", + match: false, + }, + { + input: "ab.com-", + match: false, + }, + { + input: "0101.com", + match: true, // TODO(dmcgowan): valid if this should be allowed + }, + { + input: "001a.com", + match: true, + }, + { + input: "b.gbc.io:443", + match: true, + }, + { + input: "b.gbc.io", + match: true, + }, + { + input: "xn--n3h.com", // ☃.com in punycode + match: true, + }, + { + input: "Asdf.com", // uppercase character + match: true, + }, + } + r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`) + for i := range hostcases { + checkRegexp(t, r, hostcases[i]) + } +} + +func TestFullNameRegexp(t *testing.T) { + if anchoredNameRegexp.NumSubexp() != 2 { + t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", + anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) + } + + testcases := []regexpMatch{ + { + input: "", + match: false, + }, + { + input: "short", + match: true, + subs: []string{"", "short"}, + }, + { + input: "simple/name", + match: true, + subs: []string{"simple", "name"}, + }, + { + input: "library/ubuntu", + match: true, + subs: []string{"library", "ubuntu"}, + }, + { + input: "docker/stevvooe/app", + match: true, + subs: []string{"docker", "stevvooe/app"}, + }, + { 
+ input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", + match: true, + subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, + }, + { + input: "aa/aa/bb/bb/bb", + match: true, + subs: []string{"aa", "aa/bb/bb/bb"}, + }, + { + input: "a/a/a/a", + match: true, + subs: []string{"a", "a/a/a"}, + }, + { + input: "a/a/a/a/", + match: false, + }, + { + input: "a//a/a", + match: false, + }, + { + input: "a", + match: true, + subs: []string{"", "a"}, + }, + { + input: "a/aa", + match: true, + subs: []string{"a", "aa"}, + }, + { + input: "a/aa/a", + match: true, + subs: []string{"a", "aa/a"}, + }, + { + input: "foo.com", + match: true, + subs: []string{"", "foo.com"}, + }, + { + input: "foo.com/", + match: false, + }, + { + input: "foo.com:8080/bar", + match: true, + subs: []string{"foo.com:8080", "bar"}, + }, + { + input: "foo.com:http/bar", + match: false, + }, + { + input: "foo.com/bar", + match: true, + subs: []string{"foo.com", "bar"}, + }, + { + input: "foo.com/bar/baz", + match: true, + subs: []string{"foo.com", "bar/baz"}, + }, + { + input: "localhost:8080/bar", + match: true, + subs: []string{"localhost:8080", "bar"}, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + match: true, + subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, + }, + { + input: "blog.foo.com/bar/baz", + match: true, + subs: []string{"blog.foo.com", "bar/baz"}, + }, + { + input: "a^a", + match: false, + }, + { + input: "aa/asdf$$^/aa", + match: false, + }, + { + input: "asdf$$^/aa", + match: false, + }, + { + input: "aa-a/a", + match: true, + subs: []string{"aa-a", "a"}, + }, + { + input: strings.Repeat("a/", 128) + "a", + match: true, + subs: []string{"a", strings.Repeat("a/", 127) + "a"}, + }, + { + input: "a-/a/a/a", + match: false, + }, + { + input: "foo.com/a-/a/a", + match: false, + }, + { + input: "-foo/bar", + match: false, + }, + { + input: "foo/bar-", + match: false, + }, + { + input: "foo-/bar", + match: false, + }, + { + input: "foo/-bar", + match: false, + }, + { + input: "_foo/bar", + match: false, + }, + { + input: "foo_bar", + match: true, + subs: []string{"", "foo_bar"}, + }, + { + input: "foo_bar.com", + match: true, + subs: []string{"", "foo_bar.com"}, + }, + { + input: "foo_bar.com:8080", + match: false, + }, + { + input: "foo_bar.com:8080/app", + match: false, + }, + { + input: "foo.com/foo_bar", + match: true, + subs: []string{"foo.com", "foo_bar"}, + }, + { + input: "____/____", + match: false, + }, + { + input: "_docker/_docker", + match: false, + }, + { + input: "docker_/docker_", + match: false, + }, + { + input: "b.gcr.io/test.example.com/my-app", + match: true, + subs: []string{"b.gcr.io", "test.example.com/my-app"}, + }, + { + input: "xn--n3h.com/myimage", // ☃.com in punycode + match: true, + subs: []string{"xn--n3h.com", "myimage"}, + }, + { + input: "xn--7o8h.com/myimage", // 🐳.com in punycode + match: true, + subs: []string{"xn--7o8h.com", "myimage"}, + }, + { + input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode + match: true, + subs: []string{"example.com", "xn--7o8h.com/myimage"}, + }, + { + input: "example.com/some_separator__underscore/myimage", + match: true, + subs: []string{"example.com", "some_separator__underscore/myimage"}, + }, + { + input: "example.com/__underscore/myimage", + match: false, + }, + { + input: "example.com/..dots/myimage", + match: false, + }, + { + input: "example.com/.dots/myimage", + match: false, + }, + { + input: "example.com/nodouble..dots/myimage", + match: false, + }, + { + input: "example.com/nodouble..dots/myimage", + 
match: false,
+	},
+	{
+		input: "docker./docker",
+		match: false,
+	},
+	{
+		input: ".docker/docker",
+		match: false,
+	},
+	{
+		input: "docker-/docker",
+		match: false,
+	},
+	{
+		input: "-docker/docker",
+		match: false,
+	},
+	{
+		input: "do..cker/docker",
+		match: false,
+	},
+	{
+		input: "do__cker:8080/docker",
+		match: false,
+	},
+	{
+		input: "do__cker/docker",
+		match: true,
+		subs:  []string{"", "do__cker/docker"},
+	},
+	{
+		input: "b.gcr.io/test.example.com/my-app",
+		match: true,
+		subs:  []string{"b.gcr.io", "test.example.com/my-app"},
+	},
+	{
+		input: "registry.io/foo/project--id.module--name.ver---sion--name",
+		match: true,
+		subs:  []string{"registry.io", "foo/project--id.module--name.ver---sion--name"},
+	},
+	{
+		input: "Asdf.com/foo/bar", // uppercase character in hostname
+		match: true,
+	},
+	{
+		input: "Foo/FarB", // uppercase characters in remote name
+		match: false,
+	},
+	}
+	for i := range testcases {
+		checkRegexp(t, anchoredNameRegexp, testcases[i])
+	}
+}
+
+func TestReferenceRegexp(t *testing.T) {
+	if ReferenceRegexp.NumSubexp() != 3 {
+		t.Fatalf("reference regexp should have three submatches: %v, %v != 3",
+			ReferenceRegexp, ReferenceRegexp.NumSubexp())
+	}
+
+	testcases := []regexpMatch{
+		{
+			input: "registry.com:8080/myapp:tag",
+			match: true,
+			subs:  []string{"registry.com:8080/myapp", "tag", ""},
+		},
+		{
+			input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "registry.com:8080/myapp@sha256:badbadbadbad",
+			match: false,
+		},
+		{
+			input: "registry.com:8080/myapp:invalid~tag",
+			match: false,
+		},
+		{
+			input: "bad_hostname.com:8080/myapp:tag",
+			match: false,
+		},
+		{
+			// localhost treated as name, missing tag with 8080 as tag
+			input: "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: false,
+		},
+		{
+			// localhost will be treated as an image name without a host
+			input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "registry.com:8080/myapp@bad",
+			match: false,
+		},
+		{
+			input: "registry.com:8080/myapp@2bad",
+			match: false, // TODO(dmcgowan): Support this as valid
+		},
+	}
+
+	for i := range testcases {
+		checkRegexp(t, ReferenceRegexp, testcases[i])
+	}
+
+}
+
+func TestIdentifierRegexp(t *testing.T) {
+	fullCases := []regexpMatch{
+		{
+			input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
+			match: true,
+		},
+		{
+			input: 
"7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", + match: false, + }, + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", + match: false, + }, + { + input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", + match: false, + }, + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", + match: false, + }, + } + + shortCases := []regexpMatch{ + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", + match: true, + }, + { + input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", + match: false, + }, + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", + match: true, + }, + { + input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", + match: false, + }, + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", + match: false, + }, + { + input: "da304", + match: false, + }, + { + input: "da304e", + match: true, + }, + } + + for i := range fullCases { + checkRegexp(t, anchoredIdentifierRegexp, fullCases[i]) + } + + for i := range shortCases { + checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i]) + } +} diff --git a/registry.go b/registry.go new file mode 100644 index 00000000..a3a80ab8 --- /dev/null +++ b/registry.go @@ -0,0 +1,113 @@ +package distribution + +import ( + "context" + + "github.com/docker/distribution/reference" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name reference.Named) (Repository, error) + + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. 
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+
+	// Blobs returns a blob enumerator to access all blobs.
+	Blobs() BlobEnumerator
+
+	// BlobStatter returns a BlobStatter to control blob access.
+	BlobStatter() BlobStatter
+}
+
+// RepositoryEnumerator describes an operation to enumerate repositories
+type RepositoryEnumerator interface {
+	Enumerate(ctx context.Context, ingester func(string) error) error
+}
+
+// ManifestServiceOption is a function argument for Manifest Service methods
+type ManifestServiceOption interface {
+	Apply(ManifestService) error
+}
+
+// WithTag allows a tag to be passed into Put
+func WithTag(tag string) ManifestServiceOption {
+	return WithTagOption{tag}
+}
+
+// WithTagOption holds a tag
+type WithTagOption struct{ Tag string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithTagOption) Apply(m ManifestService) error {
+	// no implementation
+	return nil
+}
+
+// WithManifestMediaTypes lists the media types the client wishes
+// the server to provide.
+func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
+	return WithManifestMediaTypesOption{mediaTypes}
+}
+
+// WithManifestMediaTypesOption holds a list of accepted media types
+type WithManifestMediaTypesOption struct{ MediaTypes []string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
+	// no implementation
+	return nil
+}
+
+// Repository is a named collection of manifests and layers.
+type Repository interface {
+	// Named returns the name of the repository.
+	Named() reference.Named
+
+	// Manifests returns a reference to this repository's manifest service,
+	// with the supplied options applied.
+	Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
+
+	// Blobs returns a reference to this repository's blob service.
+	Blobs(ctx context.Context) BlobStore
+
+	// TODO(stevvooe): The above BlobStore return can probably be relaxed to
+	// be a BlobService for use with clients. This will allow such
+	// implementations to avoid implementing ServeBlob.
+
+	// Tags returns a reference to this repository's tag service.
+	Tags(ctx context.Context) TagService
+}
+
+// TODO(stevvooe): Must add close methods to all these. May want to change the
+// way instances are created to better reflect internal dependency
+// relationships.
diff --git a/registry/api/errcode/errors.go b/registry/api/errcode/errors.go
new file mode 100644
index 00000000..6d9bb4b6
--- /dev/null
+++ b/registry/api/errcode/errors.go
@@ -0,0 +1,267 @@
+package errcode
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error, allowing
+// users of each to just call ErrorCode to get the real ID of the error.
+type ErrorCoder interface {
+	ErrorCode() ErrorCode
+}
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+	return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+	return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+	return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+	return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+	return Error{
+		Code:    ec,
+		Message: message,
+	}
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// sets the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message"`
+	Detail  interface{} `json:"detail,omitempty"`
+
+	// TODO(duglin): See if we need an "args" property so we can do the
+	// variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+	return e.Code
+}
+
+// Error returns a human-readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human-readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// HTTPStatusCode provides the http status code that is associated with
+	// this error condition.
+	HTTPStatusCode int
+}
+
+// ParseErrorCode returns the ErrorCode corresponding to the given string value.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+	switch len(errs) {
+	case 0:
+		return ""
+	case 1:
+		return errs[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+	return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode or Error into a
+// slice of Error, then serializes it
+func (errs Errors) MarshalJSON() ([]byte, error) {
+	var tmpErrs struct {
+		Errors []Error `json:"errors,omitempty"`
+	}
+
+	for _, daErr := range errs {
+		var err Error
+
+		switch daErr.(type) {
+		case ErrorCode:
+			err = daErr.(ErrorCode).WithDetail(nil)
+		case Error:
+			err = daErr.(Error)
+		default:
+			err = ErrorCodeUnknown.WithDetail(daErr)
+
+		}
+
+		// If the Error struct was set up and they forgot to set the
+		// Message field (meaning it's "") then grab it from the ErrorCode
+		msg := err.Message
+		if msg == "" {
+			msg = err.Code.Message()
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, Error{
+			Code:    err.Code,
+			Message: msg,
+			Detail:  err.Detail,
+		})
+	}
+
+	return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into a slice of
+// Error or ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+	var tmpErrs struct {
+		Errors []Error
+	}
+
+	if err := json.Unmarshal(data, &tmpErrs); err != nil {
+		return err
+	}
+
+	var newErrs Errors
+	for _, daErr := range tmpErrs.Errors {
+		// If Message is empty or exactly matches the Code's message string
+		// then just use the Code, no need for a full Error struct
+		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+			// Errors w/o details get converted to ErrorCode
+			newErrs = append(newErrs, daErr.Code)
+		} else {
+			// Errors w/ details are untouched
+			newErrs = append(newErrs, Error{
+				Code:    daErr.Code,
+				Message: daErr.Message,
+				Detail:  daErr.Detail,
+			})
+		}
+	}
+
+	*errs = newErrs
+	return nil
+}
diff --git a/registry/api/errcode/errors_test.go b/registry/api/errcode/errors_test.go
new file mode 100644
index 00000000..54e7a736
--- /dev/null
+++ b/registry/api/errcode/errors_test.go
@@ -0,0 +1,185 @@
+package errcode
+
+import (
+	"encoding/json"
+	"net/http"
+	"reflect"
+	"strings"
+	"testing"
+)
+var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{
+	Value:          "TEST1",
+	Message:        "test error 1",
+	Description:    `Just a test message #1.`,
+	HTTPStatusCode: http.StatusInternalServerError,
+})
+
+var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{
+	Value:          "TEST2",
+	Message:        "test error 2",
+	Description:    `Just a test message #2.`,
+	HTTPStatusCode: http.StatusNotFound,
+})
+
+var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{
+	Value:          "TEST3",
+	Message:        "Sorry %q isn't valid",
+	Description:    `Just a test message #3.`,
+	HTTPStatusCode: http.StatusNotFound,
+})
+
+// TestErrorCodes ensures that error code format, mappings and
+// marshaling/unmarshaling round trips are stable.
+func TestErrorCodes(t *testing.T) {
+	if len(errorCodeToDescriptors) == 0 {
+		t.Fatal("errors aren't loaded!")
+	}
+
+	for ec, desc := range errorCodeToDescriptors {
+		if ec != desc.Code {
+			t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code)
+		}
+
+		if idToDescriptors[desc.Value].Code != ec {
+			t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec)
+		}
+
+		if ec.Message() != desc.Message {
+			t.Fatalf("ec.Message doesn't match desc.Message: %q != %q", ec.Message(), desc.Message)
+		}
+
+		// Test (de)serializing the ErrorCode
+		p, err := json.Marshal(ec)
+		if err != nil {
+			t.Fatalf("couldn't marshal ec %v: %v", ec, err)
+		}
+
+		if len(p) <= 0 {
+			t.Fatalf("expected content in marshaled form for error code %v", ec)
+		}
+
+		// First, unmarshal to interface and ensure we have a string.
+		var ecUnspecified interface{}
+		if err := json.Unmarshal(p, &ecUnspecified); err != nil {
+			t.Fatalf("error unmarshaling error code %v: %v", ec, err)
+		}
+
+		if _, ok := ecUnspecified.(string); !ok {
+			t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified)
+		}
+
+		// Now, unmarshal with the error code type and ensure they are equal
+		var ecUnmarshaled ErrorCode
+		if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
+			t.Fatalf("error unmarshaling error code %v: %v", ec, err)
+		}
+
+		if ecUnmarshaled != ec {
+			t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec)
+		}
+
+		expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1))
+		if ec.Error() != expectedErrorString {
+			t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString)
+		}
+	}
+
+}
+
+// TestErrorsManagement does a quick check of the Errors type to ensure that
+// members are properly pushed and marshaled.
+func TestErrorsManagement(t *testing.T) {
+	var errs Errors
+
+	errs = append(errs, ErrorCodeTest1)
+	errs = append(errs, ErrorCodeTest2.WithDetail(
+		map[string]interface{}{"digest": "sometestblobsumdoesntmatter"}))
+	errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE"))
+	errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data"))
+
+	p, err := json.Marshal(errs)
+
+	if err != nil {
+		t.Fatalf("error marshaling errors: %v", err)
+	}
+
+	expectedJSON := `{"errors":[` +
+		`{"code":"TEST1","message":"test error 1"},` +
+		`{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` +
+		`{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` +
+		`{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` +
+		`]}`
+
+	if string(p) != expectedJSON {
+		t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON)
+	}
+
+	// Now test the reverse
+	var unmarshaled Errors
+	if err := json.Unmarshal(p, &unmarshaled); err != nil {
+		t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
+	}
+
+	if !reflect.DeepEqual(unmarshaled, errs) {
+		t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs)
+	}
+
+	// Test the arg substitution stuff
+	e1 := unmarshaled[3].(Error)
+	exp1 := `Sorry "BOOGIE" isn't valid`
+	if e1.Message != exp1 {
+		t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1)
+	}
+
+	exp1 = "test3: " + exp1
+	if e1.Error() != exp1 {
+		t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1)
+	}
+
+	// Test again with a single value this time
+	errs = Errors{ErrorCodeUnknown}
+	expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
+	p, err = json.Marshal(errs)
+
+	if err != nil {
+		t.Fatalf("error marshaling errors: %v", err)
+	}
+
+	if string(p) != expectedJSON {
+		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
+	}
+
+	// Now test the reverse
+	unmarshaled = nil
+	if err := json.Unmarshal(p, &unmarshaled); err != nil {
+		t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
+	}
+
+	if !reflect.DeepEqual(unmarshaled, errs) {
+		t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs)
+	}
+
+	// Verify that calling WithArgs() more than once does the right thing.
+	// Meaning it creates a new Error and uses the ErrorCode Message
+	e1 = ErrorCodeTest3.WithArgs("test1")
+	e2 := e1.WithArgs("test2")
+	if &e1 == &e2 {
+		t.Fatalf("args: e2 and e1 should not be the same, but they are")
+	}
+	if e2.Message != `Sorry "test2" isn't valid` {
+		t.Fatalf("e2 had wrong message: %q", e2.Message)
+	}
+
+	// Verify that calling WithDetail() more than once does the right thing.
+	// Meaning it creates a new Error and overwrites the old detail field
+	e1 = ErrorCodeTest3.WithDetail("stuff1")
+	e2 = e1.WithDetail("stuff2")
+	if &e1 == &e2 {
+		t.Fatalf("detail: e2 and e1 should not be the same, but they are")
+	}
+	if e2.Detail != `stuff2` {
+		t.Fatalf("e2 had wrong detail: %q", e2.Detail)
+	}
+
+}
diff --git a/registry/api/errcode/handler.go b/registry/api/errcode/handler.go
new file mode 100644
index 00000000..d77e7047
--- /dev/null
+++ b/registry/api/errcode/handler.go
@@ -0,0 +1,40 @@
+package errcode
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
+func ServeJSON(w http.ResponseWriter, err error) error {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	var sc int
+
+	switch errs := err.(type) {
+	case Errors:
+		if len(errs) < 1 {
+			break
+		}
+
+		if err, ok := errs[0].(ErrorCoder); ok {
+			sc = err.ErrorCode().Descriptor().HTTPStatusCode
+		}
+	case ErrorCoder:
+		sc = errs.ErrorCode().Descriptor().HTTPStatusCode
+		err = Errors{err} // create an envelope.
+	default:
+		// We just have an unhandled error type, so just place in an envelope
+		// and move along.
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + return json.NewEncoder(w).Encode(err) +} diff --git a/registry/api/errcode/register.go b/registry/api/errcode/register.go new file mode 100644 index 00000000..d1e8826c --- /dev/null +++ b/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
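+	//
+	// A minimal handler-side sketch (an illustration only; `limited` stands
+	// in for whatever rate-limit check a caller performs):
+	//
+	//	if limited {
+	//		errcode.ServeJSON(w, errcode.ErrorCodeTooManyRequests)
+	//		return
+	//	}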
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/registry/api/v2/descriptors.go b/registry/api/v2/descriptors.go new file mode 100644 index 00000000..a9616c58 --- /dev/null +++ b/registry/api/v2/descriptors.go @@ -0,0 +1,1596 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/opencontainers/go-digest" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. 
Should be set to the registry host.",
+		Format:      "<registry host>",
+		Examples:    []string{"registry-1.docker.io"},
+	}
+
+	authHeader = ParameterDescriptor{
+		Name:        "Authorization",
+		Type:        "string",
+		Description: "An RFC7235 compliant authorization header.",
+		Format:      "<scheme> <token>",
+		Examples:    []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="},
+	}
+
+	authChallengeHeader = ParameterDescriptor{
+		Name:        "WWW-Authenticate",
+		Type:        "string",
+		Description: "An RFC7235 compliant authentication challenge header.",
+		Format:      `<scheme> realm="<realm>", ..."`,
+		Examples: []string{
+			`Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`,
+		},
+	}
+
+	contentLengthZeroHeader = ParameterDescriptor{
+		Name:        "Content-Length",
+		Description: "The `Content-Length` header must be zero and the body must be empty.",
+		Type:        "integer",
+		Format:      "0",
+	}
+
+	dockerUploadUUIDHeader = ParameterDescriptor{
+		Name:        "Docker-Upload-UUID",
+		Description: "Identifies the docker upload uuid for the current request.",
+		Type:        "uuid",
+		Format:      "<uuid>",
+	}
+
+	digestHeader = ParameterDescriptor{
+		Name:        "Docker-Content-Digest",
+		Description: "Digest of the targeted content for the request.",
+		Type:        "digest",
+		Format:      "<digest>",
+	}
+
+	linkHeader = ParameterDescriptor{
+		Name:        "Link",
+		Type:        "link",
+		Description: "RFC5988 compliant rel='next' with URL to next result set, if available",
+		Format:      `<<url>?n=<last n value>&last=<last entry from response>>; rel="next"`,
+	}
+
+	paginationParameters = []ParameterDescriptor{
+		{
+			Name:        "n",
+			Type:        "integer",
+			Description: "Limit the number of entries in each response. If not present, all entries will be returned.",
+			Format:      "<integer>",
+			Required:    false,
+		},
+		{
+			Name:        "last",
+			Type:        "string",
+			Description: "Result set will include values lexically after last.",
+			Format:      "<last entry>",
+			Required:    false,
+		},
+	}
+
+	unauthorizedResponseDescriptor = ResponseDescriptor{
+		Name:        "Authentication Required",
+		StatusCode:  http.StatusUnauthorized,
+		Description: "The client is not authenticated.",
+		Headers: []ParameterDescriptor{
+			authChallengeHeader,
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeUnauthorized,
+		},
+	}
+
+	repositoryNotFoundResponseDescriptor = ResponseDescriptor{
+		Name:        "No Such Repository Error",
+		StatusCode:  http.StatusNotFound,
+		Description: "The repository is not known to the registry.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			ErrorCodeNameUnknown,
+		},
+	}
+
+	deniedResponseDescriptor = ResponseDescriptor{
+		Name:        "Access Denied",
+		StatusCode:  http.StatusForbidden,
+		Description: "The client does not have required access to the repository.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeDenied,
+		},
+	}
+
+	tooManyRequestsDescriptor = ResponseDescriptor{
+		Name:       "Too Many Requests",
+		StatusCode: http.StatusTooManyRequests,
Description: "The client made too many requests within a time interval.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeTooManyRequests, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor +}{ + RouteDescriptors: routeDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names a should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readalbe description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. + Method string + + // Description should provide an overview of the functionality provided by + // the covered method, suitable for use in documentation. Use of markdown + // here is encouraged. + Description string + + // Requests is a slice of request descriptors enumerating how this + // endpoint may be used. + Requests []RequestDescriptor +} + +// RequestDescriptor covers a particular set of headers and parameters that +// can be carried out with the parent method. Its most helpful to have one +// RequestDescriptor per API use case. +type RequestDescriptor struct { + // Name provides a short identifier for the request, usable as a title or + // to provide quick context for the particular request. + Name string + + // Description should cover the requests purpose, covering any details for + // this particular use case. + Description string + + // Headers describes headers that must be used with the HTTP request. + Headers []ParameterDescriptor + + // PathParameters enumerate the parameterized path components for the + // given request, as defined in the route's regular expression. + PathParameters []ParameterDescriptor + + // QueryParameters provides a list of query parameters for the given + // request. + QueryParameters []ParameterDescriptor + + // Body describes the format of the request body. + Body BodyDescriptor + + // Successes enumerates the possible responses that are considered to be + // the result of a successful request. 
+ Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particular response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status received by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []errcode.ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. + Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set. + Required bool + + // Format is a specifying the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. 
Typically, this can be used for lightweight version checks and to validate registry authentication.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + unauthorizedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... 
+ ] +}`, + }, + }, + { + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary data>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob has been created in the registry and is available at the provided location.",
+ StatusCode: http.StatusCreated,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "<blob location>",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Initiate Resumable Blob Upload",
+ Description: "Initiate a resumable blob upload with an empty request body.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ contentLengthZeroHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
+ StatusCode: http.StatusAccepted,
+ Headers: []ParameterDescriptor{
+ contentLengthZeroHeader,
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "/v2/<name>/blobs/uploads/<uuid>",
+ Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+ },
+ {
+ Name: "Range",
+ Format: "0-0",
+ Description: "Range header indicating the progress of the upload.
When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.",
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Content-Range",
+ Type: "header",
+ Format: "<start of range>-<end of range, inclusive>",
+ Required: true,
+ Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
+ },
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Format: "<length of chunk>",
+ Description: "Length of the chunk being uploaded, corresponding to the length of the request body.",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary chunk>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Chunk Accepted",
+ Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "/v2/<name>/blobs/uploads/<uuid>",
+ Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+ },
+ {
+ Name: "Range",
+ Type: "header",
+ Format: "0-<offset>",
+ Description: "Range indicating the current progress of the upload.",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
+ StatusCode: http.StatusRequestedRangeNotSatisfiable,
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "PUT",
+ Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
+ Requests: []RequestDescriptor{
+ {
+ Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Format: "<length of data>",
+ Description: "Length of the data being uploaded, corresponding to the length of the request body.
May be zero if no data is provided.",
+ },
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ QueryParameters: []ParameterDescriptor{
+ {
+ Name: "digest",
+ Type: "string",
+ Format: "<digest>",
+ Regexp: digest.DigestRegexp,
+ Required: true,
+ Description: `Digest of uploaded blob.`,
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary data>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Upload Complete",
+ Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "<blob location>",
+ Description: "The canonical location of the blob for retrieval.",
+ },
+ {
+ Name: "Content-Range",
+ Type: "header",
+ Format: "<start of range>-<end of range, inclusive>",
+ Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
+ },
+ contentLengthZeroHeader,
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ errcode.ErrorCodeUnsupported,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "DELETE",
+ Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out.",
+ Requests: []RequestDescriptor{
+ {
+ Description: "Cancel the upload specified by `uuid`.",
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ contentLengthZeroHeader,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Upload Deleted",
+ Description: "The upload has been successfully deleted.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ contentLengthZeroHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "An error was encountered processing the delete. The client may ignore this error.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry.
The client may ignore this error and assume the upload has been deleted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: RouteNameCatalog,
+ Path: "/v2/_catalog",
+ Entity: "Catalog",
+ Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can determine that a repository is available, but absence from the list does not mean that it is unavailable.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Retrieve a sorted, JSON list of repositories available in the registry.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Catalog Fetch",
+ Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.",
+ Successes: []ResponseDescriptor{
+ {
+ Description: "Returns the unabridged list of repositories as a JSON response.",
+ StatusCode: http.StatusOK,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "repositories": [
+ <name>,
+ ...
+ ]
+}`,
+ },
+ },
+ },
+ },
+ {
+ Name: "Catalog Fetch Paginated",
+ Description: "Return the specified portion of repositories.",
+ QueryParameters: paginationParameters,
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusOK,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "repositories": [
+ <name>,
+ ...
+ ],
+ "next": "<url>?last=<name>&n=<last value of n>"
+}`,
+ },
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ linkHeader,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+}
+
+var routeDescriptorsMap map[string]RouteDescriptor
+
+func init() {
+ routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
+
+ for _, descriptor := range routeDescriptors {
+ routeDescriptorsMap[descriptor.Name] = descriptor
+ }
+}
diff --git a/registry/api/v2/doc.go b/registry/api/v2/doc.go
new file mode 100644
index 00000000..cde01195
--- /dev/null
+++ b/registry/api/v2/doc.go
@@ -0,0 +1,9 @@
+// Package v2 describes routes, urls and the error codes used in the Docker
+// Registry JSON HTTP API V2. In addition to declarations, descriptors are
+// provided for routes and error codes that can be used for implementation and
+// automatically generating documentation.
+//
+// Definitions here are considered to be locked down for the V2 registry API.
+// Any changes must be considered carefully and should not proceed without a
+// change proposal in docker core.
+package v2
diff --git a/registry/api/v2/errors.go b/registry/api/v2/errors.go
new file mode 100644
index 00000000..97d6923a
--- /dev/null
+++ b/registry/api/v2/errors.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+)
+
+const errGroup = "registry.api.v2"
+
+var (
+ // ErrorCodeDigestInvalid is returned when uploading a blob if the
+ // provided digest does not match the blob contents.
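+ //
+ // As an illustrative sketch (not part of this package's API), the check
+ // behind this error amounts to recomputing the digest of the uploaded
+ // bytes and comparing it with the digest the client supplied;
+ // uploadedContent and providedDigest are hypothetical names used only
+ // for illustration:
+ //
+ //	computed := digest.FromBytes(uploadedContent) // e.g. "sha256:..."
+ //	if computed != providedDigest {
+ //		// reject the upload with DIGEST_INVALID
+ //	}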
+ ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "DIGEST_INVALID",
+ Message: "provided digest did not match uploaded content",
+ Description: `When a blob is uploaded, the registry will check that
+ the content matches the digest provided by the client. The error may
+ include a detail structure with the key "digest", including the
+ invalid digest string. This error may also be returned when a manifest
+ includes an invalid layer digest.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+ // length does not match the content length.
+ ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "SIZE_INVALID",
+ Message: "provided length did not match content length",
+ Description: `When a layer is uploaded, the provided size will be
+ checked against the uploaded content. If they do not match, this error
+ will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeNameInvalid is returned when the name in the manifest does not
+ // match the provided name.
+ ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "NAME_INVALID",
+ Message: "invalid repository name",
+ Description: `Invalid repository name encountered either during
+ manifest validation or any API operation.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeTagInvalid is returned when the tag in the manifest does not
+ // match the provided tag.
+ ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "TAG_INVALID",
+ Message: "manifest tag did not match URI",
+ Description: `During a manifest upload, if the tag in the manifest
+ does not match the uri tag, this error will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeNameUnknown is returned when the repository name is not known.
+ ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "NAME_UNKNOWN",
+ Message: "repository name not known to registry",
+ Description: `This is returned if the name used during an operation is
+ unknown to the registry.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeManifestUnknown is returned when the image manifest is unknown.
+ ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_UNKNOWN",
+ Message: "manifest unknown",
+ Description: `This error is returned when the manifest, identified by
+ name and tag, is unknown to the repository.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeManifestInvalid is returned when an image manifest is invalid,
+ // typically during a PUT operation. This error encompasses all errors
+ // encountered during manifest validation that aren't signature errors.
+ ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_INVALID",
+ Message: "manifest invalid",
+ Description: `During upload, manifests undergo several checks ensuring
+ validity. If those checks fail, this error may be returned, unless a
+ more specific error is included. The detail will contain information
+ about the failed validation.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeManifestUnverified is returned when the manifest fails
+ // signature verification.
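+ //
+ // A hedged sketch of how a handler might surface this error;
+ // verifySignature is a hypothetical helper, while errcode.ServeJSON
+ // (registry/api/errcode) writes the registered status code and JSON
+ // error body:
+ //
+ //	if err := verifySignature(manifest); err != nil {
+ //		errcode.ServeJSON(w, ErrorCodeManifestUnverified)
+ //		return
+ //	}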
+ ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. + ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }) +) diff --git a/registry/api/v2/headerparser.go b/registry/api/v2/headerparser.go new file mode 100644 index 00000000..9bc41a3a --- /dev/null +++ b/registry/api/v2/headerparser.go @@ -0,0 +1,161 @@ +package v2 + +import ( + "fmt" + "regexp" + "strings" + "unicode" +) + +var ( + // according to rfc7230 + reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) + reQuotedValue = regexp.MustCompile(`^[^\\"]+`) + reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) +) + +// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains +// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The +// function parses only the first element of the list, which is set by the very first proxy. It returns a map +// of corresponding key-value pairs and an unparsed slice of the input string. +// +// Examples of Forwarded header values: +// +// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown +// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" +// +// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into +// {"for": "192.0.2.43:443", "host": "registry.example.org"}. +func parseForwardedHeader(forwarded string) (map[string]string, string, error) { + // Following are states of forwarded header parser. Any state could transition to a failure. 
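+ // As a worked example (a sketch consistent with the documentation
+ // above), parsing
+ //
+ //	`for=192.0.2.43;proto=https, for=198.51.100.17`
+ //
+ // steps through Parameter, KeyValueDelimiter, Value and PairEnd for
+ // each pair of the first element, stops at the comma, and yields
+ // map[string]string{"for": "192.0.2.43", "proto": "https"} along with
+ // the unparsed remainder " for=198.51.100.17".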
+ const ( + // terminating state; can transition to Parameter + stateElement = iota + // terminating state; can transition to KeyValueDelimiter + stateParameter + // can transition to Value + stateKeyValueDelimiter + // can transition to one of { QuotedValue, PairEnd } + stateValue + // can transition to one of { EscapedCharacter, PairEnd } + stateQuotedValue + // can transition to one of { QuotedValue } + stateEscapedCharacter + // terminating state; can transition to one of { Parameter, Element } + statePairEnd + ) + + var ( + parameter string + value string + parse = forwarded[:] + res = map[string]string{} + state = stateElement + ) + +Loop: + for { + // skip spaces unless in quoted value + if state != stateQuotedValue && state != stateEscapedCharacter { + parse = strings.TrimLeftFunc(parse, unicode.IsSpace) + } + + if len(parse) == 0 { + if state != stateElement && state != statePairEnd && state != stateParameter { + return nil, parse, fmt.Errorf("unexpected end of input") + } + // terminating + break + } + + switch state { + // terminate at list element delimiter + case stateElement: + if parse[0] == ',' { + parse = parse[1:] + break Loop + } + state = stateParameter + + // parse parameter (the key of key-value pair) + case stateParameter: + match := reToken.FindString(parse) + if len(match) == 0 { + return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) + } + parameter = strings.ToLower(match) + parse = parse[len(match):] + state = stateKeyValueDelimiter + + // parse '=' + case stateKeyValueDelimiter: + if parse[0] != '=' { + return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) + } + parse = parse[1:] + state = stateValue + + // parse value or quoted value + case stateValue: + if parse[0] == '"' { + parse = parse[1:] + state = stateQuotedValue + } else { + value = reToken.FindString(parse) + if len(value) == 0 { + return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) + } + if _, exists := res[parameter]; exists { + return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) + } + res[parameter] = value + parse = parse[len(value):] + value = "" + state = statePairEnd + } + + // parse a part of quoted value until the first backslash + case stateQuotedValue: + match := reQuotedValue.FindString(parse) + value += match + parse = parse[len(match):] + switch { + case len(parse) == 0: + return nil, parse, fmt.Errorf("unterminated quoted string") + case parse[0] == '"': + res[parameter] = value + value = "" + parse = parse[1:] + state = statePairEnd + case parse[0] == '\\': + parse = parse[1:] + state = stateEscapedCharacter + } + + // parse escaped character in a quoted string, ignore the backslash + // transition back to QuotedValue state + case stateEscapedCharacter: + c := reEscapedCharacter.FindString(parse) + if len(c) == 0 { + return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) + } + value += c + parse = parse[1:] + state = stateQuotedValue + + // expect either a new key-value pair, new list or end of input + case statePairEnd: + switch parse[0] { + case ';': + parse = parse[1:] + state = stateParameter + case ',': + state = stateElement + default: + return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) + } + } + } + + return res, parse, nil +} diff --git a/registry/api/v2/headerparser_test.go 
b/registry/api/v2/headerparser_test.go new file mode 100644 index 00000000..b8c37490 --- /dev/null +++ b/registry/api/v2/headerparser_test.go @@ -0,0 +1,161 @@ +package v2 + +import ( + "testing" +) + +func TestParseForwardedHeader(t *testing.T) { + for _, tc := range []struct { + name string + raw string + expected map[string]string + expectedRest string + expectedError bool + }{ + { + name: "empty", + raw: "", + }, + { + name: "one pair", + raw: " key = value ", + expected: map[string]string{"key": "value"}, + }, + { + name: "two pairs", + raw: " key1 = value1; key2=value2", + expected: map[string]string{"key1": "value1", "key2": "value2"}, + }, + { + name: "uppercase parameter", + raw: "KeY=VaL", + expected: map[string]string{"key": "VaL"}, + }, + { + name: "missing key=value pair - be tolerant", + raw: "key=val;", + expected: map[string]string{"key": "val"}, + }, + { + name: "quoted values", + raw: `key="val";param = "[[ $((1 + 1)) == 3 ]] && echo panic!;" ; p=" abcd "`, + expected: map[string]string{"key": "val", "param": "[[ $((1 + 1)) == 3 ]] && echo panic!;", "p": " abcd "}, + }, + { + name: "empty quoted value", + raw: `key=""`, + expected: map[string]string{"key": ""}, + }, + { + name: "quoted double quotes", + raw: `key="\"value\""`, + expected: map[string]string{"key": `"value"`}, + }, + { + name: "quoted backslash", + raw: `key="\"\\\""`, + expected: map[string]string{"key": `"\"`}, + }, + { + name: "ignore subsequent elements", + raw: "key=a, param= b", + expected: map[string]string{"key": "a"}, + expectedRest: " param= b", + }, + { + name: "empty element - be tolerant", + raw: " , key=val", + expectedRest: " key=val", + }, + { + name: "obscure key", + raw: `ob₷C&r€ = value`, + expected: map[string]string{`ob₷c&r€`: "value"}, + }, + { + name: "duplicate parameter", + raw: "key=a; p=b; key=c", + expectedError: true, + }, + { + name: "empty parameter", + raw: "=value", + expectedError: true, + }, + { + name: "empty value", + raw: "key= ", + expectedError: true, + }, + { + name: "empty value before a new element ", + raw: "key=,", + expectedError: true, + }, + { + name: "empty value before a new pair", + raw: "key=;", + expectedError: true, + }, + { + name: "just parameter", + raw: "key", + expectedError: true, + }, + { + name: "missing key-value", + raw: "a=b;;", + expectedError: true, + }, + { + name: "unclosed quoted value", + raw: `key="value`, + expectedError: true, + }, + { + name: "escaped terminating dquote", + raw: `key="value\"`, + expectedError: true, + }, + { + name: "just a quoted value", + raw: `"key=val"`, + expectedError: true, + }, + { + name: "quoted key", + raw: `"key"=val`, + expectedError: true, + }, + } { + parsed, rest, err := parseForwardedHeader(tc.raw) + if err != nil && !tc.expectedError { + t.Errorf("[%s] got unexpected error: %v", tc.name, err) + } + if err == nil && tc.expectedError { + t.Errorf("[%s] got unexpected non-error", tc.name) + } + if err != nil || tc.expectedError { + continue + } + for key, value := range tc.expected { + v, exists := parsed[key] + if !exists { + t.Errorf("[%s] missing expected parameter %q", tc.name, key) + continue + } + if v != value { + t.Errorf("[%s] got unexpected value for parameter %q: %q != %q", tc.name, key, v, value) + } + } + for key, value := range parsed { + if _, exists := tc.expected[key]; !exists { + t.Errorf("[%s] got unexpected key/value pair: %q=%q", tc.name, key, value) + } + } + + if rest != tc.expectedRest { + t.Errorf("[%s] got unexpected unparsed string: %q != %q", tc.name, rest, tc.expectedRest) 
+ }
+ }
+}
diff --git a/registry/api/v2/routes.go b/registry/api/v2/routes.go
new file mode 100644
index 00000000..5b80d5be
--- /dev/null
+++ b/registry/api/v2/routes.go
@@ -0,0 +1,49 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the names under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+ RouteNameBase = "base"
+ RouteNameManifest = "manifest"
+ RouteNameTags = "tags"
+ RouteNameBlob = "blob"
+ RouteNameBlobUpload = "blob-upload"
+ RouteNameBlobUploadChunk = "blob-upload-chunk"
+ RouteNameCatalog = "catalog"
+)
+
+var allEndpoints = []string{
+ RouteNameManifest,
+ RouteNameCatalog,
+ RouteNameTags,
+ RouteNameBlob,
+ RouteNameBlobUpload,
+ RouteNameBlobUploadChunk,
+}
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+ return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+ rootRouter := mux.NewRouter()
+ router := rootRouter
+ if prefix != "" {
+ router = router.PathPrefix(prefix).Subrouter()
+ }
+
+ router.StrictSlash(true)
+
+ for _, descriptor := range routeDescriptors {
+ router.Path(descriptor.Path).Name(descriptor.Name)
+ }
+
+ return rootRouter
+}
diff --git a/registry/api/v2/routes_test.go b/registry/api/v2/routes_test.go
new file mode 100644
index 00000000..6c77e281
--- /dev/null
+++ b/registry/api/v2/routes_test.go
@@ -0,0 +1,355 @@
+package v2
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/gorilla/mux"
+)
+
+type routeTestCase struct {
+ RequestURI string
+ ExpectedURI string
+ Vars map[string]string
+ RouteName string
+ StatusCode int
+}
+
+// TestRouter registers a test handler with all the routes and ensures that
+// each route returns the expected path variables. No method verification is
+// performed. This is not meant to be exhaustive, but serves as a check to
+// ensure that the expected variables are extracted.
+//
+// This may go away as the application structure comes together.
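+//
+// A minimal usage sketch of the router under test (manifestHandler is a
+// hypothetical http.Handler; the route names are defined in routes.go):
+//
+//	router := RouterWithPrefix("")
+//	router.GetRoute(RouteNameManifest).Handler(manifestHandler)
+//	// inside the handler, mux.Vars(r)["name"] and mux.Vars(r)["reference"]
+//	// carry the extracted path variables.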
+func TestRouter(t *testing.T) { + testCases := []routeTestCase{ + { + RouteName: RouteNameBase, + RequestURI: "/v2/", + Vars: map[string]string{}, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/manifests/bar", + Vars: map[string]string{ + "name": "foo", + "reference": "bar", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/tag", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "tag", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "sha256:abcdef01234567890", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/tags/list", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/baz/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar/baz", + }, + }, + { + RouteName: RouteNameBlob, + RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", + Vars: map[string]string{ + "name": "foo/bar", + "digest": "sha256:abcdef0919234", + }, + }, + { + RouteName: RouteNameBlobUpload, + RequestURI: "/v2/foo/bar/blobs/uploads/", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/uuid", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "uuid", + }, + }, + { + // support uuid proper + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + }, + }, + { + // supports urlsafe base64 + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + }, + }, + { + // does not match + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", + StatusCode: http.StatusNotFound, + }, + { + // Check ambiguity: ensure we can distinguish between tags for + // "foo/bar/image/image" and image for "foo/bar/image" with tag + // "tags" + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/manifests/tags", + Vars: map[string]string{ + "name": "foo/bar/manifests", + "reference": "tags", + }, + }, + { + // This case presents an ambiguity between foo/bar with tag="tags" + // and list tags for "foo/bar/manifest" + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/manifests/tags/list", + Vars: map[string]string{ + "name": "foo/bar/manifests", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag", + Vars: map[string]string{ + "name": "locahost:8080/foo/bar/baz", + "reference": "tag", + }, + }, + } + + checkTestRouter(t, testCases, "", true) + 
checkTestRouter(t, testCases, "/prefix/", true) +} + +func TestRouterWithPathTraversals(t *testing.T) { + testCases := []routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/../../blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + ExpectedURI: "/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/../bar/baz/tags/list", + ExpectedURI: "/v2/bar/baz/tags/list", + Vars: map[string]string{ + "name": "bar/baz", + }, + }, + } + checkTestRouter(t, testCases, "", false) +} + +func TestRouterWithBadCharacters(t *testing.T) { + if testing.Short() { + testCases := []routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/blobs/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/不bar/tags/list", + StatusCode: http.StatusNotFound, + }, + } + checkTestRouter(t, testCases, "", true) + } else { + // in the long version we're going to fuzz the router + // with random UTF8 characters not in the 128 bit ASCII range. + // These are not valid characters for the router and we expect + // 404s on every test. + rand.Seed(time.Now().UTC().UnixNano()) + testCases := make([]routeTestCase, 1000) + for idx := range testCases { + testCases[idx] = routeTestCase{ + RouteName: RouteNameTags, + RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), + StatusCode: http.StatusNotFound, + } + } + checkTestRouter(t, testCases, "", true) + } +} + +func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { + router := RouterWithPrefix(prefix) + + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + testCase := routeTestCase{ + RequestURI: r.RequestURI, + Vars: mux.Vars(r), + RouteName: mux.CurrentRoute(r).GetName(), + } + + enc := json.NewEncoder(w) + + if err := enc.Encode(testCase); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) + + // Startup test server + server := httptest.NewServer(router) + + for _, testcase := range testCases { + testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI + // Register the endpoint + route := router.GetRoute(testcase.RouteName) + if route == nil { + t.Fatalf("route for name %q not found", testcase.RouteName) + } + + route.Handler(testHandler) + + u := server.URL + testcase.RequestURI + + resp, err := http.Get(u) + + if err != nil { + t.Fatalf("error issuing get request: %v", err) + } + + if testcase.StatusCode == 0 { + // Override default, zero-value + testcase.StatusCode = http.StatusOK + } + if testcase.ExpectedURI == "" { + // Override default, zero-value + testcase.ExpectedURI = testcase.RequestURI + } + + if resp.StatusCode != testcase.StatusCode { + t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) + } + + if testcase.StatusCode != http.StatusOK { + resp.Body.Close() + // We don't care about json response. 
+ continue
+ }
+
+ dec := json.NewDecoder(resp.Body)
+
+ var actualRouteInfo routeTestCase
+ if err := dec.Decode(&actualRouteInfo); err != nil {
+ t.Fatalf("error reading json response: %v", err)
+ }
+ // Needs to be set out of band
+ actualRouteInfo.StatusCode = resp.StatusCode
+
+ if actualRouteInfo.RequestURI != testcase.ExpectedURI {
+ t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI)
+ }
+
+ if actualRouteInfo.RouteName != testcase.RouteName {
+ t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName)
+ }
+
+ // when testing deep equality, the actualRouteInfo has an empty ExpectedURI; we don't want
+ // that to make the comparison fail. We're otherwise done with the testcase, so empty
+ // testcase.ExpectedURI.
+ testcase.ExpectedURI = ""
+ if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) {
+ t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase)
+ }
+
+ resp.Body.Close()
+ }
+
+}
+
+// -------------- START LICENSED CODE --------------
+// The following code is derivative of https://github.com/google/gofuzz
+// gofuzz is licensed under the Apache License, Version 2.0, January 2004,
+// a copy of which can be found in the LICENSE file at the root of this
+// repository.
+
+// These functions allow us to generate strings containing only multibyte
+// characters that are invalid in our URLs. They are used above for fuzzing
+// to ensure we always get 404s on these invalid strings.
+type charRange struct {
+ first, last rune
+}
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (r *charRange) choose() rune {
+ count := int64(r.last - r.first)
+ return r.first + rune(rand.Int63n(count))
+}
+
+var unicodeRanges = []charRange{
+ {'\u00a0', '\u02af'}, // Multi-byte encoded characters
+ {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+func randomString(length int) string {
+ runes := make([]rune, length)
+ for i := range runes {
+ runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose()
+ }
+ return string(runes)
+}
+
+// -------------- END LICENSED CODE --------------
diff --git a/registry/api/v2/urls.go b/registry/api/v2/urls.go
new file mode 100644
index 00000000..1337bdb1
--- /dev/null
+++ b/registry/api/v2/urls.go
@@ -0,0 +1,266 @@
+package v2
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/docker/distribution/reference"
+ "github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a scheme, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+ root *url.URL // url root (ie http://localhost/)
+ router *mux.Router
+ relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+ return &URLBuilder{
+ root: root,
+ router: Router(),
+ relative: relative,
+ }
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
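+//
+// A usage sketch (the root here is chosen purely for illustration):
+//
+//	ub, err := NewURLBuilderFromString("https://registry.example.com", false)
+//	if err != nil {
+//		// the root was not a valid url
+//	}
+//	base, _ := ub.BuildBaseURL() // "https://registry.example.com/v2/"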
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+ u, err := url.Parse(root)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewURLBuilder(u, relative), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+ var (
+ scheme = "http"
+ host = r.Host
+ )
+
+ if r.TLS != nil {
+ scheme = "https"
+ } else if len(r.URL.Scheme) > 0 {
+ scheme = r.URL.Scheme
+ }
+
+ // Handle forwarded headers
+ // Prefer "Forwarded" header as defined by rfc7239 if given
+ // see https://tools.ietf.org/html/rfc7239
+ if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
+ forwardedHeader, _, err := parseForwardedHeader(forwarded)
+ if err == nil {
+ if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
+ scheme = fproto
+ }
+ if fhost := forwardedHeader["host"]; len(fhost) > 0 {
+ host = fhost
+ }
+ }
+ } else {
+ if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
+ scheme = forwardedProto
+ }
+ if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+ // According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+ // comma-separated list of hosts, to which each proxy appends the
+ // requested host. We want to grab the first from this comma-separated
+ // list.
+ hosts := strings.SplitN(forwardedHost, ",", 2)
+ host = strings.TrimSpace(hosts[0])
+ }
+ }
+
+ basePath := routeDescriptorsMap[RouteNameBase].Path
+
+ requestPath := r.URL.Path
+ index := strings.Index(requestPath, basePath)
+
+ u := &url.URL{
+ Scheme: scheme,
+ Host: host,
+ }
+
+ if index > 0 {
+ // N.B. index+1 is important because we want to include the trailing /
+ u.Path = requestPath[0 : index+1]
+ }
+
+ return NewURLBuilder(u, relative)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+ route := ub.cloneRoute(RouteNameBase)
+
+ baseURL, err := route.URL()
+ if err != nil {
+ return "", err
+ }
+
+ return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories.
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameCatalog)
+
+ catalogURL, err := route.URL()
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+ route := ub.cloneRoute(RouteNameTags)
+
+ tagsURL, err := route.URL("name", name.Name())
+ if err != nil {
+ return "", err
+ }
+
+ return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+ route := ub.cloneRoute(RouteNameManifest)
+
+ tagOrDigest := ""
+ switch v := ref.(type) {
+ case reference.Tagged:
+ tagOrDigest = v.Tag()
+ case reference.Digested:
+ tagOrDigest = v.Digest().String()
+ default:
+ return "", fmt.Errorf("reference must have a tag or digest")
+ }
+
+ manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+ if err != nil {
+ return "", err
+ }
+
+ return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
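+//
+// For example (a sketch; errors elided):
+//
+//	named, _ := reference.WithName("foo/bar")
+//	ref, _ := reference.WithDigest(named, dgst) // dgst is a digest.Digest
+//	u, _ := ub.BuildBlobURL(ref)                // "<root>/v2/foo/bar/blobs/<dgst>"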
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+ route := ub.cloneRoute(RouteNameBlob)
+
+ layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+ if err != nil {
+ return "", err
+ }
+
+ return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameBlobUpload)
+
+ uploadURL, err := route.URL("name", name.Name())
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+ uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+ route := new(mux.Route)
+ root := new(url.URL)
+
+ *route = *ub.router.GetRoute(name) // clone the route
+ *root = *ub.root
+
+ return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+ *mux.Route
+ root *url.URL
+ relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+ routeURL, err := cr.Route.URL(pairs...)
+ if err != nil {
+ return nil, err
+ }
+
+ if cr.relative {
+ return routeURL, nil
+ }
+
+ if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+ routeURL.Path = routeURL.Path[1:]
+ }
+
+ url := cr.root.ResolveReference(routeURL)
+ url.Scheme = cr.root.Scheme
+ return url, nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+ merged := u.Query()
+
+ for _, v := range values {
+ for k, vv := range v {
+ merged[k] = append(merged[k], vv...)
+ }
+ }
+
+ u.RawQuery = merged.Encode()
+ return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
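+//
+// For example:
+//
+//	appendValues("/v2/_catalog", url.Values{"n": []string{"100"}})
+//	// yields "/v2/_catalog?n=100"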
+func appendValues(u string, values ...url.Values) string { + up, err := url.Parse(u) + + if err != nil { + panic(err) // should never happen + } + + return appendValuesURL(up, values...).String() +} diff --git a/registry/api/v2/urls_test.go b/registry/api/v2/urls_test.go new file mode 100644 index 00000000..4f854b23 --- /dev/null +++ b/registry/api/v2/urls_test.go @@ -0,0 +1,520 @@ +package v2 + +import ( + "fmt" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/docker/distribution/reference" +) + +type urlBuilderTestCase struct { + description string + expectedPath string + expectedErr error + build func() (string, error) +} + +func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + fooBarRef, _ := reference.WithName("foo/bar") + return []urlBuilderTestCase{ + { + description: "test base url", + expectedPath: "/v2/", + expectedErr: nil, + build: urlBuilder.BuildBaseURL, + }, + { + description: "test tags url", + expectedPath: "/v2/foo/bar/tags/list", + expectedErr: nil, + build: func() (string, error) { + return urlBuilder.BuildTagsURL(fooBarRef) + }, + }, + { + description: "test manifest url tagged ref", + expectedPath: "/v2/foo/bar/manifests/tag", + expectedErr: nil, + build: func() (string, error) { + ref, _ := reference.WithTag(fooBarRef, "tag") + return urlBuilder.BuildManifestURL(ref) + }, + }, + { + description: "test manifest url bare ref", + expectedPath: "", + expectedErr: fmt.Errorf("reference must have a tag or digest"), + build: func() (string, error) { + return urlBuilder.BuildManifestURL(fooBarRef) + }, + }, + { + description: "build blob url", + expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + expectedErr: nil, + build: func() (string, error) { + ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") + return urlBuilder.BuildBlobURL(ref) + }, + }, + { + description: "build blob upload url", + expectedPath: "/v2/foo/bar/blobs/uploads/", + expectedErr: nil, + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL(fooBarRef) + }, + }, + { + description: "build blob upload url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", + expectedErr: nil, + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ + "size": []string{"10000"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, + }) + }, + }, + { + description: "build blob upload chunk url", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", + expectedErr: nil, + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") + }, + }, + { + description: "build blob upload chunk url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", + expectedErr: nil, + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ + "size": []string{"10000"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, + }) + }, + }, + } +} + +// TestURLBuilder tests the various url building functions, ensuring they are +// returning the expected values. 
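+// Each case is exercised in both modes: with relative=true only the path is
+// returned (useful behind path-rewriting proxies), and with relative=false
+// the configured root is prepended to the expected path.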
+func TestURLBuilder(t *testing.T) { + roots := []string{ + "http://example.com", + "https://example.com", + "http://localhost:5000", + "https://localhost:5443", + } + + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + expectedErr := testCase.expectedErr + if !reflect.DeepEqual(expectedErr, err) { + t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) + } + if expectedErr != nil { + continue + } + + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root + expectedURL + } + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } + } + doTest(true) + doTest(false) +} + +func TestURLBuilderWithPrefix(t *testing.T) { + roots := []string{ + "http://example.com/prefix/", + "https://example.com/prefix/", + "http://localhost:5000/prefix/", + "https://localhost:5443/prefix/", + } + + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + expectedErr := testCase.expectedErr + if !reflect.DeepEqual(expectedErr, err) { + t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) + } + if expectedErr != nil { + continue + } + + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root[0:len(root)-1] + expectedURL + } + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } + } + doTest(true) + doTest(false) +} + +type builderFromRequestTestCase struct { + request *http.Request + base string +} + +func TestBuilderFromRequest(t *testing.T) { + u, err := url.Parse("http://example.com") + if err != nil { + t.Fatal(err) + } + + testRequests := []struct { + name string + request *http.Request + base string + configHost url.URL + }{ + { + name: "no forwarded header", + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com", + }, + { + name: "https protocol forwarded with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Custom-Forwarded-Proto": []string{"https"}, + }}, + base: "http://example.com", + }, + { + name: "forwarded protocol is the same", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + }}, + base: "https://example.com", + }, + { + name: "forwarded host with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com"}, + }}, + base: "http://first.example.com", + }, + { + name: "forwarded multiple hosts a with non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"}, + }}, + base: "http://first.example.com", + }, + { + name: "host configured in config file takes priority", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"}, + }}, + base: "https://third.example.com:5000", + configHost: url.URL{ + Scheme: "https", 
+				Host:   "third.example.com:5000",
+			},
+		},
+		{
+			name: "forwarded host and port with just one non-standard header",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"X-Forwarded-Host": []string{"first.example.com:443"},
+			}},
+			base: "http://first.example.com:443",
+		},
+		{
+			name: "forwarded port with a non-standard header",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"X-Forwarded-Host": []string{"example.com:5000"},
+				"X-Forwarded-Port": []string{"5000"},
+			}},
+			base: "http://example.com:5000",
+		},
+		{
+			name: "forwarded multiple ports with a non-standard header",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"X-Forwarded-Port": []string{"443 , 5001"},
+			}},
+			base: "http://example.com",
+		},
+		{
+			name: "forwarded standard port with non-standard headers",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"X-Forwarded-Proto": []string{"https"},
+				"X-Forwarded-Host":  []string{"example.com"},
+				"X-Forwarded-Port":  []string{"443"},
+			}},
+			base: "https://example.com",
+		},
+		{
+			name: "forwarded standard port with non-standard headers and explicit port",
+			request: &http.Request{URL: u, Host: u.Host + ":443", Header: http.Header{
+				"X-Forwarded-Proto": []string{"https"},
+				"X-Forwarded-Host":  []string{u.Host + ":443"},
+				"X-Forwarded-Port":  []string{"443"},
+			}},
+			base: "https://example.com:443",
+		},
+		{
+			name: "several non-standard headers",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"X-Forwarded-Proto": []string{"https"},
+				"X-Forwarded-Host":  []string{" first.example.com:12345 "},
+			}},
+			base: "https://first.example.com:12345",
+		},
+		{
+			name: "forwarded host with port supplied takes priority",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"X-Forwarded-Host": []string{"first.example.com:5000"},
+				"X-Forwarded-Port": []string{"80"},
+			}},
+			base: "http://first.example.com:5000",
+		},
+		{
+			name: "malformed forwarded port",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"X-Forwarded-Host": []string{"first.example.com"},
+				"X-Forwarded-Port": []string{"abcd"},
+			}},
+			base: "http://first.example.com",
+		},
+		{
+			name: "forwarded protocol and addr using standard header",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"Forwarded": []string{`proto=https;host="192.168.22.30:80"`},
+			}},
+			base: "https://192.168.22.30:80",
+		},
+		{
+			name: "forwarded host takes priority over for",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"Forwarded": []string{`host="reg.example.com:5000";for="192.168.22.30"`},
+			}},
+			base: "http://reg.example.com:5000",
+		},
+		{
+			name: "forwarded host and protocol using standard header",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"Forwarded": []string{`host=reg.example.com;proto=https`},
+			}},
+			base: "https://reg.example.com",
+		},
+		{
+			name: "process just the first standard forwarded header",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"Forwarded": []string{`host="reg.example.com:88";proto=http`, `host=reg.example.com;proto=https`},
+			}},
+			base: "http://reg.example.com:88",
+		},
+		{
+			name: "process just the first list element of standard header",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"Forwarded": []string{`host="reg.example.com:443";proto=https, host="reg.example.com:80";proto=http`},
+			}},
+			base: "https://reg.example.com:443",
+		},
host", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`for="2607:f0d0:1002:51::4";host="[2607:f0d0:1002:51::4]:5001"`}, + "X-Forwarded-Port": []string{"5002"}, + }}, + base: "http://[2607:f0d0:1002:51::4]:5001", + }, + { + name: "IPv6 address with port", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="[2607:f0d0:1002:51::4]:4000"`}, + "X-Forwarded-Port": []string{"5001"}, + }}, + base: "http://[2607:f0d0:1002:51::4]:4000", + }, + { + name: "non-standard and standard forward headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{``}, + "Forwarded": []string{`host=first.example.com; proto=https`}, + }}, + base: "https://first.example.com", + }, + { + name: "standard header takes precedence over non-standard headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`http`}, + "Forwarded": []string{`host=second.example.com; proto=https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "https://second.example.com", + }, + { + name: "incomplete standard header uses default", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "Forwarded": []string{`for=127.0.0.1`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "http://" + u.Host, + }, + { + name: "standard with just proto", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "Forwarded": []string{`proto=https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "https://" + u.Host, + }, + } + + doTest := func(relative bool) { + for _, tr := range testRequests { + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost, relative) + } else { + builder = NewURLBuilderFromRequest(tr.request, relative) + } + + for _, testCase := range makeURLBuilderTestCases(builder) { + buildURL, err := testCase.build() + expectedErr := testCase.expectedErr + if !reflect.DeepEqual(expectedErr, err) { + t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) + } + if expectedErr != nil { + continue + } + + expectedURL := testCase.expectedPath + if !relative { + expectedURL = tr.base + expectedURL + } + + if buildURL != expectedURL { + t.Errorf("[relative=%t, request=%q, case=%q]: %q != %q", relative, tr.name, testCase.description, buildURL, expectedURL) + } + } + } + } + + doTest(true) + doTest(false) +} + +func TestBuilderFromRequestWithPrefix(t *testing.T) { + u, err := url.Parse("http://example.com/prefix/v2/") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + testRequests := []struct { + request *http.Request + base string + configHost url.URL + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com/prefix/", + }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com/prefix/", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com/prefix/", 
+func TestBuilderFromRequestWithPrefix(t *testing.T) {
+	u, err := url.Parse("http://example.com/prefix/v2/")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	forwardedProtoHeader := make(http.Header, 1)
+	forwardedProtoHeader.Set("X-Forwarded-Proto", "https")
+
+	testRequests := []struct {
+		request    *http.Request
+		base       string
+		configHost url.URL
+	}{
+		{
+			request: &http.Request{URL: u, Host: u.Host},
+			base:    "http://example.com/prefix/",
+		},
+		{
+			request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
+			base:    "http://example.com/prefix/",
+		},
+		{
+			request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
+			base:    "https://example.com/prefix/",
+		},
+		{
+			request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
+			base:    "https://subdomain.example.com/prefix/",
+			configHost: url.URL{
+				Scheme: "https",
+				Host:   "subdomain.example.com",
+				Path:   "/prefix/",
+			},
+		},
+	}
+
+	var relative bool
+	for _, tr := range testRequests {
+		var builder *URLBuilder
+		if tr.configHost.Scheme != "" && tr.configHost.Host != "" {
+			builder = NewURLBuilder(&tr.configHost, false)
+		} else {
+			builder = NewURLBuilderFromRequest(tr.request, false)
+		}
+
+		for _, testCase := range makeURLBuilderTestCases(builder) {
+			buildURL, err := testCase.build()
+			expectedErr := testCase.expectedErr
+			if !reflect.DeepEqual(expectedErr, err) {
+				t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err)
+			}
+			if expectedErr != nil {
+				continue
+			}
+
+			var expectedURL string
+			proto, ok := tr.request.Header["X-Forwarded-Proto"]
+			if !ok {
+				expectedURL = testCase.expectedPath
+				if !relative {
+					expectedURL = tr.base[0:len(tr.base)-1] + expectedURL
+				}
+			} else {
+				urlBase, err := url.Parse(tr.base)
+				if err != nil {
+					t.Fatal(err)
+				}
+				urlBase.Scheme = proto[0]
+				expectedURL = testCase.expectedPath
+				if !relative {
+					expectedURL = urlBase.String()[0:len(urlBase.String())-1] + expectedURL
+				}
+			}
+
+			if buildURL != expectedURL {
+				t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL)
+			}
+		}
+	}
+}
diff --git a/registry/auth/auth.go b/registry/auth/auth.go
new file mode 100644
index 00000000..91c7af3f
--- /dev/null
+++ b/registry/auth/auth.go
@@ -0,0 +1,201 @@
+// Package auth defines a standard interface for request access controllers.
+//
+// An access controller has a simple interface with a single `Authorized`
+// method which checks that a given request is authorized to perform one or
+// more actions on one or more resources. This method should return a non-nil
+// error if the request is not authorized.
+//
+// An implementation registers its access controller by name with a constructor
+// which accepts an options map for configuring the access controller.
+//
+//    options := map[string]interface{}{"sillySecret": "whysosilly?"}
+//    accessController, _ := auth.GetAccessController("silly", options)
+//
+// This `accessController` can then be used in a request handler like so:
+//
+//    func updateOrder(w http.ResponseWriter, r *http.Request) {
+//        orderNumber := r.FormValue("orderNumber")
+//        resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
+//        access := auth.Access{Resource: resource, Action: "update"}
+//
+//        if ctx, err := accessController.Authorized(ctx, access); err != nil {
+//            if challenge, ok := err.(auth.Challenge); ok {
+//                // Let the challenge write the response.
+//                challenge.SetHeaders(w)
+//                w.WriteHeader(http.StatusUnauthorized)
+//                return
+//            } else {
+//                // Some other error.
+//            }
+//        }
+//    }
+//
+package auth
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+)
+
+const (
+	// UserKey is used to get the user object from
+	// a user context
+	UserKey = "auth.user"
+
+	// UserNameKey is used to get the user name from
+	// a user context
+	UserNameKey = "auth.user.name"
+)
+
+var (
+	// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
+	ErrInvalidCredential = errors.New("invalid authorization credential")
+
+	// ErrAuthenticationFailure is returned when authentication fails.
+	ErrAuthenticationFailure = errors.New("authentication failure")
+)
+
+// UserInfo carries information about
+// an authenticated/authorized client.
+type UserInfo struct {
+	Name string
+}
+
+// Resource describes a resource by type and name.
+type Resource struct {
+	Type  string
+	Class string
+	Name  string
+}
+
+// Access describes a specific action that is
+// requested or allowed for a given resource.
+type Access struct {
+	Resource
+	Action string
+}
+
+// Challenge is a special error type which is used for HTTP 401 Unauthorized
+// responses and is able to write the response with WWW-Authenticate challenge
+// header values based on the error.
+type Challenge interface {
+	error
+
+	// SetHeaders prepares the request to conduct a challenge response by
+	// adding an HTTP challenge header to the response message. Callers
+	// are expected to set the appropriate HTTP status code (e.g. 401)
+	// themselves.
+	SetHeaders(w http.ResponseWriter)
+}
+
+// AccessController controls access to registry resources based on a request
+// and required access levels for a request. Implementations can support both
+// complete denial and http authorization challenges.
+type AccessController interface {
+	// Authorized returns a new authorized context and a nil error if the
+	// request is granted access. If one or more Access structs are
+	// provided, the requested access will be compared with what is available
+	// to the context. The given context will contain a "http.request" key with
+	// a `*http.Request` value. If the error is non-nil, access should always
+	// be denied. The error may be of type Challenge, in which case the caller
+	// may have the Challenge handle the request or choose what action to take
+	// based on the Challenge header or response status. The returned context
+	// object should have an "auth.user" value set to a UserInfo struct.
+	Authorized(ctx context.Context, access ...Access) (context.Context, error)
+}
+
+// CredentialAuthenticator is an object which is able to authenticate credentials
+type CredentialAuthenticator interface {
+	AuthenticateUser(username, password string) error
+}
+
+// WithUser returns a context with the authorized user info.
+func WithUser(ctx context.Context, user UserInfo) context.Context {
+	return userInfoContext{
+		Context: ctx,
+		user:    user,
+	}
+}
+
+type userInfoContext struct {
+	context.Context
+	user UserInfo
+}
+
+func (uic userInfoContext) Value(key interface{}) interface{} {
+	switch key {
+	case UserKey:
+		return uic.user
+	case UserNameKey:
+		return uic.user.Name
+	}
+
+	return uic.Context.Value(key)
+}
+
+// WithResources returns a context with the authorized resources.
+func WithResources(ctx context.Context, resources []Resource) context.Context {
+	return resourceContext{
+		Context:   ctx,
+		resources: resources,
+	}
+}
+
+type resourceContext struct {
+	context.Context
+	resources []Resource
+}
+
+type resourceKey struct{}
+
+func (rc resourceContext) Value(key interface{}) interface{} {
+	if key == (resourceKey{}) {
+		return rc.resources
+	}
+
+	return rc.Context.Value(key)
+}
+
+// AuthorizedResources returns the list of resources which have
+// been authorized for this request.
+func AuthorizedResources(ctx context.Context) []Resource {
+	if resources, ok := ctx.Value(resourceKey{}).([]Resource); ok {
+		return resources
+	}
+
+	return nil
+}
+
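Before the registration machinery defined next, it may help to see how a backend plugs in. A hedged sketch of a minimal controller wired through InitFunc and Register, written as if it lived in this package; the "allowall" name and type are inventions for illustration, not part of the package:

    // allowAll is a hypothetical controller that grants every request.
    type allowAll struct{}

    func (allowAll) Authorized(ctx context.Context, access ...Access) (context.Context, error) {
    	// Grant access unconditionally and tag the context with a fixed user.
    	return WithUser(ctx, UserInfo{Name: "anonymous"}), nil
    }

    func init() {
    	// The function literal satisfies InitFunc; options are ignored here.
    	_ = Register("allowall", func(options map[string]interface{}) (AccessController, error) {
    		return allowAll{}, nil
    	})
    }
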
+// InitFunc is the type of an AccessController factory function and is used
+// to register the constructor for different AccessController backends.
+type InitFunc func(options map[string]interface{}) (AccessController, error)
+
+var accessControllers map[string]InitFunc
+
+func init() {
+	accessControllers = make(map[string]InitFunc)
+}
+
+// Register is used to register an InitFunc for
+// an AccessController backend with the given name.
+func Register(name string, initFunc InitFunc) error {
+	if _, exists := accessControllers[name]; exists {
+		return fmt.Errorf("name already registered: %s", name)
+	}
+
+	accessControllers[name] = initFunc
+
+	return nil
+}
+
+// GetAccessController constructs an AccessController
+// with the given options using the named backend.
+func GetAccessController(name string, options map[string]interface{}) (AccessController, error) {
+	if initFunc, exists := accessControllers[name]; exists {
+		return initFunc(options)
+	}
+
+	return nil, fmt.Errorf("no access controller registered with name: %s", name)
+}
diff --git a/registry/auth/htpasswd/access.go b/registry/auth/htpasswd/access.go
new file mode 100644
index 00000000..8f40913b
--- /dev/null
+++ b/registry/auth/htpasswd/access.go
@@ -0,0 +1,116 @@
+// Package htpasswd provides a simple authentication scheme that checks for the
+// user credential hash in an htpasswd formatted file in a configuration-determined
+// location.
+//
+// This authentication method MUST be used under TLS, as a simple token-replay attack is possible.
+package htpasswd
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"os"
+	"sync"
+	"time"
+
+	dcontext "github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+)
+
+type accessController struct {
+	realm    string
+	path     string
+	modtime  time.Time
+	mu       sync.Mutex
+	htpasswd *htpasswd
+}
+
+var _ auth.AccessController = &accessController{}
+
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
+	realm, present := options["realm"]
+	if _, ok := realm.(string); !present || !ok {
+		return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`)
+	}
+
+	path, present := options["path"]
+	if _, ok := path.(string); !present || !ok {
+		return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`)
+	}
+
+	return &accessController{realm: realm.(string), path: path.(string)}, nil
+}
+
+func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
+	req, err := dcontext.GetRequest(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	username, password, ok := req.BasicAuth()
+	if !ok {
+		return nil, &challenge{
+			realm: ac.realm,
+			err:   auth.ErrInvalidCredential,
+		}
+	}
+
+	// Dynamically parse the latest account list
+	fstat, err := os.Stat(ac.path)
+	if err != nil {
+		return nil, err
+	}
+
+	lastModified := fstat.ModTime()
+	ac.mu.Lock()
+	if ac.htpasswd == nil || !ac.modtime.Equal(lastModified) {
+		ac.modtime = lastModified
+
+		f, err := os.Open(ac.path)
+		if err != nil {
+			ac.mu.Unlock()
+			return nil, err
+		}
+		defer f.Close()
+
+		h, err := newHTPasswd(f)
+		if err != nil {
+			ac.mu.Unlock()
+			return nil, err
+		}
+		ac.htpasswd = h
+	}
+	localHTPasswd := ac.htpasswd
+	ac.mu.Unlock()
+
+	if err := localHTPasswd.authenticateUser(username, password); err != nil {
+		dcontext.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err)
+		return nil, &challenge{
+			realm: ac.realm,
+			err:   auth.ErrAuthenticationFailure,
+		}
+	}
+
+	return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil
+}
+
+// challenge implements the auth.Challenge interface.
+type challenge struct {
+	realm string
+	err   error
+}
+
+var _ auth.Challenge = challenge{}
+
+// SetHeaders sets the basic challenge header on the response.
+func (ch challenge) SetHeaders(w http.ResponseWriter) {
+	w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm))
+}
+
+func (ch challenge) Error() string {
+	return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err)
+}
+
+func init() {
+	auth.Register("htpasswd", auth.InitFunc(newAccessController))
+}
diff --git a/registry/auth/htpasswd/access_test.go b/registry/auth/htpasswd/access_test.go
new file mode 100644
index 00000000..553f05cf
--- /dev/null
+++ b/registry/auth/htpasswd/access_test.go
@@ -0,0 +1,122 @@
+package htpasswd
+
+import (
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+)
+
+func TestBasicAccessController(t *testing.T) {
+	testRealm := "The-Shire"
+	testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"}
+	testPasswords := []string{"baggins", "baggins", "새주", "공주님"}
+	testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=
+frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W
+MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2
+DeokMan:공주님`
+
+	tempFile, err := ioutil.TempFile("", "htpasswd-test")
+	if err != nil {
+		t.Fatal("could not create temporary htpasswd file")
+	}
+	if _, err = tempFile.WriteString(testHtpasswdContent); err != nil {
+		t.Fatal("could not write temporary htpasswd file")
+	}
+
+	options := map[string]interface{}{
+		"realm": testRealm,
+		"path":  tempFile.Name(),
+	}
+	ctx := context.Background()
+
+	accessController, err := newAccessController(options)
+	if err != nil {
+		t.Fatal("error creating access controller")
+	}
+
+	tempFile.Close()
+
+	var userNumber = 0
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := context.WithRequest(ctx, r)
+		authCtx, err := accessController.Authorized(ctx)
+		if err != nil {
+			switch err := err.(type) {
+			case auth.Challenge:
+				err.SetHeaders(w)
+				w.WriteHeader(http.StatusUnauthorized)
+				return
+			default:
+				t.Fatalf("unexpected error authorizing request: %v", err)
+			}
+		}
+
+		userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo)
+		if !ok {
+			t.Fatal("basic accessController did not set auth.user context")
+		}
+
+		if userInfo.Name != testUsers[userNumber] {
+			t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name)
+		}
+
+		w.WriteHeader(http.StatusNoContent)
+	}))
+
+	client := &http.Client{
+		CheckRedirect: nil,
+	}
+
+	req, _ := http.NewRequest("GET", server.URL, nil)
+	resp, err := client.Do(req)
+
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Request should not be authorized
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
+	}
+
+	nonbcrypt := map[string]struct{}{
+		"bilbo":   {},
+		"DeokMan": {},
+	}
+
+	for i := 0; i < len(testUsers); i++ {
+		userNumber = i
+		req, err := http.NewRequest("GET", server.URL, nil)
+		if err != nil {
+			t.Fatalf("error allocating new request: %v", err)
+		}
+
+		req.SetBasicAuth(testUsers[i], testPasswords[i])
+
+		resp, err = client.Do(req)
+		if err != nil {
+			t.Fatalf("unexpected error during GET: %v", err)
+		}
+		defer resp.Body.Close()
+
+		if _, ok := nonbcrypt[testUsers[i]]; ok {
+			// these are not allowed.
+			// Request should not be authorized
+			if resp.StatusCode != http.StatusUnauthorized {
+				t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i])
+			}
+		} else {
+			// Request should be authorized
+			if resp.StatusCode != http.StatusNoContent {
+				t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i])
+			}
+		}
+	}
+
+}
diff --git a/registry/auth/htpasswd/htpasswd.go b/registry/auth/htpasswd/htpasswd.go
new file mode 100644
index 00000000..b10b256c
--- /dev/null
+++ b/registry/auth/htpasswd/htpasswd.go
@@ -0,0 +1,82 @@
+package htpasswd
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/docker/distribution/registry/auth"
+
+	"golang.org/x/crypto/bcrypt"
+)
+
+// htpasswd holds the parsed entries of an htpasswd file and the machinery to
+// parse it. Only bcrypt hash entries are supported.
+type htpasswd struct {
+	entries map[string][]byte // maps username to password byte slice.
+}
+
+// newHTPasswd parses the reader and returns an htpasswd or an error.
+func newHTPasswd(rd io.Reader) (*htpasswd, error) {
+	entries, err := parseHTPasswd(rd)
+	if err != nil {
+		return nil, err
+	}
+
+	return &htpasswd{entries: entries}, nil
+}
+
+// authenticateUser checks a given user:password credential against the
+// receiving htpasswd's entries. If the check passes, nil is returned.
+func (htpasswd *htpasswd) authenticateUser(username string, password string) error {
+	credentials, ok := htpasswd.entries[username]
+	if !ok {
+		// timing attack paranoia
+		bcrypt.CompareHashAndPassword([]byte{}, []byte(password))
+
+		return auth.ErrAuthenticationFailure
+	}
+
+	err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password))
+	if err != nil {
+		return auth.ErrAuthenticationFailure
+	}
+
+	return nil
+}
+
+// parseHTPasswd parses the contents of htpasswd. This will read all the
+// entries in the file, whether or not they are needed. An error is returned
+// if a syntax error is encountered or if the reader fails.
+func parseHTPasswd(rd io.Reader) (map[string][]byte, error) {
+	entries := map[string][]byte{}
+	scanner := bufio.NewScanner(rd)
+	var line int
+	for scanner.Scan() {
+		line++ // 1-based line numbering
+		t := strings.TrimSpace(scanner.Text())
+
+		if len(t) < 1 {
+			continue
+		}
+
+		// lines that *begin* with a '#' are considered comments
+		if t[0] == '#' {
+			continue
+		}
+
+		i := strings.Index(t, ":")
+		if i < 0 || i >= len(t) {
+			return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text())
+		}
+
+		entries[t[:i]] = []byte(t[i+1:])
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return entries, nil
+}
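Since authenticateUser only ever succeeds against bcrypt entries, htpasswd lines have to be generated accordingly. A small sketch using the same golang.org/x/crypto/bcrypt dependency imported above; the user name and cost are illustrative:

    package main

    import (
    	"fmt"

    	"golang.org/x/crypto/bcrypt"
    )

    func main() {
    	// Produce a "user:hash" line of the only kind authenticateUser accepts.
    	hash, err := bcrypt.GenerateFromPassword([]byte("baggins"), bcrypt.DefaultCost)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("frodo:%s\n", hash) // e.g. frodo:$2a$10$...
    }
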
diff --git a/registry/auth/htpasswd/htpasswd_test.go b/registry/auth/htpasswd/htpasswd_test.go
new file mode 100644
index 00000000..309c359a
--- /dev/null
+++ b/registry/auth/htpasswd/htpasswd_test.go
@@ -0,0 +1,85 @@
+package htpasswd
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestParseHTPasswd(t *testing.T) {
+
+	for _, tc := range []struct {
+		desc    string
+		input   string
+		err     error
+		entries map[string][]byte
+	}{
+		{
+			desc: "basic example",
+			input: `
+# This is a comment in a basic example.
+bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=
+frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W
+MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2
+DeokMan:공주님
+`,
+			entries: map[string][]byte{
+				"bilbo":   []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="),
+				"frodo":   []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"),
+				"MiShil":  []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"),
+				"DeokMan": []byte("공주님"),
+			},
+		},
+		{
+			desc: "ensures comments are filtered",
+			input: `
+# asdf:asdf
+`,
+		},
+		{
+			desc: "ensure midline hash is not comment",
+			input: `
+asdf:as#df
+`,
+			entries: map[string][]byte{
+				"asdf": []byte("as#df"),
+			},
+		},
+		{
+			desc: "ensure invalid entry is detected",
+			input: `
+# A valid comment
+valid:entry
+asdf
+`,
+			err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`),
+		},
+	} {
+
+		entries, err := parseHTPasswd(strings.NewReader(tc.input))
+		if err != tc.err {
+			if tc.err == nil {
+				t.Fatalf("%s: unexpected error: %v", tc.desc, err)
+			} else {
+				if err.Error() != tc.err.Error() { // use string equality here.
+					t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err)
+				}
+			}
+		}
+
+		if tc.err != nil {
+			continue // don't test output
+		}
+
+		// allow empty and nil to be equal
+		if tc.entries == nil {
+			tc.entries = map[string][]byte{}
+		}
+
+		if !reflect.DeepEqual(entries, tc.entries) {
+			t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries)
+		}
+	}
+
+}
diff --git a/registry/auth/silly/access.go b/registry/auth/silly/access.go
new file mode 100644
index 00000000..f7bbe6e0
--- /dev/null
+++ b/registry/auth/silly/access.go
@@ -0,0 +1,102 @@
+// Package silly provides a simple authentication scheme that checks for the
+// existence of an Authorization header and issues access if it is present and
+// non-empty.
+//
+// This package is present as an example implementation of a minimal
+// auth.AccessController and for testing. This is not suitable for any kind of
+// production security.
+package silly
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	dcontext "github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+)
+
+// accessController provides a simple implementation of auth.AccessController
+// that checks for a non-empty Authorization header. It is useful for
+// demonstration and testing.
+type accessController struct {
+	realm   string
+	service string
+}
+
+var _ auth.AccessController = &accessController{}
+
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
+	realm, present := options["realm"]
+	if _, ok := realm.(string); !present || !ok {
+		return nil, fmt.Errorf(`"realm" must be set for silly access controller`)
+	}
+
+	service, present := options["service"]
+	if _, ok := service.(string); !present || !ok {
+		return nil, fmt.Errorf(`"service" must be set for silly access controller`)
+	}
+
+	return &accessController{realm: realm.(string), service: service.(string)}, nil
+}
+
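For completeness, the controller can also be built through the generic registry once this package is imported for its side effect. A hedged sketch; the realm and service values are placeholders:

    package main

    import (
    	"fmt"

    	"github.com/docker/distribution/registry/auth"
    	_ "github.com/docker/distribution/registry/auth/silly" // registers "silly"
    )

    func main() {
    	ac, err := auth.GetAccessController("silly", map[string]interface{}{
    		"realm":   "registry.example.com",
    		"service": "registry.example.com",
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%T\n", ac)
    }
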
+// Authorized simply checks for the existence of the authorization header,
+// responding with a bearer challenge if it doesn't exist.
+func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
+	req, err := dcontext.GetRequest(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if req.Header.Get("Authorization") == "" {
+		challenge := challenge{
+			realm:   ac.realm,
+			service: ac.service,
+		}
+
+		if len(accessRecords) > 0 {
+			var scopes []string
+			for _, access := range accessRecords {
+				scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action))
+			}
+			challenge.scope = strings.Join(scopes, " ")
+		}
+
+		return nil, &challenge
+	}
+
+	ctx = auth.WithUser(ctx, auth.UserInfo{Name: "silly"})
+	ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, auth.UserNameKey, auth.UserKey))
+
+	return ctx, nil
+}
+
+type challenge struct {
+	realm   string
+	service string
+	scope   string
+}
+
+var _ auth.Challenge = challenge{}
+
+// SetHeaders sets a simple bearer challenge on the response.
+func (ch challenge) SetHeaders(w http.ResponseWriter) {
+	header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service)
+
+	if ch.scope != "" {
+		header = fmt.Sprintf("%s,scope=%q", header, ch.scope)
+	}
+
+	w.Header().Set("WWW-Authenticate", header)
+}
+
+func (ch challenge) Error() string {
+	return fmt.Sprintf("silly authentication challenge: %#v", ch)
+}
+
+// init registers the silly auth backend.
+func init() {
+	auth.Register("silly", auth.InitFunc(newAccessController))
+}
diff --git a/registry/auth/silly/access_test.go b/registry/auth/silly/access_test.go
new file mode 100644
index 00000000..0a5103e6
--- /dev/null
+++ b/registry/auth/silly/access_test.go
@@ -0,0 +1,71 @@
+package silly
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+)
+
+func TestSillyAccessController(t *testing.T) {
+	ac := &accessController{
+		realm:   "test-realm",
+		service: "test-service",
+	}
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := context.WithRequest(context.Background(), r)
+		authCtx, err := ac.Authorized(ctx)
+		if err != nil {
+			switch err := err.(type) {
+			case auth.Challenge:
+				err.SetHeaders(w)
+				w.WriteHeader(http.StatusUnauthorized)
+				return
+			default:
+				t.Fatalf("unexpected error authorizing request: %v", err)
+			}
+		}
+
+		userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo)
+		if !ok {
+			t.Fatal("silly accessController did not set auth.user context")
+		}
+
+		if userInfo.Name != "silly" {
+			t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name)
+		}
+
+		w.WriteHeader(http.StatusNoContent)
+	}))
+
+	resp, err := http.Get(server.URL)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Request should not be authorized
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
+	}
+
+	req, err := http.NewRequest("GET", server.URL, nil)
+	if err != nil {
+		t.Fatalf("unexpected error creating new request: %v", err)
+	}
+	req.Header.Set("Authorization", "seriously, anything")
+
+	resp, err = http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Request should now be authorized
+	if resp.StatusCode != http.StatusNoContent {
+		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent)
+	}
+}
diff --git a/registry/auth/token/accesscontroller.go b/registry/auth/token/accesscontroller.go
new file mode 100644
index 00000000..3086c2cf
--- /dev/null
+++ b/registry/auth/token/accesscontroller.go
@@ -0,0 +1,273 @@
+package token
+
+import (
+	"context"
+	"crypto"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+
+	dcontext "github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+	"github.com/docker/libtrust"
+)
+
+// accessSet maps a typed, named resource to
+// a set of actions requested or authorized.
+type accessSet map[auth.Resource]actionSet
+
+// newAccessSet constructs an accessSet from
+// a variable number of auth.Access items.
+func newAccessSet(accessItems ...auth.Access) accessSet {
+	accessSet := make(accessSet, len(accessItems))
+
+	for _, access := range accessItems {
+		resource := auth.Resource{
+			Type: access.Type,
+			Name: access.Name,
+		}
+
+		set, exists := accessSet[resource]
+		if !exists {
+			set = newActionSet()
+			accessSet[resource] = set
+		}
+
+		set.add(access.Action)
+	}
+
+	return accessSet
+}
+
+// contains returns whether or not the given access is in this accessSet.
+func (s accessSet) contains(access auth.Access) bool {
+	actionSet, ok := s[access.Resource]
+	if ok {
+		return actionSet.contains(access.Action)
+	}
+
+	return false
+}
+
+// scopeParam returns a collection of scopes which can
+// be used for a WWW-Authenticate challenge parameter.
+// See https://tools.ietf.org/html/rfc6750#section-3
+func (s accessSet) scopeParam() string {
+	scopes := make([]string, 0, len(s))
+
+	for resource, actionSet := range s {
+		actions := strings.Join(actionSet.keys(), ",")
+		scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions))
+	}
+
+	return strings.Join(scopes, " ")
+}
+
+// Errors used and exported by this package.
+var (
+	ErrInsufficientScope = errors.New("insufficient scope")
+	ErrTokenRequired     = errors.New("authorization token required")
+)
+
+// authChallenge implements the auth.Challenge interface.
+type authChallenge struct {
+	err       error
+	realm     string
+	service   string
+	accessSet accessSet
+}
+
+var _ auth.Challenge = authChallenge{}
+
+// Error returns the internal error string for this authChallenge.
+func (ac authChallenge) Error() string {
+	return ac.err.Error()
+}
+
+// Status returns the HTTP Response Status Code for this authChallenge.
+func (ac authChallenge) Status() int {
+	return http.StatusUnauthorized
+}
+
+// challengeParams constructs the value to be used in
+// the WWW-Authenticate response challenge header.
+// See https://tools.ietf.org/html/rfc6750#section-3
+func (ac authChallenge) challengeParams() string {
+	str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service)
+
+	if scope := ac.accessSet.scopeParam(); scope != "" {
+		str = fmt.Sprintf("%s,scope=%q", str, scope)
+	}
+
+	if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken {
+		str = fmt.Sprintf("%s,error=%q", str, "invalid_token")
+	} else if ac.err == ErrInsufficientScope {
+		str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope")
+	}
+
+	return str
+}
+
+// SetHeaders sets the WWW-Authenticate value for the response.
+func (ac authChallenge) SetHeaders(w http.ResponseWriter) {
+	w.Header().Add("WWW-Authenticate", ac.challengeParams())
+}
+
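One property worth noting: newAccessSet groups all requested actions per resource, so scopeParam emits a single scope token per resource. An in-package sketch of that grouping; the repository name and actions are arbitrary, and action order may vary because the underlying sets are maps:

    s := newAccessSet(
    	auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "pull"},
    	auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "push"},
    )
    fmt.Println(s.scopeParam()) // e.g. "repository:foo/bar:pull,push"
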
+// accessController implements the auth.AccessController interface.
+type accessController struct {
+	realm       string
+	issuer      string
+	service     string
+	rootCerts   *x509.CertPool
+	trustedKeys map[string]libtrust.PublicKey
+}
+
+// tokenAccessOptions is a convenience type for handling
+// options to the constructor of an accessController.
+type tokenAccessOptions struct {
+	realm          string
+	issuer         string
+	service        string
+	rootCertBundle string
+}
+
+// checkOptions gathers the necessary options
+// for an accessController from the given map.
+func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
+	var opts tokenAccessOptions
+
+	keys := []string{"realm", "issuer", "service", "rootcertbundle"}
+	vals := make([]string, 0, len(keys))
+	for _, key := range keys {
+		val, ok := options[key].(string)
+		if !ok {
+			return opts, fmt.Errorf("token auth requires a valid option string: %q", key)
+		}
+		vals = append(vals, val)
+	}
+
+	opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3]
+
+	return opts, nil
+}
+
+// newAccessController creates an accessController using the given options.
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
+	config, err := checkOptions(options)
+	if err != nil {
+		return nil, err
+	}
+
+	fp, err := os.Open(config.rootCertBundle)
+	if err != nil {
+		return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
+	}
+	defer fp.Close()
+
+	rawCertBundle, err := ioutil.ReadAll(fp)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
+	}
+
+	var rootCerts []*x509.Certificate
+	pemBlock, rawCertBundle := pem.Decode(rawCertBundle)
+	for pemBlock != nil {
+		if pemBlock.Type == "CERTIFICATE" {
+			cert, err := x509.ParseCertificate(pemBlock.Bytes)
+			if err != nil {
+				return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err)
+			}
+
+			rootCerts = append(rootCerts, cert)
+		}
+
+		pemBlock, rawCertBundle = pem.Decode(rawCertBundle)
+	}
+
+	if len(rootCerts) == 0 {
+		return nil, errors.New("token auth requires at least one token signing root certificate")
+	}
+
+	rootPool := x509.NewCertPool()
+	trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts))
+	for _, rootCert := range rootCerts {
+		rootPool.AddCert(rootCert)
+		pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey))
+		if err != nil {
+			return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err)
+		}
+		trustedKeys[pubKey.KeyID()] = pubKey
+	}
+
+	return &accessController{
+		realm:       config.realm,
+		issuer:      config.issuer,
+		service:     config.service,
+		rootCerts:   rootPool,
+		trustedKeys: trustedKeys,
+	}, nil
+}
+
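checkOptions above fixes the four configuration keys this backend needs. A hedged sketch of constructing the controller through the generic registry; every value below is a placeholder:

    package main

    import (
    	"log"

    	"github.com/docker/distribution/registry/auth"
    	_ "github.com/docker/distribution/registry/auth/token" // registers "token"
    )

    func main() {
    	ac, err := auth.GetAccessController("token", map[string]interface{}{
    		"realm":          "https://auth.example.com/token/",
    		"issuer":         "auth.example.com",
    		"service":        "registry.example.com",
    		"rootcertbundle": "/etc/registry/root.crt", // must hold at least one CERTIFICATE PEM block
    	})
    	if err != nil {
    		log.Fatal(err) // a missing/non-string option or unreadable bundle lands here
    	}
    	_ = ac
    }
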
+// Authorized handles checking whether the given request is authorized
+// for actions on resources described by the given access items.
+func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) {
+	challenge := &authChallenge{
+		realm:     ac.realm,
+		service:   ac.service,
+		accessSet: newAccessSet(accessItems...),
+	}
+
+	req, err := dcontext.GetRequest(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	parts := strings.Split(req.Header.Get("Authorization"), " ")
+
+	if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" {
+		challenge.err = ErrTokenRequired
+		return nil, challenge
+	}
+
+	rawToken := parts[1]
+
+	token, err := NewToken(rawToken)
+	if err != nil {
+		challenge.err = err
+		return nil, challenge
+	}
+
+	verifyOpts := VerifyOptions{
+		TrustedIssuers:    []string{ac.issuer},
+		AcceptedAudiences: []string{ac.service},
+		Roots:             ac.rootCerts,
+		TrustedKeys:       ac.trustedKeys,
+	}
+
+	if err = token.Verify(verifyOpts); err != nil {
+		challenge.err = err
+		return nil, challenge
+	}
+
+	accessSet := token.accessSet()
+	for _, access := range accessItems {
+		if !accessSet.contains(access) {
+			challenge.err = ErrInsufficientScope
+			return nil, challenge
+		}
+	}
+
+	ctx = auth.WithResources(ctx, token.resources())
+
+	return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil
+}
+
+// init handles registering the token auth backend.
+func init() {
+	auth.Register("token", auth.InitFunc(newAccessController))
+}
diff --git a/registry/auth/token/stringset.go b/registry/auth/token/stringset.go
new file mode 100644
index 00000000..1d04f104
--- /dev/null
+++ b/registry/auth/token/stringset.go
@@ -0,0 +1,35 @@
+package token
+
+// stringSet is a useful type for looking up strings.
+type stringSet map[string]struct{}
+
+// newStringSet creates a new stringSet with the given strings.
+func newStringSet(keys ...string) stringSet {
+	ss := make(stringSet, len(keys))
+	ss.add(keys...)
+	return ss
+}
+
+// add inserts the given keys into this stringSet.
+func (ss stringSet) add(keys ...string) {
+	for _, key := range keys {
+		ss[key] = struct{}{}
+	}
+}
+
+// contains returns whether the given key is in this stringSet.
+func (ss stringSet) contains(key string) bool {
+	_, ok := ss[key]
+	return ok
+}
+
+// keys returns a slice of all keys in this stringSet.
+func (ss stringSet) keys() []string {
+	keys := make([]string, 0, len(ss))
+
+	for key := range ss {
+		keys = append(keys, key)
+	}
+
+	return keys
+}
diff --git a/registry/auth/token/token.go b/registry/auth/token/token.go
new file mode 100644
index 00000000..7f87d496
--- /dev/null
+++ b/registry/auth/token/token.go
@@ -0,0 +1,378 @@
+package token
+
+import (
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/docker/libtrust"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/docker/distribution/registry/auth"
+)
+
+const (
+	// TokenSeparator is the value which separates the header, claims, and
+	// signature in the compact serialization of a JSON Web Token.
+	TokenSeparator = "."
+	// Leeway is the Duration that will be added to NBF and EXP claim
+	// checks to account for clock skew as per https://tools.ietf.org/html/rfc7519#section-4.1.5
+	Leeway = 60 * time.Second
+)
+
+// Errors used by token parsing and verification.
+var (
+	ErrMalformedToken = errors.New("malformed token")
+	ErrInvalidToken   = errors.New("invalid token")
+)
+
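A compact JWT is three TokenSeparator-joined base64url segments (header, claims, signature); NewToken below splits and decodes them before any verification happens. A standalone sketch of that first step using the standard library; the sample token is hand-assembled and unsigned, for illustration only:

    package main

    import (
    	"encoding/base64"
    	"fmt"
    	"strings"
    )

    func main() {
    	// header={"alg":"ES256","typ":"JWT"}, claims={"iss":"test-issuer"}, fake signature
    	raw := "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJ0ZXN0LWlzc3VlciJ9.c2ln"
    	parts := strings.Split(raw, ".")
    	if len(parts) != 3 {
    		panic("malformed token")
    	}
    	// JOSE base64url segments are unpadded, hence RawURLEncoding.
    	claims, err := base64.RawURLEncoding.DecodeString(parts[1])
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(claims)) // {"iss":"test-issuer"}
    }
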
+// ResourceActions stores allowed actions on a named and typed resource.
+type ResourceActions struct {
+	Type    string   `json:"type"`
+	Class   string   `json:"class,omitempty"`
+	Name    string   `json:"name"`
+	Actions []string `json:"actions"`
+}
+
+// ClaimSet describes the main section of a JSON Web Token.
+type ClaimSet struct {
+	// Public claims
+	Issuer     string `json:"iss"`
+	Subject    string `json:"sub"`
+	Audience   string `json:"aud"`
+	Expiration int64  `json:"exp"`
+	NotBefore  int64  `json:"nbf"`
+	IssuedAt   int64  `json:"iat"`
+	JWTID      string `json:"jti"`
+
+	// Private claims
+	Access []*ResourceActions `json:"access"`
+}
+
+// Header describes the header section of a JSON Web Token.
+type Header struct {
+	Type       string           `json:"typ"`
+	SigningAlg string           `json:"alg"`
+	KeyID      string           `json:"kid,omitempty"`
+	X5c        []string         `json:"x5c,omitempty"`
+	RawJWK     *json.RawMessage `json:"jwk,omitempty"`
+}
+
+// Token describes a JSON Web Token.
+type Token struct {
+	Raw       string
+	Header    *Header
+	Claims    *ClaimSet
+	Signature []byte
+}
+
+// VerifyOptions is used to specify
+// options when verifying a JSON Web Token.
+type VerifyOptions struct {
+	TrustedIssuers    []string
+	AcceptedAudiences []string
+	Roots             *x509.CertPool
+	TrustedKeys       map[string]libtrust.PublicKey
+}
+
+// NewToken parses the given raw token string
+// and constructs an unverified JSON Web Token.
+func NewToken(rawToken string) (*Token, error) {
+	parts := strings.Split(rawToken, TokenSeparator)
+	if len(parts) != 3 {
+		return nil, ErrMalformedToken
+	}
+
+	var (
+		rawHeader, rawClaims   = parts[0], parts[1]
+		headerJSON, claimsJSON []byte
+		err                    error
+	)
+
+	defer func() {
+		if err != nil {
+			log.Infof("error while unmarshalling raw token: %s", err)
+		}
+	}()
+
+	if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil {
+		err = fmt.Errorf("unable to decode header: %s", err)
+		return nil, ErrMalformedToken
+	}
+
+	if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil {
+		err = fmt.Errorf("unable to decode claims: %s", err)
+		return nil, ErrMalformedToken
+	}
+
+	token := new(Token)
+	token.Header = new(Header)
+	token.Claims = new(ClaimSet)
+
+	token.Raw = strings.Join(parts[:2], TokenSeparator)
+	if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil {
+		err = fmt.Errorf("unable to decode signature: %s", err)
+		return nil, ErrMalformedToken
+	}
+
+	if err = json.Unmarshal(headerJSON, token.Header); err != nil {
+		return nil, ErrMalformedToken
+	}
+
+	if err = json.Unmarshal(claimsJSON, token.Claims); err != nil {
+		return nil, ErrMalformedToken
+	}
+
+	return token, nil
+}
+
+// Verify attempts to verify this token using the given options.
+// Returns a nil error if the token is valid.
+func (t *Token) Verify(verifyOpts VerifyOptions) error {
+	// Verify that the Issuer claim is a trusted authority.
+	if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) {
+		log.Infof("token from untrusted issuer: %q", t.Claims.Issuer)
+		return ErrInvalidToken
+	}
+
+	// Verify that the Audience claim is allowed.
+	if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) {
+		log.Infof("token intended for another audience: %q", t.Claims.Audience)
+		return ErrInvalidToken
+	}
+
+	// Verify that the token is currently usable and not expired.
+	currentTime := time.Now()
+
+	ExpWithLeeway := time.Unix(t.Claims.Expiration, 0).Add(Leeway)
+	if currentTime.After(ExpWithLeeway) {
+		log.Infof("token not to be used after %s - currently %s", ExpWithLeeway, currentTime)
+		return ErrInvalidToken
+	}
+
+	NotBeforeWithLeeway := time.Unix(t.Claims.NotBefore, 0).Add(-Leeway)
+	if currentTime.Before(NotBeforeWithLeeway) {
+		log.Infof("token not to be used before %s - currently %s", NotBeforeWithLeeway, currentTime)
+		return ErrInvalidToken
+	}
+
+	// Verify the token signature.
+	if len(t.Signature) == 0 {
+		log.Info("token has no signature")
+		return ErrInvalidToken
+	}
+
+	// Verify that the signing key is trusted.
+	signingKey, err := t.VerifySigningKey(verifyOpts)
+	if err != nil {
+		log.Info(err)
+		return ErrInvalidToken
+	}
+
+	// Finally, verify the signature of the token using the key which signed it.
+	if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
+		log.Infof("unable to verify token signature: %s", err)
+		return ErrInvalidToken
+	}
+
+	return nil
+}
+
+// VerifySigningKey attempts to get the key which was used to sign this token.
+// The token header should contain one of these three fields:
+//	`x5c` - The x509 certificate chain for the signing key. Needs to be
+//		verified.
+//	`jwk` - The JSON Web Key representation of the signing key.
+//		May contain its own `x5c` field which needs to be verified.
+//	`kid` - The unique identifier for the key. This library interprets it
+//		as a libtrust fingerprint. The key itself can be looked up in
+//		the trustedKeys field of the given verify options.
+// Each of these methods is tried in that order of preference until the
+// signing key is found or an error is returned.
+func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {
+	// First attempt to get an x509 certificate chain from the header.
+	var (
+		x5c    = t.Header.X5c
+		rawJWK = t.Header.RawJWK
+		keyID  = t.Header.KeyID
+	)
+
+	switch {
+	case len(x5c) > 0:
+		signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots)
+	case rawJWK != nil:
+		signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts)
+	case len(keyID) > 0:
+		signingKey = verifyOpts.TrustedKeys[keyID]
+		if signingKey == nil {
+			err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID)
+		}
+	default:
+		err = errors.New("unable to get token signing key")
+	}
+
+	return
+}
+
+func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) {
+	if len(x5c) == 0 {
+		return nil, errors.New("empty x509 certificate chain")
+	}
+
+	// Ensure the first element is encoded correctly.
+	leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0])
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode leaf certificate: %s", err)
+	}
+
+	// And that it is a valid x509 certificate.
+	leafCert, err := x509.ParseCertificate(leafCertDer)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse leaf certificate: %s", err)
+	}
+
+	// The rest of the certificate chain is made up of intermediate certificates.
+	intermediates := x509.NewCertPool()
+	for i := 1; i < len(x5c); i++ {
+		intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i])
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err)
+		}
+
+		intermediateCert, err := x509.ParseCertificate(intermediateCertDer)
+		if err != nil {
+			return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err)
+		}
+
+		intermediates.AddCert(intermediateCert)
+	}
+
+	verifyOpts := x509.VerifyOptions{
+		Intermediates: intermediates,
+		Roots:         roots,
+		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+	}
+
+	// TODO: this call returns certificate chains which we ignore for now, but
+	// we should check them for revocations if we have the ability later.
+	if _, err = leafCert.Verify(verifyOpts); err != nil {
+		return nil, fmt.Errorf("unable to verify certificate chain: %s", err)
+	}
+
+	// Get the public key from the leaf certificate.
+	leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey)
+	if !ok {
+		return nil, errors.New("unable to get leaf cert public key value")
+	}
+
+	leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err)
+	}
+
+	return
+}
+
+func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) {
+	pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK))
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode raw JWK value: %s", err)
+	}
+
+	// Check to see if the key includes a certificate chain.
+	x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{})
+	if !ok {
+		// The JWK should be one of the trusted root keys.
+		if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted {
+			return nil, errors.New("untrusted JWK with no certificate chain")
+		}
+
+		// The JWK is one of the trusted keys.
+		return
+	}
+
+	// Ensure each item in the chain is of the correct type.
+	x5c := make([]string, len(x5cVal))
+	for i, val := range x5cVal {
+		certString, ok := val.(string)
+		if !ok || len(certString) == 0 {
+			return nil, errors.New("malformed certificate chain")
+		}
+		x5c[i] = certString
+	}
+
+	// Ensure that the x509 certificate chain can
+	// be verified up to one of our trusted roots.
+	leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots)
+	if err != nil {
+		return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err)
+	}
+
+	// Verify that the public key in the leaf cert *is* the signing key.
+	if pubKey.KeyID() != leafKey.KeyID() {
+		return nil, errors.New("leaf certificate public key ID does not match JWK key ID")
+	}
+
+	return
+}
+
+// accessSet returns a set of actions available for the resource
+// actions listed in the `access` section of this token.
+func (t *Token) accessSet() accessSet {
+	if t.Claims == nil {
+		return nil
+	}
+
+	accessSet := make(accessSet, len(t.Claims.Access))
+
+	for _, resourceActions := range t.Claims.Access {
+		resource := auth.Resource{
+			Type: resourceActions.Type,
+			Name: resourceActions.Name,
+		}
+
+		set, exists := accessSet[resource]
+		if !exists {
+			set = newActionSet()
+			accessSet[resource] = set
+		}
+
+		for _, action := range resourceActions.Actions {
+			set.add(action)
+		}
+	}
+
+	return accessSet
+}
+
+func (t *Token) resources() []auth.Resource {
+	if t.Claims == nil {
+		return nil
+	}
+
+	resourceSet := map[auth.Resource]struct{}{}
+	for _, resourceActions := range t.Claims.Access {
+		resource := auth.Resource{
+			Type:  resourceActions.Type,
+			Class: resourceActions.Class,
+			Name:  resourceActions.Name,
+		}
+		resourceSet[resource] = struct{}{}
+	}
+
+	resources := make([]auth.Resource, 0, len(resourceSet))
+	for resource := range resourceSet {
+		resources = append(resources, resource)
+	}
+
+	return resources
+}
+
+func (t *Token) compactRaw() string {
+	return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature))
+}
diff --git a/registry/auth/token/token_test.go b/registry/auth/token/token_test.go
new file mode 100644
index 00000000..03dce6fa
--- /dev/null
+++ b/registry/auth/token/token_test.go
@@ -0,0 +1,531 @@
+package token
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/auth"
+	"github.com/docker/libtrust"
+)
+
+func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) {
+	keys := make([]libtrust.PrivateKey, 0, numKeys)
+
+	for i := 0; i < numKeys; i++ {
+		key, err := libtrust.GenerateECP256PrivateKey()
+		if err != nil {
+			return nil, err
+		}
+		keys = append(keys, key)
+	}
+
+	return keys, nil
+}
+
+func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) {
+	if depth == 0 {
+		// Don't need to build a chain.
+		return rootKey, nil
+	}
+
+	var (
+		x5c       = make([]string, depth)
+		parentKey = rootKey
+		key       libtrust.PrivateKey
+		cert      *x509.Certificate
+		err       error
+	)
+
+	for depth > 0 {
+		if key, err = libtrust.GenerateECP256PrivateKey(); err != nil {
+			return nil, err
+		}
+
+		if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil {
+			return nil, err
+		}
+
+		depth--
+		x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw)
+		parentKey = key
+	}
+
+	key.AddExtendedField("x5c", x5c)
+
+	return key, nil
+}
+
+func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) {
+	certs := make([]*x509.Certificate, 0, len(rootKeys))
+
+	for _, key := range rootKeys {
+		cert, err := libtrust.GenerateCACert(key, key)
+		if err != nil {
+			return nil, err
+		}
+		certs = append(certs, cert)
+	}
+
+	return certs, nil
+}
+
+func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey {
+	trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys))
+
+	for _, key := range rootKeys {
+		trustedKeys[key.KeyID()] = key.PublicKey()
+	}
+
+	return trustedKeys
+}
+
+func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int, now time.Time, exp time.Time) (*Token, error) {
+	signingKey, err := makeSigningKeyWithChain(rootKey, depth)
+	if err != nil {
+		return nil, fmt.Errorf("unable to make signing key with chain: %s", err)
+	}
+
+	var rawJWK json.RawMessage
+	rawJWK, err = signingKey.PublicKey().MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err)
+	}
+
+	joseHeader := &Header{
+		Type:       "JWT",
+		SigningAlg: "ES256",
+		RawJWK:     &rawJWK,
+	}
+
+	randomBytes := make([]byte, 15)
+	if _, err = rand.Read(randomBytes); err != nil {
+		return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err)
+	}
+
+	claimSet := &ClaimSet{
+		Issuer:     issuer,
+		Subject:    "foo",
+		Audience:   audience,
+		Expiration: exp.Unix(),
+		NotBefore:  now.Unix(),
+		IssuedAt:   now.Unix(),
+		JWTID:      base64.URLEncoding.EncodeToString(randomBytes),
+		Access:     access,
+	}
+
+	var joseHeaderBytes, claimSetBytes []byte
+
+	if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil {
+		return nil, fmt.Errorf("unable to marshal jose header: %s", err)
+	}
+	if claimSetBytes, err = json.Marshal(claimSet); err != nil {
+		return nil, fmt.Errorf("unable to marshal claim set: %s", err)
+	}
+
+	encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes)
+	encodedClaimSet := joseBase64UrlEncode(claimSetBytes)
+	encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet)
+
+	var signatureBytes []byte
+	if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil {
+		return nil, fmt.Errorf("unable to sign jwt payload: %s", err)
+	}
+
+	signature := joseBase64UrlEncode(signatureBytes)
+	tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature)
+
+	return NewToken(tokenString)
+}
+
+// This test makes 4 tokens with a varying number of intermediate
+// certificates ranging from no intermediate chain to a length of 3
+// intermediates.
+func TestTokenVerify(t *testing.T) {
+	var (
+		numTokens = 4
+		issuer    = "test-issuer"
+		audience  = "test-audience"
+		access    = []*ResourceActions{
+			{
+				Type:    "repository",
+				Name:    "foo/bar",
+				Actions: []string{"pull", "push"},
+			},
+		}
+	)
+
+	rootKeys, err := makeRootKeys(numTokens)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rootCerts, err := makeRootCerts(rootKeys)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rootPool := x509.NewCertPool()
+	for _, rootCert := range rootCerts {
+		rootPool.AddCert(rootCert)
+	}
+
+	trustedKeys := makeTrustedKeyMap(rootKeys)
+
+	tokens := make([]*Token, 0, numTokens)
+
+	for i := 0; i < numTokens; i++ {
+		token, err := makeTestToken(issuer, audience, access, rootKeys[i], i, time.Now(), time.Now().Add(5*time.Minute))
+		if err != nil {
+			t.Fatal(err)
+		}
+		tokens = append(tokens, token)
+	}
+
+	verifyOps := VerifyOptions{
+		TrustedIssuers:    []string{issuer},
+		AcceptedAudiences: []string{audience},
+		Roots:             rootPool,
+		TrustedKeys:       trustedKeys,
+	}
+
+	for _, token := range tokens {
+		if err := token.Verify(verifyOps); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+// This tests that tokens with nbf or exp claims within
+// the defined leeway are not rejected.
+func TestLeeway(t *testing.T) {
+	var (
+		issuer   = "test-issuer"
+		audience = "test-audience"
+		access   = []*ResourceActions{
+			{
+				Type:    "repository",
+				Name:    "foo/bar",
+				Actions: []string{"pull", "push"},
+			},
+		}
+	)
+
+	rootKeys, err := makeRootKeys(1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	trustedKeys := makeTrustedKeyMap(rootKeys)
+
+	verifyOps := VerifyOptions{
+		TrustedIssuers:    []string{issuer},
+		AcceptedAudiences: []string{audience},
+		Roots:             nil,
+		TrustedKeys:       trustedKeys,
+	}
+
+	// nbf verification should pass within leeway
+	futureNow := time.Now().Add(time.Duration(5) * time.Second)
+	token, err := makeTestToken(issuer, audience, access, rootKeys[0], 0, futureNow, futureNow.Add(5*time.Minute))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := token.Verify(verifyOps); err != nil {
+		t.Fatal(err)
+	}
+
+	// nbf verification should fail with a skew larger than leeway
+	futureNow = time.Now().Add(time.Duration(61) * time.Second)
+	token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, futureNow, futureNow.Add(5*time.Minute))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = token.Verify(verifyOps); err == nil {
+		t.Fatal("Verification should fail for token with nbf in the future outside leeway")
+	}
+
+	// exp verification should pass within leeway
+	token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, time.Now(), time.Now().Add(-59*time.Second))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = token.Verify(verifyOps); err != nil {
+		t.Fatal(err)
+	}
+
+	// exp verification should fail with a skew larger than leeway
+	token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, time.Now(), time.Now().Add(-60*time.Second))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = token.Verify(verifyOps); err == nil {
+		t.Fatal("Verification should fail for token with exp in the past outside leeway")
+	}
+}
+
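The acceptance window Verify applies (see token.go above) can be summarized as [nbf-Leeway, exp+Leeway]. A small standalone sketch of that arithmetic; the claim values are made up for illustration:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const leeway = 60 * time.Second // mirrors token.Leeway
    	now := time.Now()

    	// Hypothetical claims: token valid from now until now+5m.
    	nbf := now.Unix()
    	exp := now.Add(5 * time.Minute).Unix()

    	notBefore := time.Unix(nbf, 0).Add(-leeway) // accepted from nbf-60s
    	notAfter := time.Unix(exp, 0).Add(leeway)   // accepted until exp+60s

    	fmt.Println(now.After(notBefore) && now.Before(notAfter)) // true
    }
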
return "", err + } + } + + return tempFile.Name(), nil +} + +// TestAccessController tests complete integration of the token auth package. +// It starts by mocking the options for a token auth accessController which +// it creates. It then tries a few mock requests: +// - don't supply a token; should error with challenge +// - supply an invalid token; should error with challenge +// - supply a token with insufficient access; should error with challenge +// - supply a valid token; should not error +func TestAccessController(t *testing.T) { + // Make 2 keys; only the first is to be a trusted root key. + rootKeys, err := makeRootKeys(2) + if err != nil { + t.Fatal(err) + } + + rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) + if err != nil { + t.Fatal(err) + } + defer os.Remove(rootCertBundleFilename) + + realm := "https://auth.example.com/token/" + issuer := "test-issuer.example.com" + service := "test-service.example.com" + + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootcertbundle": rootCertBundleFilename, + } + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal(err) + } + + // 1. Make a mock http.Request with no token. + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + + testAccess := auth.Access{ + Resource: auth.Resource{ + Type: "foo", + Name: "bar", + }, + Action: "baz", + } + + ctx := context.WithRequest(context.Background(), req) + authCtx, err := accessController.Authorized(ctx, testAccess) + challenge, ok := err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrTokenRequired.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 2. Supply an invalid token. + token, err := makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[1], 1, time.Now(), time.Now().Add(5*time.Minute), // Everything is valid except the key which signed it. + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInvalidToken.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 3. Supply a token with insufficient access. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{}, // No access specified. 
+ rootKeys[0], 1, time.Now(), time.Now().Add(5*time.Minute), + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInsufficientScope.Error() { + t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 4. Supply the token we need, or deserve, or whatever. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[0], 1, time.Now(), time.Now().Add(5*time.Minute), + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + if err != nil { + t.Fatalf("accessController returned unexpected error: %s", err) + } + + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) + if !ok { + t.Fatal("token accessController did not set auth.user context") + } + + if userInfo.Name != "foo" { + t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) + } + + // 5. Supply a token with full admin rights, which is represented as "*". + token, err = makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{"*"}, + }}, + rootKeys[0], 1, time.Now(), time.Now().Add(5*time.Minute), + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + _, err = accessController.Authorized(ctx, testAccess) + if err != nil { + t.Fatalf("accessController returned unexpected error: %s", err) + } +} + +// This tests that newAccessController can handle PEM blocks in the certificate +// file other than certificates, for example a private key.
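+// A bundle that also contains a private key should still contribute all of +// its certificates; non-certificate blocks are skipped rather than rejected.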
+func TestNewAccessControllerPemBlock(t *testing.T) { + rootKeys, err := makeRootKeys(2) + if err != nil { + t.Fatal(err) + } + + rootCertBundleFilename, err := writeTempRootCerts(rootKeys) + if err != nil { + t.Fatal(err) + } + defer os.Remove(rootCertBundleFilename) + + // Add something other than a certificate to the rootcertbundle + file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + t.Fatal(err) + } + keyBlock, err := rootKeys[0].PEMBlock() + if err != nil { + t.Fatal(err) + } + err = pem.Encode(file, keyBlock) + if err != nil { + t.Fatal(err) + } + err = file.Close() + if err != nil { + t.Fatal(err) + } + + realm := "https://auth.example.com/token/" + issuer := "test-issuer.example.com" + service := "test-service.example.com" + + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootcertbundle": rootCertBundleFilename, + } + + ac, err := newAccessController(options) + if err != nil { + t.Fatal(err) + } + + if len(ac.(*accessController).rootCerts.Subjects()) != 2 { + t.Fatal("accessController has the wrong number of certificates") + } +} diff --git a/registry/auth/token/util.go b/registry/auth/token/util.go new file mode 100644 index 00000000..d7f95be4 --- /dev/null +++ b/registry/auth/token/util.go @@ -0,0 +1,58 @@ +package token + +import ( + "encoding/base64" + "errors" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +// actionSet is a special type of stringSet. +type actionSet struct { + stringSet +} + +func newActionSet(actions ...string) actionSet { + return actionSet{newStringSet(actions...)} +} + +// contains calls stringSet.contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// contains returns true if q is found in ss. +func contains(ss []string, q string) bool { + for _, s := range ss { + if s == q { + return true + } + } + + return false +} diff --git a/registry/client/auth/api_version.go b/registry/client/auth/api_version.go new file mode 100644 index 00000000..7d8f1d95 --- /dev/null +++ b/registry/client/auth/api_version.go @@ -0,0 +1,58 @@ +package auth + +import ( + "net/http" + "strings" +) + +// APIVersion represents a version of an API including its +// type and version number.
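+// For example, a header value of "registry/2.0" corresponds to +// APIVersion{Type: "registry", Version: "2.0"}.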
+type APIVersion struct { + // Type refers to the name of a specific API specification + // such as "registry" + Type string + + // Version is the version of the API specification implemented. + // This may omit the revision number and only include + // the major and minor version, such as "2.0" + Version string +} + +// String returns the string formatted API Version +func (v APIVersion) String() string { + return v.Type + "/" + v.Version +} + +// APIVersions gets the API versions out of an HTTP response using the provided +// version header as the key for the HTTP header. +func APIVersions(resp *http.Response, versionHeader string) []APIVersion { + versions := []APIVersion{} + if versionHeader != "" { + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { + for _, version := range strings.Fields(supportedVersions) { + versions = append(versions, ParseAPIVersion(version)) + } + } + } + return versions +} + +// ParseAPIVersion parses an API version string into an APIVersion +// Format (Expected, not enforced): +// API version string = <API type> '/' <API version> +// API type = [a-z][a-z0-9]* +// API version = [0-9]+(\.[0-9]+)? +// TODO(dmcgowan): Enforce format, add error condition, remove unknown type +func ParseAPIVersion(versionStr string) APIVersion { + idx := strings.IndexRune(versionStr, '/') + if idx == -1 { + return APIVersion{ + Type: "unknown", + Version: versionStr, + } + } + return APIVersion{ + Type: strings.ToLower(versionStr[:idx]), + Version: versionStr[idx+1:], + } +} diff --git a/registry/client/auth/challenge/addr.go b/registry/client/auth/challenge/addr.go new file mode 100644 index 00000000..2c3ebe16 --- /dev/null +++ b/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +// FROM: http://golang.org/src/net/http/transport.go +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/registry/client/auth/challenge/authchallenge.go b/registry/client/auth/challenge/authchallenge.go new file mode 100644 index 00000000..6e3f1ccc --- /dev/null +++ b/registry/client/auth/challenge/authchallenge.go @@ -0,0 +1,237 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +// Manager manages the challenges for endpoints. +// The challenges are pulled out of HTTP responses. Only +// responses which expect challenges should be added to +// the manager, since a non-unauthorized request will be +// viewed as not requiring challenges. +type Manager interface { + // GetChallenges returns the challenges for the given + // endpoint URL.
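+ // Implementations such as the simple manager normalize the endpoint + // (lowercased host, explicit port) before looking it up.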
+ GetChallenges(endpoint url.URL) ([]Challenge, error) + + // AddResponse adds the response to the challenge + // manager. The challenges will be parsed out of + // the WWW-Authenticate headers and added to the + // URL which produced the response. If the + // response was authorized, any challenges for the + // endpoint will be cleared. + AddResponse(resp *http.Response) error +} + +// NewSimpleManager returns an instance of +// Manager which only maps endpoints to challenges +// based on the responses which have been added to the +// manager. The simple manager will make no attempt to +// perform requests on the endpoints or cache the responses +// to a backend. +func NewSimpleManager() Manager { + return &simpleManager{ + Challenges: make(map[string][]Challenge), + } +} + +type simpleManager struct { + sync.RWMutex + Challenges map[string][]Challenge +} + +func normalizeURL(endpoint *url.URL) { + endpoint.Host = strings.ToLower(endpoint.Host) + endpoint.Host = canonicalAddr(endpoint) +} + +func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + normalizeURL(&endpoint) + + m.RLock() + defer m.RUnlock() + challenges := m.Challenges[endpoint.String()] + return challenges, nil +} + +func (m *simpleManager) AddResponse(resp *http.Response) error { + challenges := ResponseChallenges(resp) + if resp.Request == nil { + return fmt.Errorf("missing request reference") + } + urlCopy := url.URL{ + Path: resp.Request.URL.Path, + Host: resp.Request.URL.Host, + Scheme: resp.Request.URL.Scheme, + } + normalizeURL(&urlCopy) + + m.Lock() + defer m.Unlock() + m.Challenges[urlCopy.String()] = challenges + return nil +} + +// Octet types from RFC 2616. +type octetType byte + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = <any 8-bit sequence of data> + // CHAR = <any US-ASCII character (octets 0 - 127)> + // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> + // CR = <US-ASCII CR, carriage return (13)> + // LF = <US-ASCII LF, linefeed (10)> + // SP = <US-ASCII SP, space (32)> + // HT = <US-ASCII HT, horizontal-tab (9)> + // <"> = <US-ASCII double-quote mark (34)> + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = <any OCTET except CTLs, but including LWS> + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1*<any CHAR except CTLs or separators> + // qdtext = <any TEXT except <">> + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// ResponseChallenges returns a list of authorization challenges +// for the given http Response. Challenges are only checked if +// the response status code was a 401. +func ResponseChallenges(resp *http.Response) []Challenge { + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object.
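+ // A response may carry several WWW-Authenticate headers; each + // parseable header value contributes one challenge.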
+ return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/registry/client/auth/challenge/authchallenge_test.go b/registry/client/auth/challenge/authchallenge_test.go new file mode 100644 index 00000000..d4986b39 --- /dev/null +++ b/registry/client/auth/challenge/authchallenge_test.go @@ -0,0 +1,127 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "testing" +) + +func TestAuthChallengeParse(t *testing.T) { + header := http.Header{} + header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) + + challenges := parseAuthHeader(header) + if len(challenges) != 1 { + t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) + } + challenge := challenges[0] + + if expected := "bearer"; challenge.Scheme != expected { + t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) + } + + if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) + } + + if expected := "registry.example.com"; challenge.Parameters["service"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) + } + + if expected := "fun"; challenge.Parameters["other"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) + } + + if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected) + } + +} + +func TestAuthChallengeNormalization(t *testing.T) { + 
testAuthChallengeNormalization(t, "reg.EXAMPLE.com") + testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com") + testAuthChallengeNormalization(t, "reg.example.com:80") + testAuthChallengeConcurrent(t, "reg.EXAMPLE.com") +} + +func testAuthChallengeNormalization(t *testing.T, host string) { + + scm := NewSimpleManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) + + err = scm.AddResponse(resp) + if err != nil { + t.Fatal(err) + } + + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + lowered.Host = canonicalAddr(&lowered) + c, err := scm.GetChallenges(lowered) + if err != nil { + t.Fatal(err) + } + + if len(c) == 0 { + t.Fatal("Expected challenge for lower-cased-host URL") + } +} + +func testAuthChallengeConcurrent(t *testing.T, host string) { + + scm := NewSimpleManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) + var s sync.WaitGroup + s.Add(2) + go func() { + defer s.Done() + for i := 0; i < 200; i++ { + err = scm.AddResponse(resp) + if err != nil { + t.Error(err) + } + } + }() + go func() { + defer s.Done() + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + for k := 0; k < 200; k++ { + _, err := scm.GetChallenges(lowered) + if err != nil { + t.Error(err) + } + } + }() + s.Wait() +} diff --git a/registry/client/auth/session.go b/registry/client/auth/session.go new file mode 100644 index 00000000..db86c9b0 --- /dev/null +++ b/registry/client/auth/session.go @@ -0,0 +1,532 @@ +package auth + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" +) + +var ( + // ErrNoBasicAuthCredentials is returned if a request can't be authorized with + // basic auth due to lack of credentials. + ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") + + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +const defaultClientID = "registry-client" + +// AuthenticationHandler is an interface for authorizing a request using +// params from a "WWW-Authenticate" header for a single scheme. +type AuthenticationHandler interface { + // Scheme returns the scheme as expected from the "WWW-Authenticate" header. + Scheme() string + + // AuthorizeRequest adds the authorization header to a request (if needed) + // using the parameters from the "WWW-Authenticate" header. The parameter + // values depend on the scheme.
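+ // For example, a bearer handler uses the "realm" and "service" + // parameters to locate and query the token server.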
+ AuthorizeRequest(req *http.Request, params map[string]string) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) +} + +// NewAuthorizer creates an authorizer which can handle multiple authentication +// schemes. The handlers are tried in order, the higher priority authentication +// methods should be first. The manager holds the challenges for +// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). +func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { + return &endpointAuthorizer{ + challenges: manager, + handlers: handlers, + } +} + +type endpointAuthorizer struct { + challenges challenge.Manager + handlers []AuthenticationHandler + transport http.RoundTripper +} + +func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { + pingPath := req.URL.Path + if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { + pingPath = pingPath[:v2Root+4] + } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { + pingPath = pingPath[:v1Root] + "/v2/" + } else { + return nil + } + + ping := url.URL{ + Host: req.URL.Host, + Scheme: req.URL.Scheme, + Path: pingPath, + } + + challenges, err := ea.challenges.GetChallenges(ping) + if err != nil { + return err + } + + if len(challenges) > 0 { + for _, handler := range ea.handlers { + for _, c := range challenges { + if c.Scheme != handler.Scheme() { + continue + } + if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { + return err + } + } + } + } + + return nil +} + +// This is the minimum duration a token can last (in seconds). +// A token must not live less than 60 seconds because older versions +// of the Docker client didn't read their expiration from the token +// response and assumed 60 seconds. So to remain compatible with +// those implementations, a token must live at least this long. +const minimumTokenLifetimeSeconds = 60 + +// Private interface for time used by this package to enable tests to provide their own implementation. +type clock interface { + Now() time.Time +} + +type tokenHandler struct { + header http.Header + creds CredentialStore + transport http.RoundTripper + clock clock + + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time + + logger Logger +} + +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string +} + +// RepositoryScope represents a token scope for access +// to a repository. +type RepositoryScope struct { + Repository string + Class string + Actions []string +} + +// String returns the string representation of the repository scope +// using the scope grammar +func (rs RepositoryScope) String() string { + repoType := "repository" + // Keep existing format for image class to maintain backwards compatibility + // with authorization servers which do not support the expanded grammar.
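+ // e.g. a Class of "plugin" yields "repository(plugin):<name>:<actions>".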
+ if rs.Class != "" && rs.Class != "image" { + repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) + } + return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) +} + +// RegistryScope represents a token scope for access +// to resources in the registry. +type RegistryScope struct { + Name string + Actions []string +} + +// String returns the string representation of the user +// using the scope grammar +func (rs RegistryScope) String() string { + return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) +} + +// Logger defines the injectable logging interface, used on TokenHandlers. +type Logger interface { + Debugf(format string, args ...interface{}) +} + +func logDebugf(logger Logger, format string, args ...interface{}) { + if logger == nil { + return + } + logger.Debugf(format, args...) +} + +// TokenHandlerOptions is used to configure a new token handler +type TokenHandlerOptions struct { + Transport http.RoundTripper + Credentials CredentialStore + + OfflineAccess bool + ForceOAuth bool + ClientID string + Scopes []Scope + Logger Logger +} + +// An implementation of clock for providing real time data. +type realClock struct{} + +// Now implements clock +func (realClock) Now() time.Time { return time.Now() } + +// NewTokenHandler creates a new AuthenicationHandler which supports +// fetching tokens from a remote token server. +func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { + // Create options... + return NewTokenHandlerWithOptions(TokenHandlerOptions{ + Transport: transport, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: scope, + Actions: actions, + }, + }, + }) +} + +// NewTokenHandlerWithOptions creates a new token handler using the provided +// options structure. +func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { + handler := &tokenHandler{ + transport: options.Transport, + creds: options.Credentials, + offlineAccess: options.OfflineAccess, + forceOAuth: options.ForceOAuth, + clientID: options.ClientID, + scopes: options.Scopes, + clock: realClock{}, + logger: options.Logger, + } + + return handler +} + +func (th *tokenHandler) client() *http.Client { + return &http.Client{ + Transport: th.transport, + Timeout: 15 * time.Second, + } +} + +func (th *tokenHandler) Scheme() string { + return "bearer" +} + +func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + var additionalScopes []string + if fromParam := req.URL.Query().Get("from"); fromParam != "" { + additionalScopes = append(additionalScopes, RepositoryScope{ + Repository: fromParam, + Actions: []string{"pull"}, + }.String()) + } + + token, err := th.getToken(params, additionalScopes...) 
+ if err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + return nil +} + +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { + th.tokenLock.Lock() + defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } + var addedScopes bool + for _, scope := range additionalScopes { + if hasScope(scopes, scope) { + continue + } + scopes = append(scopes, scope) + addedScopes = true + } + + now := th.clock.Now() + if now.After(th.tokenExpiration) || addedScopes { + token, expiration, err := th.fetchToken(params, scopes) + if err != nil { + return "", err + } + + // do not update cache for added scope tokens + if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } + + return token, nil + } + + return th.tokenCache, nil +} + +func hasScope(scopes []string, scope string) bool { + for _, s := range scopes { + if s == scope { + return true + } + } + return false +} + +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) + + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = defaultClientID + } + form.Set("client_id", clientID) + + if refreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", time.Time{}, fmt.Errorf("no supported grant type") + } + + resp, err := th.client().PostForm(realm.String(), form) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. 
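+ // Falling back to the local clock keeps the expiration arithmetic below meaningful.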
+ tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { + + req, err := http.NewRequest("GET", realm.String(), nil) + if err != nil { + return "", time.Time{}, err + } + + reqParams := req.URL.Query() + + if service != "" { + reqParams.Add("service", service) + } + + for _, scope := range scopes { + reqParams.Add("scope", scope) + } + + if th.offlineAccess { + reqParams.Add("offline_token", "true") + clientID := th.clientID + if clientID == "" { + clientID = defaultClientID + } + reqParams.Add("client_id", clientID) + } + + if th.creds != nil { + username, password := th.creds.Basic(realm) + if username != "" && password != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := th.client().Do(req) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr getTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && th.creds != nil { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return "", time.Time{}, ErrNoToken + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { + realm, ok := params["realm"] + if !ok { + return "", time.Time{}, errors.New("no realm specified for token auth challenge") + } + + // TODO(dmcgowan): Handle empty scheme and relative realm + realmURL, err := url.Parse(realm) + if err != nil { + return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + service := params["service"] + + var refreshToken string + + if th.creds != nil { + refreshToken = th.creds.RefreshToken(realmURL, service) + } + + if refreshToken != "" || th.forceOAuth { + return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) + } + + return th.fetchTokenWithBasicAuth(realmURL, service, scopes) +} + +type basicHandler struct { + creds CredentialStore +} + +// NewBasicHandler creates a new authentication handler which adds +// basic authentication credentials to a request.
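+// The handler answers only the "basic" scheme and returns +// ErrNoBasicAuthCredentials when the store has no entry for the request URL.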
+func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return ErrNoBasicAuthCredentials +} diff --git a/registry/client/auth/session_test.go b/registry/client/auth/session_test.go new file mode 100644 index 00000000..4f54c75c --- /dev/null +++ b/registry/client/auth/session_test.go @@ -0,0 +1,866 @@ +package auth + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/testutil" +) + +// An implementation of clock for providing fake time data. +type fakeClock struct { + current time.Time +} + +// Now implements clock +func (fc *fakeClock) Now() time.Time { return fc.current } + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +type testAuthenticationWrapper struct { + headers http.Header + authCheck func(string) bool + next http.Handler +} + +func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + if auth == "" || !w.authCheck(auth) { + h := rw.Header() + for k, values := range w.headers { + h[k] = values + } + rw.WriteHeader(http.StatusUnauthorized) + return + } + w.next.ServeHTTP(rw, r) +} + +func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { + h := testutil.NewHandler(rrm) + wrapper := &testAuthenticationWrapper{ + + headers: http.Header(map[string][]string{ + "X-API-Version": {"registry/2.0"}, + "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, + "WWW-Authenticate": {authenticate}, + }), + authCheck: authCheck, + next: h, + } + + s := httptest.NewServer(wrapper) + return s.URL, s.Close +} + +// ping pings the provided endpoint to determine its required authorization challenges. +// If a version header is provided, the versions will be returned. 
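+// As a test helper, it also primes the challenge manager with the server's +// WWW-Authenticate response as a side effect.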
+func ping(manager challenge.Manager, endpoint, versionHeader string) ([]APIVersion, error) { + resp, err := http.Get(endpoint) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return nil, err + } + + return APIVersions(resp, versionHeader), err +} + +type testCredentialStore struct { + username string + password string + refreshTokens map[string]string +} + +func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { + return tcs.username, tcs.password +} + +func (tcs *testCredentialStore) RefreshToken(u *url.URL, service string) string { + return tcs.refreshTokens[service] +} + +func (tcs *testCredentialStore) SetRefreshToken(u *url.URL, service string, token string) { + if tcs.refreshTokens != nil { + tcs.refreshTokens[service] = token + } +} + +func TestEndpointAuthorizeToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"badtoken"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + e2, c2 := testServerWithAuth(m, authenicate, validCheck) + defer c2() + + challengeManager2 := challenge.NewSimpleManager() + versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 3 { + t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: 
%#v, expected %#v", versions[0], check) + } + if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) + } + if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) + } + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func TestEndpointAuthorizeRefreshToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + refreshToken1 := "0123456790abcdef" + refreshToken2 := "0123456790fedcba" + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), + }, + }, + { + // In the future this test may fail and require using basic auth to get a different refresh token + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken2)), + }, + }, + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"badtoken","refresh_token":"%s"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + creds := 
&testCredentialStore{ + refreshTokens: map[string]string{ + service: refreshToken1, + }, + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, creds, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + // Try with refresh token setting + e2, c2 := testServerWithAuth(m, authenicate, validCheck) + defer c2() + + challengeManager2 := challenge.NewSimpleManager() + versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } + + if creds.refreshTokens[service] != refreshToken2 { + t.Fatalf("Refresh token not set after change") + } + + // Try with bad token + e3, c3 := testServerWithAuth(m, authenicate, validCheck) + defer c3() + + challengeManager3 := challenge.NewSimpleManager() + versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport3 := transport.NewTransport(nil, NewAuthorizer(challengeManager3, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client3 := &http.Client{Transport: transport3} + + req, _ = http.NewRequest("GET", e3+"/v2/hello", nil) + resp, err = client3.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func TestEndpointAuthorizeV2RefreshToken(t *testing.T) { + service := "localhost.localdomain" + scope1 := "registry:catalog:search" + refreshToken1 := "0123456790abcdef" + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v1/search", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + 
}, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + tho := TokenHandlerOptions{ + Credentials: &testCredentialStore{ + refreshTokens: map[string]string{ + service: refreshToken1, + }, + }, + Scopes: []Scope{ + RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + }, + } + + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandlerWithOptions(tho))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v1/search", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func TestEndpointAuthorizeTokenBasic(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken"}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + basicCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, 
http.StatusAccepted) + } +} + +func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + clock := &fakeClock{current: time.Now()} + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. 
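+ // With a 1000s step and the 3001s expiry served above, all four + // iterations reuse the token minted on the first request.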
+ timeIncrement := 1000 * time.Second + for i := 0; i < 4; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + +func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + // This test sets things up such that the token was issued one increment + // earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn. + // This will mean that the token expires after 3 increments instead of 4. + clock := &fakeClock{current: time.Now()} + timeIncrement := 1000 * time.Second + firstIssuedAt := clock.Now() + clock.current = clock.current.Add(timeIncrement) + secondIssuedAt := clock.current.Add(2 * timeIncrement) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, 
+			},
+		},
+	})
+
+	authenticate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service)
+	bearerCheck := func(a string) bool {
+		return a == "Bearer statictoken"
+	}
+	e, c := testServerWithAuth(m, authenticate2, bearerCheck)
+	defer c()
+
+	creds := &testCredentialStore{
+		username: username,
+		password: password,
+	}
+
+	challengeManager := challenge.NewSimpleManager()
+	_, err := ping(challengeManager, e+"/v2/", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	options := TokenHandlerOptions{
+		Transport:   nil,
+		Credentials: creds,
+		Scopes: []Scope{
+			RepositoryScope{
+				Repository: repo,
+				Actions:    []string{"pull", "push"},
+			},
+		},
+	}
+	tHandler := NewTokenHandlerWithOptions(options)
+	tHandler.(*tokenHandler).clock = clock
+	transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds)))
+	client := &http.Client{Transport: transport1}
+
+	// First call should result in a token exchange.
+	// Subsequent calls should recycle the token from the first request, until the expiration has lapsed.
+	// We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn,
+	// so this loop should have one fewer iteration.
+	for i := 0; i < 3; i++ {
+		req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
+		resp, err := client.Do(req)
+		if err != nil {
+			t.Fatalf("Error sending get request: %s", err)
+		}
+		if resp.StatusCode != http.StatusAccepted {
+			t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
+		}
+		if tokenExchanges != 1 {
+			t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i)
+		}
+		clock.current = clock.current.Add(timeIncrement)
+	}
+
+	// After we've exceeded the expiration, we should see a second token exchange.
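+	// A sketch of the expiry arithmetic (editorial comment, not part of the
+	// original test), measuring time from firstIssuedAt:
+	//
+	//	t=1000s  first request, token exchange #1 (token expires at t=3001s)
+	//	t=2000s  cached token reused
+	//	t=3000s  cached token reused
+	//	t=4000s  token expired, so the request below must re-exchange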
+	req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
+	resp, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("Error sending get request: %s", err)
+	}
+	if resp.StatusCode != http.StatusAccepted {
+		t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
+	}
+	if tokenExchanges != 2 {
+		t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges)
+	}
+}
+
+func TestEndpointAuthorizeBasic(t *testing.T) {
+	m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{
+		{
+			Request: testutil.Request{
+				Method: "GET",
+				Route:  "/v2/hello",
+			},
+			Response: testutil.Response{
+				StatusCode: http.StatusAccepted,
+			},
+		},
+	})
+
+	username := "user1"
+	password := "funSecretPa$$word"
+	authenticate := "Basic realm=localhost"
+	validCheck := func(a string) bool {
+		return a == fmt.Sprintf("Basic %s", basicAuth(username, password))
+	}
+	e, c := testServerWithAuth(m, authenticate, validCheck)
+	defer c()
+	creds := &testCredentialStore{
+		username: username,
+		password: password,
+	}
+
+	challengeManager := challenge.NewSimpleManager()
+	_, err := ping(challengeManager, e+"/v2/", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds)))
+	client := &http.Client{Transport: transport1}
+
+	req, _ := http.NewRequest("GET", e+"/v2/hello", nil)
+	resp, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("Error sending get request: %s", err)
+	}
+
+	if resp.StatusCode != http.StatusAccepted {
+		t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted)
+	}
+}
diff --git a/registry/client/blob_writer.go b/registry/client/blob_writer.go
new file mode 100644
index 00000000..695bf852
--- /dev/null
+++ b/registry/client/blob_writer.go
@@ -0,0 +1,162 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+)
+
+type httpBlobUpload struct {
+	statter distribution.BlobStatter
+	client  *http.Client
+
+	uuid      string
+	startedAt time.Time
+
+	location string // always the last value of the location header.
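+	// offset is the position used as the start of the Content-Range header
+	// on chunked writes and is reported back via Size; closed is set once
+	// Close has been called.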
+ offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return HandleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff 
--git a/registry/client/blob_writer_test.go b/registry/client/blob_writer_test.go new file mode 100644 index 00000000..099dca4f --- /dev/null +++ b/registry/client/blob_writer_test.go @@ -0,0 +1,211 @@ +package client + +import ( + "bytes" + "fmt" + "net/http" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" +) + +// Test implements distribution.BlobWriter +var _ distribution.BlobWriter = &httpBlobUpload{} + +func TestUploadReadFrom(t *testing.T) { + _, b := newRandomBlob(64) + repo := "test/upload/readfrom" + locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + }), + }, + }, + // Test Valid case + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {"0-63"}, + }), + }, + }, + // Test invalid range + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {""}, + }), + }, + }, + // Test 404 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusNotFound, + }, + }, + // Test 400 valid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte(` + { "errors": + [ + { + "code": "BLOB_UPLOAD_INVALID", + "message": "blob upload invalid", + "detail": "more detail" + } + ] + } `), + }, + }, + // Test 400 invalid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte("something bad happened"), + }, + }, + // Test 500 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusInternalServerError, + }, + }, + }) + + e, c := testServer(m) + defer c() + + blobUpload := &httpBlobUpload{ + client: &http.Client{}, + } + + // Valid case + blobUpload.location = e + locationPath + n, err := blobUpload.ReadFrom(bytes.NewReader(b)) + if err != nil { + t.Fatalf("Error calling ReadFrom: %s", err) + } + if n != 64 { + t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) + } + + // Bad range + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when bad range received") + } + + // 404 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("Wrong error thrown: %s, expected %s", err, 
distribution.ErrBlobUploadUnknown)
+	}
+
+	// 400 valid json
+	blobUpload.location = e + locationPath
+	_, err = blobUpload.ReadFrom(bytes.NewReader(b))
+	if err == nil {
+		t.Fatalf("Expected error for 400 response with valid JSON errors body")
+	}
+	if uploadErr, ok := err.(errcode.Errors); !ok {
+		t.Fatalf("Wrong error type %T: %s", err, err)
+	} else if len(uploadErr) != 1 {
+		t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr))
+	} else {
+		v2Err, ok := uploadErr[0].(errcode.Error)
+		if !ok {
+			t.Fatalf("Not an 'Error' type: %#v", uploadErr[0])
+		}
+		if v2Err.Code != v2.ErrorCodeBlobUploadInvalid {
+			t.Fatalf("Unexpected error code: %s, expected %s", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid)
+		}
+		if expected := "blob upload invalid"; v2Err.Message != expected {
+			t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected)
+		}
+		if expected := "more detail"; v2Err.Detail.(string) != expected {
+			t.Fatalf("Unexpected error detail: %q, expected %q", v2Err.Detail.(string), expected)
+		}
+	}
+
+	// 400 invalid json
+	blobUpload.location = e + locationPath
+	_, err = blobUpload.ReadFrom(bytes.NewReader(b))
+	if err == nil {
+		t.Fatalf("Expected error for 400 response with invalid JSON body")
+	}
+	if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok {
+		t.Fatalf("Wrong error type %T: %s", err, err)
+	} else {
+		respStr := string(uploadErr.Response)
+		if expected := "something bad happened"; respStr != expected {
+			t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected)
+		}
+	}
+
+	// 500
+	blobUpload.location = e + locationPath
+	_, err = blobUpload.ReadFrom(bytes.NewReader(b))
+	if err == nil {
+		t.Fatalf("Expected error for 500 response")
+	}
+	if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok {
+		t.Fatalf("Wrong error type %T: %s", err, err)
+	} else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected {
+		t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected)
+	}
+}
diff --git a/registry/client/errors.go b/registry/client/errors.go
new file mode 100644
index 00000000..52d49d5d
--- /dev/null
+++ b/registry/client/errors.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+)
+
+// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type UnexpectedHTTPStatusError struct {
+	Status string
+}
+
+func (e *UnexpectedHTTPStatusError) Error() string {
+	return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the content was unexpected and failed to be parsed.
+type UnexpectedHTTPResponseError struct {
+	ParseErr   error
+	StatusCode int
+	Response   []byte
+}
+
+func (e *UnexpectedHTTPResponseError) Error() string {
+	return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+	var errs errcode.Errors
+	body, err := ioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	// For backward compatibility, handle irregularly formatted
+	// messages that contain a "details" field.
+	var detailsErr struct {
+		Details string `json:"details"`
+	}
+	err = json.Unmarshal(body, &detailsErr)
+	if err == nil && detailsErr.Details != "" {
+		switch statusCode {
+		case http.StatusUnauthorized:
+			return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+		case http.StatusTooManyRequests:
+			return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+		default:
+			return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+		}
+	}
+
+	if err := json.Unmarshal(body, &errs); err != nil {
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   err,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	if len(errs) == 0 {
+		// If there was no error specified in the body, return
+		// UnexpectedHTTPResponseError.
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   ErrNoErrorsInBody,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	return errs
+}
+
+func makeErrorList(err error) []error {
+	if errL, ok := err.(errcode.Errors); ok {
+		return []error(errL)
+	}
+	return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// HandleErrorResponse returns the error parsed from the HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for response codes outside of this
+// range.
+func HandleErrorResponse(resp *http.Response) error {
+	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+		// Check for OAuth errors within the `WWW-Authenticate` header first
+		// See https://tools.ietf.org/html/rfc6750#section-3
+		for _, c := range challenge.ResponseChallenges(resp) {
+			if c.Scheme == "bearer" {
+				var err errcode.Error
+				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+				switch c.Parameters["error"] {
+				case "invalid_token":
+					err.Code = errcode.ErrorCodeUnauthorized
+				case "insufficient_scope":
+					err.Code = errcode.ErrorCodeDenied
+				default:
+					continue
+				}
+				if description := c.Parameters["error_description"]; description != "" {
+					err.Message = description
+				} else {
+					err.Message = err.Code.Message()
+				}
+
+				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+			}
+		}
+		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
+		}
+		return err
+	}
+	return &UnexpectedHTTPStatusError{Status: resp.Status}
+}
+
+// SuccessStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
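+//
+// For example, SuccessStatus(http.StatusNoContent) is true, while
+// SuccessStatus(http.StatusNotFound) is false.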
+func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/registry/client/errors_test.go b/registry/client/errors_test.go new file mode 100644 index 00000000..ca9dddd1 --- /dev/null +++ b/registry/client/errors_test.go @@ -0,0 +1,104 @@ +package client + +import ( + "bytes" + "io" + "net/http" + "strings" + "testing" +) + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +func TestHandleErrorResponse401ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: action requires authentication" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponse401WithInvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: authentication required" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}" + response := &http.Response{ + Status: "400 Bad Request", + StatusCode: 400, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "digest invalid: provided digest does not match" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) { + json := `{"randomkey": "randomvalue"}` + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := `error parsing HTTP 404 response body: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"` + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "error parsing HTTP 404 response body: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { + response := &http.Response{ + Status: "501 Not Implemented", + StatusCode: 501, + Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")}, + } + err := HandleErrorResponse(response) + + expectedMsg := "received unexpected HTTP status: 501 Not Implemented" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, 
err.Error())
+	}
+}
diff --git a/registry/client/repository.go b/registry/client/repository.go
new file mode 100644
index 00000000..d8e2c795
--- /dev/null
+++ b/registry/client/repository.go
@@ -0,0 +1,869 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/distribution/registry/storage/cache"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/opencontainers/go-digest"
+)
+
+// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
+type Registry interface {
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// checkHTTPRedirect is a callback that can manipulate redirected HTTP
+// requests. It is used to preserve Accept and Range headers.
+func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
+	if len(via) >= 10 {
+		return errors.New("stopped after 10 redirects")
+	}
+
+	if len(via) > 0 {
+		for headerName, headerVals := range via[0].Header {
+			if headerName != "Accept" && headerName != "Range" {
+				continue
+			}
+			for _, val := range headerVals {
+				// Don't add to redirected request if redirected
+				// request already has a header with the same
+				// name and value.
+				hasValue := false
+				for _, existingVal := range req.Header[headerName] {
+					if existingVal == val {
+						hasValue = true
+						break
+					}
+				}
+				if !hasValue {
+					req.Header.Add(headerName, val)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories.
+func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL, false)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport:     transport,
+		Timeout:       1 * time.Minute,
+		CheckRedirect: checkHTTPRedirect,
+	}
+
+	return &registry{
+		client: client,
+		ub:     ub,
+	}, nil
+}
+
+type registry struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'.
The number of entries will be returned along with io.EOF if there +// are no more entries +func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { + var numFilled int + var returnErr error + + values := buildCatalogValues(len(entries), last) + u, err := r.ub.BuildCatalogURL(values) + if err != nil { + return 0, err + } + + resp, err := r.client.Get(u) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } + + for cnt := range ctlg.Repositories { + entries[cnt] = ctlg.Repositories[cnt] + } + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF + } + } else { + return 0, HandleErrorResponse(resp) + } + + return numFilled, returnErr +} + +// NewRepository creates a new Repository for the given repository name and base URL. +func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + CheckRedirect: checkHTTPRedirect, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +func (r *repository) Named() reference.Named { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.name, + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.name, + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.name, + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + name: r.Named(), + } +} + +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + name reference.Named +} + +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + listURLStr, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + listURL, err := url.Parse(listURLStr) + if err != nil { + return tags, err + } + + for { + resp, err := t.client.Get(listURL.String()) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = append(tags, tagsResponse.Tags...) 
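+			// The registry paginates large tag lists via an RFC 5988 Link
+			// header, e.g. (illustrative value, not from the original source):
+			//
+			//	Link: </v2/test/repo/tags/list?last=v90&n=100>; rel="next"
+			//
+			// The code below extracts the URL between '<' and '>' and
+			// resolves it against the current list URL to fetch the next page.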
+ if link := resp.Header.Get("Link"); link != "" { + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + listURL = listURL.ResolveReference(linkURL) + } else { + return tags, nil + } + } else { + return tags, HandleErrorResponse(resp) + } + } +} + +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.Parse(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + +} + +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. +func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + ref, err := reference.WithTag(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + newRequest := func(method string) (*http.Response, error) { + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + resp, err := t.client.Do(req) + return resp, err + } + + resp, err := newRequest("HEAD") + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: + // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers + return descriptorFromResponse(resp) + default: + // if the response is an error - there will be no body to decode. 
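+		// (A HEAD response carries headers only, so any error payload from
+		// the registry is unavailable at this point.)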
+		// Issue a GET request:
+		// - for data from a server that does not handle HEAD
+		// - to get error details in case of a failure
+		resp, err = newRequest("GET")
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+			return descriptorFromResponse(resp)
+		}
+		return distribution.Descriptor{}, HandleErrorResponse(resp)
+	}
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+	panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+	panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
+	panic("not implemented")
+}
+
+type manifests struct {
+	name   reference.Named
+	ub     *v2.URLBuilder
+	client *http.Client
+	etags  map[string]string
+}
+
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	ref, err := reference.WithDigest(ms.name, dgst)
+	if err != nil {
+		return false, err
+	}
+	u, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return false, err
+	}
+
+	resp, err := ms.client.Head(u)
+	if err != nil {
+		return false, err
+	}
+
+	if SuccessStatus(resp.StatusCode) {
+		return true, nil
+	} else if resp.StatusCode == http.StatusNotFound {
+		return false, nil
+	}
+	return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request. If the eTag matches, a nil manifest
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+	return etagOption{tag, etag}
+}
+
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+	if ms, ok := ms.(*manifests); ok {
+		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+		return nil
+	}
+	return fmt.Errorf("etag option is a client-only option")
+}
+
+// ReturnContentDigest allows a client to set the content digest on
+// a successful request from the 'Docker-Content-Digest' header. The
+// returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
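+//
+// A usage sketch (assuming distribution.WithTag and a manifest service ms,
+// not part of the original source):
+//
+//	var dgst digest.Digest
+//	m, err := ms.Get(ctx, "", distribution.WithTag("latest"), ReturnContentDigest(&dgst))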
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + contentDgst *digest.Digest + mediaTypes []string + ) + + for _, option := range options { + switch opt := option.(type) { + case distribution.WithTagOption: + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + case contentDigestOption: + contentDgst = opt.digest + case distribution.WithManifestMediaTypesOption: + mediaTypes = opt.MediaTypes + default: + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } + } + + if len(mediaTypes) == 0 { + mediaTypes = distribution.ManifestMediaTypes() + } + + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + for _, t := range mediaTypes { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) + } + + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, distribution.ErrManifestNotModified + } else if SuccessStatus(resp.StatusCode) { + if contentDgst != nil { + dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil + } + return nil, HandleErrorResponse(resp) +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. 
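+//
+// A usage sketch (assuming the distribution package's WithTag option, not
+// part of the original source):
+//
+//	dgst, err := ms.Put(ctx, signedManifest, distribution.WithTag("v1.0"))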
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + var tagged bool + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + + resp, err := ms.client.Do(putRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.Parse(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil + } + + return "", HandleErrorResponse(resp) +} + +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + +type blobs struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + return baseURL.ResolveReference(locationURL).String(), nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + reader, err := bs.Open(ctx, dgst) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil +} + +func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.Digester() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
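+	// For a cross-repository mount the resulting upload URL takes the form
+	// /v2/<name>/blobs/uploads/?from=<source-repo>&mount=<digest>; the
+	// registry answers 201 Created when it can mount the blob directly, or
+	// 202 Accepted to fall back to a regular upload (both handled below).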
+ if err != nil { + return nil, err + } + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + default: + return nil, HandleErrorResponse(resp) + } +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +type blobStatter struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + } else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, HandleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go new file mode 100644 index 00000000..4018a928 --- /dev/null +++ b/registry/client/repository_test.go @@ -0,0 +1,1362 @@ +package client + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/http/httptest" + "reflect" + "sort" + 
"strconv" + "strings" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" + "github.com/docker/distribution/uuid" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +func newRandomBlob(size int) (digest.Digest, []byte) { + b := make([]byte, size) + if n, err := rand.Read(b); err != nil { + panic(err) + } else if n != size { + panic("unable to read enough bytes") + } + + return digest.FromBytes(b), b +} + +func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) +} + +func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { + headers := map[string][]string{ + "Content-Length": {strconv.Itoa(len(content))}, + "Content-Type": {"application/json; charset=utf-8"}, + } + if link != "" { + headers["Link"] = append(headers["Link"], link) + } + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: route, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(headers), + }, + }) +} + +func TestBlobDelete(t *testing.T) { + dgst, _ := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.WithName("test.example.com/repo1") + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + err = l.Delete(ctx, dgst) + if err != nil { + t.Errorf("Error deleting blob: %s", err.Error()) + } + +} + +func TestBlobFetch(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + repo, _ := reference.WithName("test.example.com/repo1") + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + b, 
err := l.Get(ctx, d1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(b, b1) {
+		t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1))
+	}
+
+	// TODO(dmcgowan): Test for unknown blob case
+}
+
+func TestBlobExistsNoContentLength(t *testing.T) {
+	var m testutil.RequestResponseMap
+
+	repo, _ := reference.WithName("biff")
+	dgst, content := newRandomBlob(1024)
+	m = append(m, testutil.RequestResponseMapping{
+		Request: testutil.Request{
+			Method: "GET",
+			Route:  "/v2/" + repo.Name() + "/blobs/" + dgst.String(),
+		},
+		Response: testutil.Response{
+			StatusCode: http.StatusOK,
+			Body:       content,
+			Headers: http.Header(map[string][]string{
+				// "Content-Length": {fmt.Sprint(len(content))},
+				"Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
+			}),
+		},
+	})
+
+	m = append(m, testutil.RequestResponseMapping{
+		Request: testutil.Request{
+			Method: "HEAD",
+			Route:  "/v2/" + repo.Name() + "/blobs/" + dgst.String(),
+		},
+		Response: testutil.Response{
+			StatusCode: http.StatusOK,
+			Headers: http.Header(map[string][]string{
+				// "Content-Length": {fmt.Sprint(len(content))},
+				"Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
+			}),
+		},
+	})
+	e, c := testServer(m)
+	defer c()
+
+	ctx := context.Background()
+	r, err := NewRepository(repo, e, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	l := r.Blobs(ctx)
+
+	_, err = l.Stat(ctx, dgst)
+	if err == nil {
+		t.Fatal("Expected error from Stat for response without Content-Length header")
+	}
+	if !strings.Contains(err.Error(), "missing content-length header") {
+		t.Fatalf("Expected missing content-length error message")
+	}
+
+}
+
+func TestBlobExists(t *testing.T) {
+	d1, b1 := newRandomBlob(1024)
+	var m testutil.RequestResponseMap
+	addTestFetch("test.example.com/repo1", d1, b1, &m)
+
+	e, c := testServer(m)
+	defer c()
+
+	ctx := context.Background()
+	repo, _ := reference.WithName("test.example.com/repo1")
+	r, err := NewRepository(repo, e, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	l := r.Blobs(ctx)
+
+	stat, err := l.Stat(ctx, d1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if stat.Digest != d1 {
+		t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1)
+	}
+
+	if stat.Size != int64(len(b1)) {
+		t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1))
+	}
+
+	// TODO(dmcgowan): Test error cases and ErrBlobUnknown case
+}
+
+func TestBlobUploadChunked(t *testing.T) {
+	dgst, b1 := newRandomBlob(1024)
+	var m testutil.RequestResponseMap
+	chunks := [][]byte{
+		b1[0:256],
+		b1[256:512],
+		b1[512:513],
+		b1[513:1024],
+	}
+	repo, _ := reference.WithName("test.example.com/uploadrepo")
+	uuids := []string{uuid.Generate().String()}
+	m = append(m, testutil.RequestResponseMapping{
+		Request: testutil.Request{
+			Method: "POST",
+			Route:  "/v2/" + repo.Name() + "/blobs/uploads/",
+		},
+		Response: testutil.Response{
+			StatusCode: http.StatusAccepted,
+			Headers: http.Header(map[string][]string{
+				"Content-Length":     {"0"},
+				"Location":           {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]},
+				"Docker-Upload-UUID": {uuids[0]},
+				"Range":              {"0-0"},
+			}),
+		},
+	})
+	offset := 0
+	for i, chunk := range chunks {
+		uuids = append(uuids, uuid.Generate().String())
+		newOffset := offset + len(chunk)
+		m = append(m, testutil.RequestResponseMapping{
+			Request: testutil.Request{
+				Method: "PATCH",
+				Route:  "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i],
+				Body:   chunk,
+			},
+			Response: testutil.Response{
+				StatusCode: http.StatusAccepted,
+				Headers: http.Header(map[string][]string{
+					"Content-Length": {"0"},
+					"Location": {"/v2/" + repo.Name() +
"/blobs/uploads/" + uuids[i+1]}, + "Docker-Upload-UUID": {uuids[i+1]}, + "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, + }), + }, + }) + offset = newOffset + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(offset)}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uuids[0] { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) + } + + for _, chunk := range chunks { + n, err := upload.Write(chunk) + if err != nil { + t.Fatal(err) + } + if n != len(chunk) { + t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) + } + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func TestBlobUploadMonolithic(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.WithName("test.example.com/uploadrepo") + uploadID := uuid.Generate().String() + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Range": {"0-0"}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, + Body: b1, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, 
testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(b1))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uploadID { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) + } + + n, err := upload.ReadFrom(bytes.NewReader(b1)) + if err != nil { + t.Fatal(err) + } + if n != int64(len(b1)) { + t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func TestBlobMount(t *testing.T) { + dgst, content := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.WithName("test.example.com/uploadrepo") + + sourceRepo, _ := reference.WithName("test.example.com/sourcerepo") + canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + + l := r.Blobs(ctx) + + bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatalf("Expected blob writer to be nil, was %v", bw) + } + + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if ebm.From.Digest() != dgst { + t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) + } + if ebm.From.Name() != sourceRepo.Name() { + t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) + } + } else { + t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) + } +} + +func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { + blobs := make([]schema1.FSLayer, blobCount) + history := make([]schema1.History, blobCount) + + for i := 0; i < blobCount; i++ { + dgst, blob := newRandomBlob((i % 5) * 16) + + blobs[i] = schema1.FSLayer{BlobSum: dgst} + history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} + } + + m := schema1.Manifest{ + 
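+		// The FSLayers and History built above pair each random blob digest
+		// with a fake V1Compatibility entry; Sign below wraps the manifest
+		// in a JWS using a fresh ECDSA P-256 key, and the returned digest is
+		// computed over the canonical payload (sm.Canonical).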
Name: name.String(), + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + + sm, err := schema1.Sign(&m, pk) + if err != nil { + panic(err) + } + + return sm, digest.FromBytes(sm.Canonical), sm.Canonical +} + +func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { + actualDigest := digest.FromBytes(content) + getReqWithEtag := testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + Headers: http.Header(map[string][]string{ + "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, + }), + } + + var getRespWithEtag testutil.Response + if actualDigest.String() == dgst { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusNotModified, + Body: []byte{}, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, + }), + } + } else { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, + }), + } + + } + *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) +} + +func contentDigestString(mediatype string, content []byte) string { + if mediatype == schema1.MediaTypeSignedManifest { + m, _, _ := distribution.UnmarshalManifest(mediatype, content) + content = m.(*schema1.SignedManifest).Canonical + } + return digest.Canonical.FromBytes(content).String() +} + +func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + "Docker-Content-Digest": {contentDigestString(mediatype, content)}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + "Docker-Content-Digest": {digest.Canonical.FromBytes(content).String()}, + }), + }, + }) +} + +func addTestManifestWithoutDigestHeader(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": 
{time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + }), + }, + }) +} + +func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { + if m1.Name != m2.Name { + return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) + } + if m1.Tag != m2.Tag { + return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) + } + if len(m1.FSLayers) != len(m2.FSLayers) { + return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) + } + for i := range m1.FSLayers { + if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { + return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) + } + } + if len(m1.History) != len(m2.History) { + return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) + } + for i := range m1.History { + if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { + return fmt.Errorf("v1 compatibility does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) + } + } + return nil +} + +func TestV1ManifestFetch(t *testing.T) { + ctx := context.Background() + repo, _ := reference.WithName("test.example.com/repo") + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + _, pl, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "badcontenttype", "text/html", pl, &m) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + ok, err := ms.Exists(ctx, dgst) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("Manifest does not exist") + } + + manifest, err := ms.Get(ctx, dgst) + if err != nil { + t.Fatal(err) + } + v1manifest, ok := manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err := checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + var contentDigest digest.Digest + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("latest"), ReturnContentDigest(&contentDigest)) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + if contentDigest != dgst { + t.Fatalf("Unexpected returned content digest %v, expected %v", contentDigest, dgst) + } + + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("badcontenttype")) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } +} + +func TestManifestFetchWithEtag(t *testing.T) { + repo, _ :=
reference.WithName("test.example.com/repo/by/tag") + _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + clientManifestService, ok := ms.(*manifests) + if !ok { + panic("wrong type for client manifest service") + } + _, err = clientManifestService.Get(ctx, d1, distribution.WithTag("latest"), AddEtagToTag("latest", d1.String())) + if err != distribution.ErrManifestNotModified { + t.Fatal(err) + } +} + +func TestManifestFetchWithAccept(t *testing.T) { + ctx := context.Background() + repo, _ := reference.WithName("test.example.com/repo") + _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + headers := make(chan []string, 1) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + headers <- req.Header["Accept"] + })) + defer close(headers) + defer s.Close() + + r, err := NewRepository(repo, s.URL, nil) + if err != nil { + t.Fatal(err) + } + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + // the media types we send + mediaTypes []string + // the expected Accept headers the server should receive + expect []string + // whether to sort the request and response values for comparison + sort bool + }{ + { + mediaTypes: []string{}, + expect: distribution.ManifestMediaTypes(), + sort: true, + }, + { + mediaTypes: []string{"test1", "test2"}, + expect: []string{"test1", "test2"}, + }, + { + mediaTypes: []string{"test1"}, + expect: []string{"test1"}, + }, + { + mediaTypes: []string{""}, + expect: []string{""}, + }, + } + for _, testCase := range testCases { + ms.Get(ctx, dgst, distribution.WithManifestMediaTypes(testCase.mediaTypes)) + actual := <-headers + if testCase.sort { + sort.Strings(actual) + sort.Strings(testCase.expect) + } + if !reflect.DeepEqual(actual, testCase.expect) { + t.Fatalf("unexpected Accept header values: %v", actual) + } + } +} + +func TestManifestDelete(t *testing.T) { + repo, _ := reference.WithName("test.example.com/repo/delete") + _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if err := ms.Delete(ctx, dgst1); err != nil { + t.Fatal(err) + } + if err := ms.Delete(ctx, dgst2); err == nil { + t.Fatal("Expected error deleting unknown manifest") + } + // TODO(dmcgowan): Check for specific unknown error +} + +func TestManifestPut(t *testing.T) { + repo, _ := reference.WithName("test.example.com/repo/delete") + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) + + _, payload, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: 
testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/manifests/other", + Body: payload, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + + putDgst := digest.FromBytes(m1.Canonical) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/manifests/" + putDgst.String(), + Body: payload, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {putDgst.String()}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if _, err := ms.Put(ctx, m1, distribution.WithTag(m1.Tag)); err != nil { + t.Fatal(err) + } + + if _, err := ms.Put(ctx, m1); err != nil { + t.Fatal(err) + } + + // TODO(dmcgowan): Check for invalid input error +} + +func TestManifestTags(t *testing.T) { + repo, _ := reference.WithName("test.example.com/repo/tags/list") + tagsList := []byte(strings.TrimSpace(` +{ + "name": "test.example.com/repo/tags/list", + "tags": [ + "tag1", + "tag2", + "funtag" + ] +} + `)) + var m testutil.RequestResponseMap + for i := 0; i < 3; i++ { + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: tagsList, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(tagsList))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + } + e, c := testServer(m) + defer c() + + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) + if err != nil { + t.Fatal(err) + } + if len(tags) != 3 { + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } + + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } + // TODO(dmcgowan): Check for error cases +} + +func TestObtainsErrorForMissingTag(t *testing.T) { + repo, _ := reference.WithName("test.example.com/repo") + + var m testutil.RequestResponseMap + var errors errcode.Errors + errors = append(errors, v2.ErrorCodeManifestUnknown.WithDetail("unknown manifest")) + errBytes, err := json.Marshal(errors) + if err != nil { + t.Fatal(err) + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/1.0.0", + }, + Response: testutil.Response{ + StatusCode: http.StatusNotFound, + Body: errBytes, + Headers: http.Header(map[string][]string{ + "Content-Type": {"application/json; charset=utf-8"}, + }), + }, + }) + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + + tagService := r.Tags(ctx) + + _, err = tagService.Get(ctx, "1.0.0") + if err == nil { + t.Fatalf("Expected an error") + } + if !strings.Contains(err.Error(), "manifest unknown") { + t.Fatalf("Expected 
unknown manifest error message") + } +} + +func TestObtainsManifestForTagWithoutHeaders(t *testing.T) { + repo, _ := reference.WithName("test.example.com/repo") + + var m testutil.RequestResponseMap + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + _, pl, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + addTestManifestWithoutDigestHeader(repo, "1.0.0", schema1.MediaTypeSignedManifest, pl, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + + tagService := r.Tags(ctx) + + desc, err := tagService.Get(ctx, "1.0.0") + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if desc.Digest != dgst { + t.Fatalf("Unexpected digest: %v, expected %v", desc.Digest, dgst) + } +} + +func TestManifestTagsPaginated(t *testing.T) { + s := httptest.NewServer(http.NotFoundHandler()) + defer s.Close() + + repo, _ := reference.WithName("test.example.com/repo/tags/list") + tagsList := []string{"tag1", "tag2", "funtag"} + var m testutil.RequestResponseMap + for i := 0; i < 3; i++ { + body, err := json.Marshal(map[string]interface{}{ + "name": "test.example.com/repo/tags/list", + "tags": []string{tagsList[i]}, + }) + if err != nil { + t.Fatal(err) + } + queryParams := make(map[string][]string) + if i > 0 { + queryParams["n"] = []string{"1"} + queryParams["last"] = []string{tagsList[i-1]} + } + + // Test both relative and absolute links. + relativeLink := "/v2/" + repo.Name() + "/tags/list?n=1&last=" + tagsList[i] + var link string + switch i { + case 0: + link = relativeLink + case len(tagsList) - 1: + link = "" + default: + link = s.URL + relativeLink + } + + headers := http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(body))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }) + if link != "" { + headers.Set("Link", fmt.Sprintf(`<%s>; rel="next"`, link)) + } + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + QueryParams: queryParams, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: body, + Headers: headers, + }, + }) + } + + s.Config.Handler = testutil.NewHandler(m) + + r, err := NewRepository(repo, s.URL, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) + if err != nil { + t.Fatal(tags, err) + } + if len(tags) != 3 { + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } + + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } +} + +func TestManifestUnauthorized(t *testing.T) { + repo, _ := reference.WithName("test.example.com/repo") + _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusUnauthorized, + Body: []byte("garbage"), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = ms.Get(ctx, dgst) + if err == nil { + t.Fatal("Expected error fetching
manifest") + } + v2Err, ok := err.(errcode.Error) + if !ok { + t.Fatalf("Unexpected error type: %#v", err) + } + if v2Err.Code != errcode.ErrorCodeUnauthorized { + t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) + } + if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { + t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) + } +} + +func TestCatalog(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=5", + []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 5) + + r, err := NewRegistry(e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 3 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestCatalogInParts(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=2", + []byte("{\"repositories\":[\"bar\", \"baz\"]}"), + "", &m) + addTestCatalog( + "/v2/_catalog?last=baz&n=2", + []byte("{\"repositories\":[\"foo\"]}"), + "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 2) + + r, err := NewRegistry(e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != nil { + t.Fatal(err) + } + + if numFilled != 2 { + t.Fatalf("Got wrong number of repos") + } + + numFilled, err = r.Repositories(ctx, entries, "baz") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 1 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestSanitizeLocation(t *testing.T) { + for _, testcase := range []struct { + description string + location string + source string + expected string + err error + }{ + { + description: "ensure relative location correctly resolved", + location: "/v2/foo/baasdf", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf", + }, + { + description: "ensure parameters are preserved", + location: "/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + }, + { + description: "ensure new hostname overidden", + location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + source: "http://blahalaja.com/v1", + expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + }, + } { + fatalf := func(format string, args ...interface{}) { + t.Fatalf(testcase.description+": "+format, args...) 
+ } + + s, err := sanitizeLocation(testcase.location, testcase.source) + if err != testcase.err { + if testcase.err != nil { + fatalf("expected error: %v != %v", err, testcase.err) + } else { + fatalf("unexpected error sanitizing: %v", err) + } + } + + if s != testcase.expected { + fatalf("bad sanitize: %q != %q", s, testcase.expected) + } + } +} diff --git a/registry/client/transport/http_reader.go b/registry/client/transport/http_reader.go new file mode 100644 index 00000000..e5ff09d7 --- /dev/null +++ b/registry/client/transport/http_reader.go @@ -0,0 +1,251 @@ +package transport + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "regexp" + "strconv" +) + +var ( + contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`) + + // ErrWrongCodeForByteRange is returned if the client sends a request + // with a Range header but the server returns a 2xx or 3xx code other + // than 206 Partial Content. + ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") +) + +// ReadSeekCloser combines io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET +// request. When seeking and starting a read from a non-zero offset, +// a "Range" header will be added which sets the offset. +// TODO(dmcgowan): Move this into a separate utility package +func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { + return &httpReadSeeker{ + client: client, + url: url, + errorHandler: errorHandler, + } +} + +type httpReadSeeker struct { + client *http.Client + url string + + // errorHandler creates an error from an unsuccessful HTTP response. + // This allows the error to be created with the HTTP response body + // without leaking the body through a returned error. + errorHandler func(*http.Response) error + + size int64 + + // rc is the remote read closer. + rc io.ReadCloser + // readerOffset tracks the offset as of the last read. + readerOffset int64 + // seekOffset allows Seek to override the offset. Seek changes + // seekOffset instead of changing readerOffset directly so that + // connection resets can be delayed and possibly avoided if the + // seek is undone (i.e. seeking to the end and then back to the + // beginning). + seekOffset int64 + err error +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.err != nil { + return 0, hrs.err + } + + // If we sought to a different position, we need to reset the + // connection. This logic is here instead of Seek so that if + // a seek is undone before the next read, the connection doesn't + // need to be closed and reopened. A common example of this is + // seeking to the end to determine the length, and then seeking + // back to the original position. + if hrs.readerOffset != hrs.seekOffset { + hrs.reset() + } + + hrs.readerOffset = hrs.seekOffset + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.seekOffset += int64(n) + hrs.readerOffset += int64(n) + + return n, err + } + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.err != nil { + return 0, hrs.err + } + + lastReaderOffset := hrs.readerOffset + + if whence == os.SEEK_SET && hrs.rc == nil { + // If no request has been made yet, and we are seeking to an + // absolute position, set the read offset as well to avoid an + // unnecessary request.
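+ // (Note: os.SEEK_SET, os.SEEK_CUR and os.SEEK_END predate the + // io.SeekStart, io.SeekCurrent and io.SeekEnd constants added in + // Go 1.7; the values are identical, so callers may pass either set.)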
+ hrs.readerOffset = offset + } + + _, err := hrs.reader() + if err != nil { + hrs.readerOffset = lastReaderOffset + return 0, err + } + + newOffset := hrs.seekOffset + + switch whence { + case os.SEEK_CUR: + newOffset += offset + case os.SEEK_END: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } + newOffset = hrs.size + offset + case os.SEEK_SET: + newOffset = offset + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + hrs.seekOffset = newOffset + } + + return hrs.seekOffset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.rc, nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.readerOffset > 0 { + // If we are at a different offset, issue a range request from there. + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + req.Header.Add("Accept-Encoding", "identity") + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } + hrs.rc = resp.Body + } else { + defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + return hrs.rc, nil +} diff --git
a/registry/client/transport/transport.go b/registry/client/transport/transport.go new file mode 100644 index 00000000..30e45fab --- /dev/null +++ b/registry/client/transport/transport.go @@ -0,0 +1,147 @@ +package transport + +import ( + "io" + "net/http" + "sync" +) + +// RequestModifier represents an object which will do an in-place +// modification of an HTTP request. +type RequestModifier interface { + ModifyRequest(*http.Request) error +} + +type headerModifier http.Header + +// NewHeaderRequestModifier returns a new RequestModifier which will +// add the given headers to a request. +func NewHeaderRequestModifier(header http.Header) RequestModifier { + return headerModifier(header) +} + +func (h headerModifier) ModifyRequest(req *http.Request) error { + for k, s := range http.Header(h) { + req.Header[k] = append(req.Header[k], s...) + } + + return nil +} + +// NewTransport creates a new transport which will apply modifiers to +// the request on a RoundTrip call. +func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { + return &transport{ + Modifiers: modifiers, + Base: base, + } +} + +// transport is an http.RoundTripper that makes HTTP requests after +// copying and modifying the request. +type transport struct { + Modifiers []RequestModifier + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip applies the configured request modifiers to a copy of the +// incoming request and then delegates it to the base RoundTripper. +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := cloneRequest(req) + for _, modifier := range t.Modifiers { + if err := modifier.ModifyRequest(req2); err != nil { + return nil, err + } + } + + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...)
+ } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/registry/doc.go b/registry/doc.go new file mode 100644 index 00000000..a1ba7f3a --- /dev/null +++ b/registry/doc.go @@ -0,0 +1,2 @@ +// Package registry provides the main entrypoints for running a registry. +package registry diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go new file mode 100644 index 00000000..98dcaa71 --- /dev/null +++ b/registry/handlers/api_test.go @@ -0,0 +1,2625 @@ +package handlers + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "path" + "reflect" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + _ "github.com/docker/distribution/registry/storage/driver/testdriver" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "github.com/gorilla/handlers" + "github.com/opencontainers/go-digest" +) + +var headerConfig = http.Header{ + "X-Content-Type-Options": []string{"nosniff"}, +} + +const ( + // digestSha256EmptyTar is the canonical sha256 digest of empty data + digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) + +// TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified +// 200 OK response. 
+func TestCheckAPI(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Length": []string{"2"}, + }) + + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading response body: %v", err) + } + + if string(p) != "{}" { + t.Fatalf("unexpected response body: %v", string(p)) + } +} + +// TestCatalogAPI tests the /v2/_catalog endpoint +func TestCatalogAPI(t *testing.T) { + chunkLen := 2 + env := newTestEnv(t, false) + defer env.Shutdown() + + values := url.Values{ + "last": []string{""}, + "n": []string{strconv.Itoa(chunkLen)}} + + catalogURL, err := env.builder.BuildCatalogURL(values) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + // ----------------------------------- + // try to get an empty catalog + resp, err := http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + var ctlg struct { + Repositories []string `json:"repositories"` + } + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched catalog: %v", err) + } + + // we haven't pushed anything to the registry yet + if len(ctlg.Repositories) != 0 { + t.Fatalf("repositories has unexpected values") + } + + if resp.Header.Get("Link") != "" { + t.Fatalf("repositories has more data when none expected") + } + + // ----------------------------------- + // push something to the registry and try again + images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} + + for _, image := range images { + createRepository(env, t, image, "sometag") + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched catalog: %v", err) + } + + if len(ctlg.Repositories) != chunkLen { + t.Fatalf("repositories has unexpected values") + } + + for _, image := range images[:chunkLen] { + if !contains(ctlg.Repositories, image) { + t.Fatalf("didn't find our repository '%s' in the catalog", image) + } + } + + link := resp.Header.Get("Link") + if link == "" { + t.Fatalf("repositories has less data than expected") + } + + newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) + + // ----------------------------------- + // get the last chunk of data + + catalogURL, err = env.builder.BuildCatalogURL(newValues) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched catalog: %v", err) + } + + if
len(ctlg.Repositories) != 1 { + t.Fatalf("repositories has unexpected values") + } + + lastImage := images[len(images)-1] + if !contains(ctlg.Repositories, lastImage) { + t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) + } + + link = resp.Header.Get("Link") + if link != "" { + t.Fatalf("catalog has unexpected data") + } +} + +func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { + re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") + matches := re.FindStringSubmatch(urlStr) + + if len(matches) != 2 { + t.Fatalf("Catalog link address response was incorrect") + } + linkURL, _ := url.Parse(matches[1]) + urlValues := linkURL.Query() + + if urlValues.Get("n") != strconv.Itoa(numEntries) { + t.Fatalf("Catalog link entry size is incorrect") + } + + if urlValues.Get("last") != last { + t.Fatal("Catalog link last entry is incorrect") + } + + return urlValues +} + +func contains(elems []string, e string) bool { + for _, elem := range elems { + if elem == e { + return true + } + } + return false +} + +func TestURLPrefix(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + config.HTTP.Prefix = "/test/" + config.HTTP.Headers = headerConfig + + env := newTestEnvWithConfig(t, &config) + defer env.Shutdown() + + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + parsed, _ := url.Parse(baseURL) + if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { + t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Length": []string{"2"}, + }) +} + +type blobArgs struct { + imageName reference.Named + layerFile io.ReadSeeker + layerDigest digest.Digest +} + +func makeBlobArgs(t *testing.T) blobArgs { + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + args := blobArgs{ + layerFile: layerFile, + layerDigest: layerDigest, + } + args.imageName, _ = reference.WithName("foo/bar") + return args +} + +// TestBlobAPI conducts a full test of the blob api.
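+// It exercises the full flow twice via testBlobAPI, once against an + // environment with deletes disabled and once with deletes enabled.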
+func TestBlobAPI(t *testing.T) { + deleteEnabled := false + env1 := newTestEnv(t, deleteEnabled) + defer env1.Shutdown() + args := makeBlobArgs(t) + testBlobAPI(t, env1, args) + + deleteEnabled = true + env2 := newTestEnv(t, deleteEnabled) + defer env2.Shutdown() + args = makeBlobArgs(t) + testBlobAPI(t, env2, args) +} + +func TestBlobDelete(t *testing.T) { + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + + args := makeBlobArgs(t) + env = testBlobAPI(t, env, args) + testBlobDelete(t, env, args) +} + +func TestRelativeURL(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + config.HTTP.Headers = headerConfig + config.HTTP.RelativeURLs = false + env := newTestEnvWithConfig(t, &config) + defer env.Shutdown() + ref, _ := reference.WithName("foo/bar") + uploadURLBaseAbs, _ := startPushLayer(t, env, ref) + + u, err := url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + args := makeBlobArgs(t) + resp, err := doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } + + config.HTTP.RelativeURLs = true + args = makeBlobArgs(t) + uploadURLBaseRelative, _ := startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseRelative) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Absolute URL returned from blob upload chunk with relative configuration") + } + + // Start a new upload in absolute mode to get a valid base URL + config.HTTP.RelativeURLs = false + uploadURLBaseAbs, _ = startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + // Complete upload with relative URLs enabled to ensure the final location is relative + config.HTTP.RelativeURLs = true + resp, err = doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Absolute URL returned from blob upload with relative configuration") + } +} + +func TestBlobDeleteDisabled(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + args := makeBlobArgs(t) + + imageName := args.imageName + layerDigest := args.layerDigest + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting when disabled: %v", err) + } + + checkResponse(t, "status of disabled delete", resp,
http.StatusMethodNotAllowed) +} + +func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { + // TODO(stevvooe): This test code is complete junk but it should cover the + // complete flow. This must be broken down and checked against the + // specification *before* we submit the final to docker core. + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + // ----------------------------------- + // Test fetch for non-existent content + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching non-existent layer: %v", err) + } + + checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) + + // ------------------------------------------ + // Test head request for non-existent content + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on non-existent layer: %v", err) + } + + checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) + + // ------------------------------------------ + // Start an upload, check the status then cancel + uploadURLBase, uploadUUID := startPushLayer(t, env, imageName) + + // A status check should work + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of upload", resp, http.StatusNoContent) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Range": []string{"0-0"}, + "Docker-Upload-UUID": []string{uploadUUID}, + }) + + req, err := http.NewRequest("DELETE", uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error creating delete request: %v", err) + } + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error sending delete request: %v", err) + } + + checkResponse(t, "deleting upload", resp, http.StatusNoContent) + + // A status check should result in 404 + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) + + // ----------------------------------------- + // Do layer push with an empty body and different digest + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) + if err != nil { + t.Fatalf("unexpected error doing bad layer push: %v", err) + } + + checkResponse(t, "bad layer push", resp, http.StatusBadRequest) + checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) + + // ----------------------------------------- + // Do layer push with an empty body and correct digest + zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{})) + if err != nil { + t.Fatalf("unexpected error digesting empty buffer: %v", err) + } + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) + + // ----------------------------------------- + // Do layer push with an empty tarfile and correct digest + + // This is a valid but empty tarfile!
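+ // (A tar stream is terminated by two 512-byte blocks of zeros, so 1024 + // zero bytes are the smallest well-formed, empty tarfile.)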
+ emptyTar := bytes.Repeat([]byte("\x00"), 1024) + emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar)) + if err != nil { + t.Fatalf("unexpected error digesting empty tar: %v", err) + } + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) + + // ------------------------------------------ + // Now, actually do successful upload. + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + layerFile.Seek(0, os.SEEK_SET) + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + // ------------------------------------------ + // Now, push just a chunk + layerFile.Seek(0, 0) + + canonicalDigester := digest.Canonical.Digester() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + layerFile.Seek(0, 0) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) + finishUpload(t, env.builder, imageName, uploadURLBase, dgst) + + // ------------------------ + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking head on existing layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) + + // ---------------- + // Fetch the layer! + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) + + // Verify the body + verifier := layerDigest.Verifier() + io.Copy(verifier, resp.Body) + + if !verifier.Verified() { + t.Fatalf("response body did not pass verification") + } + + // ---------------- + // Fetch the layer with an invalid digest + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) + resp, err = http.Get(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) + + // Cache headers + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, canonicalDigest)}, + "Cache-Control": []string{"max-age=31536000"}, + }) + + // Matching etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + + // Non-matching etag, gives 200 + req, err = http.NewRequest("GET", layerURL, nil) + if err 
!= nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", "") + resp, err = http.DefaultClient.Do(req) + checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) + + // Missing tests: + // - Upload the same tar file under a different repository and + // ensure the content remains uncorrupted. + return env +} + +func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { + // Upload a layer + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatal(err) + } + // --------------- + // Delete a layer + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Try and get it back + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound) + + // Delete already deleted layer + resp, err = httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusNotFound) + + // ---------------- + // Attempt to delete a layer with an invalid digest + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) + resp, err = httpDelete(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest) + + // ---------------- + // Reupload previously deleted blob + layerFile.Seek(0, os.SEEK_SET) + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + layerFile.Seek(0, os.SEEK_SET) + canonicalDigester := digest.Canonical.Digester() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + // ------------------------ + // Use a head request to see if it exists + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) +} + +func TestDeleteDisabled(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + + imageName, _ := reference.WithName("foo/bar") + // "build" our layer file + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("Error building blob URL: %v", err) + } + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error
deleting layer: %v", err) + } + + checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) +} + +func TestDeleteReadOnly(t *testing.T) { + env := newTestEnv(t, true) + defer env.Shutdown() + + imageName, _ := reference.WithName("foo/bar") + // "build" our layer file + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("Error building blob URL") + } + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + env.app.readOnly = true + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer in read-only mode", resp, http.StatusMethodNotAllowed) +} + +func TestStartPushReadOnly(t *testing.T) { + env := newTestEnv(t, true) + defer env.Shutdown() + env.app.readOnly = true + + imageName, _ := reference.WithName("foo/bar") + + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "starting push in read-only mode", resp, http.StatusMethodNotAllowed) +} + +func httpDelete(url string) (*http.Response, error) { + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + // defer resp.Body.Close() + return resp, err +} + +type manifestArgs struct { + imageName reference.Named + mediaType string + manifest distribution.Manifest + dgst digest.Digest +} + +func TestManifestAPI(t *testing.T) { + schema1Repo, _ := reference.WithName("foo/schema1") + schema2Repo, _ := reference.WithName("foo/schema2") + + deleteEnabled := false + env1 := newTestEnv(t, deleteEnabled) + defer env1.Shutdown() + testManifestAPISchema1(t, env1, schema1Repo) + schema2Args := testManifestAPISchema2(t, env1, schema2Repo) + testManifestAPIManifestList(t, env1, schema2Args) + + deleteEnabled = true + env2 := newTestEnv(t, deleteEnabled) + defer env2.Shutdown() + testManifestAPISchema1(t, env2, schema1Repo) + schema2Args = testManifestAPISchema2(t, env2, schema2Repo) + testManifestAPIManifestList(t, env2, schema2Args) +} + +// storageManifestErrDriverFactory implements the factory.StorageDriverFactory interface. 
+type storageManifestErrDriverFactory struct{} + +const ( + repositoryWithManifestNotFound = "manifesttagnotfound" + repositoryWithManifestInvalidPath = "manifestinvalidpath" + repositoryWithManifestBadLink = "manifestbadlink" + repositoryWithGenericStorageError = "genericstorageerr" +) + +func (factory *storageManifestErrDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + // Initialize the mock driver + var errGenericStorage = errors.New("generic storage error") + return &mockErrorDriver{ + returnErrs: []mockErrorMapping{ + { + pathMatch: fmt.Sprintf("%s/_manifests/tags", repositoryWithManifestNotFound), + content: nil, + err: storagedriver.PathNotFoundError{}, + }, + { + pathMatch: fmt.Sprintf("%s/_manifests/tags", repositoryWithManifestInvalidPath), + content: nil, + err: storagedriver.InvalidPathError{}, + }, + { + pathMatch: fmt.Sprintf("%s/_manifests/tags", repositoryWithManifestBadLink), + content: []byte("this is a bad sha"), + err: nil, + }, + { + pathMatch: fmt.Sprintf("%s/_manifests/tags", repositoryWithGenericStorageError), + content: nil, + err: errGenericStorage, + }, + }, + }, nil +} + +type mockErrorMapping struct { + pathMatch string + content []byte + err error +} + +// mockErrorDriver implements StorageDriver to force storage error on manifest request +type mockErrorDriver struct { + storagedriver.StorageDriver + returnErrs []mockErrorMapping +} + +func (dr *mockErrorDriver) GetContent(ctx context.Context, path string) ([]byte, error) { + for _, returns := range dr.returnErrs { + if strings.Contains(path, returns.pathMatch) { + return returns.content, returns.err + } + } + return nil, errors.New("Unknown storage error") +} + +func TestGetManifestWithStorageError(t *testing.T) { + factory.Register("storagemanifesterror", &storageManifestErrDriverFactory{}) + config := configuration.Configuration{ + Storage: configuration.Storage{ + "storagemanifesterror": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + config.HTTP.Headers = headerConfig + env1 := newTestEnvWithConfig(t, &config) + defer env1.Shutdown() + + repo, _ := reference.WithName(repositoryWithManifestNotFound) + testManifestWithStorageError(t, env1, repo, http.StatusNotFound, v2.ErrorCodeManifestUnknown) + + repo, _ = reference.WithName(repositoryWithGenericStorageError) + testManifestWithStorageError(t, env1, repo, http.StatusInternalServerError, errcode.ErrorCodeUnknown) + + repo, _ = reference.WithName(repositoryWithManifestInvalidPath) + testManifestWithStorageError(t, env1, repo, http.StatusInternalServerError, errcode.ErrorCodeUnknown) + + repo, _ = reference.WithName(repositoryWithManifestBadLink) + testManifestWithStorageError(t, env1, repo, http.StatusInternalServerError, errcode.ErrorCodeUnknown) +} + +func TestManifestDelete(t *testing.T) { + schema1Repo, _ := reference.WithName("foo/schema1") + schema2Repo, _ := reference.WithName("foo/schema2") + + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + schema1Args := testManifestAPISchema1(t, env, schema1Repo) + testManifestDelete(t, env, schema1Args) + schema2Args := testManifestAPISchema2(t, env, schema2Repo) + testManifestDelete(t, env, schema2Args) +} + +func TestManifestDeleteDisabled(t *testing.T) { + schema1Repo, _ := reference.WithName("foo/schema1") + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + testManifestDeleteDisabled(t, 
env, schema1Repo) +} + +func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { + ref, _ := reference.WithDigest(imageName, digestSha256EmptyTar) + manifestURL, err := env.builder.BuildManifestURL(ref) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + resp, err := httpDelete(manifestURL) + if err != nil { + t.Fatalf("unexpected error deleting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) +} + +func testManifestWithStorageError(t *testing.T, env *testEnv, imageName reference.Named, expectedStatusCode int, expectedErrorCode errcode.ErrorCode) { + tag := "latest" + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + checkResponse(t, "getting non-existent manifest", resp, expectedStatusCode) + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, expectedErrorCode) +} + +func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { + tag := "thetag" + args := manifestArgs{imageName: imageName} + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) + + // -------------------------------- + // Attempt to push unsigned manifest with missing layers + unsignedManifest := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName.Name(), + Tag: tag, + FSLayers: []schema1.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + History: []schema1.History{ + { + V1Compatibility: "", + }, + { + V1Compatibility: "", + }, + }, + } + + resp = putManifest(t, "putting unsigned manifest", manifestURL, "", unsignedManifest) + defer resp.Body.Close() + checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp, v2.ErrorCodeManifestInvalid) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestInvalid: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, 
expectedCounts, string(p)) + } + + // sign the manifest and still get some interesting errors. + sm, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp = putManifest(t, "putting signed manifest with errors", manifestURL, "", sm) + defer resp.Body.Close() + checkResponse(t, "putting signed manifest with errors", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "putting signed manifest with errors", resp, + v2.ErrorCodeManifestBlobUnknown, v2.ErrorCodeDigestInvalid) + + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, + v2.ErrorCodeDigestInvalid: 2, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // TODO(stevvooe): Add a test case where we take a mostly valid registry, + // tamper with the content and ensure that we get an unverified manifest + // error. + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the signed manifest with all layers pushed. + signedManifest, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + dgst := digest.FromBytes(signedManifest.Canonical) + args.manifest = signedManifest + args.dgst = dgst + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting signed manifest no error", manifestURL, "", signedManifest) + checkResponse(t, "putting signed manifest no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest schema1.SignedManifest + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifest.Canonical, signedManifest.Canonical) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest 
by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest schema1.SignedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifestByDigest.Canonical, signedManifest.Canonical) { + t.Fatalf("manifests do not match") + } + + // check signature was roundtripped + signatures, err := fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 1 { + t.Fatalf("expected 1 signature from manifest, got: %d", len(signatures)) + } + + // Re-sign, push and pull the same digest + sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk) + if err != nil { + t.Fatal(err) + + } + + // Re-push with a few different Content-Types. The official schema1 + // content type should work, as should application/json with/without a + // charset. + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeSignedManifest, sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "re-fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "re-fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // check only 1 signature is returned + signatures, err = fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 1 { + t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures)) + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // Ensure that the tag is listed. 
+ resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName.Name()) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // Attempt to put a manifest with mismatching FSLayer and History array cardinalities + + unsignedManifest.History = append(unsignedManifest.History, schema1.History{ + V1Compatibility: "", + }) + invalidSigned, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, "", invalidSigned) + checkResponse(t, "putting invalid signed manifest", resp, http.StatusBadRequest) + + return args +} + +func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { + tag := "schema2tag" + args := manifestArgs{ + imageName: imageName, + mediaType: schema2.MediaTypeManifest, + } + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) + + // -------------------------------- + // Attempt to push manifest with missing config and missing layers + manifest := &schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeImageConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:463434349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeLayer, + }, + { + Digest: "sha256:630923423623623423352523525237238023652897356239852383652aaaaaaa", + Size: 6863, + MediaType: schema2.MediaTypeLayer, + }, + }, + } + + resp = putManifest(t, "putting missing config manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing config manifest", 
resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing config manifest", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 3, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // Push a config, and reference it in the manifest + sampleConfig := []byte(`{ + "architecture": "amd64", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + } + ], + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + } + }`) + sampleConfigDigest := digest.FromBytes(sampleConfig) + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, sampleConfigDigest, uploadURLBase, bytes.NewReader(sampleConfig)) + manifest.Config.Digest = sampleConfigDigest + manifest.Config.Size = int64(len(sampleConfig)) + + // The manifest should still be invalid, because its layers don't exist + resp = putManifest(t, "putting missing layer manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing layer manifest", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "putting missing layer manifest", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range manifest.Layers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + manifest.Layers[i].Digest = dgst + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the manifest with all layers pushed. 
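+ // Note that the digest below must be computed from the canonical payload returned by schema2.FromStruct; re-marshaling the manifest struct could produce different bytes and therefore a different digest.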
+ deserializedManifest, err := schema2.FromStruct(*manifest) + if err != nil { + t.Fatalf("could not create DeserializedManifest: %v", err) + } + _, canonical, err := deserializedManifest.Payload() + if err != nil { + t.Fatalf("could not get manifest payload: %v", err) + } + dgst := digest.FromBytes(canonical) + args.dgst = dgst + args.manifest = deserializedManifest + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest no error", manifestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest by digest", manifestDigestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest schema2.DeserializedManifest + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest schema2.DeserializedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifestByDigest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = 
http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error executing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error executing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest as schema1: %v", err) + } + defer resp.Body.Close() + + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + + checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) + + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName.Name() { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + for i := range manifest.Layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != manifest.Layers[len(manifest.Layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields because we're using randomly-generated + // layers. 
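+ // (Schema1 lists FSLayers most-recent-first, the reverse of schema2 layer order, which is why the comparison above walks manifest.Layers back to front.)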
+ + return args +} + +func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + tag := "manifestlisttag" + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // -------------------------------- + // Attempt to push manifest list that refers to an unknown manifest + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "amd64", + OS: "linux", + }, + }, + }, + } + + resp := putManifest(t, "putting missing manifest manifestlist", manifestURL, manifestlist.MediaTypeManifestList, manifestList) + defer resp.Body.Close() + checkResponse(t, "putting missing manifest manifestlist", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing manifest manifestlist", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // ------------------- + // Push a manifest list that references an actual manifest + manifestList.Manifests[0].Digest = args.dgst + deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) + if err != nil { + t.Fatalf("could not create DeserializedManifestList: %v", err) + } + _, canonical, err := deserializedManifestList.Payload() + if err != nil { + t.Fatalf("could not get manifest list payload: %v", err) + } + dgst := digest.FromBytes(canonical) + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest list no error", manifestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest list by digest", manifestDigestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + // multiple headers in mixed list format to ensure we parse correctly server-side + req.Header.Set("Accept", fmt.Sprintf(` %s ; q=0.8 , %s ; q=0.5 `, manifestlist.MediaTypeManifestList, schema1.MediaTypeSignedManifest)) + req.Header.Add("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected 
error fetching manifest list: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestList manifestlist.DeserializedManifestList + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifestList); err != nil { + t.Fatalf("error decoding fetched manifest list: %v", err) + } + + _, fetchedCanonical, err := fetchedManifestList.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifest lists do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", manifestlist.MediaTypeManifestList) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest list by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestListByDigest manifestlist.DeserializedManifestList + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestListByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifestListByDigest.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifest lists do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error executing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error executing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest list as schema1: %v", err) + } + defer resp.Body.Close() + + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + + checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) 
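+ // The assertions below run against the schema1 manifest that the registry synthesizes on the fly for clients that do not accept manifest lists, not against the stored manifest list itself.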
+ + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName.Name() { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + layers := args.manifest.(*schema2.DeserializedManifest).Layers + for i := range layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != layers[len(layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields because we're using randomly-generated + // layers. +} + +func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + dgst := args.dgst + manifest := args.manifest + + ref, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(ref) + checkErr(t, err, "building manifest url") + // --------------- + // Delete by digest + resp, err := httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Attempt to fetch deleted manifest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching deleted manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + + // --------------- + // Delete already deleted manifest by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "re-deleting manifest by digest") + + checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) + + // -------------------- + // Re-upload manifest by digest + resp = putManifest(t, "putting manifest", manifestDigestURL, args.mediaType, manifest) + checkResponse(t, "putting manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to fetch re-uploaded deleted digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching re-uploaded manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to delete an unknown manifest + unknownDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + unknownRef, _ := reference.WithDigest(imageName, unknownDigest) + unknownManifestDigestURL, err := env.builder.BuildManifestURL(unknownRef) + checkErr(t, err, "building unknown manifest url") + + resp, err = httpDelete(unknownManifestDigestURL) + checkErr(t, err, "deleting unknown manifest by digest") + checkResponse(t, "deleting unknown manifest", resp, http.StatusNotFound) + + // -------------------- + // Upload manifest by tag + tag := "atag" + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := env.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest tag url") + resp = putManifest(t, "putting manifest by tag", manifestTagURL, args.mediaType, manifest) + checkResponse(t, "putting manifest by tag", resp, 
http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec := json.NewDecoder(resp.Body) + var tagsResponse tagsAPIResponse + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // --------------- + // Delete by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest with tag", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // Ensure that the tag is not listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 0 { + t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags) + } + +} + +type testEnv struct { + pk libtrust.PrivateKey + ctx context.Context + config configuration.Configuration + app *App + server *httptest.Server + builder *v2.URLBuilder +} + +func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Proxy: configuration.Proxy{ + RemoteURL: "http://example.com", + }, + } + + return newTestEnvWithConfig(t, &config) + +} + +func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + + config.HTTP.Headers = headerConfig + + return newTestEnvWithConfig(t, &config) +} + +func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { + ctx := context.Background() + + app := NewApp(ctx, config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false) + + if err != nil { + t.Fatalf("error creating url builder: %v", err) + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) 
+ } + + return &testEnv{ + pk: pk, + ctx: ctx, + config: *config, + app: app, + server: server, + builder: builder, + } +} + +func (t *testEnv) Shutdown() { + t.server.CloseClientConnections() + t.server.Close() +} + +func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *http.Response { + var body []byte + + switch m := v.(type) { + case *schema1.SignedManifest: + _, pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + case *manifestlist.DeserializedManifestList: + _, pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + default: + var err error + body, err = json.MarshalIndent(v, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling %v: %v", v, err) + } + } + + req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) + if err != nil { + t.Fatalf("error creating request for %s: %v", msg, err) + } + + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("error doing put request while %s: %v", msg, err) + } + + return resp +} + +func startPushLayer(t *testing.T, env *testEnv, name reference.Named) (location string, uuid string) { + layerUploadURL, err := env.builder.BuildBlobUploadURL(name) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + u, err := url.Parse(layerUploadURL) + if err != nil { + t.Fatalf("error parsing layer upload URL: %v", err) + } + + base, err := url.Parse(env.server.URL) + if err != nil { + t.Fatalf("error parsing server URL: %v", err) + } + + layerUploadURL = base.ResolveReference(u).String() + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("starting layer push %v", name.String()), resp, http.StatusAccepted) + + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatalf("error parsing location header: %v", err) + } + + uuid = path.Base(u.Path) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Content-Length": []string{"0"}, + "Docker-Upload-UUID": []string{uuid}, + }) + + return resp.Header.Get("Location"), uuid +} + +// doPushLayer pushes the layer content and returns the response. If you're +// only expecting a successful response, use pushLayer. +func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + "digest": []string{dgst.String()}, + }.Encode() + + uploadURL := u.String() + + // Just do a monolithic upload + req, err := http.NewRequest("PUT", uploadURL, body) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + + return http.DefaultClient.Do(req) +} + +// pushLayer pushes the layer content returning the url on success. 
+func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string { + digester := digest.Canonical.Digester() + + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + sha256Dgst := digester.Digest() + + ref, _ := reference.WithDigest(name, sha256Dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{sha256Dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func finishUpload(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, dgst digest.Digest) string { + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + ref, _ := reference.WithDigest(name, dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) { + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + }.Encode() + + uploadURL := u.String() + + digester := digest.Canonical.Digester() + + req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := http.DefaultClient.Do(req) + + return resp, digester.Digest(), err +} + +func pushChunk(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { + resp, dgst, err := doPushChunk(t, uploadURLBase, body) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting chunk", resp, http.StatusAccepted) + + checkHeaders(t, resp, http.Header{ + "Range": []string{fmt.Sprintf("0-%d", length-1)}, + "Content-Length": []string{"0"}, + }) + + return resp.Header.Get("Location"), dgst +} + +func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { + if resp.StatusCode != expectedStatus { + t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) + maybeDumpResponse(t, resp) + + t.FailNow() + } + + // We expect the headers included in the configuration, unless the + // status code is 405 (Method Not Allowed), which means the handler + // doesn't even get called. 
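+ // (headerConfig, applied via config.HTTP.Headers in these tests, is assumed to include "X-Content-Type-Options: nosniff", which is what the check below asserts.)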
+ if resp.StatusCode != 405 && !reflect.DeepEqual(resp.Header["X-Content-Type-Options"], []string{"nosniff"}) { + t.Logf("missing or incorrect header X-Content-Type-Options %s", msg) + maybeDumpResponse(t, resp) + + t.FailNow() + } +} + +// checkBodyHasErrorCodes ensures the body is an error body and has the +// expected error codes, returning the error structure, the json slice and a +// count of the errors by code. +func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) { + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading body %s: %v", msg, err) + } + + var errs errcode.Errors + if err := json.Unmarshal(p, &errs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if len(errs) == 0 { + t.Fatalf("expected errors in response") + } + + // TODO(stevvooe): Shoot. The error setup is not working out. The content- + // type headers are being set after writing the status code. + // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { + // t.Fatalf("unexpected content type: %v != 'application/json'", + // resp.Header.Get("Content-Type")) + // } + + expected := map[errcode.ErrorCode]struct{}{} + counts := map[errcode.ErrorCode]int{} + + // Initialize map with zeros for expected + for _, code := range errorCodes { + expected[code] = struct{}{} + counts[code] = 0 + } + + for _, e := range errs { + err, ok := e.(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", e) + } + if _, ok := expected[err.ErrorCode()]; !ok { + t.Fatalf("unexpected error code %v encountered during %s: %s ", err.ErrorCode(), msg, string(p)) + } + counts[err.ErrorCode()]++ + } + + // Ensure that counts of expected errors were all non-zero + for code := range expected { + if counts[code] == 0 { + t.Fatalf("expected error code %v not encountered during %s: %s", code, msg, string(p)) + } + } + + return errs, p, counts +} + +func maybeDumpResponse(t *testing.T, resp *http.Response) { + if d, err := httputil.DumpResponse(resp, true); err != nil { + t.Logf("error dumping response: %v", err) + } else { + t.Logf("response:\n%s", string(d)) + } +} + +// checkHeaders checks that the response has at least the headers. If not, the +// test will fail. If a passed in header value is "*", any non-zero value will +// suffice as a match. +func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { + for k, vs := range headers { + if resp.Header.Get(k) == "" { + t.Fatalf("response missing header %q", k) + } + + for _, v := range vs { + if v == "*" { + // Just ensure there is some value. 
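+ // ("*" is used by callers such as startPushLayer for headers like Location, whose exact value cannot be predicted.)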
+ if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { + continue + } + } + + for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { + if hv != v { + t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) + } + } + } + } +} + +func checkErr(t *testing.T, err error, msg string) { + if err != nil { + t.Fatalf("unexpected error %s: %v", msg, err) + } +} + +func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { + imageNameRef, err := reference.WithName(imageName) + if err != nil { + t.Fatalf("unable to parse reference: %v", err) + } + + unsignedManifest := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName, + Tag: tag, + FSLayers: []schema1.FSLayer{ + { + BlobSum: "asdf", + }, + }, + History: []schema1.History{ + { + V1Compatibility: "", + }, + }, + } + + // Push a random layer for each FSLayer in the manifest + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + uploadURLBase, _ := startPushLayer(t, env, imageNameRef) + pushLayer(t, env.builder, imageNameRef, dgst, uploadURLBase, rs) + } + + signedManifest, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + dgst := digest.FromBytes(signedManifest.Canonical) + + // Create this repository by tag to ensure the tag mapping is made in the registry + tagRef, _ := reference.WithTag(imageNameRef, tag) + manifestDigestURL, err := env.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest url") + + digestRef, _ := reference.WithDigest(imageNameRef, dgst) + location, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building location URL") + + resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{location}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + return dgst +} + +// Test mutation operations on a registry configured as a cache. Ensure that they return +// appropriate errors. 
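+// A registry acts as a pull-through cache when proxy.remoteurl is set in its configuration, e.g. (illustrative YAML): +// +// proxy: +// remoteurl: https://registry-1.docker.io +// +// newTestEnvMirror above models this by setting Proxy.RemoteURL directly.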
+func TestRegistryAsCacheMutationAPIs(t *testing.T) { + deleteEnabled := true + env := newTestEnvMirror(t, deleteEnabled) + defer env.Shutdown() + + imageName, _ := reference.WithName("foo/bar") + tag := "latest" + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + // Manifest upload + m := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName.Name(), + Tag: tag, + FSLayers: []schema1.FSLayer{}, + History: []schema1.History{}, + } + + sm, err := schema1.Sign(m, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp := putManifest(t, "putting signed manifest to cache", manifestURL, "", sm) + checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Manifest Delete + resp, err = httpDelete(manifestURL) + checkErr(t, err, "deleting signed manifest from cache") + checkResponse(t, "deleting signed manifest from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob upload initialization + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err = http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob Delete + ref, _ := reference.WithDigest(imageName, digestSha256EmptyTar) + blobURL, err := env.builder.BuildBlobURL(ref) + checkErr(t, err, "building blob URL") + resp, err = httpDelete(blobURL) + checkErr(t, err, "deleting blob from cache") + checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) +} + +// TestCheckContextNotifier makes sure the API endpoints get a ResponseWriter +// that implements http.CloseNotifier. 
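+// http.CloseNotifier is how a handler observes a client disconnect mid-request, so the wrapping ResponseWriter must preserve that interface.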
+func TestCheckContextNotifier(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + + // Register a new endpoint for testing + env.app.router.Handle("/unittest/{name}/", env.app.dispatcher(func(ctx *Context, r *http.Request) http.Handler { + return handlers.MethodHandler{ + "GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if _, ok := w.(http.CloseNotifier); !ok { + t.Fatal("could not cast ResponseWriter to CloseNotifier") + } + w.WriteHeader(200) + }), + } + })) + + resp, err := http.Get(env.server.URL + "/unittest/reponame/") + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode) + } +} + +func TestProxyManifestGetByTag(t *testing.T) { + truthConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + truthConfig.HTTP.Headers = headerConfig + + imageName, _ := reference.WithName("foo/bar") + tag := "latest" + + truthEnv := newTestEnvWithConfig(t, &truthConfig) + defer truthEnv.Shutdown() + // create a repository in the truth registry + dgst := createRepository(truthEnv, t, imageName.Name(), tag) + + proxyConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + }, + Proxy: configuration.Proxy{ + RemoteURL: truthEnv.server.URL, + }, + } + proxyConfig.HTTP.Headers = headerConfig + + proxyEnv := newTestEnvWithConfig(t, &proxyConfig) + defer proxyEnv.Shutdown() + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp, err := http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest from proxy by digest") + defer resp.Body.Close() + + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := proxyEnv.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest url") + + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // Create another manifest in the remote with the same image/tag pair + newDigest := createRepository(truthEnv, t, imageName.Name(), tag) + if dgst == newDigest { + t.Fatalf("non-random test data") + } + + // fetch it with the same proxy URL as before. 
Ensure the updated content is at the same tag + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{newDigest.String()}, + }) +} diff --git a/registry/handlers/app.go b/registry/handlers/app.go new file mode 100644 index 00000000..a40a4df3 --- /dev/null +++ b/registry/handlers/app.go @@ -0,0 +1,1065 @@ +package handlers + +import ( + "context" + cryptorand "crypto/rand" + "expvar" + "fmt" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "regexp" + "runtime" + "strings" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/health/checks" + prometheus "github.com/docker/distribution/metrics" + "github.com/docker/distribution/notifications" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + registrymiddleware "github.com/docker/distribution/registry/middleware/registry" + repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" + "github.com/docker/distribution/registry/proxy" + "github.com/docker/distribution/registry/storage" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + rediscache "github.com/docker/distribution/registry/storage/cache/redis" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/distribution/version" + "github.com/docker/go-metrics" + "github.com/docker/libtrust" + "github.com/garyburd/redigo/redis" + "github.com/gorilla/mux" + "github.com/sirupsen/logrus" +) + +// randomSecretSize is the number of random bytes to generate if no secret +// was specified. +const randomSecretSize = 32 + +// defaultCheckInterval is the default time in between health checks +const defaultCheckInterval = 10 * time.Second + +// App is a global registry application object. Shared resources can be placed +// on this object that will be accessible from all requests. Any writable +// fields should be protected. +type App struct { + context.Context + + Config *configuration.Configuration + + router *mux.Router // main application router, configured with dispatchers + driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. + registry distribution.Namespace // registry is the primary registry backend for the app instance. + accessController auth.AccessController // main access controller for application + + // httpHost is a parsed representation of the http.host parameter from + // the configuration. Only the Scheme and Host fields are used. + httpHost url.URL + + // events contains notification related configuration. + events struct { + sink notifications.Sink + source notifications.SourceRecord + } + + redis *redis.Pool + + // trustKey is a deprecated key used to sign manifests converted to + // schema1 for backward compatibility. It should not be used for any + // other purposes. 
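+ // (NewApp either loads this key from Compatibility.Schema1.TrustKey or generates an ephemeral key; see below.)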
+ trustKey libtrust.PrivateKey + + // isCache is true if this registry is configured as a pull through cache + isCache bool + + // readOnly is true if the registry is in a read-only maintenance mode + readOnly bool +} + +// NewApp takes a configuration and returns a configured app, ready to serve +// requests. The app only implements ServeHTTP and can be wrapped in other +// handlers accordingly. +func NewApp(ctx context.Context, config *configuration.Configuration) *App { + app := &App{ + Config: config, + Context: ctx, + router: v2.RouterWithPrefix(config.HTTP.Prefix), + isCache: config.Proxy.RemoteURL != "", + } + + // Register the handler dispatchers. + app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { + return http.HandlerFunc(apiBase) + }) + app.register(v2.RouteNameManifest, manifestDispatcher) + app.register(v2.RouteNameCatalog, catalogDispatcher) + app.register(v2.RouteNameTags, tagsDispatcher) + app.register(v2.RouteNameBlob, blobDispatcher) + app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) + + // override the storage driver's UA string for registry outbound HTTP requests + storageParams := config.Storage.Parameters() + if storageParams == nil { + storageParams = make(configuration.Parameters) + } + storageParams["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version()) + + var err error + app.driver, err = factory.Create(config.Storage.Type(), storageParams) + if err != nil { + // TODO(stevvooe): Move the creation of a service into a protected + // method, where this is created lazily. Its status can be queried via + // a health check. + panic(err) + } + + purgeConfig := uploadPurgeDefaultConfig() + if mc, ok := config.Storage["maintenance"]; ok { + if v, ok := mc["uploadpurging"]; ok { + purgeConfig, ok = v.(map[interface{}]interface{}) + if !ok { + panic("uploadpurging config key must contain additional keys") + } + } + if v, ok := mc["readonly"]; ok { + readOnly, ok := v.(map[interface{}]interface{}) + if !ok { + panic("readonly config key must contain additional keys") + } + if readOnlyEnabled, ok := readOnly["enabled"]; ok { + app.readOnly, ok = readOnlyEnabled.(bool) + if !ok { + panic("readonly's enabled config key must have a boolean value") + } + } + } + } + + startUploadPurger(app, app.driver, dcontext.GetLogger(app), purgeConfig) + + app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"]) + if err != nil { + panic(err) + } + + app.configureSecret(config) + app.configureEvents(config) + app.configureRedis(config) + app.configureLogHook(config) + + options := registrymiddleware.GetRegistryOptions() + if config.Compatibility.Schema1.TrustKey != "" { + app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey) + if err != nil { + panic(fmt.Sprintf(`could not load schema1 "signingkey" parameter: %v`, err)) + } + } else { + // Generate an ephemeral key to be used for signing converted manifests + // for clients that don't support schema2. 
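// Editor's illustrative sketch (not part of the original patch): the
// persistent-key branch above loads a libtrust key from disk. Assuming
// libtrust.SaveKey exists as in github.com/docker/libtrust, and with a
// hypothetical path, such a key could be pre-generated once:
//
//	key, err := libtrust.GenerateECP256PrivateKey()
//	if err != nil {
//		panic(err)
//	}
//	if err := libtrust.SaveKey("/etc/docker/registry/schema1.key", key); err != nil {
//		panic(err)
//	}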
+ app.trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + } + + options = append(options, storage.Schema1SigningKey(app.trustKey)) + + if config.HTTP.Host != "" { + u, err := url.Parse(config.HTTP.Host) + if err != nil { + panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) + } + app.httpHost = *u + } + + if app.isCache { + options = append(options, storage.DisableDigestResumption) + } + + // configure deletion + if d, ok := config.Storage["delete"]; ok { + e, ok := d["enabled"] + if ok { + if deleteEnabled, ok := e.(bool); ok && deleteEnabled { + options = append(options, storage.EnableDelete) + } + } + } + + // configure redirects + var redirectDisabled bool + if redirectConfig, ok := config.Storage["redirect"]; ok { + v := redirectConfig["disable"] + switch v := v.(type) { + case bool: + redirectDisabled = v + default: + panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) + } + } + if redirectDisabled { + dcontext.GetLogger(app).Infof("backend redirection disabled") + } else { + options = append(options, storage.EnableRedirect) + } + + if !config.Validation.Enabled { + config.Validation.Enabled = !config.Validation.Disabled + } + + // configure validation + if config.Validation.Enabled { + if len(config.Validation.Manifests.URLs.Allow) == 0 && len(config.Validation.Manifests.URLs.Deny) == 0 { + // If Allow and Deny are empty, allow nothing. + options = append(options, storage.ManifestURLsAllowRegexp(regexp.MustCompile("^$"))) + } else { + if len(config.Validation.Manifests.URLs.Allow) > 0 { + for i, s := range config.Validation.Manifests.URLs.Allow { + // Validate via compilation. + if _, err := regexp.Compile(s); err != nil { + panic(fmt.Sprintf("validation.manifests.urls.allow: %s", err)) + } + // Wrap with non-capturing group. + config.Validation.Manifests.URLs.Allow[i] = fmt.Sprintf("(?:%s)", s) + } + re := regexp.MustCompile(strings.Join(config.Validation.Manifests.URLs.Allow, "|")) + options = append(options, storage.ManifestURLsAllowRegexp(re)) + } + if len(config.Validation.Manifests.URLs.Deny) > 0 { + for i, s := range config.Validation.Manifests.URLs.Deny { + // Validate via compilation. + if _, err := regexp.Compile(s); err != nil { + panic(fmt.Sprintf("validation.manifests.urls.deny: %s", err)) + } + // Wrap with non-capturing group. + config.Validation.Manifests.URLs.Deny[i] = fmt.Sprintf("(?:%s)", s) + } + re := regexp.MustCompile(strings.Join(config.Validation.Manifests.URLs.Deny, "|")) + options = append(options, storage.ManifestURLsDenyRegexp(re)) + } + } + } + + // configure storage caches + if cc, ok := config.Storage["cache"]; ok { + v, ok := cc["blobdescriptor"] + if !ok { + // Backwards compatible: "layerinfo" == "blobdescriptor" + v = cc["layerinfo"] + } + + switch v { + case "redis": + if app.redis == nil { + panic("redis configuration required to use for layerinfo cache") + } + cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis) + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) 
+ if err != nil {
+ panic("could not create registry: " + err.Error())
+ }
+ dcontext.GetLogger(app).Infof("using redis blob descriptor cache")
+ case "inmemory":
+ cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider()
+ localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider))
+ app.registry, err = storage.NewRegistry(app, app.driver, localOptions...)
+ if err != nil {
+ panic("could not create registry: " + err.Error())
+ }
+ dcontext.GetLogger(app).Infof("using inmemory blob descriptor cache")
+ default:
+ if v != "" {
+ dcontext.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"])
+ }
+ }
+ }
+
+ if app.registry == nil {
+ // configure the registry if no cache section is available.
+ app.registry, err = storage.NewRegistry(app.Context, app.driver, options...)
+ if err != nil {
+ panic("could not create registry: " + err.Error())
+ }
+ }
+
+ app.registry, err = applyRegistryMiddleware(app, app.registry, config.Middleware["registry"])
+ if err != nil {
+ panic(err)
+ }
+
+ authType := config.Auth.Type()
+
+ if authType != "" {
+ accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters())
+ if err != nil {
+ panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err))
+ }
+ app.accessController = accessController
+ dcontext.GetLogger(app).Debugf("configured %q access controller", authType)
+ }
+
+ // configure as a pull through cache
+ if config.Proxy.RemoteURL != "" {
+ app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy)
+ if err != nil {
+ panic(err.Error())
+ }
+ app.isCache = true
+ dcontext.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL)
+ }
+
+ return app
+}
+
+// RegisterHealthChecks is an awful hack to defer health check registration
+// control to callers. This should only ever be called once per registry
+// process, typically in a main function. The correct way would be to register
+// health checks outside of app, since multiple apps may exist in the same
+// process. Because the configuration and app are tightly coupled,
+// implementing this properly will require a refactor. This method may panic
+// if called twice in the same process.
+func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) {
+ if len(healthRegistries) > 1 {
+ panic("RegisterHealthChecks called with more than one registry")
+ }
+ healthRegistry := health.DefaultRegistry
+ if len(healthRegistries) == 1 {
+ healthRegistry = healthRegistries[0]
+ }
+
+ if app.Config.Health.StorageDriver.Enabled {
+ interval := app.Config.Health.StorageDriver.Interval
+ if interval == 0 {
+ interval = defaultCheckInterval
+ }
+
+ storageDriverCheck := func() error {
+ _, err := app.driver.Stat(app, "/") // "/" should always exist
+ if _, ok := err.(storagedriver.PathNotFoundError); ok {
+ err = nil // pass this through, backend is responding, but this path doesn't exist.
+ } + return err + } + + if app.Config.Health.StorageDriver.Threshold != 0 { + healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) + } else { + healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) + } + } + + for _, fileChecker := range app.Config.Health.FileCheckers { + interval := fileChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + dcontext.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) + healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) + } + + for _, httpChecker := range app.Config.Health.HTTPCheckers { + interval := httpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + statusCode := httpChecker.StatusCode + if statusCode == 0 { + statusCode = 200 + } + + checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers) + + if httpChecker.Threshold != 0 { + dcontext.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) + healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) + } else { + dcontext.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) + healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) + } + } + + for _, tcpChecker := range app.Config.Health.TCPCheckers { + interval := tcpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) + + if tcpChecker.Threshold != 0 { + dcontext.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) + } else { + dcontext.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) + } + } +} + +// register a handler with the application, by route name. The handler will be +// passed through the application filters and context will be constructed at +// request time. +func (app *App) register(routeName string, dispatch dispatchFunc) { + handler := app.dispatcher(dispatch) + + // Chain the handler with prometheus instrumented handler + if app.Config.HTTP.Debug.Prometheus.Enabled { + namespace := metrics.NewNamespace(prometheus.NamespacePrefix, "http", nil) + httpMetrics := namespace.NewDefaultHttpMetrics(strings.Replace(routeName, "-", "_", -1)) + metrics.Register(namespace) + handler = metrics.InstrumentHandler(httpMetrics, handler) + } + + // TODO(stevvooe): This odd dispatcher/route registration is by-product of + // some limitations in the gorilla/mux router. We are using it to keep + // routing consistent between the client and server, but we may want to + // replace it with manual routing and structure-based dispatch for better + // control over the request execution. + + app.router.GetRoute(routeName).Handler(handler) +} + +// configureEvents prepares the event sink for action. 
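// Editor's illustrative sketch (not part of the original patch): the endpoint
// fields consumed below come from configuration.Notifications. A minimal
// programmatic setup could look like this; the name, URL, and values are
// hypothetical:
//
//	config.Notifications = configuration.Notifications{
//		Endpoints: []configuration.Endpoint{{
//			Name:      "alistener",
//			URL:       "https://events.example.com/webhook",
//			Timeout:   500 * time.Millisecond,
//			Threshold: 5,
//			Backoff:   time.Second,
//		}},
//	}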
+func (app *App) configureEvents(configuration *configuration.Configuration) {
+ // Configure all of the endpoint sinks.
+ var sinks []notifications.Sink
+ for _, endpoint := range configuration.Notifications.Endpoints {
+ if endpoint.Disabled {
+ dcontext.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name)
+ continue
+ }
+
+ dcontext.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers)
+ endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{
+ Timeout: endpoint.Timeout,
+ Threshold: endpoint.Threshold,
+ Backoff: endpoint.Backoff,
+ Headers: endpoint.Headers,
+ IgnoredMediaTypes: endpoint.IgnoredMediaTypes,
+ Ignore: endpoint.Ignore,
+ })
+
+ sinks = append(sinks, endpoint)
+ }
+
+ // NOTE(stevvooe): Moving to a new queuing implementation is as easy as
+ // replacing broadcaster with a rabbitmq implementation. It's recommended
+ // that the registry instances also act as the workers to keep deployment
+ // simple.
+ app.events.sink = notifications.NewBroadcaster(sinks...)
+
+ // Populate registry event source
+ hostname, err := os.Hostname()
+ if err != nil {
+ hostname = configuration.HTTP.Addr
+ } else {
+ // try to pick the port off the config
+ _, port, err := net.SplitHostPort(configuration.HTTP.Addr)
+ if err == nil {
+ hostname = net.JoinHostPort(hostname, port)
+ }
+ }
+
+ app.events.source = notifications.SourceRecord{
+ Addr: hostname,
+ InstanceID: dcontext.GetStringValue(app, "instance.id"),
+ }
+}
+
+type redisStartAtKey struct{}
+
+func (app *App) configureRedis(configuration *configuration.Configuration) {
+ if configuration.Redis.Addr == "" {
+ dcontext.GetLogger(app).Infof("redis not configured")
+ return
+ }
+
+ pool := &redis.Pool{
+ Dial: func() (redis.Conn, error) {
+ // TODO(stevvooe): Yet another use case for contextual timing.
+ ctx := context.WithValue(app, redisStartAtKey{}, time.Now())
+
+ done := func(err error) {
+ logger := dcontext.GetLoggerWithField(ctx, "redis.connect.duration",
+ dcontext.Since(ctx, redisStartAtKey{}))
+ if err != nil {
+ logger.Errorf("redis: error connecting: %v", err)
+ } else {
+ logger.Infof("redis: connect %v", configuration.Redis.Addr)
+ }
+ }
+
+ conn, err := redis.DialTimeout("tcp",
+ configuration.Redis.Addr,
+ configuration.Redis.DialTimeout,
+ configuration.Redis.ReadTimeout,
+ configuration.Redis.WriteTimeout)
+ if err != nil {
+ dcontext.GetLogger(app).Errorf("error connecting to redis instance %s: %v",
+ configuration.Redis.Addr, err)
+ done(err)
+ return nil, err
+ }
+
+ // authorize the connection
+ if configuration.Redis.Password != "" {
+ if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil {
+ defer conn.Close()
+ done(err)
+ return nil, err
+ }
+ }
+
+ // select the database to use
+ if configuration.Redis.DB != 0 {
+ if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil {
+ defer conn.Close()
+ done(err)
+ return nil, err
+ }
+ }
+
+ done(nil)
+ return conn, nil
+ },
+ MaxIdle: configuration.Redis.Pool.MaxIdle,
+ MaxActive: configuration.Redis.Pool.MaxActive,
+ IdleTimeout: configuration.Redis.Pool.IdleTimeout,
+ TestOnBorrow: func(c redis.Conn, t time.Time) error {
+ // TODO(stevvooe): We can probably do something more interesting
+ // here with the health package.
+ _, err := c.Do("PING")
+ return err
+ },
+ Wait: false, // if a connection is not available, proceed without cache.
+ } + + app.redis = pool + + // setup expvar + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { + return map[string]interface{}{ + "Config": configuration.Redis, + "Active": app.redis.ActiveCount(), + } + })) +} + +// configureLogHook prepares logging hook parameters. +func (app *App) configureLogHook(configuration *configuration.Configuration) { + entry, ok := dcontext.GetLogger(app).(*logrus.Entry) + if !ok { + // somehow, we are not using logrus + return + } + + logger := entry.Logger + + for _, configHook := range configuration.Log.Hooks { + if !configHook.Disabled { + switch configHook.Type { + case "mail": + hook := &logHook{} + hook.LevelsParam = configHook.Levels + hook.Mail = &mailer{ + Addr: configHook.MailOptions.SMTP.Addr, + Username: configHook.MailOptions.SMTP.Username, + Password: configHook.MailOptions.SMTP.Password, + Insecure: configHook.MailOptions.SMTP.Insecure, + From: configHook.MailOptions.From, + To: configHook.MailOptions.To, + } + logger.Hooks.Add(hook) + default: + } + } + } +} + +// configureSecret creates a random secret if a secret wasn't included in the +// configuration. +func (app *App) configureSecret(configuration *configuration.Configuration) { + if configuration.HTTP.Secret == "" { + var secretBytes [randomSecretSize]byte + if _, err := cryptorand.Read(secretBytes[:]); err != nil { + panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) + } + configuration.HTTP.Secret = string(secretBytes[:]) + dcontext.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") + } +} + +func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() // ensure that request body is always closed. + + // Prepare the context with our own little decorations. + ctx := r.Context() + ctx = dcontext.WithRequest(ctx, r) + ctx, w = dcontext.WithResponseWriter(ctx, w) + ctx = dcontext.WithLogger(ctx, dcontext.GetRequestLogger(ctx)) + r = r.WithContext(ctx) + + defer func() { + status, ok := ctx.Value("http.response.status").(int) + if ok && status >= 200 && status <= 399 { + dcontext.GetResponseLogger(r.Context()).Infof("response completed") + } + }() + + // Set a header with the Docker Distribution API Version for all responses. + w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") + app.router.ServeHTTP(w, r) +} + +// dispatchFunc takes a context and request and returns a constructed handler +// for the route. The dispatcher will use this to dynamically create request +// specific handlers for each endpoint without creating a new router for each +// request. +type dispatchFunc func(ctx *Context, r *http.Request) http.Handler + +// TODO(stevvooe): dispatchers should probably have some validation error +// chain with proper error reporting. + +// dispatcher returns a handler that constructs a request specific context and +// handler, using the dispatch factory function. 
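// Editor's illustrative sketch (not part of the original patch): a
// dispatchFunc only builds the route handler; the dispatcher below layers
// header injection, context construction, authorization, and error
// serialization around it, as TestCheckContextNotifier earlier in this patch
// demonstrates:
//
//	app.router.Handle("/unittest/{name}/", app.dispatcher(func(ctx *Context, r *http.Request) http.Handler {
//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			w.WriteHeader(http.StatusOK)
//		})
//	}))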
+func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for headerName, headerValues := range app.Config.HTTP.Headers { + for _, value := range headerValues { + w.Header().Add(headerName, value) + } + } + + context := app.context(w, r) + + if err := app.authorized(w, r, context); err != nil { + dcontext.GetLogger(context).Warnf("error authorizing context: %v", err) + return + } + + // Add username to request logging + context.Context = dcontext.WithLogger(context.Context, dcontext.GetLogger(context.Context, auth.UserNameKey)) + + // sync up context on the request. + r = r.WithContext(context) + + if app.nameRequired(r) { + nameRef, err := reference.WithName(getName(context)) + if err != nil { + dcontext.GetLogger(context).Errorf("error parsing reference from context: %v", err) + context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ + Name: getName(context), + Reason: err, + }) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + repository, err := app.registry.Repository(context, nameRef) + + if err != nil { + dcontext.GetLogger(context).Errorf("error resolving repository: %v", err) + + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) + case distribution.ErrRepositoryNameInvalid: + context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + case errcode.Error: + context.Errors = append(context.Errors, err) + } + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + + // assign and decorate the authorized repository with an event bridge. + context.Repository = notifications.Listen( + repository, + app.eventBridge(context, r)) + + context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"]) + if err != nil { + dcontext.GetLogger(context).Errorf("error initializing repository middleware: %v", err) + context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + } + + dispatch(context, r).ServeHTTP(w, r) + // Automated error response handling here. Handlers may return their + // own errors if they need different behavior (such as range errors + // for layer upload). 
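// Editor's note (illustrative, not part of the original patch): a handler
// opts into this centralized error path by appending to ctx.Errors and
// returning without writing a response, e.g.:
//
//	ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))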
+ if context.Errors.Len() > 0 {
+ if err := errcode.ServeJSON(w, context.Errors); err != nil {
+ dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
+ }
+
+ app.logError(context, context.Errors)
+ }
+ })
+}
+
+type errCodeKey struct{}
+
+func (errCodeKey) String() string { return "err.code" }
+
+type errMessageKey struct{}
+
+func (errMessageKey) String() string { return "err.message" }
+
+type errDetailKey struct{}
+
+func (errDetailKey) String() string { return "err.detail" }
+
+func (app *App) logError(ctx context.Context, errors errcode.Errors) {
+ for _, e1 := range errors {
+ var c context.Context
+
+ switch e1.(type) {
+ case errcode.Error:
+ e, _ := e1.(errcode.Error)
+ c = context.WithValue(ctx, errCodeKey{}, e.Code)
+ c = context.WithValue(c, errMessageKey{}, e.Code.Message())
+ c = context.WithValue(c, errDetailKey{}, e.Detail)
+ case errcode.ErrorCode:
+ e, _ := e1.(errcode.ErrorCode)
+ c = context.WithValue(ctx, errCodeKey{}, e)
+ c = context.WithValue(c, errMessageKey{}, e.Message())
+ default:
+ // just normal go 'error'
+ c = context.WithValue(ctx, errCodeKey{}, errcode.ErrorCodeUnknown)
+ c = context.WithValue(c, errMessageKey{}, e1.Error())
+ }
+
+ c = dcontext.WithLogger(c, dcontext.GetLogger(c,
+ errCodeKey{},
+ errMessageKey{},
+ errDetailKey{}))
+ dcontext.GetResponseLogger(c).Errorf("response completed with error")
+ }
+}
+
+// context constructs the context object for the application. This should only
+// be called once per request.
+func (app *App) context(w http.ResponseWriter, r *http.Request) *Context {
+ ctx := r.Context()
+ ctx = dcontext.WithVars(ctx, r)
+ ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx,
+ "vars.name",
+ "vars.reference",
+ "vars.digest",
+ "vars.uuid"))
+
+ context := &Context{
+ App: app,
+ Context: ctx,
+ }
+
+ if app.httpHost.Scheme != "" && app.httpHost.Host != "" {
+ // A "host" item in the configuration takes precedence over
+ // X-Forwarded-Proto and X-Forwarded-Host headers, and the
+ // hostname in the request.
+ context.urlBuilder = v2.NewURLBuilder(&app.httpHost, false)
+ } else {
+ context.urlBuilder = v2.NewURLBuilderFromRequest(r, app.Config.HTTP.RelativeURLs)
+ }
+
+ return context
+}
+
+// authorized checks if the request can proceed with access to the requested
+// repository. If it succeeds, the context may access the requested
+// repository. An error will be returned if access is not available.
+func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
+ dcontext.GetLogger(context).Debug("authorizing request")
+ repo := getName(context)
+
+ if app.accessController == nil {
+ return nil // access controller is not enabled.
+ }
+
+ var accessRecords []auth.Access
+
+ if repo != "" {
+ accessRecords = appendAccessRecords(accessRecords, r.Method, repo)
+ if fromRepo := r.FormValue("from"); fromRepo != "" {
+ // mounting a blob from one repository to another requires pull (GET)
+ // access to the source repository.
+ accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo)
+ }
+ } else {
+ // Only allow the name not to be set on the base route.
+ if app.nameRequired(r) {
+ // For this to be properly secured, repo must always be set for a
+ // resource that may make a modification. The only condition under
+ // which name is not set and we still allow access is when the
+ // base route is accessed. This section prevents us from making
+ // that mistake elsewhere in the code, allowing any operation to
+ // proceed.
+ if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return fmt.Errorf("forbidden: no repository name") + } + accessRecords = appendCatalogAccessRecord(accessRecords, r) + } + + ctx, err := app.accessController.Authorized(context.Context, accessRecords...) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + // Add the appropriate WWW-Auth header + err.SetHeaders(w) + + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + default: + // This condition is a potential security problem either in + // the configuration or whatever is backing the access + // controller. Just return a bad request with no information + // to avoid exposure. The request should not proceed. + dcontext.GetLogger(context).Errorf("error checking authorization: %v", err) + w.WriteHeader(http.StatusBadRequest) + } + + return err + } + + dcontext.GetLogger(ctx).Info("authorized request") + // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context + // should be replaced by another, rather than replacing the context on a + // mutable object. + context.Context = ctx + return nil +} + +// eventBridge returns a bridge for the current request, configured with the +// correct actor and source. +func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { + actor := notifications.ActorRecord{ + Name: getUserName(ctx, r), + } + request := notifications.NewRequestRecord(dcontext.GetRequestID(ctx), r) + + return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) +} + +// nameRequired returns true if the route requires a name. +func (app *App) nameRequired(r *http.Request) bool { + route := mux.CurrentRoute(r) + if route == nil { + return true + } + routeName := route.GetName() + return routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog +} + +// apiBase implements a simple yes-man for doing overall checks against the +// api. This can support auth roundtrips to support docker login. +func apiBase(w http.ResponseWriter, r *http.Request) { + const emptyJSON = "{}" + // Provide a simple /v2/ 200 OK response with empty json response. + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) + + fmt.Fprint(w, emptyJSON) +} + +// appendAccessRecords checks the method and adds the appropriate Access records to the records list. 
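// Editor's worked example (not part of the original patch): for a PUT to
// repository "foo/bar", the switch below yields both a pull and a push
// record, which an access controller must authorize together:
//
//	recs := appendAccessRecords(nil, "PUT", "foo/bar")
//	// recs == [{repository foo/bar pull}, {repository foo/bar push}]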
+func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access {
+ resource := auth.Resource{
+ Type: "repository",
+ Name: repo,
+ }
+
+ switch method {
+ case "GET", "HEAD":
+ records = append(records,
+ auth.Access{
+ Resource: resource,
+ Action: "pull",
+ })
+ case "POST", "PUT", "PATCH":
+ records = append(records,
+ auth.Access{
+ Resource: resource,
+ Action: "pull",
+ },
+ auth.Access{
+ Resource: resource,
+ Action: "push",
+ })
+ case "DELETE":
+ records = append(records,
+ auth.Access{
+ Resource: resource,
+ Action: "delete",
+ })
+ }
+ return records
+}
+
+// Add the access record for the catalog if it's our current route
+func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access {
+ route := mux.CurrentRoute(r)
+ routeName := route.GetName()
+
+ if routeName == v2.RouteNameCatalog {
+ resource := auth.Resource{
+ Type: "registry",
+ Name: "catalog",
+ }
+
+ accessRecords = append(accessRecords,
+ auth.Access{
+ Resource: resource,
+ Action: "*",
+ })
+ }
+ return accessRecords
+}
+
+// applyRegistryMiddleware wraps a registry instance with the configured middlewares
+func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) {
+ for _, mw := range middlewares {
+ rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry)
+ if err != nil {
+ return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err)
+ }
+ registry = rmw
+ }
+ return registry, nil
+
+}
+
+// applyRepoMiddleware wraps a repository with the configured middlewares
+func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) {
+ for _, mw := range middlewares {
+ rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository)
+ if err != nil {
+ return nil, err
+ }
+ repository = rmw
+ }
+ return repository, nil
+}
+
+// applyStorageMiddleware wraps a storage driver with the configured middlewares
+func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) {
+ for _, mw := range middlewares {
+ smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver)
+ if err != nil {
+ return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err)
+ }
+ driver = smw
+ }
+ return driver, nil
+}
+
+// uploadPurgeDefaultConfig provides a default configuration for upload
+// purging to be used in the absence of configuration in the
+// configuration file
+func uploadPurgeDefaultConfig() map[interface{}]interface{} {
+ config := map[interface{}]interface{}{}
+ config["enabled"] = true
+ config["age"] = "168h"
+ config["interval"] = "24h"
+ config["dryrun"] = false
+ return config
+}
+
+func badPurgeUploadConfig(reason string) {
+ panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason))
+}
+
+// startUploadPurger schedules a goroutine which will periodically
+// check upload directories for old files and delete them
+func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log dcontext.Logger, config map[interface{}]interface{}) {
+ if config["enabled"] == false {
+ return
+ }
+
+ var purgeAgeDuration time.Duration
+ var err error
+ purgeAge, ok := config["age"]
+ if ok {
+ ageStr, ok := purgeAge.(string)
+ if !ok {
+ badPurgeUploadConfig("age is not a string")
+ }
+
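// Editor's reference sketch (not part of the original patch): the map parsed
// here has the same shape as uploadPurgeDefaultConfig above, so an explicit
// invocation would look like:
//
//	startUploadPurger(ctx, driver, log, map[interface{}]interface{}{
//		"enabled": true, "age": "168h", "interval": "24h", "dryrun": false,
//	})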
purgeAgeDuration, err = time.ParseDuration(ageStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error())) + } + } else { + badPurgeUploadConfig("age missing") + } + + var intervalDuration time.Duration + interval, ok := config["interval"] + if ok { + intervalStr, ok := interval.(string) + if !ok { + badPurgeUploadConfig("interval is not a string") + } + + intervalDuration, err = time.ParseDuration(intervalStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) + } + } else { + badPurgeUploadConfig("interval missing") + } + + var dryRunBool bool + dryRun, ok := config["dryrun"] + if ok { + dryRunBool, ok = dryRun.(bool) + if !ok { + badPurgeUploadConfig("cannot parse dryrun") + } + } else { + badPurgeUploadConfig("dryrun missing") + } + + go func() { + rand.Seed(time.Now().Unix()) + jitter := time.Duration(rand.Int()%60) * time.Minute + log.Infof("Starting upload purge in %s", jitter) + time.Sleep(jitter) + + for { + storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) + log.Infof("Starting upload purge in %s", intervalDuration) + time.Sleep(intervalDuration) + } + }() +} diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go new file mode 100644 index 00000000..12c0b61c --- /dev/null +++ b/registry/handlers/app_test.go @@ -0,0 +1,279 @@ +package handlers + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + _ "github.com/docker/distribution/registry/auth/silly" + "github.com/docker/distribution/registry/storage" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/testdriver" +) + +// TestAppDispatcher builds an application with a test dispatcher and ensures +// that requests are properly dispatched and the handlers are constructed. +// This only tests the dispatch mechanism. The underlying dispatchers must be +// tested individually. 
+func TestAppDispatcher(t *testing.T) { + driver := testdriver.New() + ctx := context.Background() + registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + app := &App{ + Config: &configuration.Configuration{}, + Context: ctx, + router: v2.Router(), + driver: driver, + registry: registry, + } + server := httptest.NewServer(app) + defer server.Close() + router := v2.Router() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("error parsing server url: %v", err) + } + + varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { + return func(ctx *Context, r *http.Request) http.Handler { + // Always checks the same name context + if ctx.Repository.Named().Name() != getName(ctx) { + t.Fatalf("unexpected name: %q != %q", ctx.Repository.Named().Name(), "foo/bar") + } + + // Check that we have all that is expected + for expectedK, expectedV := range expectedVars { + if ctx.Value(expectedK) != expectedV { + t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV) + } + } + + // Check that we only have variables that are expected + for k, v := range ctx.Value("vars").(map[string]string) { + _, ok := expectedVars[k] + + if !ok { // name is checked on context + // We have an unexpected key, fail + t.Fatalf("unexpected key %q in vars with value %q", k, v) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + } + } + + // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string + unflatten := func(vars []string) map[string]string { + m := make(map[string]string) + for i := 0; i < len(vars)-1; i = i + 2 { + m[vars[i]] = vars[i+1] + } + + return m + } + + for _, testcase := range []struct { + endpoint string + vars []string + }{ + { + endpoint: v2.RouteNameManifest, + vars: []string{ + "name", "foo/bar", + "reference", "sometag", + }, + }, + { + endpoint: v2.RouteNameTags, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlobUpload, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlobUploadChunk, + vars: []string{ + "name", "foo/bar", + "uuid", "theuuid", + }, + }, + } { + app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) + route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) + u, err := route.URL(testcase.vars...) + + if err != nil { + t.Fatal(err) + } + + resp, err := http.Get(u.String()) + + if err != nil { + t.Fatal(err) + } + + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) + } + } +} + +// TestNewApp covers the creation of an application via NewApp with a +// configuration. +func TestNewApp(t *testing.T) { + ctx := context.Background() + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": nil, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Auth: configuration.Auth{ + // For now, we simply test that new auth results in a viable + // application. + "silly": { + "realm": "realm-test", + "service": "service-test", + }, + }, + } + + // Mostly, with this test, given a sane configuration, we are simply + // ensuring that NewApp doesn't panic. 
We might want to tweak this
+ behavior.
+ app := NewApp(ctx, &config)
+
+ server := httptest.NewServer(app)
+ defer server.Close()
+ builder, err := v2.NewURLBuilderFromString(server.URL, false)
+ if err != nil {
+ t.Fatalf("error creating urlbuilder: %v", err)
+ }
+
+ baseURL, err := builder.BuildBaseURL()
+ if err != nil {
+ t.Fatalf("error creating baseURL: %v", err)
+ }
+
+ // TODO(stevvooe): The rest of this test might belong in the API tests.
+
+ // Just hit the app and make sure we get a 401 Unauthorized error.
+ resp, err := http.Get(baseURL)
+ if err != nil {
+ t.Fatalf("unexpected error during GET: %v", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusUnauthorized {
+ t.Fatalf("unexpected status code during request: %v != %v", resp.StatusCode, http.StatusUnauthorized)
+ }
+
+ if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" {
+ t.Fatalf("unexpected content-type: %v != %v", resp.Header.Get("Content-Type"), "application/json; charset=utf-8")
+ }
+
+ expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\""
+ if e, a := expectedAuthHeader, resp.Header.Get("WWW-Authenticate"); e != a {
+ t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a)
+ }
+
+ var errs errcode.Errors
+ dec := json.NewDecoder(resp.Body)
+ if err := dec.Decode(&errs); err != nil {
+ t.Fatalf("error decoding error response: %v", err)
+ }
+
+ err2, ok := errs[0].(errcode.ErrorCoder)
+ if !ok {
+ t.Fatalf("not an ErrorCoder: %#v", errs[0])
+ }
+ if err2.ErrorCode() != errcode.ErrorCodeUnauthorized {
+ t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized)
+ }
+}
+
+// Test the access record accumulator
+func TestAppendAccessRecords(t *testing.T) {
+ repo := "testRepo"
+
+ expectedResource := auth.Resource{
+ Type: "repository",
+ Name: repo,
+ }
+
+ expectedPullRecord := auth.Access{
+ Resource: expectedResource,
+ Action: "pull",
+ }
+ expectedPushRecord := auth.Access{
+ Resource: expectedResource,
+ Action: "push",
+ }
+ expectedDeleteRecord := auth.Access{
+ Resource: expectedResource,
+ Action: "delete",
+ }
+
+ records := []auth.Access{}
+ result := appendAccessRecords(records, "GET", repo)
+ expectedResult := []auth.Access{expectedPullRecord}
+ if ok := reflect.DeepEqual(result, expectedResult); !ok {
+ t.Fatalf("Actual access record differs from expected")
+ }
+
+ records = []auth.Access{}
+ result = appendAccessRecords(records, "HEAD", repo)
+ expectedResult = []auth.Access{expectedPullRecord}
+ if ok := reflect.DeepEqual(result, expectedResult); !ok {
+ t.Fatalf("Actual access record differs from expected")
+ }
+
+ records = []auth.Access{}
+ result = appendAccessRecords(records, "POST", repo)
+ expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
+ if ok := reflect.DeepEqual(result, expectedResult); !ok {
+ t.Fatalf("Actual access record differs from expected")
+ }
+
+ records = []auth.Access{}
+ result = appendAccessRecords(records, "PUT", repo)
+ expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
+ if ok := reflect.DeepEqual(result, expectedResult); !ok {
+ t.Fatalf("Actual access record differs from expected")
+ }
+
+ records = []auth.Access{}
+ result = appendAccessRecords(records, "PATCH", repo)
+ expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord}
+ if ok := reflect.DeepEqual(result, expectedResult); !ok {
+ t.Fatalf("Actual access record differs from expected")
+ }
+
+ records = []auth.Access{}
+ result = appendAccessRecords(records, "DELETE", repo)
+ expectedResult = []auth.Access{expectedDeleteRecord}
+ if ok := reflect.DeepEqual(result, expectedResult); !ok {
+ t.Fatalf("Actual access record differs from expected")
+ }
+
+}
diff --git a/registry/handlers/basicauth.go b/registry/handlers/basicauth.go
new file mode 100644
index 00000000..8727a3cd
--- /dev/null
+++ b/registry/handlers/basicauth.go
@@ -0,0 +1,11 @@
+// +build go1.4
+
+package handlers
+
+import (
+ "net/http"
+)
+
+func basicAuth(r *http.Request) (username, password string, ok bool) {
+ return r.BasicAuth()
+}
diff --git a/registry/handlers/basicauth_prego14.go b/registry/handlers/basicauth_prego14.go
new file mode 100644
index 00000000..6cf10a25
--- /dev/null
+++ b/registry/handlers/basicauth_prego14.go
@@ -0,0 +1,41 @@
+// +build !go1.4
+
+package handlers
+
+import (
+ "encoding/base64"
+ "net/http"
+ "strings"
+)
+
+// NOTE(stevvooe): This is the basic auth support from go1.4, present here to
+// ensure we can compile on go1.3 and earlier.
+
+// BasicAuth returns the username and password provided in the request's
+// Authorization header, if the request uses HTTP Basic Authentication.
+// See RFC 2617, Section 2.
+func basicAuth(r *http.Request) (username, password string, ok bool) {
+ auth := r.Header.Get("Authorization")
+ if auth == "" {
+ return
+ }
+ return parseBasicAuth(auth)
+}
+
+// parseBasicAuth parses an HTTP Basic Authentication string.
+// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
+func parseBasicAuth(auth string) (username, password string, ok bool) {
+ if !strings.HasPrefix(auth, "Basic ") {
+ return
+ }
+ c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
+ if err != nil {
+ return
+ }
+ cs := string(c)
+ s := strings.IndexByte(cs, ':')
+ if s < 0 {
+ return
+ }
+ return cs[:s], cs[s+1:], true
+}
diff --git a/registry/handlers/blob.go b/registry/handlers/blob.go
new file mode 100644
index 00000000..5c31cc76
--- /dev/null
+++ b/registry/handlers/blob.go
@@ -0,0 +1,99 @@
+package handlers
+
+import (
+ "net/http"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/gorilla/handlers"
+ "github.com/opencontainers/go-digest"
+)
+
+// blobDispatcher uses the request context to build a blobHandler.
+func blobDispatcher(ctx *Context, r *http.Request) http.Handler {
+ dgst, err := getDigest(ctx)
+ if err != nil {
+
+ if err == errDigestNotAvailable {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
+ })
+ }
+
+ blobHandler := &blobHandler{
+ Context: ctx,
+ Digest: dgst,
+ }
+
+ mhandler := handlers.MethodHandler{
+ "GET": http.HandlerFunc(blobHandler.GetBlob),
+ "HEAD": http.HandlerFunc(blobHandler.GetBlob),
+ }
+
+ if !ctx.readOnly {
+ mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob)
+ }
+
+ return mhandler
+}
+
+// blobHandler serves http blob requests.
+type blobHandler struct {
+ *Context
+
+ Digest digest.Digest
+}
+
+// GetBlob fetches the binary data from backend storage and returns it in the
+// response.
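// Editor's illustrative note (not part of the original patch): in client
// terms the handler below answers
//
//	GET /v2/<name>/blobs/<digest>
//
// and sets Docker-Content-Digest, so a minimal probe against a test server
// could be (server and dgst are assumed to exist in the test harness):
//
//	resp, err := http.Get(server.URL + "/v2/foo/bar/blobs/" + dgst.String())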
+func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("GetBlob") + blobs := bh.Repository.Blobs(bh) + desc, err := blobs.Stat(bh, bh.Digest) + if err != nil { + if err == distribution.ErrBlobUnknown { + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) + } else { + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { + context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// DeleteBlob deletes a layer blob +func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("DeleteBlob") + + blobs := bh.Repository.Blobs(bh) + err := blobs.Delete(bh, bh.Digest) + if err != nil { + switch err { + case distribution.ErrUnsupported: + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) + return + case distribution.ErrBlobUnknown: + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) + return + default: + bh.Errors = append(bh.Errors, err) + context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) + return + } + } + + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusAccepted) +} diff --git a/registry/handlers/blobupload.go b/registry/handlers/blobupload.go new file mode 100644 index 00000000..49ab1aaa --- /dev/null +++ b/registry/handlers/blobupload.go @@ -0,0 +1,368 @@ +package handlers + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage" + "github.com/gorilla/handlers" + "github.com/opencontainers/go-digest" +) + +// blobUploadDispatcher constructs and returns the blob upload handler for the +// given request context. 
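// Editor's summary sketch (not part of the original patch) of the upload
// flow served by the handlers below, in client terms:
//
//	POST   /v2/<name>/blobs/uploads/   -> 202, Location + Docker-Upload-UUID
//	PATCH  <Location>                  -> append data to the upload session
//	PUT    <Location>&digest=<digest>  -> 201, blob committed and linked
//	DELETE <Location>                  -> 204, upload canceled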
+func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
+ buh := &blobUploadHandler{
+ Context: ctx,
+ UUID: getUploadUUID(ctx),
+ }
+
+ handler := handlers.MethodHandler{
+ "GET": http.HandlerFunc(buh.GetUploadStatus),
+ "HEAD": http.HandlerFunc(buh.GetUploadStatus),
+ }
+
+ if !ctx.readOnly {
+ handler["POST"] = http.HandlerFunc(buh.StartBlobUpload)
+ handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData)
+ handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete)
+ handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload)
+ }
+
+ if buh.UUID != "" {
+ state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
+ if err != nil {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ dcontext.GetLogger(ctx).Infof("error resolving upload: %v", err)
+ buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+ })
+ }
+ buh.State = state
+
+ if state.Name != ctx.Repository.Named().Name() {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ dcontext.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name())
+ buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+ })
+ }
+
+ if state.UUID != buh.UUID {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ dcontext.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID)
+ buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+ })
+ }
+
+ blobs := ctx.Repository.Blobs(buh)
+ upload, err := blobs.Resume(buh, buh.UUID)
+ if err != nil {
+ dcontext.GetLogger(ctx).Errorf("error resolving upload: %v", err)
+ if err == distribution.ErrBlobUploadUnknown {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+ })
+ }
+ buh.Upload = upload
+
+ if size := upload.Size(); size != buh.State.Offset {
+ defer upload.Close()
+ dcontext.GetLogger(ctx).Errorf("upload resumed at wrong offset: %d != %d", size, buh.State.Offset)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+ upload.Cancel(buh)
+ })
+ }
+ return closeResources(handler, buh.Upload)
+ }
+
+ return handler
+}
+
+// blobUploadHandler handles the http blob upload process.
+type blobUploadHandler struct {
+ *Context
+
+ // UUID identifies the upload instance for the current request. Using UUID
+ // to key blob writers since this implementation uses UUIDs.
+ UUID string
+
+ Upload distribution.BlobWriter
+
+ State blobUploadState
+}
+
+// StartBlobUpload begins the blob upload process and allocates a server-side
+// blob writer session, optionally mounting the blob from a separate repository.
+func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) {
+ var options []distribution.BlobCreateOption
+
+ fromRepo := r.FormValue("from")
+ mountDigest := r.FormValue("mount")
+
+ if mountDigest != "" && fromRepo != "" {
+ opt, err := buh.createBlobMountOption(fromRepo, mountDigest)
+ if opt != nil && err == nil {
+ options = append(options, opt)
+ }
+ }
+
+ blobs := buh.Repository.Blobs(buh)
+ upload, err := blobs.Create(buh, options...)
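// Editor's note (illustrative, not part of the original patch): when the
// client requested a cross-repository mount, e.g.
//
//	POST /v2/foo/bar/blobs/uploads/?mount=sha256:...&from=baz/qux
//
// the Create call above may return distribution.ErrBlobMounted, which the
// branch below turns into an immediate 201 without an upload session.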
+ + if err != nil { + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + } else if err == distribution.ErrUnsupported { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + } else { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + buh.Upload = upload + + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) + w.WriteHeader(http.StatusAccepted) +} + +// GetUploadStatus returns the status of a given upload, identified by id. +func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + // TODO(dmcgowan): Set last argument to false in blobUploadResponse when + // resumable upload is supported. This will enable returning a non-zero + // range for clients to begin uploading at an offset. + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.WriteHeader(http.StatusNoContent) +} + +// PatchBlobData writes data to an upload. +func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + ct := r.Header.Get("Content-Type") + if ct != "" && ct != "application/octet-stream" { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) + // TODO(dmcgowan): encode error + return + } + + // TODO(dmcgowan): support Content-Range header to seek and write range + + if err := copyFullPayload(buh, w, r, buh.Upload, -1, "blob PATCH"); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error())) + return + } + + if err := buh.blobUploadResponse(w, r, false); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// PutBlobUploadComplete takes the final request of a blob upload. The +// request may include all the blob data or no blob data. Any data +// provided is received and verified. If successful, the blob is linked +// into the blob store and 201 Created is returned with the canonical +// url of the blob. +func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + + if dgstStr == "" { + // no digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) + return + } + + dgst, err := digest.Parse(dgstStr) + if err != nil { + // no digest? return error, but allow retry. 
+ buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed"))
+ return
+ }
+
+ if err := copyFullPayload(buh, w, r, buh.Upload, -1, "blob PUT"); err != nil {
+ buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error()))
+ return
+ }
+
+ desc, err := buh.Upload.Commit(buh, distribution.Descriptor{
+ Digest: dgst,
+
+ // TODO(stevvooe): This isn't wildly important yet, but we should
+ // really set the mediatype. For now, we can let the backend take care
+ // of this.
+ })
+
+ if err != nil {
+ switch err := err.(type) {
+ case distribution.ErrBlobInvalidDigest:
+ buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
+ case errcode.Error:
+ buh.Errors = append(buh.Errors, err)
+ default:
+ switch err {
+ case distribution.ErrAccessDenied:
+ buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied)
+ case distribution.ErrUnsupported:
+ buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
+ case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported:
+ buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
+ default:
+ dcontext.GetLogger(buh).Errorf("unknown error completing upload: %v", err)
+ buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+ }
+
+ }
+
+ // Clean up the backend blob data if there was an error.
+ if err := buh.Upload.Cancel(buh); err != nil {
+ // If the cleanup fails, all we can do is observe and report.
+ dcontext.GetLogger(buh).Errorf("error canceling upload after error: %v", err)
+ }
+
+ return
+ }
+ if err := buh.writeBlobCreatedHeaders(w, desc); err != nil {
+ buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+ return
+ }
+}
+
+// CancelBlobUpload cancels an in-progress upload of a blob.
+func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) {
+ if buh.Upload == nil {
+ buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown)
+ return
+ }
+
+ w.Header().Set("Docker-Upload-UUID", buh.UUID)
+ if err := buh.Upload.Cancel(buh); err != nil {
+ dcontext.GetLogger(buh).Errorf("error encountered canceling upload: %v", err)
+ buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// blobUploadResponse writes the standard response for blob upload and
+// chunk status requests. This sets the correct headers but the response
+// status is left to the caller. The fresh argument is used to ensure that new
+// blob uploads always start at a 0 offset. This allows disabling resumable
+// push by always returning a 0 offset on check status.
+func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error {
+ // TODO(stevvooe): Need a better way to manage the upload state automatically.
+ buh.State.Name = buh.Repository.Named().Name() + buh.State.UUID = buh.Upload.ID() + buh.Upload.Close() + buh.State.Offset = buh.Upload.Size() + buh.State.StartedAt = buh.Upload.StartedAt() + + token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) + if err != nil { + dcontext.GetLogger(buh).Infof("error building upload state token: %s", err) + return err + } + + uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( + buh.Repository.Named(), buh.Upload.ID(), + url.Values{ + "_state": []string{token}, + }) + if err != nil { + dcontext.GetLogger(buh).Infof("error building upload url: %s", err) + return err + } + + endRange := buh.Upload.Size() + if endRange > 0 { + endRange = endRange - 1 + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.Header().Set("Location", uploadURL) + + w.Header().Set("Content-Length", "0") + w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) + + return nil +} + +// mountBlob attempts to mount a blob from another repository by its digest. If +// successful, the blob is linked into the blob store and 201 Created is +// returned with the canonical url of the blob. +func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { + dgst, err := digest.Parse(mountDigest) + if err != nil { + return nil, err + } + + ref, err := reference.WithName(fromRepo) + if err != nil { + return nil, err + } + + canonical, err := reference.WithDigest(ref, dgst) + if err != nil { + return nil, err + } + + return storage.WithMountFrom(canonical), nil +} + +// writeBlobCreatedHeaders writes the standard headers describing a newly +// created blob. A 201 Created is written as well as the canonical URL and +// blob digest. +func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { + ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest) + if err != nil { + return err + } + blobURL, err := buh.urlBuilder.BuildBlobURL(ref) + if err != nil { + return err + } + + w.Header().Set("Location", blobURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + w.WriteHeader(http.StatusCreated) + return nil +} diff --git a/registry/handlers/catalog.go b/registry/handlers/catalog.go new file mode 100644 index 00000000..eca98468 --- /dev/null +++ b/registry/handlers/catalog.go @@ -0,0 +1,98 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/storage/driver" + "github.com/gorilla/handlers" +) + +const maximumReturnedEntries = 100 + +func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { + catalogHandler := &catalogHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(catalogHandler.GetCatalog), + } +} + +type catalogHandler struct { + *Context +} + +type catalogAPIResponse struct { + Repositories []string `json:"repositories"` +} + +func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { + var moreEntries = true + + q := r.URL.Query() + lastEntry := q.Get("last") + maxEntries, err := strconv.Atoi(q.Get("n")) + if err != nil || maxEntries < 0 { + maxEntries = maximumReturnedEntries + } + + repos := make([]string, maxEntries) + + filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) + _, pathNotFound := err.(driver.PathNotFoundError) + + if err == io.EOF 
|| pathNotFound {
+		moreEntries = false
+	} else if err != nil {
+		ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+
+	// Add a link header if there are more entries to retrieve
+	if moreEntries {
+		lastEntry = repos[len(repos)-1]
+		urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry)
+		if err != nil {
+			ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+			return
+		}
+		w.Header().Set("Link", urlStr)
+	}
+
+	enc := json.NewEncoder(w)
+	if err := enc.Encode(catalogAPIResponse{
+		Repositories: repos[0:filled],
+	}); err != nil {
+		ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+		return
+	}
+}
+
+// createLinkEntry uses the original URL from the request to create a new URL
+// for the link header.
+func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) {
+	calledURL, err := url.Parse(origURL)
+	if err != nil {
+		return "", err
+	}
+
+	v := url.Values{}
+	v.Add("n", strconv.Itoa(maxEntries))
+	v.Add("last", lastEntry)
+
+	calledURL.RawQuery = v.Encode()
+
+	calledURL.Fragment = ""
+	urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String())
+
+	return urlStr, nil
+}
diff --git a/registry/handlers/context.go b/registry/handlers/context.go
new file mode 100644
index 00000000..e5d26768
--- /dev/null
+++ b/registry/handlers/context.go
@@ -0,0 +1,92 @@
+package handlers
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/docker/distribution"
+	dcontext "github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/auth"
+	"github.com/opencontainers/go-digest"
+)
+
+// Context should contain the request-specific context for use across
+// handlers. Resources that don't need to be shared across handlers should not
+// be on this object.
+type Context struct {
+	// App points to the application structure that created this context.
+	*App
+	context.Context
+
+	// Repository is the repository for the current request. All requests
+	// should be scoped to a single repository. This field may be nil.
+	Repository distribution.Repository
+
+	// Errors is a collection of errors encountered during the request to be
+	// returned to the client API. If errors are added to the collection, the
+	// handler *must not* start the response via http.ResponseWriter.
+	Errors errcode.Errors
+
+	urlBuilder *v2.URLBuilder
+
+	// TODO(stevvooe): The goal is to completely factor this context and
+	// dispatching out of the web application. Ideally, we should lean on
+	// context.Context for injection of these resources.
+}
+
+// Value overrides context.Context.Value to ensure that calls are routed to
+// the correct context.
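// For illustration only (not part of the patch): because Context embeds a
// context.Context and Value delegates to it, request variables stored on the
// embedded context are visible through the handler Context, e.g.:
//
//	base := context.WithValue(context.Background(), "vars.name", "library/redis")
//	c := &Context{Context: base}
//	_ = getName(c) // "library/redis", via dcontext.GetStringValue -> Value
//
// (getName is defined just below; the "vars.name" key mirrors how this
// package stores route variables on the context.)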
+func (ctx *Context) Value(key interface{}) interface{} { + return ctx.Context.Value(key) +} + +func getName(ctx context.Context) (name string) { + return dcontext.GetStringValue(ctx, "vars.name") +} + +func getReference(ctx context.Context) (reference string) { + return dcontext.GetStringValue(ctx, "vars.reference") +} + +var errDigestNotAvailable = fmt.Errorf("digest not available in context") + +func getDigest(ctx context.Context) (dgst digest.Digest, err error) { + dgstStr := dcontext.GetStringValue(ctx, "vars.digest") + + if dgstStr == "" { + dcontext.GetLogger(ctx).Errorf("digest not available") + return "", errDigestNotAvailable + } + + d, err := digest.Parse(dgstStr) + if err != nil { + dcontext.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) + return "", err + } + + return d, nil +} + +func getUploadUUID(ctx context.Context) (uuid string) { + return dcontext.GetStringValue(ctx, "vars.uuid") +} + +// getUserName attempts to resolve a username from the context and request. If +// a username cannot be resolved, the empty string is returned. +func getUserName(ctx context.Context, r *http.Request) string { + username := dcontext.GetStringValue(ctx, auth.UserNameKey) + + // Fallback to request user with basic auth + if username == "" { + var ok bool + uname, _, ok := basicAuth(r) + if ok { + username = uname + } + } + + return username +} diff --git a/registry/handlers/health_test.go b/registry/handlers/health_test.go new file mode 100644 index 00000000..0f38bd1c --- /dev/null +++ b/registry/handlers/health_test.go @@ -0,0 +1,210 @@ +package handlers + +import ( + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/health" +) + +func TestFileHealthCheck(t *testing.T) { + interval := time.Second + + tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck") + if err != nil { + t.Fatalf("could not create temporary file: %v", err) + } + defer tmpfile.Close() + + config := &configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Health: configuration.Health{ + FileCheckers: []configuration.FileChecker{ + { + Interval: interval, + File: tmpfile.Name(), + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) + + // Wait for health check to happen + <-time.After(2 * interval) + + status := healthRegistry.CheckStatus() + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") + } + if status[tmpfile.Name()] != "file exists" { + t.Fatal(`did not get "file exists" result for health check`) + } + + os.Remove(tmpfile.Name()) + + <-time.After(2 * interval) + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") + } +} + +func TestTCPHealthCheck(t *testing.T) { + interval := time.Second + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("could not create listener: %v", err) + } + addrStr := ln.Addr().String() + + // Start accepting + go func() { + for { + conn, err := ln.Accept() + if err != nil { + // listener was closed + return + } + defer conn.Close() + } + }() + + config := &configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": 
configuration.Parameters{},
+			"maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
+				"enabled": false,
+			}},
+		},
+		Health: configuration.Health{
+			TCPCheckers: []configuration.TCPChecker{
+				{
+					Interval: interval,
+					Addr:     addrStr,
+					Timeout:  500 * time.Millisecond,
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+
+	app := NewApp(ctx, config)
+	healthRegistry := health.NewRegistry()
+	app.RegisterHealthChecks(healthRegistry)
+
+	// Wait for health check to happen
+	<-time.After(2 * interval)
+
+	if len(healthRegistry.CheckStatus()) != 0 {
+		t.Fatal("expected 0 items in health check results")
+	}
+
+	ln.Close()
+	<-time.After(2 * interval)
+
+	// Health check should now fail
+	status := healthRegistry.CheckStatus()
+	if len(status) != 1 {
+		t.Fatal("expected 1 item in health check results")
+	}
+	if status[addrStr] != "connection to "+addrStr+" failed" {
+		t.Fatal(`did not get "connection failed" result for health check`)
+	}
+}
+
+func TestHTTPHealthCheck(t *testing.T) {
+	interval := time.Second
+	threshold := 3
+
+	stopFailing := make(chan struct{})
+
+	checkedServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != "HEAD" {
+			t.Fatalf("expected HEAD request, got %s", r.Method)
+		}
+		select {
+		case <-stopFailing:
+			w.WriteHeader(http.StatusOK)
+		default:
+			w.WriteHeader(http.StatusInternalServerError)
+		}
+	}))
+
+	config := &configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": configuration.Parameters{},
+			"maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
+				"enabled": false,
+			}},
+		},
+		Health: configuration.Health{
+			HTTPCheckers: []configuration.HTTPChecker{
+				{
+					Interval:  interval,
+					URI:       checkedServer.URL,
+					Threshold: threshold,
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+
+	app := NewApp(ctx, config)
+	healthRegistry := health.NewRegistry()
+	app.RegisterHealthChecks(healthRegistry)
+
+	for i := 0; ; i++ {
+		<-time.After(interval)
+
+		status := healthRegistry.CheckStatus()
+
+		if i < threshold-1 {
+			// definitely shouldn't have hit the threshold yet
+			if len(status) != 0 {
+				t.Fatal("expected 0 items in health check results")
+			}
+			continue
+		}
+		if i < threshold+1 {
+			// right on the threshold - don't expect a failure yet
+			continue
+		}
+
+		if len(status) != 1 {
+			t.Fatal("expected 1 item in health check results")
+		}
+		if status[checkedServer.URL] != "downstream service returned unexpected status: 500" {
+			t.Fatal("did not get expected result for health check")
+		}
+
+		break
+	}
+
+	// Signal HTTP handler to start returning 200
+	close(stopFailing)
+
+	<-time.After(2 * interval)
+
+	if len(healthRegistry.CheckStatus()) != 0 {
+		t.Fatal("expected 0 items in health check results")
+	}
+}
diff --git a/registry/handlers/helpers.go b/registry/handlers/helpers.go
new file mode 100644
index 00000000..46ad797e
--- /dev/null
+++ b/registry/handlers/helpers.go
@@ -0,0 +1,72 @@
+package handlers
+
+import (
+	"context"
+	"errors"
+	"io"
+	"net/http"
+
+	dcontext "github.com/docker/distribution/context"
+)
+
+// closeResources closes all the provided resources after running the target
+// handler.
+func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		for _, closer := range closers {
+			defer closer.Close()
+		}
+		handler.ServeHTTP(w, r)
+	})
+}
+
+// copyFullPayload copies the payload of an HTTP request to destWriter.
If it
+// receives less content than expected, and the client disconnected during the
+// upload, it avoids sending a 400 error to keep the logs cleaner.
+//
+// The copy will be limited to `limit` bytes, if limit is greater than zero.
+func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error {
+	// Get a channel that tells us if the client disconnects
+	var clientClosed <-chan bool
+	if notifier, ok := responseWriter.(http.CloseNotifier); ok {
+		clientClosed = notifier.CloseNotify()
+	} else {
+		dcontext.GetLogger(ctx).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter)
+	}
+
+	var body = r.Body
+	if limit > 0 {
+		body = http.MaxBytesReader(responseWriter, body, limit)
+	}
+
+	// Read in the data, if any.
+	copied, err := io.Copy(destWriter, body)
+	if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
+		// Didn't receive as much content as expected. Did the client
+		// disconnect during the request? If so, avoid returning a 400
+		// error to keep the logs cleaner.
+		select {
+		case <-clientClosed:
+			// Set the response code to "499 Client Closed Request"
+			// Even though the connection has already been closed,
+			// this causes the logger to pick up a 499 error
+			// instead of showing 0 for the HTTP status.
+			responseWriter.WriteHeader(499)
+
+			dcontext.GetLoggerWithFields(ctx, map[interface{}]interface{}{
+				"error":         err,
+				"copied":        copied,
+				"contentLength": r.ContentLength,
+			}, "error", "copied", "contentLength").Error("client disconnected during " + action)
+			return errors.New("client disconnected")
+		default:
+		}
+	}
+
+	if err != nil {
+		dcontext.GetLogger(ctx).Errorf("unknown error reading request payload: %v", err)
+		return err
+	}
+
+	return nil
+}
diff --git a/registry/handlers/hmac.go b/registry/handlers/hmac.go
new file mode 100644
index 00000000..94ed9fda
--- /dev/null
+++ b/registry/handlers/hmac.go
@@ -0,0 +1,74 @@
+package handlers
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// blobUploadState captures the serializable state of the blob upload.
+type blobUploadState struct {
+	// Name is the primary repository under which the blob will be linked.
+	Name string
+
+	// UUID identifies the upload.
+	UUID string
+
+	// Offset contains the current progress of the upload.
+	Offset int64
+
+	// StartedAt is the original start time of the upload.
+	StartedAt time.Time
+}
+
+type hmacKey string
+
+var errInvalidSecret = fmt.Errorf("invalid secret")
+
+// unpackUploadState unpacks and validates the blob upload state from the
+// token, using the hmacKey secret.
+func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) {
+	var state blobUploadState
+
+	tokenBytes, err := base64.URLEncoding.DecodeString(token)
+	if err != nil {
+		return state, err
+	}
+	mac := hmac.New(sha256.New, []byte(secret))
+
+	if len(tokenBytes) < mac.Size() {
+		return state, errInvalidSecret
+	}
+
+	macBytes := tokenBytes[:mac.Size()]
+	messageBytes := tokenBytes[mac.Size():]
+
+	mac.Write(messageBytes)
+	if !hmac.Equal(mac.Sum(nil), macBytes) {
+		return state, errInvalidSecret
+	}
+
+	if err := json.Unmarshal(messageBytes, &state); err != nil {
+		return state, err
+	}
+
+	return state, nil
+}
+
+// packUploadState packs the upload state, signed with an HMAC digest using
+// the hmacKey secret, encoding to URL-safe base64.
The resulting token can be +// used to share data with minimized risk of external tampering. +func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { + mac := hmac.New(sha256.New, []byte(secret)) + p, err := json.Marshal(lus) + if err != nil { + return "", err + } + + mac.Write(p) + + return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil +} diff --git a/registry/handlers/hmac_test.go b/registry/handlers/hmac_test.go new file mode 100644 index 00000000..366c7279 --- /dev/null +++ b/registry/handlers/hmac_test.go @@ -0,0 +1,117 @@ +package handlers + +import "testing" + +var blobUploadStates = []blobUploadState{ + { + Name: "hello", + UUID: "abcd-1234-qwer-0987", + Offset: 0, + }, + { + Name: "hello-world", + UUID: "abcd-1234-qwer-0987", + Offset: 0, + }, + { + Name: "h3ll0_w0rld", + UUID: "abcd-1234-qwer-0987", + Offset: 1337, + }, + { + Name: "ABCDEFG", + UUID: "ABCD-1234-QWER-0987", + Offset: 1234567890, + }, + { + Name: "this-is-A-sort-of-Long-name-for-Testing", + UUID: "dead-1234-beef-0987", + Offset: 8675309, + }, +} + +var secrets = []string{ + "supersecret", + "12345", + "a", + "SuperSecret", + "Sup3r... S3cr3t!", + "This is a reasonably long secret key that is used for the purpose of testing.", + "\u2603+\u2744", // snowman+snowflake +} + +// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and +// validates that the tokens can be used to reconstruct the proper upload state. +func TestLayerUploadTokens(t *testing.T) { + secret := hmacKey("supersecret") + + for _, testcase := range blobUploadStates { + token, err := secret.packUploadState(testcase) + if err != nil { + t.Fatal(err) + } + + lus, err := secret.unpackUploadState(token) + if err != nil { + t.Fatal(err) + } + + assertBlobUploadStateEquals(t, testcase, lus) + } +} + +// TestHMACValidate ensures that any HMAC token providers are compatible if and +// only if they share the same secret. 
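// For illustration only (not part of the patch), the property being tested:
// a token packed under one key only unpacks under the same key, e.g.
//
//	tok, _ := hmacKey("k1").packUploadState(blobUploadState{Name: "a/b", UUID: "u", Offset: 42})
//	st, err := hmacKey("k1").unpackUploadState(tok) // err == nil, st.Offset == 42
//	_, err = hmacKey("k2").unpackUploadState(tok)   // err == errInvalidSecret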
+func TestHMACValidation(t *testing.T) {
+	for _, secret := range secrets {
+		secret1 := hmacKey(secret)
+		secret2 := hmacKey(secret)
+		badSecret := hmacKey("DifferentSecret")
+
+		for _, testcase := range blobUploadStates {
+			token, err := secret1.packUploadState(testcase)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			lus, err := secret2.unpackUploadState(token)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			assertBlobUploadStateEquals(t, testcase, lus)
+
+			_, err = badSecret.unpackUploadState(token)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token)
+			}
+
+			badToken, err := badSecret.packUploadState(lus)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			_, err = secret1.unpackUploadState(badToken)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
+			}
+
+			_, err = secret2.unpackUploadState(badToken)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
+			}
+		}
+	}
+}
+
+func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) {
+	if expected.Name != received.Name {
+		t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name)
+	}
+	if expected.UUID != received.UUID {
+		t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID)
+	}
+	if expected.Offset != received.Offset {
+		t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset)
+	}
+}
diff --git a/registry/handlers/hooks.go b/registry/handlers/hooks.go
new file mode 100644
index 00000000..e51df2b7
--- /dev/null
+++ b/registry/handlers/hooks.go
@@ -0,0 +1,53 @@
+package handlers
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"text/template"
+
+	"github.com/sirupsen/logrus"
+)
+
+// logHook is for hooking panics in the web application
+type logHook struct {
+	LevelsParam []string
+	Mail        *mailer
+}
+
+// Fire forwards an error to LogHook
+func (hook *logHook) Fire(entry *logrus.Entry) error {
+	addr := strings.Split(hook.Mail.Addr, ":")
+	if len(addr) != 2 {
+		return errors.New("Invalid Mail Address")
+	}
+	host := addr[0]
+	subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message)
+
+	html := `
+	{{.Message}}
+
+	{{range $key, $value := .Data}}
+	{{$key}}: {{$value}}
+	{{end}}
+	`
+	b := bytes.NewBuffer(make([]byte, 0))
+	t := template.Must(template.New("mail body").Parse(html))
+	if err := t.Execute(b, entry); err != nil {
+		return err
+	}
+	body := b.String()
+
+	return hook.Mail.sendMail(subject, body)
+}
+
+// Levels contains hook levels to be caught
+func (hook *logHook) Levels() []logrus.Level {
+	levels := []logrus.Level{}
+	for _, v := range hook.LevelsParam {
+		lv, _ := logrus.ParseLevel(v)
+		levels = append(levels, lv)
+	}
+	return levels
+}
diff --git a/registry/handlers/mail.go b/registry/handlers/mail.go
new file mode 100644
index 00000000..39244909
--- /dev/null
+++ b/registry/handlers/mail.go
@@ -0,0 +1,45 @@
+package handlers
+
+import (
+	"errors"
+	"net/smtp"
+	"strings"
+)
+
+// mailer provides fields of email configuration for sending.
+type mailer struct {
+	Addr, Username, Password, From string
+	Insecure                       bool
+	To                             []string
+}
+
+// sendMail allows users to send email, but only if the mail parameters are
+// configured correctly.
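// For illustration only (not part of the patch); host names and addresses
// here are placeholders. A configured mailer would be used like:
//
//	m := &mailer{
//		Addr:     "smtp.example.com:587", // must be host:port, see the split below
//		Username: "registry",
//		Password: "secret",
//		From:     "registry@example.com",
//		To:       []string{"ops@example.com"},
//	}
//	err := m.sendMail("[registry] panic", "details ...")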
+func (mail *mailer) sendMail(subject, message string) error { + addr := strings.Split(mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + msg := []byte("To:" + strings.Join(mail.To, ";") + + "\r\nFrom: " + mail.From + + "\r\nSubject: " + subject + + "\r\nContent-Type: text/plain\r\n\r\n" + + message) + auth := smtp.PlainAuth( + "", + mail.Username, + mail.Password, + host, + ) + err := smtp.SendMail( + mail.Addr, + auth, + mail.From, + mail.To, + []byte(msg), + ) + if err != nil { + return err + } + return nil +} diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go new file mode 100644 index 00000000..4e1d9868 --- /dev/null +++ b/registry/handlers/manifests.go @@ -0,0 +1,480 @@ +package handlers + +import ( + "bytes" + "fmt" + "net/http" + "strings" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + "github.com/gorilla/handlers" + "github.com/opencontainers/go-digest" +) + +// These constants determine which architecture and OS to choose from a +// manifest list when downconverting it to a schema1 manifest. +const ( + defaultArch = "amd64" + defaultOS = "linux" + maxManifestBodySize = 4 << 20 +) + +// manifestDispatcher takes the request context and builds the +// appropriate handler for handling manifest requests. +func manifestDispatcher(ctx *Context, r *http.Request) http.Handler { + manifestHandler := &manifestHandler{ + Context: ctx, + } + reference := getReference(ctx) + dgst, err := digest.Parse(reference) + if err != nil { + // We just have a tag + manifestHandler.Tag = reference + } else { + manifestHandler.Digest = dgst + } + + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(manifestHandler.GetManifest), + "HEAD": http.HandlerFunc(manifestHandler.GetManifest), + } + + if !ctx.readOnly { + mhandler["PUT"] = http.HandlerFunc(manifestHandler.PutManifest) + mhandler["DELETE"] = http.HandlerFunc(manifestHandler.DeleteManifest) + } + + return mhandler +} + +// manifestHandler handles http operations on image manifests. +type manifestHandler struct { + *Context + + // One of tag or digest gets set, depending on what is present in context. + Tag string + Digest digest.Digest +} + +// GetManifest fetches the image manifest from the storage backend, if it exists. 
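// For illustration only (not part of the patch): with a request header such
// as
//
//	Accept: application/vnd.docker.distribution.manifest.v2+json; q=0.9,
//	        application/vnd.docker.distribution.manifest.list.v2+json
//
// the Accept parsing inside this handler splits on "," per value, strips the
// ";q=..." parameter, trims spaces, and ends up setting both supportsSchema2
// and supportsManifestList to true.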
+func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) { + dcontext.GetLogger(imh).Debug("GetImageManifest") + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var manifest distribution.Manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + desc, err := tags.Get(imh, imh.Tag) + if err != nil { + if _, ok := err.(distribution.ErrTagUnknown); ok { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + imh.Digest = desc.Digest + } + + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + manifest, err = manifests.Get(imh, imh.Digest, options...) + if err != nil { + if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + supportsSchema2 := false + supportsManifestList := false + // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values + // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 + for _, acceptHeader := range r.Header["Accept"] { + // r.Header[...] is a slice in case the request contains the same header more than once + // if the header isn't set, we'll get the zero value, which "range" will handle gracefully + + // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 + for _, mediaType := range strings.Split(acceptHeader, ",") { + // remove "; q=..." if present + if i := strings.Index(mediaType, ";"); i >= 0 { + mediaType = mediaType[:i] + } + + // it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f") + mediaType = strings.TrimSpace(mediaType) + + if mediaType == schema2.MediaTypeManifest { + supportsSchema2 = true + } + if mediaType == manifestlist.MediaTypeManifestList { + supportsManifestList = true + } + } + } + + schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) + manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) + + // Only rewrite schema2 manifests when they are being fetched by tag. + // If they are being fetched by digest, we can't return something not + // matching the digest. 
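// For illustration only (not part of the patch): a by-digest fetch must
// return bytes that hash back to the requested digest:
//
//	_, payload, _ := manifest.Payload()
//	// digest.FromBytes(payload) == imh.Digest must hold for by-digest GETs
//
// A rewritten (schema1) payload would hash to a different digest, so the
// conversion below is only safe for tag requests.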
+ if imh.Tag != "" && isSchema2 && !supportsSchema2 { + // Rewrite manifest in schema1 format + dcontext.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } else if imh.Tag != "" && isManifestList && !supportsManifestList { + // Rewrite manifest in schema1 format + dcontext.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + + // Find the image manifest corresponding to the default + // platform + var manifestDigest digest.Digest + for _, manifestDescriptor := range manifestList.Manifests { + if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + } + + manifest, err = manifests.Get(imh, manifestDigest) + if err != nil { + if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + // If necessary, convert the image manifest + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } else { + imh.Digest = manifestDigest + } + } + + ct, p, err := manifest.Payload() + if err != nil { + return + } + + w.Header().Set("Content-Type", ct) + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) + w.Write(p) +} + +func (imh *manifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { + targetDescriptor := schema2Manifest.Target() + blobs := imh.Repository.Blobs(imh) + configJSON, err := blobs.Get(imh, targetDescriptor.Digest) + if err != nil { + if err == distribution.ErrBlobUnknown { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return nil, err + } + + ref := imh.Repository.Named() + + if imh.Tag != "" { + ref, err = reference.WithTag(ref, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err)) + return nil, err + } + } + + builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) + for _, d := range schema2Manifest.Layers { + if err := builder.AppendReference(d); err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + } + manifest, err := builder.Build(imh) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + imh.Digest = digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical) + + return manifest, nil +} + +func etagMatch(r *http.Request, etag string) bool { + for _, headerVal := range r.Header["If-None-Match"] { + if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted + return true + } + } + return false +} + +// PutManifest validates and stores a manifest in the 
registry.
+func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) {
+	dcontext.GetLogger(imh).Debug("PutImageManifest")
+	manifests, err := imh.Repository.Manifests(imh)
+	if err != nil {
+		imh.Errors = append(imh.Errors, err)
+		return
+	}
+
+	var jsonBuf bytes.Buffer
+	if err := copyFullPayload(imh, w, r, &jsonBuf, maxManifestBodySize, "image manifest PUT"); err != nil {
+		// copyFullPayload reports the error if necessary
+		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error()))
+		return
+	}
+
+	mediaType := r.Header.Get("Content-Type")
+	manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())
+	if err != nil {
+		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
+		return
+	}
+
+	if imh.Digest != "" {
+		if desc.Digest != imh.Digest {
+			dcontext.GetLogger(imh).Errorf("payload digest does not match: %q != %q", desc.Digest, imh.Digest)
+			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
+			return
+		}
+	} else if imh.Tag != "" {
+		imh.Digest = desc.Digest
+	} else {
+		imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified"))
+		return
+	}
+
+	var options []distribution.ManifestServiceOption
+	if imh.Tag != "" {
+		options = append(options, distribution.WithTag(imh.Tag))
+	}
+
+	if err := imh.applyResourcePolicy(manifest); err != nil {
+		imh.Errors = append(imh.Errors, err)
+		return
+	}
+
+	_, err = manifests.Put(imh, manifest, options...)
+	if err != nil {
+		// TODO(stevvooe): These error handling switches really need to be
+		// handled by an app global mapper.
+		if err == distribution.ErrUnsupported {
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
+			return
+		}
+		if err == distribution.ErrAccessDenied {
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied)
+			return
+		}
+		switch err := err.(type) {
+		case distribution.ErrManifestVerification:
+			for _, verificationError := range err {
+				switch verificationError := verificationError.(type) {
+				case distribution.ErrManifestBlobUnknown:
+					imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest))
+				case distribution.ErrManifestNameInvalid:
+					imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))
+				case distribution.ErrManifestUnverified:
+					imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)
+				default:
+					if verificationError == digest.ErrDigestInvalidFormat {
+						imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
+					} else {
+						imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)
+					}
+				}
+			}
+		case errcode.Error:
+			imh.Errors = append(imh.Errors, err)
+		default:
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+		}
+
+		return
+	}
+
+	// Tag this manifest
+	if imh.Tag != "" {
+		tags := imh.Repository.Tags(imh)
+		err = tags.Tag(imh, imh.Tag, desc)
+		if err != nil {
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+			return
+		}
+	}
+
+	// Construct a canonical url for the uploaded manifest.
+	ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)
+	if err != nil {
+		imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+		return
+	}
+
+	location, err := imh.urlBuilder.BuildManifestURL(ref)
+	if err != nil {
+		// NOTE(stevvooe): Given the behavior above, this is absurdly unlikely
+		// to happen. We'll log the error here but proceed as if it worked.
+		// Worst case, we set an empty location header.
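// For illustration only (not part of the patch): on success the response
// carries the canonical, digest-addressed manifest URL, e.g.
//
//	Location: /v2/<name>/manifests/sha256:<hex>
//	Docker-Content-Digest: sha256:<hex>
//
// so clients can re-fetch exactly the content that was stored.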
+ dcontext.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) + } + + w.Header().Set("Location", location) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.WriteHeader(http.StatusCreated) +} + +// applyResourcePolicy checks whether the resource class matches what has +// been authorized and allowed by the policy configuration. +func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) error { + allowedClasses := imh.App.Config.Policy.Repository.Classes + if len(allowedClasses) == 0 { + return nil + } + + var class string + switch m := manifest.(type) { + case *schema1.SignedManifest: + class = "image" + case *schema2.DeserializedManifest: + switch m.Config.MediaType { + case schema2.MediaTypeImageConfig: + class = "image" + case schema2.MediaTypePluginConfig: + class = "plugin" + default: + message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType) + return errcode.ErrorCodeDenied.WithMessage(message) + } + } + + if class == "" { + return nil + } + + // Check to see if class is allowed in registry + var allowedClass bool + for _, c := range allowedClasses { + if class == c { + allowedClass = true + break + } + } + if !allowedClass { + message := fmt.Sprintf("registry does not allow %s manifest", class) + return errcode.ErrorCodeDenied.WithMessage(message) + } + + resources := auth.AuthorizedResources(imh) + n := imh.Repository.Named().Name() + + var foundResource bool + for _, r := range resources { + if r.Name == n { + if r.Class == "" { + r.Class = "image" + } + if r.Class == class { + return nil + } + foundResource = true + } + } + + // resource was found but no matching class was found + if foundResource { + message := fmt.Sprintf("repository not authorized for %s manifest", class) + return errcode.ErrorCodeDenied.WithMessage(message) + } + + return nil + +} + +// DeleteManifest removes the manifest with the given digest from the registry. 
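// For illustration only (not part of the patch): deletes are by digest only,
// so a client-side delete is typically a two-step exchange:
//
//	HEAD   /v2/<name>/manifests/<tag>    -> read Docker-Content-Digest
//	DELETE /v2/<name>/manifests/<digest> -> 202 Accepted (handled below)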
+func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) {
+	dcontext.GetLogger(imh).Debug("DeleteImageManifest")
+
+	manifests, err := imh.Repository.Manifests(imh)
+	if err != nil {
+		imh.Errors = append(imh.Errors, err)
+		return
+	}
+
+	err = manifests.Delete(imh, imh.Digest)
+	if err != nil {
+		switch err {
+		case digest.ErrDigestUnsupported, digest.ErrDigestInvalidFormat:
+			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
+			return
+		case distribution.ErrBlobUnknown:
+			imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)
+			return
+		case distribution.ErrUnsupported:
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
+			return
+		default:
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown)
+			return
+		}
+	}
+
+	tagService := imh.Repository.Tags(imh)
+	referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest})
+	if err != nil {
+		imh.Errors = append(imh.Errors, err)
+		return
+	}
+
+	for _, tag := range referencedTags {
+		if err := tagService.Untag(imh, tag); err != nil {
+			imh.Errors = append(imh.Errors, err)
+			return
+		}
+	}
+
+	w.WriteHeader(http.StatusAccepted)
+}
diff --git a/registry/handlers/tags.go b/registry/handlers/tags.go
new file mode 100644
index 00000000..91f1031e
--- /dev/null
+++ b/registry/handlers/tags.go
@@ -0,0 +1,62 @@
+package handlers
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/gorilla/handlers"
+)
+
+// tagsDispatcher constructs the tags handler API endpoint.
+func tagsDispatcher(ctx *Context, r *http.Request) http.Handler {
+	tagsHandler := &tagsHandler{
+		Context: ctx,
+	}
+
+	return handlers.MethodHandler{
+		"GET": http.HandlerFunc(tagsHandler.GetTags),
+	}
+}
+
+// tagsHandler handles requests for lists of tags under a repository name.
+type tagsHandler struct {
+	*Context
+}
+
+type tagsAPIResponse struct {
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
+}
+
+// GetTags returns a JSON list of tags for a specific image name.
+func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
+	defer r.Body.Close()
+
+	tagService := th.Repository.Tags(th)
+	tags, err := tagService.All(th)
+	if err != nil {
+		switch err := err.(type) {
+		case distribution.ErrRepositoryUnknown:
+			th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()}))
+		case errcode.Error:
+			th.Errors = append(th.Errors, err)
+		default:
+			th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+		}
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+
+	enc := json.NewEncoder(w)
+	if err := enc.Encode(tagsAPIResponse{
+		Name: th.Repository.Named().Name(),
+		Tags: tags,
+	}); err != nil {
+		th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+		return
+	}
+}
diff --git a/registry/listener/listener.go b/registry/listener/listener.go
new file mode 100644
index 00000000..b93a7a63
--- /dev/null
+++ b/registry/listener/listener.go
@@ -0,0 +1,74 @@
+package listener
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"time"
+)
+
+// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
+// connections. It's used by ListenAndServe and ListenAndServeTLS so
+// dead TCP connections (e.g. closing laptop mid-download) eventually
+// go away.
+// it is a plain copy-paste from net/http/server.go +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +// NewListener announces on laddr and net. Accepted values of the net are +// 'unix' and 'tcp' +func NewListener(net, laddr string) (net.Listener, error) { + switch net { + case "unix": + return newUnixListener(laddr) + case "tcp", "": // an empty net means tcp + return newTCPListener(laddr) + default: + return nil, fmt.Errorf("unknown address type %s", net) + } +} + +func newUnixListener(laddr string) (net.Listener, error) { + fi, err := os.Stat(laddr) + if err == nil { + // the file exists. + // try to remove it if it's a socket + if !isSocket(fi.Mode()) { + return nil, fmt.Errorf("file %s exists and is not a socket", laddr) + } + + if err := os.Remove(laddr); err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + // we can't do stat on the file. + // it means we can not remove it + return nil, err + } + + return net.Listen("unix", laddr) +} + +func isSocket(m os.FileMode) bool { + return m&os.ModeSocket != 0 +} + +func newTCPListener(laddr string) (net.Listener, error) { + ln, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil +} diff --git a/registry/middleware/registry/middleware.go b/registry/middleware/registry/middleware.go new file mode 100644 index 00000000..49defd82 --- /dev/null +++ b/registry/middleware/registry/middleware.go @@ -0,0 +1,54 @@ +package middleware + +import ( + "context" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/storage" +) + +// InitFunc is the type of a RegistryMiddleware factory function and is +// used to register the constructor for different RegistryMiddleware backends. +type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) + +var middlewares map[string]InitFunc +var registryoptions []storage.RegistryOption + +// Register is used to register an InitFunc for +// a RegistryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RegistryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, registry, options) + } + } + + return nil, fmt.Errorf("no registry middleware registered with name: %s", name) +} + +// RegisterOptions adds more options to RegistryOption list. Options get applied before +// any other configuration-based options. +func RegisterOptions(options ...storage.RegistryOption) error { + registryoptions = append(registryoptions, options...) + return nil +} + +// GetRegistryOptions returns list of RegistryOption. 
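// For illustration only (not part of the patch); "audit" is a hypothetical
// middleware name. A backend registers itself via Register (above), usually
// from an init function, and is later constructed by name from the
// configuration:
//
//	func init() {
//		Register("audit", func(ctx context.Context, registry distribution.Namespace,
//			options map[string]interface{}) (distribution.Namespace, error) {
//			return registry, nil // wrap or decorate the namespace here
//		})
//	}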
+func GetRegistryOptions() []storage.RegistryOption { + return registryoptions +} diff --git a/registry/middleware/repository/middleware.go b/registry/middleware/repository/middleware.go new file mode 100644 index 00000000..f1554b70 --- /dev/null +++ b/registry/middleware/repository/middleware.go @@ -0,0 +1,40 @@ +package middleware + +import ( + "context" + "fmt" + + "github.com/docker/distribution" +) + +// InitFunc is the type of a RepositoryMiddleware factory function and is +// used to register the constructor for different RepositoryMiddleware backends. +type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RepositoryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RepositoryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, repository, options) + } + } + + return nil, fmt.Errorf("no repository middleware registered with name: %s", name) +} diff --git a/registry/proxy/proxyauth.go b/registry/proxy/proxyauth.go new file mode 100644 index 00000000..09a0973e --- /dev/null +++ b/registry/proxy/proxyauth.go @@ -0,0 +1,83 @@ +package proxy + +import ( + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" +) + +const challengeHeader = "Docker-Distribution-Api-Version" + +type userpass struct { + username string + password string +} + +type credentials struct { + creds map[string]userpass +} + +func (c credentials) Basic(u *url.URL) (string, string) { + up := c.creds[u.String()] + + return up.username, up.password +} + +func (c credentials) RefreshToken(u *url.URL, service string) string { + return "" +} + +func (c credentials) SetRefreshToken(u *url.URL, service, token string) { +} + +// configureAuth stores credentials for challenge responses +func configureAuth(username, password, remoteURL string) (auth.CredentialStore, error) { + creds := map[string]userpass{} + + authURLs, err := getAuthURLs(remoteURL) + if err != nil { + return nil, err + } + + for _, url := range authURLs { + context.GetLogger(context.Background()).Infof("Discovered token authentication URL: %s", url) + creds[url] = userpass{ + username: username, + password: password, + } + } + + return credentials{creds: creds}, nil +} + +func getAuthURLs(remoteURL string) ([]string, error) { + authURLs := []string{} + + resp, err := http.Get(remoteURL + "/v2/") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + for _, c := range challenge.ResponseChallenges(resp) { + if strings.EqualFold(c.Scheme, "bearer") { + authURLs = append(authURLs, c.Parameters["realm"]) + } + } + + return authURLs, nil +} + +func ping(manager challenge.Manager, endpoint, versionHeader string) error { + resp, err := http.Get(endpoint) + if err != nil { + return err + 
} + defer resp.Body.Close() + + return manager.AddResponse(resp) +} diff --git a/registry/proxy/proxyblobstore.go b/registry/proxy/proxyblobstore.go new file mode 100644 index 00000000..0f2c7e39 --- /dev/null +++ b/registry/proxy/proxyblobstore.go @@ -0,0 +1,225 @@ +package proxy + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/opencontainers/go-digest" +) + +// todo(richardscothern): from cache control header or config file +const blobTTL = time.Duration(24 * 7 * time.Hour) + +type proxyBlobStore struct { + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler + repositoryName reference.Named + authChallenger authChallenger +} + +var _ distribution.BlobStore = &proxyBlobStore{} + +// inflight tracks currently downloading blobs +var inflight = make(map[digest.Digest]struct{}) + +// mu protects inflight +var mu sync.Mutex + +func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) { + w.Header().Set("Content-Length", strconv.FormatInt(length, 10)) + w.Header().Set("Content-Type", mediaType) + w.Header().Set("Docker-Content-Digest", digest.String()) + w.Header().Set("Etag", digest.String()) +} + +func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) { + desc, err := pbs.remoteStore.Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + if w, ok := writer.(http.ResponseWriter); ok { + setResponseHeaders(w, desc.Size, desc.MediaType, dgst) + } + + remoteReader, err := pbs.remoteStore.Open(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + defer remoteReader.Close() + + _, err = io.CopyN(writer, remoteReader, desc.Size) + if err != nil { + return distribution.Descriptor{}, err + } + + proxyMetrics.BlobPush(uint64(desc.Size)) + + return desc, nil +} + +func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) { + localDesc, err := pbs.localStore.Stat(ctx, dgst) + if err != nil { + // Stat can report a zero sized file here if it's checked between creation + // and population. 
Return nil error, and continue + return false, nil + } + + if err == nil { + proxyMetrics.BlobPush(uint64(localDesc.Size)) + return true, pbs.localStore.ServeBlob(ctx, w, r, dgst) + } + + return false, nil + +} + +func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error { + defer func() { + mu.Lock() + delete(inflight, dgst) + mu.Unlock() + }() + + var desc distribution.Descriptor + var err error + var bw distribution.BlobWriter + + bw, err = pbs.localStore.Create(ctx) + if err != nil { + return err + } + + desc, err = pbs.copyContent(ctx, dgst, bw) + if err != nil { + return err + } + + _, err = bw.Commit(ctx, desc) + if err != nil { + return err + } + + return nil +} + +func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + served, err := pbs.serveLocal(ctx, w, r, dgst) + if err != nil { + dcontext.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) + return err + } + + if served { + return nil + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return err + } + + mu.Lock() + _, ok := inflight[dgst] + if ok { + mu.Unlock() + _, err := pbs.copyContent(ctx, dgst, w) + return err + } + inflight[dgst] = struct{}{} + mu.Unlock() + + go func(dgst digest.Digest) { + if err := pbs.storeLocal(ctx, dgst); err != nil { + dcontext.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) + } + + blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) + if err != nil { + dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return + } + + pbs.scheduler.AddBlob(blobRef, repositoryTTL) + }(dgst) + + _, err = pbs.copyContent(ctx, dgst, w) + if err != nil { + return err + } + return nil +} + +func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := pbs.localStore.Stat(ctx, dgst) + if err == nil { + return desc, err + } + + if err != distribution.ErrBlobUnknown { + return distribution.Descriptor{}, err + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return distribution.Descriptor{}, err + } + + return pbs.remoteStore.Stat(ctx, dgst) +} + +func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + blob, err := pbs.localStore.Get(ctx, dgst) + if err == nil { + return blob, nil + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return []byte{}, err + } + + blob, err = pbs.remoteStore.Get(ctx, dgst) + if err != nil { + return []byte{}, err + } + + _, err = pbs.localStore.Put(ctx, "", blob) + if err != nil { + return []byte{}, err + } + return blob, nil +} + +// Unsupported functions +func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Open(ctx context.Context, dgst 
digest.Digest) (distribution.ReadSeekCloser, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} diff --git a/registry/proxy/proxyblobstore_test.go b/registry/proxy/proxyblobstore_test.go new file mode 100644 index 00000000..9bee48d8 --- /dev/null +++ b/registry/proxy/proxyblobstore_test.go @@ -0,0 +1,416 @@ +package proxy + +import ( + "context" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/filesystem" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/opencontainers/go-digest" +) + +var sbsMu sync.Mutex + +type statsBlobStore struct { + stats map[string]int + blobs distribution.BlobStore +} + +func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + sbsMu.Lock() + sbs.stats["put"]++ + sbsMu.Unlock() + + return sbs.blobs.Put(ctx, mediaType, p) +} + +func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + sbsMu.Lock() + sbs.stats["get"]++ + sbsMu.Unlock() + + return sbs.blobs.Get(ctx, dgst) +} + +func (sbs statsBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + sbsMu.Lock() + sbs.stats["create"]++ + sbsMu.Unlock() + + return sbs.blobs.Create(ctx, options...) +} + +func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + sbsMu.Lock() + sbs.stats["resume"]++ + sbsMu.Unlock() + + return sbs.blobs.Resume(ctx, id) +} + +func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + sbsMu.Lock() + sbs.stats["open"]++ + sbsMu.Unlock() + + return sbs.blobs.Open(ctx, dgst) +} + +func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + sbsMu.Lock() + sbs.stats["serveblob"]++ + sbsMu.Unlock() + + return sbs.blobs.ServeBlob(ctx, w, r, dgst) +} + +func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + + sbsMu.Lock() + sbs.stats["stat"]++ + sbsMu.Unlock() + + return sbs.blobs.Stat(ctx, dgst) +} + +func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + sbsMu.Lock() + sbs.stats["delete"]++ + sbsMu.Unlock() + + return sbs.blobs.Delete(ctx, dgst) +} + +type testEnv struct { + numUnique int + inRemote []distribution.Descriptor + store proxyBlobStore + ctx context.Context +} + +func (te *testEnv) LocalStats() *map[string]int { + sbsMu.Lock() + ls := te.store.localStore.(statsBlobStore).stats + sbsMu.Unlock() + return &ls +} + +func (te *testEnv) RemoteStats() *map[string]int { + sbsMu.Lock() + rs := te.store.remoteStore.(statsBlobStore).stats + sbsMu.Unlock() + return &rs +} + +// Populate remote store and record the digests +func makeTestEnv(t *testing.T, name string) *testEnv { + nameRef, err := reference.WithName(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + + ctx := context.Background() + + truthDir, err := ioutil.TempDir("", "truth") + if err != nil { + 
t.Fatalf("unable to create tempdir: %s", err) + } + + cacheDir, err := ioutil.TempDir("", "cache") + if err != nil { + t.Fatalf("unable to create tempdir: %s", err) + } + + localDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": truthDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + + // todo: create a tempfile area here + localRegistry, err := storage.NewRegistry(ctx, localDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + localRepo, err := localRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + cacheDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": cacheDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + + truthRegistry, err := storage.NewRegistry(ctx, cacheDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + truthRepo, err := truthRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: truthRepo.Blobs(ctx), + } + + localBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: localRepo.Blobs(ctx), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + + proxyBlobStore := proxyBlobStore{ + repositoryName: nameRef, + remoteStore: truthBlobs, + localStore: localBlobs, + scheduler: s, + authChallenger: &mockChallenger{}, + } + + te := &testEnv{ + store: proxyBlobStore, + ctx: ctx, + } + return te +} + +func makeBlob(size int) []byte { + blob := make([]byte, size, size) + for i := 0; i < size; i++ { + blob[i] = byte('A' + rand.Int()%48) + } + return blob +} + +func init() { + rand.Seed(42) +} + +func perm(m []distribution.Descriptor) []distribution.Descriptor { + for i := 0; i < len(m); i++ { + j := rand.Intn(i + 1) + tmp := m[i] + m[i] = m[j] + m[j] = tmp + } + return m +} + +func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { + var inRemote []distribution.Descriptor + + for i := 0; i < numUnique; i++ { + bytes := makeBlob(size) + for j := 0; j < blobCount/numUnique; j++ { + desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) + if err != nil { + t.Fatalf("Put in store") + } + + inRemote = append(inRemote, desc) + } + } + + te.inRemote = inRemote + te.numUnique = numUnique +} +func TestProxyStoreGet(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + populate(t, te, 1, 10, 1) + _, err := te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 1 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + + _, err = te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 2 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + +} + +func TestProxyStoreStat(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + remoteBlobCount := 1 + populate(t, te, 
remoteBlobCount, 10, 1) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Stat - touches both stores + for _, d := range te.inRemote { + _, err := te.store.Stat(te.ctx, d.Digest) + if err != nil { + t.Fatalf("Error stating proxy store") + } + } + + if (*localStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected local stat count") + } + + if (*remoteStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected remote stat count") + } + + if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) { + t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger) + } + +} + +func TestProxyStoreServeHighConcurrency(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + blobSize := 200 + blobCount := 10 + numUnique := 1 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 16 + testProxyStoreServe(t, te, numClients) +} + +func TestProxyStoreServeMany(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + blobSize := 200 + blobCount := 10 + numUnique := 4 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 4 + testProxyStoreServe(t, te, numClients) +} + +// todo(richardscothern): blobCount must be smaller than num clients +func TestProxyStoreServeBig(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + blobSize := 2 << 20 + blobCount := 4 + numUnique := 2 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 4 + testProxyStoreServe(t, te, numClients) +} + +// testProxyStoreServe will create clients to consume all blobs +// populated in the truth store +func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + var wg sync.WaitGroup + + for i := 0; i < numClients; i++ { + // Serveblob - pulls through blobs + wg.Add(1) + go func() { + defer wg.Done() + for _, remoteBlob := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + bodyBytes := w.Body.Bytes() + localDigest := digest.FromBytes(bodyBytes) + if localDigest != remoteBlob.Digest { + t.Fatalf("Mismatching blob fetch from proxy") + } + } + }() + } + + wg.Wait() + + remoteBlobCount := len(te.inRemote) + sbsMu.Lock() + if (*localStats)["stat"] != remoteBlobCount*numClients && (*localStats)["create"] != te.numUnique { + sbsMu.Unlock() + t.Fatal("Expected: stat:", remoteBlobCount*numClients, "create:", remoteBlobCount) + } + sbsMu.Unlock() + + // Wait for any async storage goroutines to finish + time.Sleep(3 * time.Second) + + sbsMu.Lock() + remoteStatCount := (*remoteStats)["stat"] + remoteOpenCount := (*remoteStats)["open"] + sbsMu.Unlock() + + // Serveblob - blobs come from local + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl := digest.FromBytes(w.Body.Bytes()) + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + localStats = te.LocalStats() + remoteStats = te.RemoteStats() + + // Ensure remote unchanged + sbsMu.Lock() + defer sbsMu.Unlock() + if (*remoteStats)["stat"] != remoteStatCount && (*remoteStats)["open"] != remoteOpenCount { + t.Fatalf("unexpected remote stats: %#v", remoteStats) + } +} diff --git a/registry/proxy/proxymanifeststore.go 
b/registry/proxy/proxymanifeststore.go new file mode 100644 index 00000000..5c7264d9 --- /dev/null +++ b/registry/proxy/proxymanifeststore.go @@ -0,0 +1,96 @@ +package proxy + +import ( + "context" + "time" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/opencontainers/go-digest" +) + +// todo(richardscothern): from cache control header or config +const repositoryTTL = time.Duration(24 * 7 * time.Hour) + +type proxyManifestStore struct { + ctx context.Context + localManifests distribution.ManifestService + remoteManifests distribution.ManifestService + repositoryName reference.Named + scheduler *scheduler.TTLExpirationScheduler + authChallenger authChallenger +} + +var _ distribution.ManifestService = &proxyManifestStore{} + +func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + exists, err := pms.localManifests.Exists(ctx, dgst) + if err != nil { + return false, err + } + if exists { + return true, nil + } + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return false, err + } + return pms.remoteManifests.Exists(ctx, dgst) +} + +func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + // At this point `dgst` was either specified explicitly, or returned by the + // tagstore with the most recent association. + var fromRemote bool + manifest, err := pms.localManifests.Get(ctx, dgst, options...) + if err != nil { + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return nil, err + } + + manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) 
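+ // The local fetch missed, so this is a pull-through: a remote error is
+ // terminal here, since neither store can produce the manifest.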
+ if err != nil { + return nil, err + } + fromRemote = true + } + + _, payload, err := manifest.Payload() + if err != nil { + return nil, err + } + + proxyMetrics.ManifestPush(uint64(len(payload))) + if fromRemote { + proxyMetrics.ManifestPull(uint64(len(payload))) + + _, err = pms.localManifests.Put(ctx, manifest) + if err != nil { + return nil, err + } + + // Schedule the manifest blob for removal + repoBlob, err := reference.WithDigest(pms.repositoryName, dgst) + if err != nil { + dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return nil, err + } + + pms.scheduler.AddManifest(repoBlob, repositoryTTL) + // Ensure the manifest blob is cleaned up + //pms.scheduler.AddBlob(blobRef, repositoryTTL) + + } + + return manifest, err +} + +func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var d digest.Digest + return d, distribution.ErrUnsupported +} + +func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} diff --git a/registry/proxy/proxymanifeststore_test.go b/registry/proxy/proxymanifeststore_test.go new file mode 100644 index 00000000..5f3bd34e --- /dev/null +++ b/registry/proxy/proxymanifeststore_test.go @@ -0,0 +1,275 @@ +package proxy + +import ( + "context" + "io" + "sync" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +type statsManifest struct { + manifests distribution.ManifestService + stats map[string]int +} + +type manifestStoreTestEnv struct { + manifestDigest digest.Digest // digest of the signed manifest in the local storage + manifests proxyManifestStore +} + +func (te manifestStoreTestEnv) LocalStats() *map[string]int { + ls := te.manifests.localManifests.(statsManifest).stats + return &ls +} + +func (te manifestStoreTestEnv) RemoteStats() *map[string]int { + rs := te.manifests.remoteManifests.(statsManifest).stats + return &rs +} + +func (sm statsManifest) Delete(ctx context.Context, dgst digest.Digest) error { + sm.stats["delete"]++ + return sm.manifests.Delete(ctx, dgst) +} + +func (sm statsManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + sm.stats["exists"]++ + return sm.manifests.Exists(ctx, dgst) +} + +func (sm statsManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm.stats["get"]++ + return sm.manifests.Get(ctx, dgst) +} + +func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + sm.stats["put"]++ + return sm.manifests.Put(ctx, manifest) +} + +type mockChallenger struct { + sync.Mutex + count int +} + +// Called for remote operations only +func (m *mockChallenger) tryEstablishChallenges(context.Context) error { + m.Lock() + defer m.Unlock() + 
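+ // count records how many remote operations triggered a challenge
+ // handshake; the tests assert on it to verify which calls hit the remote.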
m.count++ + return nil +} + +func (m *mockChallenger) credentialStore() auth.CredentialStore { + return nil +} + +func (m *mockChallenger) challengeManager() challenge.Manager { + return nil +} + +func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { + nameRef, err := reference.WithName(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), + storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), + storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + truthRepo, err := truthRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + tr, err := truthRepo.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + truthManifests := statsManifest{ + manifests: tr, + stats: make(map[string]int), + } + + manifestDigest, err := populateRepo(ctx, t, truthRepo, name, tag) + if err != nil { + t.Fatalf(err.Error()) + } + + localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + localRepo, err := localRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + lr, err := localRepo.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + + localManifests := statsManifest{ + manifests: lr, + stats: make(map[string]int), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + return &manifestStoreTestEnv{ + manifestDigest: manifestDigest, + manifests: proxyManifestStore{ + ctx: ctx, + localManifests: localManifests, + remoteManifests: truthManifests, + scheduler: s, + repositoryName: nameRef, + authChallenger: &mockChallenger{}, + }, + } +} + +func populateRepo(ctx context.Context, t *testing.T, repository distribution.Repository, name, tag string) (digest.Digest, error) { + m := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: name, + Tag: tag, + } + + for i := 0; i < 2; i++ { + wr, err := repository.Blobs(ctx).Create(ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + rs, ts, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ts) + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm, err := schema1.Sign(&m, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + ms, err := repository.Manifests(ctx) + if err != nil { + t.Fatalf(err.Error()) + } + dgst, err := ms.Put(ctx, sm) + if err != nil { + t.Fatalf("unexpected errors putting manifest: %v", err) + } + + return dgst, nil +} + +// TestProxyManifests contains basic acceptance tests +// for the pull-through behavior +func TestProxyManifests(t 
*testing.T) {
+ name := "foo/bar"
+ env := newManifestStoreTestEnv(t, name, "latest")
+
+ localStats := env.LocalStats()
+ remoteStats := env.RemoteStats()
+
+ ctx := context.Background()
+ // Stat - must check local and remote
+ exists, err := env.manifests.Exists(ctx, env.manifestDigest)
+ if err != nil {
+ t.Fatalf("Error checking existence: %v", err)
+ }
+ if !exists {
+ t.Errorf("Unexpected non-existent manifest")
+ }
+
+ if (*localStats)["exists"] != 1 || (*remoteStats)["exists"] != 1 {
+ t.Errorf("Unexpected exists count:\n%v\n%v", localStats, remoteStats)
+ }
+
+ if env.manifests.authChallenger.(*mockChallenger).count != 1 {
+ t.Fatalf("Expected 1 auth challenge, got %#v", env.manifests.authChallenger)
+ }
+
+ // Get - should succeed and pull manifest into local
+ _, err = env.manifests.Get(ctx, env.manifestDigest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if (*localStats)["get"] != 1 || (*remoteStats)["get"] != 1 {
+ t.Errorf("Unexpected get count")
+ }
+
+ if (*localStats)["put"] != 1 {
+ t.Errorf("Expected local put")
+ }
+
+ if env.manifests.authChallenger.(*mockChallenger).count != 2 {
+ t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
+ }
+
+ // Stat - should only go to local
+ exists, err = env.manifests.Exists(ctx, env.manifestDigest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("Unexpected non-existent manifest")
+ }
+
+ if (*localStats)["exists"] != 2 || (*remoteStats)["exists"] != 1 {
+ t.Errorf("Unexpected exists count")
+ }
+
+ if env.manifests.authChallenger.(*mockChallenger).count != 2 {
+ t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
+ }
+
+ // Get proxied - won't require another auth challenge
+ _, err = env.manifests.Get(ctx, env.manifestDigest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if env.manifests.authChallenger.(*mockChallenger).count != 2 {
+ t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
+ }
+}
diff --git a/registry/proxy/proxymetrics.go b/registry/proxy/proxymetrics.go
new file mode 100644
index 00000000..d3d84d78
--- /dev/null
+++ b/registry/proxy/proxymetrics.go
@@ -0,0 +1,74 @@
+package proxy
+
+import (
+ "expvar"
+ "sync/atomic"
+)
+
+// Metrics is used to hold metric counters
+// related to the proxy
+type Metrics struct {
+ Requests uint64
+ Hits uint64
+ Misses uint64
+ BytesPulled uint64
+ BytesPushed uint64
+}
+
+type proxyMetricsCollector struct {
+ blobMetrics Metrics
+ manifestMetrics Metrics
+}
+
+// BlobPull tracks metrics about blobs pulled into the cache
+func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) {
+ atomic.AddUint64(&pmc.blobMetrics.Misses, 1)
+ atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled)
+}
+
+// BlobPush tracks metrics about blobs pushed to clients
+func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) {
+ atomic.AddUint64(&pmc.blobMetrics.Requests, 1)
+ atomic.AddUint64(&pmc.blobMetrics.Hits, 1)
+ atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed)
+}
+
+// ManifestPull tracks metrics related to Manifests pulled into the cache
+func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) {
+ atomic.AddUint64(&pmc.manifestMetrics.Misses, 1)
+ atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled)
+}
+
+// ManifestPush tracks metrics about manifests pushed to clients
+func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) {
+ atomic.AddUint64(&pmc.manifestMetrics.Requests, 1)
+ atomic.AddUint64(&pmc.manifestMetrics.Hits, 1)
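+ // Every push to a client counts as a request and a hit here; misses are
+ // recorded by ManifestPull when the manifest had to be fetched from the
+ // upstream first.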
atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) +} + +// proxyMetrics tracks metrics about the proxy cache. This is +// kept globally and made available via expvar. +var proxyMetrics = &proxyMetricsCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + pm := registry.(*expvar.Map).Get("proxy") + if pm == nil { + pm = &expvar.Map{} + pm.(*expvar.Map).Init() + registry.(*expvar.Map).Set("proxy", pm) + } + + pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { + return proxyMetrics.blobMetrics + })) + + pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { + return proxyMetrics.manifestMetrics + })) + +} diff --git a/registry/proxy/proxyregistry.go b/registry/proxy/proxyregistry.go new file mode 100644 index 00000000..e8a8f352 --- /dev/null +++ b/registry/proxy/proxyregistry.go @@ -0,0 +1,263 @@ +package proxy + +import ( + "context" + "fmt" + "net/http" + "net/url" + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" +) + +// proxyingRegistry fetches content from a remote registry and caches it locally +type proxyingRegistry struct { + embedded distribution.Namespace // provides local registry functionality + scheduler *scheduler.TTLExpirationScheduler + remoteURL url.URL + authChallenger authChallenger +} + +// NewRegistryPullThroughCache creates a registry acting as a pull through cache +func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { + remoteURL, err := url.Parse(config.RemoteURL) + if err != nil { + return nil, err + } + + v := storage.NewVacuum(ctx, driver) + s := scheduler.New(ctx, driver, "/scheduler-state.json") + s.OnBlobExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + blobs := repo.Blobs(ctx) + + // Clear the repository reference and descriptor caches + err = blobs.Delete(ctx, r.Digest()) + if err != nil { + return err + } + + err = v.RemoveBlob(r.Digest().String()) + if err != nil { + return err + } + + return nil + }) + + s.OnManifestExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + manifests, err := repo.Manifests(ctx) + if err != nil { + return err + } + err = manifests.Delete(ctx, r.Digest()) + if err != nil { + return err + } + return nil + }) + + err = s.Start() + if err != nil { + return nil, err + } + + cs, err := configureAuth(config.Username, config.Password, config.RemoteURL) + if err != nil { + return nil, err + } + + return 
&proxyingRegistry{ + embedded: registry, + scheduler: s, + remoteURL: *remoteURL, + authChallenger: &remoteAuthChallenger{ + remoteURL: *remoteURL, + cm: challenge.NewSimpleManager(), + cs: cs, + }, + }, nil +} + +func (pr *proxyingRegistry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + return pr.embedded.Repositories(ctx, repos, last) +} + +func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { + c := pr.authChallenger + + tkopts := auth.TokenHandlerOptions{ + Transport: http.DefaultTransport, + Credentials: c.credentialStore(), + Scopes: []auth.Scope{ + auth.RepositoryScope{ + Repository: name.Name(), + Actions: []string{"pull"}, + }, + }, + Logger: dcontext.GetLogger(ctx), + } + + tr := transport.NewTransport(http.DefaultTransport, + auth.NewAuthorizer(c.challengeManager(), + auth.NewTokenHandlerWithOptions(tkopts))) + + localRepo, err := pr.embedded.Repository(ctx, name) + if err != nil { + return nil, err + } + localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) + if err != nil { + return nil, err + } + + remoteRepo, err := client.NewRepository(name, pr.remoteURL.String(), tr) + if err != nil { + return nil, err + } + + remoteManifests, err := remoteRepo.Manifests(ctx) + if err != nil { + return nil, err + } + + return &proxiedRepository{ + blobStore: &proxyBlobStore{ + localStore: localRepo.Blobs(ctx), + remoteStore: remoteRepo.Blobs(ctx), + scheduler: pr.scheduler, + repositoryName: name, + authChallenger: pr.authChallenger, + }, + manifests: &proxyManifestStore{ + repositoryName: name, + localManifests: localManifests, // Options? 
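+ // remoteManifests talks to the upstream repository over the
+ // token-authorized transport constructed above.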
+ remoteManifests: remoteManifests, + ctx: ctx, + scheduler: pr.scheduler, + authChallenger: pr.authChallenger, + }, + name: name, + tags: &proxyTagService{ + localTags: localRepo.Tags(ctx), + remoteTags: remoteRepo.Tags(ctx), + authChallenger: pr.authChallenger, + }, + }, nil +} + +func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator { + return pr.embedded.Blobs() +} + +func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter { + return pr.embedded.BlobStatter() +} + +// authChallenger encapsulates a request to the upstream to establish credential challenges +type authChallenger interface { + tryEstablishChallenges(context.Context) error + challengeManager() challenge.Manager + credentialStore() auth.CredentialStore +} + +type remoteAuthChallenger struct { + remoteURL url.URL + sync.Mutex + cm challenge.Manager + cs auth.CredentialStore +} + +func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore { + return r.cs +} + +func (r *remoteAuthChallenger) challengeManager() challenge.Manager { + return r.cm +} + +// tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist +func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error { + r.Lock() + defer r.Unlock() + + remoteURL := r.remoteURL + remoteURL.Path = "/v2/" + challenges, err := r.cm.GetChallenges(remoteURL) + if err != nil { + return err + } + + if len(challenges) > 0 { + return nil + } + + // establish challenge type with upstream + if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil { + return err + } + + dcontext.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) + return nil +} + +// proxiedRepository uses proxying blob and manifest services to serve content +// locally, or pulling it through from a remote and caching it locally if it doesn't +// already exist +type proxiedRepository struct { + blobStore distribution.BlobStore + manifests distribution.ManifestService + name reference.Named + tags distribution.TagService +} + +func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + return pr.manifests, nil +} + +func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { + return pr.blobStore +} + +func (pr *proxiedRepository) Named() reference.Named { + return pr.name +} + +func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { + return pr.tags +} diff --git a/registry/proxy/proxytagservice.go b/registry/proxy/proxytagservice.go new file mode 100644 index 00000000..6a925639 --- /dev/null +++ b/registry/proxy/proxytagservice.go @@ -0,0 +1,66 @@ +package proxy + +import ( + "context" + + "github.com/docker/distribution" +) + +// proxyTagService supports local and remote lookup of tags. +type proxyTagService struct { + localTags distribution.TagService + remoteTags distribution.TagService + authChallenger authChallenger +} + +var _ distribution.TagService = proxyTagService{} + +// Get attempts to get the most recent digest for the tag by checking the remote +// tag service first and then caching it locally. 
If the remote is unavailable +// the local association is returned +func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + err := pt.authChallenger.tryEstablishChallenges(ctx) + if err == nil { + desc, err := pt.remoteTags.Get(ctx, tag) + if err == nil { + err := pt.localTags.Tag(ctx, tag, desc) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + } + + desc, err := pt.localTags.Get(ctx, tag) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil +} + +func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} + +func (pt proxyTagService) Untag(ctx context.Context, tag string) error { + err := pt.localTags.Untag(ctx, tag) + if err != nil { + return err + } + return nil +} + +func (pt proxyTagService) All(ctx context.Context) ([]string, error) { + err := pt.authChallenger.tryEstablishChallenges(ctx) + if err == nil { + tags, err := pt.remoteTags.All(ctx) + if err == nil { + return tags, err + } + } + return pt.localTags.All(ctx) +} + +func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return []string{}, distribution.ErrUnsupported +} diff --git a/registry/proxy/proxytagservice_test.go b/registry/proxy/proxytagservice_test.go new file mode 100644 index 00000000..1314121e --- /dev/null +++ b/registry/proxy/proxytagservice_test.go @@ -0,0 +1,182 @@ +package proxy + +import ( + "context" + "reflect" + "sort" + "sync" + "testing" + + "github.com/docker/distribution" +) + +type mockTagStore struct { + mapping map[string]distribution.Descriptor + sync.Mutex +} + +var _ distribution.TagService = &mockTagStore{} + +func (m *mockTagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + m.Lock() + defer m.Unlock() + + if d, ok := m.mapping[tag]; ok { + return d, nil + } + return distribution.Descriptor{}, distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + m.Lock() + defer m.Unlock() + + m.mapping[tag] = desc + return nil +} + +func (m *mockTagStore) Untag(ctx context.Context, tag string) error { + m.Lock() + defer m.Unlock() + + if _, ok := m.mapping[tag]; ok { + delete(m.mapping, tag) + return nil + } + return distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) All(ctx context.Context) ([]string, error) { + m.Lock() + defer m.Unlock() + + var tags []string + for tag := range m.mapping { + tags = append(tags, tag) + } + + return tags, nil +} + +func (m *mockTagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func testProxyTagService(local, remote map[string]distribution.Descriptor) *proxyTagService { + if local == nil { + local = make(map[string]distribution.Descriptor) + } + if remote == nil { + remote = make(map[string]distribution.Descriptor) + } + return &proxyTagService{ + localTags: &mockTagStore{mapping: local}, + remoteTags: &mockTagStore{mapping: remote}, + authChallenger: &mockChallenger{}, + } +} + +func TestGet(t *testing.T) { + remoteDesc := distribution.Descriptor{Size: 42} + remoteTag := "remote" + proxyTags := testProxyTagService(map[string]distribution.Descriptor{remoteTag: remoteDesc}, nil) + + ctx := context.Background() + + // Get pre-loaded tag + d, err := proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if 
proxyTags.authChallenger.(*mockChallenger).count != 1 {
+ t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger)
+ }
+
+ if !reflect.DeepEqual(d, remoteDesc) {
+ t.Fatal("descriptor returned by Get does not match the tagged descriptor")
+ }
+
+ local, err := proxyTags.localTags.Get(ctx, remoteTag)
+ if err != nil {
+ t.Fatal("remote tag not pulled into store")
+ }
+
+ if !reflect.DeepEqual(local, remoteDesc) {
+ t.Fatalf("unexpected descriptor pulled through")
+ }
+
+ // Manually overwrite remote tag
+ newRemoteDesc := distribution.Descriptor{Size: 43}
+ err = proxyTags.remoteTags.Tag(ctx, remoteTag, newRemoteDesc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ d, err = proxyTags.Get(ctx, remoteTag)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if proxyTags.authChallenger.(*mockChallenger).count != 2 {
+ t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger)
+ }
+
+ if !reflect.DeepEqual(d, newRemoteDesc) {
+ t.Fatal("descriptor returned by Get does not match the updated remote descriptor")
+ }
+
+ _, err = proxyTags.localTags.Get(ctx, remoteTag)
+ if err != nil {
+ t.Fatal("remote tag not pulled into store")
+ }
+
+ // Untag, ensure it's removed locally, but present in remote
+ err = proxyTags.Untag(ctx, remoteTag)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = proxyTags.localTags.Get(ctx, remoteTag)
+ if err == nil {
+ t.Fatalf("Expected error getting Untag'd tag")
+ }
+
+ _, err = proxyTags.remoteTags.Get(ctx, remoteTag)
+ if err != nil {
+ t.Fatalf("remote tag should not be removed by proxyTagService.Untag")
+ }
+
+ _, err = proxyTags.Get(ctx, remoteTag)
+ if err != nil {
+ t.Fatal("untagged tag should be pulled through")
+ }
+
+ if proxyTags.authChallenger.(*mockChallenger).count != 3 {
+ t.Fatalf("Expected 3 auth challenge calls, got %#v", proxyTags.authChallenger)
+ }
+
+ // Add another tag. Ensure both tags appear in 'All'
+ err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ all, err := proxyTags.All(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(all) != 2 {
+ t.Fatalf("Unexpected tag length returned from All(): %d", len(all))
+ }
+
+ sort.Strings(all)
+ if all[0] != "funtag" || all[1] != "remote" {
+ t.Fatalf("Unexpected tags returned from All(): %v", all)
+ }
+
+ if proxyTags.authChallenger.(*mockChallenger).count != 4 {
+ t.Fatalf("Expected 4 auth challenge calls, got %#v", proxyTags.authChallenger)
+ }
+}
diff --git a/registry/proxy/scheduler/scheduler.go b/registry/proxy/scheduler/scheduler.go
new file mode 100644
index 00000000..f8340184
--- /dev/null
+++ b/registry/proxy/scheduler/scheduler.go
@@ -0,0 +1,260 @@
+package scheduler
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ dcontext "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/storage/driver"
+)
+
+// expiryFunc is called when a repository's TTL expires
+type expiryFunc func(reference.Reference) error
+
+const (
+ entryTypeBlob = iota
+ entryTypeManifest
+ indexSaveFrequency = 5 * time.Second
+)
+
+// schedulerEntry represents an entry in the scheduler
+// fields are exported for serialization
+type schedulerEntry struct {
+ Key string `json:"Key"`
+ Expiry time.Time `json:"ExpiryData"`
+ EntryType int `json:"EntryType"`
+
+ timer *time.Timer
+}
+
+// New returns a new instance of the scheduler
+func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler {
+ return &TTLExpirationScheduler{
+ entries: make(map[string]*schedulerEntry),
+ driver: driver,
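+ // Entries are serialized to pathToStateFile through the storage driver
+ // whenever the index is dirty; see the save ticker in Start and
+ // writeState below.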
+ pathToStateFile: path, + ctx: ctx, + stopped: true, + doneChan: make(chan struct{}), + saveTimer: time.NewTicker(indexSaveFrequency), + } +} + +// TTLExpirationScheduler is a scheduler used to perform actions +// when TTLs expire +type TTLExpirationScheduler struct { + sync.Mutex + + entries map[string]*schedulerEntry + + driver driver.StorageDriver + ctx context.Context + pathToStateFile string + + stopped bool + + onBlobExpire expiryFunc + onManifestExpire expiryFunc + + indexDirty bool + saveTimer *time.Ticker + doneChan chan struct{} +} + +// OnBlobExpire is called when a scheduled blob's TTL expires +func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + + ttles.onBlobExpire = f +} + +// OnManifestExpire is called when a scheduled manifest's TTL expires +func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + + ttles.onManifestExpire = f +} + +// AddBlob schedules a blob cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(blobRef, ttl, entryTypeBlob) + return nil +} + +// AddManifest schedules a manifest cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(manifestRef, ttl, entryTypeManifest) + return nil +} + +// Start starts the scheduler +func (ttles *TTLExpirationScheduler) Start() error { + ttles.Lock() + defer ttles.Unlock() + + err := ttles.readState() + if err != nil { + return err + } + + if !ttles.stopped { + return fmt.Errorf("Scheduler already started") + } + + dcontext.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") + ttles.stopped = false + + // Start timer for each deserialized entry + for _, entry := range ttles.entries { + entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) + } + + // Start a ticker to periodically save the entries index + + go func() { + for { + select { + case <-ttles.saveTimer.C: + ttles.Lock() + if !ttles.indexDirty { + ttles.Unlock() + continue + } + + err := ttles.writeState() + if err != nil { + dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } else { + ttles.indexDirty = false + } + ttles.Unlock() + + case <-ttles.doneChan: + return + } + } + }() + + return nil +} + +func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) { + entry := &schedulerEntry{ + Key: r.String(), + Expiry: time.Now().Add(ttl), + EntryType: eType, + } + dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { + oldEntry.timer.Stop() + } + ttles.entries[entry.Key] = entry + entry.timer = ttles.startTimer(entry, ttl) + ttles.indexDirty = true +} + +func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { + return time.AfterFunc(ttl, func() { + ttles.Lock() + defer ttles.Unlock() + + var f expiryFunc + + switch entry.EntryType { + case entryTypeBlob: + f = ttles.onBlobExpire + case entryTypeManifest: + f = ttles.onManifestExpire + default: + f = 
func(reference.Reference) error { + return fmt.Errorf("scheduler entry type") + } + } + + ref, err := reference.Parse(entry.Key) + if err == nil { + if err := f(ref); err != nil { + dcontext.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + } + } else { + dcontext.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) + } + + delete(ttles.entries, entry.Key) + ttles.indexDirty = true + }) +} + +// Stop stops the scheduler. +func (ttles *TTLExpirationScheduler) Stop() { + ttles.Lock() + defer ttles.Unlock() + + if err := ttles.writeState(); err != nil { + dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + + for _, entry := range ttles.entries { + entry.timer.Stop() + } + + close(ttles.doneChan) + ttles.saveTimer.Stop() + ttles.stopped = true +} + +func (ttles *TTLExpirationScheduler) writeState() error { + jsonBytes, err := json.Marshal(ttles.entries) + if err != nil { + return err + } + + err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) + if err != nil { + return err + } + + return nil +} + +func (ttles *TTLExpirationScheduler) readState() error { + if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil + default: + return err + } + } + + bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) + if err != nil { + return err + } + + err = json.Unmarshal(bytes, &ttles.entries) + if err != nil { + return err + } + return nil +} diff --git a/registry/proxy/scheduler/scheduler_test.go b/registry/proxy/scheduler/scheduler_test.go new file mode 100644 index 00000000..4d69d5b5 --- /dev/null +++ b/registry/proxy/scheduler/scheduler_test.go @@ -0,0 +1,211 @@ +package scheduler + +import ( + "encoding/json" + "sync" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) { + ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + return ref1, ref2, ref3 +} + +func TestSchedule(t *testing.T) { + ref1, ref2, ref3 := testRefs(t) + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + ref3.String(): true, + } + + var mu sync.Mutex + s := New(context.Background(), inmemory.New(), "/ttl") + deleteFunc := func(repoName reference.Reference) error { + if len(remainingRepos) == 0 { + t.Fatalf("Incorrect expiry count") + } + _, ok := remainingRepos[repoName.String()] + if !ok { + t.Fatalf("Trying to remove nonexistent repo: %s", repoName) + } + t.Log("removing", repoName) + mu.Lock() + delete(remainingRepos, repoName.String()) + mu.Unlock() + + return nil + } + s.onBlobExpire = deleteFunc + err := s.Start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + + s.add(ref1, 
3*timeUnit, entryTypeBlob) + s.add(ref2, 1*timeUnit, entryTypeBlob) + + func() { + s.Lock() + s.add(ref3, 1*timeUnit, entryTypeBlob) + s.Unlock() + + }() + + // Ensure all repos are deleted + <-time.After(50 * timeUnit) + + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func TestRestoreOld(t *testing.T) { + ref1, ref2, _ := testRefs(t) + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + } + + var wg sync.WaitGroup + wg.Add(len(remainingRepos)) + var mu sync.Mutex + deleteFunc := func(r reference.Reference) error { + mu.Lock() + defer mu.Unlock() + if r.String() == ref1.String() && len(remainingRepos) == 2 { + t.Errorf("ref1 should not be removed first") + } + _, ok := remainingRepos[r.String()] + if !ok { + t.Fatalf("Trying to remove nonexistent repo: %s", r) + } + delete(remainingRepos, r.String()) + wg.Done() + return nil + } + + timeUnit := time.Millisecond + serialized, err := json.Marshal(&map[string]schedulerEntry{ + ref1.String(): { + Expiry: time.Now().Add(10 * timeUnit), + Key: ref1.String(), + EntryType: 0, + }, + ref2.String(): { + Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first + Key: ref2.String(), + EntryType: 0, + }, + }) + if err != nil { + t.Fatalf("Error serializing test data: %s", err.Error()) + } + + ctx := context.Background() + pathToStatFile := "/ttl" + fs := inmemory.New() + err = fs.PutContent(ctx, pathToStatFile, serialized) + if err != nil { + t.Fatal("Unable to write serialized data to fs") + } + s := New(context.Background(), fs, "/ttl") + s.OnBlobExpire(deleteFunc) + err = s.Start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + defer s.Stop() + + wg.Wait() + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func TestStopRestore(t *testing.T) { + ref1, ref2, _ := testRefs(t) + + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + } + + var mu sync.Mutex + deleteFunc := func(r reference.Reference) error { + mu.Lock() + delete(remainingRepos, r.String()) + mu.Unlock() + return nil + } + + fs := inmemory.New() + pathToStateFile := "/ttl" + s := New(context.Background(), fs, pathToStateFile) + s.onBlobExpire = deleteFunc + + err := s.Start() + if err != nil { + t.Fatalf(err.Error()) + } + s.add(ref1, 300*timeUnit, entryTypeBlob) + s.add(ref2, 100*timeUnit, entryTypeBlob) + + // Start and stop before all operations complete + // state will be written to fs + s.Stop() + time.Sleep(10 * time.Millisecond) + + // v2 will restore state from fs + s2 := New(context.Background(), fs, pathToStateFile) + s2.onBlobExpire = deleteFunc + err = s2.Start() + if err != nil { + t.Fatalf("Error starting v2: %s", err.Error()) + } + + <-time.After(500 * timeUnit) + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } + +} + +func TestDoubleStart(t *testing.T) { + s := New(context.Background(), inmemory.New(), "/ttl") + err := s.Start() + if err != nil { + t.Fatalf("Unable to start scheduler") + } + err = s.Start() + if err == nil { + t.Fatalf("Scheduler started twice without error") + } +} diff --git a/registry/registry.go b/registry/registry.go new file mode 100644 index 00000000..36aef47c --- /dev/null +++ b/registry/registry.go @@ -0,0 +1,370 @@ +package registry + +import ( + "context" + 
"crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + "time" + + "rsc.io/letsencrypt" + + logstash "github.com/bshuster-repo/logrus-logstash-hook" + "github.com/bugsnag/bugsnag-go" + "github.com/docker/distribution/configuration" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/listener" + "github.com/docker/distribution/uuid" + "github.com/docker/distribution/version" + "github.com/docker/go-metrics" + gorhandlers "github.com/gorilla/handlers" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/yvasiyarov/gorelic" +) + +// ServeCmd is a cobra command for running the registry. +var ServeCmd = &cobra.Command{ + Use: "serve ", + Short: "`serve` stores and distributes Docker images", + Long: "`serve` stores and distributes Docker images.", + Run: func(cmd *cobra.Command, args []string) { + + // setup context + ctx := dcontext.WithVersion(dcontext.Background(), version.Version) + + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + if config.HTTP.Debug.Addr != "" { + go func(addr string) { + log.Infof("debug server listening %v", addr) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("error listening on debug interface: %v", err) + } + }(config.HTTP.Debug.Addr) + } + + registry, err := NewRegistry(ctx, config) + if err != nil { + log.Fatalln(err) + } + + if config.HTTP.Debug.Prometheus.Enabled { + path := config.HTTP.Debug.Prometheus.Path + if path == "" { + path = "/metrics" + } + log.Info("providing prometheus metrics on ", path) + http.Handle(path, metrics.Handler()) + } + + if err = registry.ListenAndServe(); err != nil { + log.Fatalln(err) + } + }, +} + +// A Registry represents a complete instance of the registry. +// TODO(aaronl): It might make sense for Registry to become an interface. +type Registry struct { + config *configuration.Configuration + app *handlers.App + server *http.Server +} + +// NewRegistry creates a new registry from a context and configuration struct. +func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { + var err error + ctx, err = configureLogging(ctx, config) + if err != nil { + return nil, fmt.Errorf("error configuring logger: %v", err) + } + + // inject a logger into the uuid library. warns us if there is a problem + // with uuid generation under low entropy. + uuid.Loggerf = dcontext.GetLogger(ctx).Warnf + + app := handlers.NewApp(ctx, config) + // TODO(aaronl): The global scope of the health checks means NewRegistry + // can only be called once per process. + app.RegisterHealthChecks() + handler := configureReporting(app) + handler = alive("/", handler) + handler = health.Handler(handler) + handler = panicHandler(handler) + if !config.Log.AccessLog.Disabled { + handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) + } + + server := &http.Server{ + Handler: handler, + } + + return &Registry{ + app: app, + config: config, + server: server, + }, nil +} + +// ListenAndServe runs the registry's HTTP server. 
+func (registry *Registry) ListenAndServe() error { + config := registry.config + + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) + if err != nil { + return err + } + + if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: nextProtos(config), + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + if config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + if config.HTTP.TLS.Certificate != "" { + return fmt.Errorf("cannot specify both certificate and Let's Encrypt") + } + var m letsencrypt.Manager + if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil { + return err + } + if !m.Registered() { + if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil { + return err + } + } + if len(config.HTTP.TLS.LetsEncrypt.Hosts) > 0 { + m.SetHosts(config.HTTP.TLS.LetsEncrypt.Hosts) + } + tlsConf.GetCertificate = m.GetCertificate + } else { + tlsConf.Certificates = make([]tls.Certificate, 1) + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + return err + } + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + return err + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + return fmt.Errorf("Could not add CA to pool") + } + } + + for _, subj := range pool.Subjects() { + dcontext.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + ln = tls.NewListener(ln, tlsConf) + dcontext.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) + } else { + dcontext.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) + } + + return registry.server.Serve(ln) +} + +func configureReporting(app *handlers.App) http.Handler { + var handler http.Handler = app + + if app.Config.Reporting.Bugsnag.APIKey != "" { + bugsnagConfig := bugsnag.Configuration{ + APIKey: app.Config.Reporting.Bugsnag.APIKey, + // TODO(brianbland): provide the registry version here + // AppVersion: "2.0", + } + if app.Config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage + } + if app.Config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + handler = bugsnag.Handler(handler) + } + + if app.Config.Reporting.NewRelic.LicenseKey != "" { + agent := gorelic.NewAgent() + agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey + if app.Config.Reporting.NewRelic.Name != "" { + agent.NewrelicName = app.Config.Reporting.NewRelic.Name + } + agent.CollectHTTPStat = true + agent.Verbose = app.Config.Reporting.NewRelic.Verbose + agent.Run() + + handler = agent.WrapHTTPHandler(handler) + } + + return handler +} + +// configureLogging prepares the context with a logger using the +// configuration. 
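+// For example (illustrative values), a configuration such as:
+//
+//	log:
+//	  level: debug
+//	  formatter: json
+//	  fields:
+//	    service: registry
+//
+// selects the JSON formatter and attaches the static fields to every log
+// line through the returned context.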
+func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) {
+ if config.Log.Level == "" && config.Log.Formatter == "" {
+ // If no config for logging is set, fallback to deprecated "Loglevel".
+ log.SetLevel(logLevel(config.Loglevel))
+ ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx))
+ return ctx, nil
+ }
+
+ log.SetLevel(logLevel(config.Log.Level))
+
+ formatter := config.Log.Formatter
+ if formatter == "" {
+ formatter = "text" // default formatter
+ }
+
+ switch formatter {
+ case "json":
+ log.SetFormatter(&log.JSONFormatter{
+ TimestampFormat: time.RFC3339Nano,
+ })
+ case "text":
+ log.SetFormatter(&log.TextFormatter{
+ TimestampFormat: time.RFC3339Nano,
+ })
+ case "logstash":
+ log.SetFormatter(&logstash.LogstashFormatter{
+ TimestampFormat: time.RFC3339Nano,
+ })
+ default:
+ // An empty formatter was mapped to "text" above, so this branch is
+ // only reachable for an unknown, non-empty formatter.
+ return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
+ }
+
+ if config.Log.Formatter != "" {
+ log.Debugf("using %q logging formatter", config.Log.Formatter)
+ }
+
+ if len(config.Log.Fields) > 0 {
+ // build up the static fields, if present.
+ var fields []interface{}
+ for k := range config.Log.Fields {
+ fields = append(fields, k)
+ }
+
+ ctx = dcontext.WithValues(ctx, config.Log.Fields)
+ ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, fields...))
+ }
+
+ return ctx, nil
+}
+
+func logLevel(level configuration.Loglevel) log.Level {
+ l, err := log.ParseLevel(string(level))
+ if err != nil {
+ l = log.InfoLevel
+ log.Warnf("error parsing level %q: %v, using %q", level, err, l)
+ }
+
+ return l
+}
+
+// panicHandler adds an HTTP handler that recovers from panics raised while
+// serving a request. logrus.Panic forwards the panic message to any
+// pre-configured log hooks, as defined in config.yml.
+func panicHandler(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Panic(fmt.Sprintf("%v", err))
+ }
+ }()
+ handler.ServeHTTP(w, r)
+ })
+}
+
+// alive simply wraps the handler with a route that always returns an http 200
+// response when the path is matched. If the path is not matched, the request
+// is passed to the provided handler. There is no guarantee of anything but
+// that the server is up. Wrap with other handlers (such as health.Handler)
+// for greater effect.
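+//
+// For example, a load balancer can probe the server with:
+//
+//	GET / HTTP/1.1
+//
+// and expect a 200 response with "Cache-Control: no-cache" while the
+// process is up; requests for any other path fall through to the wrapped
+// handler.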
+func alive(path string, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == path { + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + return + } + + handler.ServeHTTP(w, r) + }) +} + +func resolveConfiguration(args []string) (*configuration.Configuration, error) { + var configurationPath string + + if len(args) > 0 { + configurationPath = args[0] + } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { + configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") + } + + if configurationPath == "" { + return nil, fmt.Errorf("configuration path unspecified") + } + + fp, err := os.Open(configurationPath) + if err != nil { + return nil, err + } + + defer fp.Close() + + config, err := configuration.Parse(fp) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) + } + + return config, nil +} + +func nextProtos(config *configuration.Configuration) []string { + switch config.HTTP.HTTP2.Disabled { + case true: + return []string{"http/1.1"} + default: + return []string{"h2", "http/1.1"} + } +} diff --git a/registry/registry_test.go b/registry/registry_test.go new file mode 100644 index 00000000..34673117 --- /dev/null +++ b/registry/registry_test.go @@ -0,0 +1,30 @@ +package registry + +import ( + "reflect" + "testing" + + "github.com/docker/distribution/configuration" +) + +// Tests to ensure nextProtos returns the correct protocols when: +// * config.HTTP.HTTP2.Disabled is not explicitly set => [h2 http/1.1] +// * config.HTTP.HTTP2.Disabled is explicitly set to false [h2 http/1.1] +// * config.HTTP.HTTP2.Disabled is explicitly set to true [http/1.1] +func TestNextProtos(t *testing.T) { + config := &configuration.Configuration{} + protos := nextProtos(config) + if !reflect.DeepEqual(protos, []string{"h2", "http/1.1"}) { + t.Fatalf("expected protos to equal [h2 http/1.1], got %s", protos) + } + config.HTTP.HTTP2.Disabled = false + protos = nextProtos(config) + if !reflect.DeepEqual(protos, []string{"h2", "http/1.1"}) { + t.Fatalf("expected protos to equal [h2 http/1.1], got %s", protos) + } + config.HTTP.HTTP2.Disabled = true + protos = nextProtos(config) + if !reflect.DeepEqual(protos, []string{"http/1.1"}) { + t.Fatalf("expected protos to equal [http/1.1], got %s", protos) + } +} diff --git a/registry/root.go b/registry/root.go new file mode 100644 index 00000000..94d22f3c --- /dev/null +++ b/registry/root.go @@ -0,0 +1,89 @@ +package registry + +import ( + "fmt" + "os" + + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" + "github.com/docker/libtrust" + "github.com/spf13/cobra" +) + +var showVersion bool + +func init() { + RootCmd.AddCommand(ServeCmd) + RootCmd.AddCommand(GCCmd) + GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") + GCCmd.Flags().BoolVarP(&removeUntagged, "delete-untagged", "m", false, "delete manifests that are not currently referenced via tag") + RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") +} + +// RootCmd is the main command for the 'registry' binary. 
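+//
+// Typical invocations (the configuration path is illustrative):
+//
+//	registry serve /etc/docker/registry/config.yml
+//	registry garbage-collect --dry-run /etc/docker/registry/config.yml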
+var RootCmd = &cobra.Command{ + Use: "registry", + Short: "`registry`", + Long: "`registry`", + Run: func(cmd *cobra.Command, args []string) { + if showVersion { + version.PrintVersion() + return + } + cmd.Usage() + }, +} + +var dryRun bool +var removeUntagged bool + +// GCCmd is the cobra command that corresponds to the garbage-collect subcommand +var GCCmd = &cobra.Command{ + Use: "garbage-collect ", + Short: "`garbage-collect` deletes layers not referenced by any manifests", + Long: "`garbage-collect` deletes layers not referenced by any manifests", + Run: func(cmd *cobra.Command, args []string) { + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err) + os.Exit(1) + } + + ctx := dcontext.Background() + ctx, err = configureLogging(ctx, config) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err) + os.Exit(1) + } + + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + + registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) + os.Exit(1) + } + + err = storage.MarkAndSweep(ctx, driver, registry, storage.GCOpts{ + DryRun: dryRun, + RemoveUntagged: removeUntagged, + }) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) + os.Exit(1) + } + }, +} diff --git a/registry/storage/blob_test.go b/registry/storage/blob_test.go new file mode 100644 index 00000000..fc027f4e --- /dev/null +++ b/registry/storage/blob_test.go @@ -0,0 +1,614 @@ +package storage + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/testdriver" + "github.com/docker/distribution/testutil" + "github.com/opencontainers/go-digest" +) + +// TestWriteSeek tests that the current file size can be +// obtained using Seek +func TestWriteSeek(t *testing.T) { + ctx := context.Background() + imageName, _ := reference.WithName("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + contents := []byte{1, 2, 3} + blobUpload.Write(contents) + blobUpload.Close() + offset := blobUpload.Size() + if offset != int64(len(contents)) { + t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents)) + } + +} + +// TestSimpleBlobUpload covers the blob upload process, exercising common +// error paths that might be seen during an upload. 
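+//
+// The flow below: start an upload and cancel it, verify the upload state is
+// cleaned up and cannot be resumed, upload and commit the blob, read it back
+// and verify its digest, delete it, re-upload it, and finally confirm that
+// Delete fails on a registry created without EnableDelete.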
+func TestSimpleBlobUpload(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + ctx := context.Background() + imageName, _ := reference.WithName("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + h := sha256.New() + rd := io.TeeReader(randomDataReader, h) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Cancel the upload then restart it + if err := blobUpload.Cancel(ctx); err != nil { + t.Fatalf("unexpected error during upload cancellation: %v", err) + } + + // get the enclosing directory + uploadPath := path.Dir(blobUpload.(*blobWriter).path) + + // ensure state was cleaned up + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after cleanup") + } + + // Do a resume, get unknown upload + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) + } + + // Restart! + blobUpload, err = bs.Create(ctx) + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, rd) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("layer data write incomplete") + } + + blobUpload.Close() + + offset := blobUpload.Size() + if offset != nn { + t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) + } + + // Do a resume, for good fun + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != nil { + t.Fatalf("unexpected error resuming upload: %v", err) + } + + sha256Digest := digest.NewDigest("sha256", h) + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // ensure state was cleaned up + uploadPath = path.Dir(blobUpload.(*blobWriter).path) + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after commit") + } + + // After finishing an upload, it should no longer exist. + if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { + t.Fatalf("expected layer upload to be unknown, got %v", err) + } + + // Test for existence. 
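+ // Stat must return the exact descriptor produced by Commit above; a
+ // mismatch would mean the blob store lost or rewrote the metadata.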
+	statDesc, err := bs.Stat(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
+	}
+
+	if !reflect.DeepEqual(statDesc, desc) {
+		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
+	}
+
+	rc, err := bs.Open(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error opening blob for read: %v", err)
+	}
+	defer rc.Close()
+
+	h.Reset()
+	nn, err = io.Copy(h, rc)
+	if err != nil {
+		t.Fatalf("error reading layer: %v", err)
+	}
+
+	if nn != randomDataSize {
+		t.Fatalf("incorrect read length")
+	}
+
+	if digest.NewDigest("sha256", h) != sha256Digest {
+		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
+	}
+
+	// Delete a blob
+	err = bs.Delete(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("Unexpected error deleting blob")
+	}
+
+	d, err := bs.Stat(ctx, desc.Digest)
+	if err == nil {
+		t.Fatalf("unexpected non-error stating deleted blob: %v", d)
+	}
+
+	switch err {
+	case distribution.ErrBlobUnknown:
+		break
+	default:
+		t.Errorf("Unexpected error type stat-ing deleted blob: %#v", err)
+	}
+
+	_, err = bs.Open(ctx, desc.Digest)
+	if err == nil {
+		t.Fatalf("unexpected success opening deleted blob for read")
+	}
+
+	switch err {
+	case distribution.ErrBlobUnknown:
+		break
+	default:
+		t.Errorf("Unexpected error type getting deleted blob: %#v", err)
+	}
+
+	// Re-upload the blob
+	randomBlob, err := ioutil.ReadAll(randomDataReader)
+	if err != nil {
+		t.Fatalf("Error reading all of blob %s", err.Error())
+	}
+	expectedDigest := digest.FromBytes(randomBlob)
+	simpleUpload(t, bs, randomBlob, expectedDigest)
+
+	d, err = bs.Stat(ctx, expectedDigest)
+	if err != nil {
+		t.Errorf("unexpected error stat-ing blob")
+	}
+	if d.Digest != expectedDigest {
+		t.Errorf("Mismatching digest with restored blob")
+	}
+
+	_, err = bs.Open(ctx, expectedDigest)
+	if err != nil {
+		t.Errorf("Unexpected error opening blob")
+	}
+
+	// Reuse state to test delete with a delete-disabled registry
+	registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
+	if err != nil {
+		t.Fatalf("error creating registry: %v", err)
+	}
+	repository, err = registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs = repository.Blobs(ctx)
+	err = bs.Delete(ctx, desc.Digest)
+	if err == nil {
+		t.Errorf("Unexpected success deleting while disabled")
+	}
+}
+
+// TestSimpleBlobRead just creates a simple blob file and ensures that basic
+// open, read, seek, read works. More specific edge cases should be covered in
+// other tests.
+func TestSimpleBlobRead(t *testing.T) {
+	ctx := context.Background()
+	imageName, _ := reference.WithName("foo/bar")
+	driver := testdriver.New()
+	registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
+	if err != nil {
+		t.Fatalf("error creating registry: %v", err)
+	}
+	repository, err := registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs := repository.Blobs(ctx)
+
+	randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
+	if err != nil {
+		t.Fatalf("error creating random data: %v", err)
+	}
+
+	// Test for existence.
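+	// Nothing has been uploaded under this digest yet, so Stat and Open
+	// should both report ErrBlobUnknown.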
+	desc, err := bs.Stat(ctx, dgst)
+	if err != distribution.ErrBlobUnknown {
+		t.Fatalf("expected not found error when testing for existence: %v", err)
+	}
+
+	rc, err := bs.Open(ctx, dgst)
+	if err != distribution.ErrBlobUnknown {
+		t.Fatalf("expected not found error when opening non-existent blob: %v", err)
+	}
+
+	randomLayerSize, err := seekerSize(randomLayerReader)
+	if err != nil {
+		t.Fatalf("error getting seeker size for random layer: %v", err)
+	}
+
+	descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize}
+	t.Logf("desc: %v", descBefore)
+
+	desc, err = addBlob(ctx, bs, descBefore, randomLayerReader)
+	if err != nil {
+		t.Fatalf("error adding blob to blobservice: %v", err)
+	}
+
+	if desc.Size != randomLayerSize {
+		t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
+	}
+
+	rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
+	if err != nil {
+		t.Fatalf("error opening blob with %v: %v", dgst, err)
+	}
+	defer rc.Close()
+
+	// Now check the sha digest and ensure it's the same
+	h := sha256.New()
+	nn, err := io.Copy(h, rc)
+	if err != nil {
+		t.Fatalf("unexpected error copying to hash: %v", err)
+	}
+
+	if nn != randomLayerSize {
+		t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize)
+	}
+
+	sha256Digest := digest.NewDigest("sha256", h)
+	if sha256Digest != desc.Digest {
+		t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest)
+	}
+
+	// Now seek back the blob, read the whole thing and check against randomLayerData
+	offset, err := rc.Seek(0, os.SEEK_SET)
+	if err != nil {
+		t.Fatalf("error seeking blob: %v", err)
+	}
+
+	if offset != 0 {
+		t.Fatalf("seek failed: expected 0 offset, got %d", offset)
+	}
+
+	p, err := ioutil.ReadAll(rc)
+	if err != nil {
+		t.Fatalf("error reading all of blob: %v", err)
+	}
+
+	if len(p) != int(randomLayerSize) {
+		t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize)
+	}
+
+	// Reset the randomLayerReader and read back the buffer
+	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
+	if err != nil {
+		t.Fatalf("error resetting layer reader: %v", err)
+	}
+
+	randomLayerData, err := ioutil.ReadAll(randomLayerReader)
+	if err != nil {
+		t.Fatalf("random layer read failed: %v", err)
+	}
+
+	if !bytes.Equal(p, randomLayerData) {
+		t.Fatalf("layer data not equal")
+	}
+}
+
+// TestBlobMount covers the blob mount process, exercising common
+// error paths that might be seen during a mount.
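+// A blob is uploaded into a source repository and then mounted into a second
+// repository via WithMountFrom; Create is expected to fail with
+// ErrBlobMounted rather than return a blob writer.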
+func TestBlobMount(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + ctx := context.Background() + imageName, _ := reference.WithName("foo/bar") + sourceImageName, _ := reference.WithName("foo/source") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + sourceRepository, err := registry.Repository(ctx, sourceImageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + sbs := sourceRepository.Blobs(ctx) + + blobUpload, err := sbs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, randomDataReader) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // Test for existence. + statDesc, err := sbs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) + } + + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + bs := repository.Blobs(ctx) + // Test destination for existence. + statDesc, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) + } + + canonicalRef, err := reference.WithDigest(sourceRepository.Named(), desc.Digest) + if err != nil { + t.Fatal(err) + } + + bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatal("unexpected blobwriter returned from Create call, should mount instead") + } + + ebm, ok := err.(distribution.ErrBlobMounted) + if !ok { + t.Fatalf("unexpected error mounting layer: %v", err) + } + + if !reflect.DeepEqual(ebm.Descriptor, desc) { + t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) + } + + // Test for existence. 
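+	// After the mount, the destination repository should serve the same
+	// descriptor as the source repository.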
+	statDesc, err = bs.Stat(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
+	}
+
+	if !reflect.DeepEqual(statDesc, desc) {
+		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
+	}
+
+	rc, err := bs.Open(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error opening blob for read: %v", err)
+	}
+	defer rc.Close()
+
+	h := sha256.New()
+	nn, err = io.Copy(h, rc)
+	if err != nil {
+		t.Fatalf("error reading layer: %v", err)
+	}
+
+	if nn != randomDataSize {
+		t.Fatalf("incorrect read length")
+	}
+
+	if digest.NewDigest("sha256", h) != dgst {
+		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst)
+	}
+
+	// Delete the blob from the source repo
+	err = sbs.Delete(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("Unexpected error deleting blob")
+	}
+
+	d, err := bs.Stat(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error stating blob deleted from source repository: %v", err)
+	}
+
+	d, err = sbs.Stat(ctx, desc.Digest)
+	if err == nil {
+		t.Fatalf("unexpected non-error stating deleted blob: %v", d)
+	}
+
+	switch err {
+	case distribution.ErrBlobUnknown:
+		break
+	default:
+		t.Errorf("Unexpected error type stat-ing deleted blob: %#v", err)
+	}
+
+	// Delete the blob from the dest repo
+	err = bs.Delete(ctx, desc.Digest)
+	if err != nil {
+		t.Fatalf("Unexpected error deleting blob")
+	}
+
+	d, err = bs.Stat(ctx, desc.Digest)
+	if err == nil {
+		t.Fatalf("unexpected non-error stating deleted blob: %v", d)
+	}
+
+	switch err {
+	case distribution.ErrBlobUnknown:
+		break
+	default:
+		t.Errorf("Unexpected error type stat-ing deleted blob: %#v", err)
+	}
+}
+
+// TestLayerUploadZeroLength uploads a zero-length layer and verifies that it
+// commits with the canonical empty digest.
+func TestLayerUploadZeroLength(t *testing.T) {
+	ctx := context.Background()
+	imageName, _ := reference.WithName("foo/bar")
+	driver := testdriver.New()
+	registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
+	if err != nil {
+		t.Fatalf("error creating registry: %v", err)
+	}
+	repository, err := registry.Repository(ctx, imageName)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	bs := repository.Blobs(ctx)
+
+	simpleUpload(t, bs, []byte{}, digestSha256Empty)
+}
+
+func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) {
+	ctx := context.Background()
+	wr, err := bs.Create(ctx)
+	if err != nil {
+		t.Fatalf("unexpected error starting upload: %v", err)
+	}
+
+	nn, err := io.Copy(wr, bytes.NewReader(blob))
+	if err != nil {
+		t.Fatalf("error copying into blob writer: %v", err)
+	}
+
+	if nn != 0 {
+		t.Fatalf("unexpected number of bytes copied: %v > 0", nn)
+	}
+
+	dgst, err := digest.FromReader(bytes.NewReader(blob))
+	if err != nil {
+		t.Fatalf("error getting digest: %v", err)
+	}
+
+	if dgst != expectedDigest {
+		// sanity check on zero digest
+		t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
+	}
+
+	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
+	if err != nil {
+		t.Fatalf("unexpected error committing write: %v", err)
+	}
+
+	if desc.Digest != dgst {
+		t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst)
+	}
+}
+
+// seekerSize seeks to the end of seeker, checks the size and returns it to
+// the original state, returning the size. The state of the seeker should be
+// treated as unknown if an error is returned.
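+// It issues only Seek calls and never reads data from the underlying stream.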
+func seekerSize(seeker io.ReadSeeker) (int64, error) {
+	current, err := seeker.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return 0, err
+	}
+
+	end, err := seeker.Seek(0, os.SEEK_END)
+	if err != nil {
+		return 0, err
+	}
+
+	resumed, err := seeker.Seek(current, os.SEEK_SET)
+	if err != nil {
+		return 0, err
+	}
+
+	if resumed != current {
+		return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
+	}
+
+	return end, nil
+}
+
+// addBlob simply consumes the reader and inserts into the blob service,
+// returning a descriptor on success.
+func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) {
+	wr, err := bs.Create(ctx)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer wr.Cancel(ctx)
+
+	if nn, err := io.Copy(wr, rd); err != nil {
+		return distribution.Descriptor{}, err
+	} else if nn != desc.Size {
+		return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size)
+	}
+
+	return wr.Commit(ctx, desc)
+}
diff --git a/registry/storage/blobcachemetrics.go b/registry/storage/blobcachemetrics.go
new file mode 100644
index 00000000..238b5806
--- /dev/null
+++ b/registry/storage/blobcachemetrics.go
@@ -0,0 +1,66 @@
+package storage
+
+import (
+	"context"
+	"expvar"
+	"sync/atomic"
+
+	dcontext "github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/cache"
+)
+
+type blobStatCollector struct {
+	metrics cache.Metrics
+}
+
+func (bsc *blobStatCollector) Hit() {
+	atomic.AddUint64(&bsc.metrics.Requests, 1)
+	atomic.AddUint64(&bsc.metrics.Hits, 1)
+}
+
+func (bsc *blobStatCollector) Miss() {
+	atomic.AddUint64(&bsc.metrics.Requests, 1)
+	atomic.AddUint64(&bsc.metrics.Misses, 1)
+}
+
+func (bsc *blobStatCollector) Metrics() cache.Metrics {
+	return bsc.metrics
+}
+
+func (bsc *blobStatCollector) Logger(ctx context.Context) cache.Logger {
+	return dcontext.GetLogger(ctx)
+}
+
+// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor
+// cache requests. Note this is kept globally and made available via expvar.
+// For more detailed metrics, it's recommended to instrument a particular
+// cache implementation.
+var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{}
+
+func init() {
+	registry := expvar.Get("registry")
+	if registry == nil {
+		registry = expvar.NewMap("registry")
+	}
+
+	cache := registry.(*expvar.Map).Get("cache")
+	if cache == nil {
+		cache = &expvar.Map{}
+		cache.(*expvar.Map).Init()
+		registry.(*expvar.Map).Set("cache", cache)
+	}
+
+	storage := cache.(*expvar.Map).Get("storage")
+	if storage == nil {
+		storage = &expvar.Map{}
+		storage.(*expvar.Map).Init()
+		cache.(*expvar.Map).Set("storage", storage)
+	}
+
+	storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
+		// no need for synchronous access: the increments are atomic and
+		// during reading, we don't care if the data is up to date. The
+		// numbers will always *eventually* be reported correctly.
+		return blobStatterCacheMetrics
+	}))
+}
diff --git a/registry/storage/blobserver.go b/registry/storage/blobserver.go
new file mode 100644
index 00000000..57d4f907
--- /dev/null
+++ b/registry/storage/blobserver.go
@@ -0,0 +1,78 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/registry/storage/driver"
+	"github.com/opencontainers/go-digest"
+)
+
+// TODO(stevvooe): This should be configurable in the future.
+const blobCacheControlMaxAge = 365 * 24 * time.Hour
+
+// blobServer simply serves blobs from a driver instance using a path function
+// to identify paths and a descriptor service to fill in metadata.
+type blobServer struct {
+	driver   driver.StorageDriver
+	statter  distribution.BlobStatter
+	pathFn   func(dgst digest.Digest) (string, error)
+	redirect bool // allows disabling URLFor redirects
+}
+
+func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	desc, err := bs.statter.Stat(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	path, err := bs.pathFn(desc.Digest)
+	if err != nil {
+		return err
+	}
+
+	if bs.redirect {
+		redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
+		switch err.(type) {
+		case nil:
+			// Redirect to storage URL.
+			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
+			return err
+
+		case driver.ErrUnsupportedMethod:
+			// Fallback to serving the content directly.
+		default:
+			// Some unexpected error.
+			return err
+		}
+	}
+
+	br, err := newFileReader(ctx, bs.driver, path, desc.Size)
+	if err != nil {
+		return err
+	}
+	defer br.Close()
+
+	w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
+	w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))
+
+	if w.Header().Get("Docker-Content-Digest") == "" {
+		w.Header().Set("Docker-Content-Digest", desc.Digest.String())
+	}
+
+	if w.Header().Get("Content-Type") == "" {
+		// Set the content type if not already set.
+		w.Header().Set("Content-Type", desc.MediaType)
+	}
+
+	if w.Header().Get("Content-Length") == "" {
+		// Set the content length if not already set.
+		w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
+	}
+
+	http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
+	return nil
+}
diff --git a/registry/storage/blobstore.go b/registry/storage/blobstore.go
new file mode 100644
index 00000000..9fdf5219
--- /dev/null
+++ b/registry/storage/blobstore.go
@@ -0,0 +1,222 @@
+package storage
+
+import (
+	"context"
+	"path"
+
+	"github.com/docker/distribution"
+	dcontext "github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+	"github.com/opencontainers/go-digest"
+)
+
+// blobStore implements the read side of the blob store interface over a
+// driver without enforcing per-repository membership. This object is
+// intentionally a leaky abstraction, providing utility methods that support
+// creating and traversing backend links.
+type blobStore struct {
+	driver  driver.StorageDriver
+	statter distribution.BlobStatter
+}
+
+var _ distribution.BlobProvider = &blobStore{}
+
+// Get implements the BlobReadService.Get call.
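+// A driver PathNotFoundError is translated into distribution.ErrBlobUnknown.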
+func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	bp, err := bs.path(dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := getContent(ctx, bs.driver, bp)
+	if err != nil {
+		switch err.(type) {
+		case driver.PathNotFoundError:
+			return nil, distribution.ErrBlobUnknown
+		}
+
+		return nil, err
+	}
+
+	return p, nil
+}
+
+func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	desc, err := bs.statter.Stat(ctx, dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	path, err := bs.path(desc.Digest)
+	if err != nil {
+		return nil, err
+	}
+
+	return newFileReader(ctx, bs.driver, path, desc.Size)
+}
+
+// Put stores the content p in the blob store, calculating the digest. If the
+// content is already present, only the digest will be returned. This should
+// only be used for small objects, such as manifests. This is implemented as a convenience for other Put implementations.
+func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	dgst := digest.FromBytes(p)
+	desc, err := bs.statter.Stat(ctx, dgst)
+	if err == nil {
+		// content already present
+		return desc, nil
+	} else if err != distribution.ErrBlobUnknown {
+		dcontext.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err)
+		// real error, return it
+		return distribution.Descriptor{}, err
+	}
+
+	bp, err := bs.path(dgst)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	// TODO(stevvooe): Write out mediatype here, as well.
+	return distribution.Descriptor{
+		Size: int64(len(p)),
+
+		// NOTE(stevvooe): The central blob store firewalls media types from
+		// other users. The caller should look this up and override the value
+		// for the specific repository.
+		MediaType: "application/octet-stream",
+		Digest:    dgst,
+	}, bs.driver.PutContent(ctx, bp, p)
+}
+
+func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error {
+	specPath, err := pathFor(blobsPathSpec{})
+	if err != nil {
+		return err
+	}
+
+	return bs.driver.Walk(ctx, specPath, func(fileInfo driver.FileInfo) error {
+		// skip directories
+		if fileInfo.IsDir() {
+			return nil
+		}
+
+		currentPath := fileInfo.Path()
+		// we only want to parse paths that end with /data
+		_, fileName := path.Split(currentPath)
+		if fileName != "data" {
+			return nil
+		}
+
+		digest, err := digestFromPath(currentPath)
+		if err != nil {
+			return err
+		}
+
+		return ingester(digest)
+	})
+}
+
+// path returns the canonical path for the blob identified by digest. The blob
+// may or may not exist.
+func (bs *blobStore) path(dgst digest.Digest) (string, error) {
+	bp, err := pathFor(blobDataPathSpec{
+		digest: dgst,
+	})
+
+	if err != nil {
+		return "", err
+	}
+
+	return bp, nil
+}
+
+// link links the path to the provided digest by writing the digest into the
+// target file. Caller must ensure that the blob actually exists.
+func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error {
+	// The contents of the "link" file are the exact string contents of the
+	// digest, which is specified in that package.
+	return bs.driver.PutContent(ctx, path, []byte(dgst))
+}
+
+// readlink returns the linked digest at path.
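+// It fails if the link file cannot be read or does not contain a parseable
+// digest.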
+func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) {
+	content, err := bs.driver.GetContent(ctx, path)
+	if err != nil {
+		return "", err
+	}
+
+	linked, err := digest.Parse(string(content))
+	if err != nil {
+		return "", err
+	}
+
+	return linked, nil
+}
+
+// resolve reads the digest link at path and returns the blob store path.
+func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
+	dgst, err := bs.readlink(ctx, path)
+	if err != nil {
+		return "", err
+	}
+
+	return bs.path(dgst)
+}
+
+type blobStatter struct {
+	driver driver.StorageDriver
+}
+
+var _ distribution.BlobDescriptorService = &blobStatter{}
+
+// Stat implements BlobStatter.Stat by returning the descriptor for the blob
+// in the main blob store. If this method returns successfully, there is a
+// strong guarantee that the blob exists and is available.
+func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	path, err := pathFor(blobDataPathSpec{
+		digest: dgst,
+	})
+
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	fi, err := bs.driver.Stat(ctx, path)
+	if err != nil {
+		switch err := err.(type) {
+		case driver.PathNotFoundError:
+			return distribution.Descriptor{}, distribution.ErrBlobUnknown
+		default:
+			return distribution.Descriptor{}, err
+		}
+	}
+
+	if fi.IsDir() {
+		// NOTE(stevvooe): This represents a corruption situation. Somehow, we
+		// calculated a blob path and then detected a directory. We log the
+		// error and then err on the side of not knowing about the blob.
+		dcontext.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
+		return distribution.Descriptor{}, distribution.ErrBlobUnknown
+	}
+
+	// TODO(stevvooe): Add method to resolve the mediatype. We can store and
+	// cache a "global" media type for the blob, even if a specific repo has a
+	// mediatype that overrides the main one.
+
+	return distribution.Descriptor{
+		Size: fi.Size(),
+
+		// NOTE(stevvooe): The central blob store firewalls media types from
+		// other users. The caller should look this up and override the value
+		// for the specific repository.
+		MediaType: "application/octet-stream",
+		Digest:    dgst,
+	}, nil
+}
+
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	return distribution.ErrUnsupported
+}
+
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	return distribution.ErrUnsupported
+}
diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go
new file mode 100644
index 00000000..2bb25dcc
--- /dev/null
+++ b/registry/storage/blobwriter.go
@@ -0,0 +1,397 @@
+package storage
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"time"
+
+	"github.com/docker/distribution"
+	dcontext "github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	errResumableDigestNotAvailable = errors.New("resumable digest not available")
+)
+
+const (
+	// digestSha256Empty is the canonical sha256 digest of empty data
+	digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// blobWriter is used to control the various aspects of resumable
+// blob upload.
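+// Writes are fed to both the backing file writer and a digester, so the
+// content digest is available when the upload is committed.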
+type blobWriter struct { + ctx context.Context + blobStore *linkedBlobStore + + id string + startedAt time.Time + digester digest.Digester + written int64 // track the contiguous write + + fileWriter storagedriver.FileWriter + driver storagedriver.StorageDriver + path string + + resumableDigestEnabled bool + committed bool +} + +var _ distribution.BlobWriter = &blobWriter{} + +// ID returns the identifier for this upload. +func (bw *blobWriter) ID() string { + return bw.id +} + +func (bw *blobWriter) StartedAt() time.Time { + return bw.startedAt +} + +// Commit marks the upload as completed, returning a valid descriptor. The +// final size and digest are checked against the first descriptor provided. +func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + dcontext.GetLogger(ctx).Debug("(*blobWriter).Commit") + + if err := bw.fileWriter.Commit(); err != nil { + return distribution.Descriptor{}, err + } + + bw.Close() + desc.Size = bw.Size() + + canonical, err := bw.validateBlob(ctx, desc) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.moveBlob(ctx, canonical); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.removeResources(ctx); err != nil { + return distribution.Descriptor{}, err + } + + err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) + if err != nil { + return distribution.Descriptor{}, err + } + + bw.committed = true + return canonical, nil +} + +// Cancel the blob upload process, releasing any resources associated with +// the writer and canceling the operation. +func (bw *blobWriter) Cancel(ctx context.Context) error { + dcontext.GetLogger(ctx).Debug("(*blobWriter).Cancel") + if err := bw.fileWriter.Cancel(); err != nil { + return err + } + + if err := bw.Close(); err != nil { + dcontext.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) + } + + return bw.removeResources(ctx) +} + +func (bw *blobWriter) Size() int64 { + return bw.fileWriter.Size() +} + +func (bw *blobWriter) Write(p []byte) (int, error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) + bw.written += int64(n) + + return n, err +} + +func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) + bw.written += nn + + return nn, err +} + +func (bw *blobWriter) Close() error { + if bw.committed { + return errors.New("blobwriter close after commit") + } + + if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return err + } + + return bw.fileWriter.Close() +} + +// validateBlob checks the data against the digest, returning an error if it +// does not match. 
The canonical descriptor is returned.
+func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	var (
+		verified, fullHash bool
+		canonical          digest.Digest
+	)
+
+	if desc.Digest == "" {
+		// if no descriptors are provided, we have nothing to validate
+		// against. We don't really want to support this for the registry.
+		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+			Reason: fmt.Errorf("cannot validate against empty digest"),
+		}
+	}
+
+	var size int64
+
+	// Stat the on disk file
+	if fi, err := bw.driver.Stat(ctx, bw.path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// NOTE(stevvooe): We really don't care if the file is
+			// not actually present for the reader. We now assume
+			// that the desc length is zero.
+			desc.Size = 0
+		default:
+			// Any other error we want propagated up the stack.
+			return distribution.Descriptor{}, err
+		}
+	} else {
+		if fi.IsDir() {
+			return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
+		}
+
+		size = fi.Size()
+	}
+
+	if desc.Size > 0 {
+		if desc.Size != size {
+			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
+		}
+	} else {
+		// if provided 0 or negative length, we can assume caller doesn't know or
+		// care about length.
+		desc.Size = size
+	}
+
+	// TODO(stevvooe): This section is very meandering. Need to be broken down
+	// to be a lot more clear.
+
+	if err := bw.resumeDigest(ctx); err == nil {
+		canonical = bw.digester.Digest()
+
+		if canonical.Algorithm() == desc.Digest.Algorithm() {
+			// Common case: client and server prefer the same canonical digest
+			// algorithm - currently SHA256.
+			verified = desc.Digest == canonical
+		} else {
+			// The client wants to use a different digest algorithm. They'll just
+			// have to be patient and wait for us to download and re-hash the
+			// uploaded content using that digest algorithm.
+			fullHash = true
+		}
+	} else if err == errResumableDigestNotAvailable {
+		// Not using resumable digests, so we need to hash the entire layer.
+		fullHash = true
+	} else {
+		return distribution.Descriptor{}, err
+	}
+
+	if fullHash {
+		// a fantastic optimization: if the written data and the size are
+		// the same, we don't need to read the data from the backend. This is
+		// because we've written the entire file in the lifecycle of the
+		// current instance.
+		if bw.written == size && digest.Canonical == desc.Digest.Algorithm() {
+			canonical = bw.digester.Digest()
+			verified = desc.Digest == canonical
+		}
+
+		// If the check based on size fails, we fall back to the slowest of
+		// paths. We may be able to make the size-based check a stronger
+		// guarantee, so this may be defensive.
+		if !verified {
+			digester := digest.Canonical.Digester()
+			verifier := desc.Digest.Verifier()
+
+			// Read the file from the backend driver and validate it.
+			fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
+			if err != nil {
+				return distribution.Descriptor{}, err
+			}
+			defer fr.Close()
+
+			tr := io.TeeReader(fr, digester.Hash())
+
+			if _, err := io.Copy(verifier, tr); err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			canonical = digester.Digest()
+			verified = verifier.Verified()
+		}
+	}
+
+	if !verified {
+		dcontext.GetLoggerWithFields(ctx,
+			map[interface{}]interface{}{
+				"canonical": canonical,
+				"provided":  desc.Digest,
+			}, "canonical", "provided").
+			Errorf("canonical digest does not match provided digest")
+		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+			Digest: desc.Digest,
+			Reason: fmt.Errorf("content does not match digest"),
+		}
+	}
+
+	// update desc with canonical hash
+	desc.Digest = canonical
+
+	if desc.MediaType == "" {
+		desc.MediaType = "application/octet-stream"
+	}
+
+	return desc, nil
+}
+
+// moveBlob moves the data into its final, hash-qualified destination,
+// identified by dgst. The layer should be validated before commencing the
+// move.
+func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
+	blobPath, err := pathFor(blobDataPathSpec{
+		digest: desc.Digest,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Check for existence
+	if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			break // ensure that it doesn't exist.
+		default:
+			return err
+		}
+	} else {
+		// If the path exists, we can assume that the content has already
+		// been uploaded, since the blob storage is content-addressable.
+		// While it may be corrupted, detection of such corruption belongs
+		// elsewhere.
+		return nil
+	}
+
+	// If no data was received, we may not actually have a file on disk. Check
+	// the size here and write a zero-length file to blobPath if this is the
+	// case. For the most part, this should only ever happen with zero-length
+	// blobs.
+	if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// HACK(stevvooe): This is slightly dangerous: if we verify above,
+			// get a hash, then the underlying file is deleted, we risk moving
+			// a zero-length blob into a nonzero-length blob location. To
+			// prevent this horrid thing, we employ the hack of only allowing
+			// this to happen for the digest of an empty blob.
+			if desc.Digest == digestSha256Empty {
+				return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
+			}
+
+			// We let this fail during the move below.
+			logrus.
+				WithField("upload.id", bw.ID()).
+				WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
+		default:
+			return err // unrelated error
+		}
+	}
+
+	// TODO(stevvooe): We should also write the mediatype when executing this move.
+
+	return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
+}
+
+// removeResources should clean up all resources associated with the upload
+// instance. An error will be returned if the cleanup cannot proceed. If the
+// resources are already not present, no error will be returned.
+func (bw *blobWriter) removeResources(ctx context.Context) error {
+	dataPath, err := pathFor(uploadDataPathSpec{
+		name: bw.blobStore.repository.Named().Name(),
+		id:   bw.id,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Resolve and delete the containing directory, which should include any
+	// upload related files.
+	dirPath := path.Dir(dataPath)
+	if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			break // already gone!
+		default:
+			// This should be uncommon enough such that returning an error
+			// should be okay. At this point, the upload should be mostly
+			// complete, but perhaps the backend became inaccessible.
+ dcontext.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 + try := 1 + for try <= 5 { + _, err := bw.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + switch err.(type) { + case storagedriver.PathNotFoundError: + dcontext.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + default: + return nil, err + } + } + + readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/registry/storage/blobwriter_nonresumable.go b/registry/storage/blobwriter_nonresumable.go new file mode 100644 index 00000000..32f13097 --- /dev/null +++ b/registry/storage/blobwriter_nonresumable.go @@ -0,0 +1,17 @@ +// +build noresumabledigest + +package storage + +import ( + "github.com/docker/distribution/context" +) + +// resumeHashAt is a noop when resumable digest support is disabled. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + return errResumableDigestNotAvailable +} + +// storeHashState is a noop when resumable digest support is disabled. +func (bw *blobWriter) storeHashState(ctx context.Context) error { + return errResumableDigestNotAvailable +} diff --git a/registry/storage/blobwriter_resumable.go b/registry/storage/blobwriter_resumable.go new file mode 100644 index 00000000..8c909169 --- /dev/null +++ b/registry/storage/blobwriter_resumable.go @@ -0,0 +1,145 @@ +// +build !noresumabledigest + +package storage + +import ( + "context" + "fmt" + "path" + "strconv" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/sirupsen/logrus" + "github.com/stevvooe/resumable" + + // register resumable hashes with import + _ "github.com/stevvooe/resumable/sha256" + _ "github.com/stevvooe/resumable/sha512" +) + +// resumeDigest attempts to restore the state of the internal hash function +// by loading the most recent saved hash state equal to the current size of the blob. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + offset := bw.fileWriter.Size() + if offset == int64(h.Len()) { + // State of digester is already at the requested offset. + return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := bw.getStoredHashStates(ctx) + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset equal to + // the requested offset. + for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } + } + + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + h.Reset() + } else { + storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) + if err != nil { + return err + } + + if err = h.Restore(storedState); err != nil { + return err + } + } + + // Mind the gap. 
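+	// If the restored state still trails the current write offset, the gap
+	// cannot be replayed here, so resumption is reported as unavailable.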
+ if gapLen := offset - int64(h.Len()); gapLen > 0 { + return errResumableDigestNotAvailable + } + + return nil +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Named().String(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + list: true, + }) + + if err != nil { + return nil, err + } + + paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) + if err != nil { + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Named().String(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + offset: int64(h.Len()), + }) + + if err != nil { + return err + } + + hashState, err := h.State() + if err != nil { + return err + } + + return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) +} diff --git a/registry/storage/cache/cache.go b/registry/storage/cache/cache.go new file mode 100644 index 00000000..10a39091 --- /dev/null +++ b/registry/storage/cache/cache.go @@ -0,0 +1,35 @@ +// Package cache provides facilities to speed up access to the storage +// backend. +package cache + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService + + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) +} + +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. 
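+// A descriptor is admitted only when its digest validates, its size is
+// non-negative and its media type is non-empty.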
+func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) + } + + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) + } + + return nil +} diff --git a/registry/storage/cache/cachecheck/suite.go b/registry/storage/cache/cachecheck/suite.go new file mode 100644 index 00000000..b23765e8 --- /dev/null +++ b/registry/storage/cache/cachecheck/suite.go @@ -0,0 +1,180 @@ +package cachecheck + +import ( + "context" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/storage/cache" + "github.com/opencontainers/go-digest" +) + +// CheckBlobDescriptorCache takes a cache implementation through a common set +// of operations. If adding new tests, please add them here so new +// implementations get the benefit. This should be used for unit tests. +func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { + ctx := context.Background() + + checkBlobDescriptorCacheEmptyRepository(ctx, t, provider) + checkBlobDescriptorCacheSetAndRead(ctx, t, provider) + checkBlobDescriptorCacheClear(ctx, t, provider) +} + +func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { + if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty store: %v", err) + } + + cache, err := provider.RepositoryScoped("") + if err == nil { + t.Fatalf("expected an error when asking for invalid repo") + } + + cache, err = provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting repository: %v", err) + } + + if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ + Digest: "sha384:abc", + Size: 10, + MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error with invalid digest: %v", err) + } + + if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ + Digest: "", + Size: 10, + MediaType: "application/octet-stream"}); err == nil { + t.Fatalf("expected error setting value on invalid descriptor") + } + + if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error checking for cache item with empty digest: %v", err) + } + + if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty repo: %v", err) + } +} + +func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") + expected := distribution.Descriptor{ + Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, 
localDigest, expected); err != nil {
+		t.Fatalf("error setting descriptor: %v", err)
+	}
+
+	desc, err := cache.Stat(ctx, localDigest)
+	if err != nil {
+		t.Fatalf("unexpected error statting fake2:abc: %v", err)
+	}
+
+	if !reflect.DeepEqual(expected, desc) {
+		t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+	}
+
+	// also check that we set the canonical key ("fake:abc")
+	desc, err = cache.Stat(ctx, localDigest)
+	if err != nil {
+		t.Fatalf("descriptor not returned for canonical key: %v", err)
+	}
+
+	if !reflect.DeepEqual(expected, desc) {
+		t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+	}
+
+	// ensure that global gets extra descriptor mapping
+	desc, err = provider.Stat(ctx, localDigest)
+	if err != nil {
+		t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc)
+	}
+
+	if !reflect.DeepEqual(desc, expected) {
+		t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+	}
+
+	// get at it through canonical descriptor
+	desc, err = provider.Stat(ctx, expected.Digest)
+	if err != nil {
+		t.Fatalf("unexpected error checking global descriptor: %v", err)
+	}
+
+	if !reflect.DeepEqual(desc, expected) {
+		t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+	}
+
+	// now, we set the repo local mediatype to something else and ensure it
+	// doesn't get changed in the provider cache.
+	expected.MediaType = "application/json"
+
+	if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
+		t.Fatalf("unexpected error setting descriptor: %v", err)
+	}
+
+	desc, err = cache.Stat(ctx, localDigest)
+	if err != nil {
+		t.Fatalf("unexpected error getting descriptor: %v", err)
+	}
+
+	if !reflect.DeepEqual(desc, expected) {
+		t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
+	}
+
+	desc, err = provider.Stat(ctx, localDigest)
+	if err != nil {
+		t.Fatalf("unexpected error getting global descriptor: %v", err)
+	}
+
+	expected.MediaType = "application/octet-stream" // expect original mediatype in global
+
+	if !reflect.DeepEqual(desc, expected) {
+		t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
+	}
+}
+
+func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) {
+	localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
+	expected := distribution.Descriptor{
+		Digest:    "sha256:def1111111111111111111111111111111111111111111111111111111111111",
+		Size:      10,
+		MediaType: "application/octet-stream"}
+
+	cache, err := provider.RepositoryScoped("foo/bar")
+	if err != nil {
+		t.Fatalf("unexpected error getting scoped cache: %v", err)
+	}
+
+	if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
+		t.Fatalf("error setting descriptor: %v", err)
+	}
+
+	desc, err := cache.Stat(ctx, localDigest)
+	if err != nil {
+		t.Fatalf("unexpected error statting fake2:abc: %v", err)
+	}
+
+	if !reflect.DeepEqual(expected, desc) {
+		t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+	}
+
+	err = cache.Clear(ctx, localDigest)
+	if err != nil {
+		t.Error(err)
+	}
+
+	desc, err = cache.Stat(ctx, localDigest)
+	if err == nil {
+		t.Fatalf("expected error statting deleted blob: %v", err)
+	}
}
diff --git a/registry/storage/cache/cachedblobdescriptorstore.go b/registry/storage/cache/cachedblobdescriptorstore.go
new file mode 100644
index 00000000..ac4c4521
--- /dev/null
+++ b/registry/storage/cache/cachedblobdescriptorstore.go
@@ -0,0 +1,129 @@
+package cache
+
+import (
"context" + + "github.com/docker/distribution" + prometheus "github.com/docker/distribution/metrics" + "github.com/opencontainers/go-digest" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// Logger can be provided on the MetricsTracker to log errors. +// +// Usually, this is just a proxy to dcontext.GetLogger. +type Logger interface { + Errorf(format string, args ...interface{}) +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. +type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics + Logger(context.Context) Logger +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobDescriptorService + tracker MetricsTracker +} + +var ( + // cacheCount is the number of total cache request received/hits/misses + cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") +) + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. +func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + } +} + +// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and +// falls back to a backend. Hits and misses will send to the tracker. +func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + tracker: tracker, + } +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + cacheCount.WithValues("Request").Inc(1) + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + cacheCount.WithValues("Hit").Inc(1) + if cbds.tracker != nil { + cbds.tracker.Hit() + } + return desc, nil +fallback: + cacheCount.WithValues("Miss").Inc(1) + if cbds.tracker != nil { + cbds.tracker.Miss() + } + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err + +} + +func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + err := cbds.cache.Clear(ctx, dgst) + if err != nil { + return err + } + + err = cbds.backend.Clear(ctx, dgst) + if err != nil { + return err + } + return nil +} + +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) + } + return nil +} + +func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { + if tracker == nil { + return + } + + logger := tracker.Logger(ctx) + if logger == nil { + return + } + logger.Errorf(format, args...) 
+}
diff --git a/registry/storage/cache/memory/memory.go b/registry/storage/cache/memory/memory.go
new file mode 100644
index 00000000..42d94d9b
--- /dev/null
+++ b/registry/storage/cache/memory/memory.go
@@ -0,0 +1,179 @@
+package memory
+
+import (
+	"context"
+	"sync"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/storage/cache"
+	"github.com/opencontainers/go-digest"
+)
+
+type inMemoryBlobDescriptorCacheProvider struct {
+	global       *mapBlobDescriptorCache
+	repositories map[string]*mapBlobDescriptorCache
+	mu           sync.RWMutex
+}
+
+// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
+// storing blob descriptor data.
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
+	return &inMemoryBlobDescriptorCacheProvider{
+		global:       newMapBlobDescriptorCache(),
+		repositories: make(map[string]*mapBlobDescriptorCache),
+	}
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
+	if _, err := reference.ParseNormalizedNamed(repo); err != nil {
+		return nil, err
+	}
+
+	imbdcp.mu.RLock()
+	defer imbdcp.mu.RUnlock()
+
+	return &repositoryScopedInMemoryBlobDescriptorCache{
+		repo:       repo,
+		parent:     imbdcp,
+		repository: imbdcp.repositories[repo],
+	}, nil
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	return imbdcp.global.Stat(ctx, dgst)
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
+	return imbdcp.global.Clear(ctx, dgst)
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	_, err := imbdcp.Stat(ctx, dgst)
+	if err == distribution.ErrBlobUnknown {
+
+		if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
+			// if the digests differ, set the other canonical mapping
+			if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
+				return err
+			}
+		}
+
+		// unknown, just set it
+		return imbdcp.global.SetDescriptor(ctx, dgst, desc)
+	}
+
+	// we already know it, do nothing
+	return err
+}
+
+// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
+// repository cache. Instances are not thread-safe but the delegated
+// operations are.
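+// The per-repository map is allocated lazily on the first SetDescriptor
+// call; until then, Stat and Clear report ErrBlobUnknown.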
+type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return repo.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.ErrBlobUnknown + } + + return repo.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + if repo == nil { + // allocate map since we are setting it now. + var ok bool + // have to read back value since we may have allocated elsewhere. + repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + repo = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = repo + } + rsimbdcp.repository = repo + } + rsimbdcp.parent.mu.Unlock() + + if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. +type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/registry/storage/cache/memory/memory_test.go b/registry/storage/cache/memory/memory_test.go new file mode 100644 index 00000000..49c2b5c3 --- /dev/null +++ b/registry/storage/cache/memory/memory_test.go @@ -0,0 +1,13 @@ +package memory + +import ( + "testing" + + "github.com/docker/distribution/registry/storage/cache/cachecheck" +) + +// TestInMemoryBlobInfoCache checks the in memory implementation is working +// correctly. 
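+// It delegates to cachecheck.CheckBlobDescriptorCache, which exercises
+// empty-repository behavior, set/read round-trips and Clear.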
+func TestInMemoryBlobInfoCache(t *testing.T) {
+	cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider())
+}
diff --git a/registry/storage/cache/redis/redis.go b/registry/storage/cache/redis/redis.go
new file mode 100644
index 00000000..550d703b
--- /dev/null
+++ b/registry/storage/cache/redis/redis.go
@@ -0,0 +1,268 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/storage/cache"
+	"github.com/garyburd/redigo/redis"
+	"github.com/opencontainers/go-digest"
+)
+
+// redisBlobDescriptorService provides an implementation of
+// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in
+// two parts. The first provides fast access to repository membership through a
+// redis set for each repo. The second is a redis hash keyed by the digest of
+// the layer, providing digest, size and mediatype information. There is also
+// a per-repository redis hash of the blob descriptor, allowing override of
+// data. This is currently used to override the mediatype on a per-repository
+// basis.
+//
+// Note that there is no implied relationship between these two caches. The
+// layer may exist in one, both or none and the code must be written this way.
+type redisBlobDescriptorService struct {
+	pool *redis.Pool
+
+	// TODO(stevvooe): We use a pool because we don't have great control over
+	// the cache lifecycle to manage connections. A new connection is fetched
+	// for each operation. Once we have better lifecycle management of the
+	// request objects, we can change this to a connection.
+}
+
+// NewRedisBlobDescriptorCacheProvider returns a new redis-based
+// BlobDescriptorCacheProvider using the provided redis connection pool.
+func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider {
+	return &redisBlobDescriptorService{
+		pool: pool,
+	}
+}
+
+// RepositoryScoped returns the scoped cache.
+func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
+	if _, err := reference.ParseNormalizedNamed(repo); err != nil {
+		return nil, err
+	}
+
+	return &repositoryScopedRedisBlobDescriptorService{
+		repo:     repo,
+		upstream: rbds,
+	}, nil
+}
+
+// Stat retrieves the descriptor data from the redis hash entry.
+func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	if err := dgst.Validate(); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	conn := rbds.pool.Get()
+	defer conn.Close()
+
+	return rbds.stat(ctx, conn, dgst)
+}
+
+func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
+	if err := dgst.Validate(); err != nil {
+		return err
+	}
+
+	conn := rbds.pool.Get()
+	defer conn.Close()
+
+	// Not atomic in redis <= 2.3
+	reply, err := redis.Int64(conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype"))
+	if err != nil {
+		return err
+	}
+
+	if reply == 0 {
+		return distribution.ErrBlobUnknown
+	}
+
+	return nil
+}
+
+// stat provides an internal stat call that takes a connection parameter. This
+// allows some internal management of the connection scope.
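+//
+// The hash it reads has roughly this shape (hypothetical digest and values,
+// shown as a redis-cli HGETALL would render them):
+//
+//	HGETALL blobs::sha256:3b1a...
+//	digest    -> "sha256:3b1a..."
+//	size      -> "1512"
+//	mediatype -> "application/octet-stream"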
+func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { + reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + // NOTE(stevvooe): The "size" field used to be "length". We treat a + // missing "size" field here as an unknown blob, which causes a cache + // miss, effectively migrating the field. + if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + var desc distribution.Descriptor + if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { + return distribution.Descriptor{}, err + } + + return desc, nil +} + +// SetDescriptor sets the descriptor data for the given digest using a redis +// hash. A hash is used here since we may store unrelated fields about a layer +// in the future. +func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), + "digest", desc.Digest, + "size", desc.Size); err != nil { + return err + } + + // Only set mediatype if not already set. + if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), + "mediatype", desc.MediaType); err != nil { + return err + } + + return nil +} + +func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "blobs::" + dgst.String() +} + +type repositoryScopedRedisBlobDescriptorService struct { + repo string + upstream *redisBlobDescriptorService +} + +var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} + +// Stat ensures that the digest is a member of the specified repository and +// forwards the descriptor request to the global blob store. If the media type +// differs for the repository, we override it. +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return distribution.Descriptor{}, err + } + + if !member { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // We allow a per repository mediatype, let's look it up here. 
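+	// The per-repository override lives under a separate key of the form
+	// "repository::<repo>::blobs::<digest>" (see blobDescriptorHashKey on
+	// this type below), so a repository-specific mediatype can shadow the
+	// global one.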
+ mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + if mediatype != "" { + upstream.MediaType = mediatype + } + + return upstream, nil +} + +// Clear removes the descriptor from the cache and forwards to the upstream descriptor store +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return err + } + + if !member { + return distribution.ErrBlobUnknown + } + + return rsrbds.upstream.Clear(ctx, dgst) +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + if dgst != desc.Digest { + if dgst.Algorithm() == desc.Digest.Algorithm() { + return fmt.Errorf("redis cache: digest for descriptors differ but algorithm does not: %q != %q", dgst, desc.Digest) + } + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + return rsrbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil { + return err + } + + if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil { + return err + } + + // Override repository mediatype. + if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { + return err + } + + // Also set the values for the primary descriptor, if they differ by + // algorithm (ie sha256 vs sha512). + if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { + if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { + return err + } + } + + return nil +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "repository::" + rsrbds.repo + "::blobs::" + dgst.String() +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string { + return "repository::" + rsrbds.repo + "::blobs" +} diff --git a/registry/storage/cache/redis/redis_test.go b/registry/storage/cache/redis/redis_test.go new file mode 100644 index 00000000..d324842d --- /dev/null +++ b/registry/storage/cache/redis/redis_test.go @@ -0,0 +1,53 @@ +package redis + +import ( + "flag" + "os" + "testing" + "time" + + "github.com/docker/distribution/registry/storage/cache/cachecheck" + "github.com/garyburd/redigo/redis" +) + +var redisAddr string + +func init() { + flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis") +} + +// TestRedisLayerInfoCache exercises a live redis instance using the cache +// implementation. 
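+// To run it against a local instance, something like the following works
+// (the address is illustrative; see the flag registered in init above):
+//
+//	go test ./registry/storage/cache/redis/ \
+//	    -test.registry.storage.cache.redis.addr=localhost:6379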
+func TestRedisBlobDescriptorCacheProvider(t *testing.T) {
+	if redisAddr == "" {
+		// fall back to an environment variable
+		redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR")
+	}
+
+	if redisAddr == "" {
+		// skip if still not set
+		t.Skip("please set -test.registry.storage.cache.redis.addr to test layer info cache against redis")
+	}
+
+	pool := &redis.Pool{
+		Dial: func() (redis.Conn, error) {
+			return redis.Dial("tcp", redisAddr)
+		},
+		MaxIdle:   1,
+		MaxActive: 2,
+		TestOnBorrow: func(c redis.Conn, t time.Time) error {
+			_, err := c.Do("PING")
+			return err
+		},
+		Wait: false, // if a connection is not available, proceed without cache.
+	}
+
+	// Clear the database
+	conn := pool.Get()
+	if _, err := conn.Do("FLUSHDB"); err != nil {
+		t.Fatalf("unexpected error flushing redis db: %v", err)
+	}
+	conn.Close()
+
+	cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool))
+}
diff --git a/registry/storage/catalog.go b/registry/storage/catalog.go
new file mode 100644
index 00000000..3c1a78de
--- /dev/null
+++ b/registry/storage/catalog.go
@@ -0,0 +1,148 @@
+package storage
+
+import (
+	"context"
+	"errors"
+	"io"
+	"path"
+	"strings"
+
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// Repositories returns a list, or partial list, of repositories in the registry.
+// Because it is quite an expensive operation, it should only be used when
+// building up an initial set of repositories.
+func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
+	var finishedWalk bool
+	var foundRepos []string
+
+	if len(repos) == 0 {
+		return 0, errors.New("no space in slice")
+	}
+
+	root, err := pathFor(repositoriesRootPathSpec{})
+	if err != nil {
+		return 0, err
+	}
+
+	err = reg.blobStore.driver.Walk(ctx, root, func(fileInfo driver.FileInfo) error {
+		err := handleRepository(fileInfo, root, last, func(repoPath string) error {
+			foundRepos = append(foundRepos, repoPath)
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+
+		// if we've filled our array, no need to walk any further
+		if len(foundRepos) == len(repos) {
+			finishedWalk = true
+			return driver.ErrSkipDir
+		}
+
+		return nil
+	})
+
+	n = copy(repos, foundRepos)
+
+	if err != nil {
+		return n, err
+	} else if !finishedWalk {
+		// We didn't fill the buffer. No more records are available.
+		return n, io.EOF
+	}
+
+	return n, err
+}
+
+// Enumerate applies ingester to each repository
+func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error {
+	root, err := pathFor(repositoriesRootPathSpec{})
+	if err != nil {
+		return err
+	}
+
+	err = reg.blobStore.driver.Walk(ctx, root, func(fileInfo driver.FileInfo) error {
+		return handleRepository(fileInfo, root, "", ingester)
+	})
+
+	return err
+}
+
+// lessPath returns true if path a is less than path b.
+//
+// A component-wise comparison is done, rather than the lexical comparison of
+// strings.
+func lessPath(a, b string) bool {
+	// we provide this behavior by making separator always sort first.
+	return compareReplaceInline(a, b, '/', '\x00') < 0
+}
+
+// compareReplaceInline modifies runtime.cmpstring to replace old with new
+// during a byte-wise comparison.
+func compareReplaceInline(s1, s2 string, old, new byte) int {
+	// TODO(stevvooe): We are missing an optimization when the s1 and s2 have
+	// the exact same slice header. It will make the code unsafe but can
+	// provide some extra performance.
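+	//
+	// With old='/' and new='\x00' (see lessPath), a separator sorts before
+	// any other byte, so e.g. lessPath("foo/a", "foo-bar/a") is true even
+	// though '/' (0x2f) is greater than '-' (0x2d) in a plain string
+	// comparison.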
+
+	l := len(s1)
+	if len(s2) < l {
+		l = len(s2)
+	}
+
+	for i := 0; i < l; i++ {
+		c1, c2 := s1[i], s2[i]
+		if c1 == old {
+			c1 = new
+		}
+
+		if c2 == old {
+			c2 = new
+		}
+
+		if c1 < c2 {
+			return -1
+		}
+
+		if c1 > c2 {
+			return +1
+		}
+	}
+
+	if len(s1) < len(s2) {
+		return -1
+	}
+
+	if len(s1) > len(s2) {
+		return +1
+	}
+
+	return 0
+}
+
+// handleRepository calls function fn with a repository path if fileInfo
+// has the path of a repository under root and that path is lexicographically
+// after last. Otherwise, it will return ErrSkipDir. This should be used
+// with Walk to handle repositories in a storage.
+func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoPath string) error) error {
+	filePath := fileInfo.Path()
+
+	// lop the base path off
+	repo := filePath[len(root)+1:]
+
+	_, file := path.Split(repo)
+	if file == "_layers" {
+		repo = strings.TrimSuffix(repo, "/_layers")
+		if lessPath(last, repo) {
+			if err := fn(repo); err != nil {
+				return err
+			}
+		}
+		return driver.ErrSkipDir
+	} else if strings.HasPrefix(file, "_") {
+		return driver.ErrSkipDir
+	}
+
+	return nil
+}
diff --git a/registry/storage/catalog_test.go b/registry/storage/catalog_test.go
new file mode 100644
index 00000000..696e024e
--- /dev/null
+++ b/registry/storage/catalog_test.go
@@ -0,0 +1,324 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math/rand"
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+	"github.com/docker/distribution/testutil"
+	"github.com/opencontainers/go-digest"
+)
+
+type setupEnv struct {
+	ctx      context.Context
+	driver   driver.StorageDriver
+	expected []string
+	registry distribution.Namespace
+}
+
+func setupFS(t *testing.T) *setupEnv {
+	d := inmemory.New()
+	ctx := context.Background()
+	registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
+	if err != nil {
+		t.Fatalf("error creating registry: %v", err)
+	}
+
+	repos := []string{
+		"foo/a",
+		"foo/b",
+		"foo-bar/a",
+		"bar/c",
+		"bar/d",
+		"bar/e",
+		"foo/d/in",
+		"foo-bar/b",
+		"test",
+	}
+
+	for _, repo := range repos {
+		makeRepo(ctx, t, repo, registry)
+	}
+
+	expected := []string{
+		"bar/c",
+		"bar/d",
+		"bar/e",
+		"foo/a",
+		"foo/b",
+		"foo/d/in",
+		"foo-bar/a",
+		"foo-bar/b",
+		"test",
+	}
+
+	return &setupEnv{
+		ctx:      ctx,
+		driver:   d,
+		expected: expected,
+		registry: registry,
+	}
+}
+
+func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.Namespace) {
+	named, err := reference.WithName(name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	repo, _ := reg.Repository(ctx, named)
+	manifests, _ := repo.Manifests(ctx)
+
+	layers, err := testutil.CreateRandomLayers(1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = testutil.UploadBlobs(repo, layers)
+	if err != nil {
+		t.Fatalf("failed to upload layers: %v", err)
+	}
+
+	getKeys := func(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) {
+		for d := range digests {
+			ds = append(ds, d)
+		}
+		return
+	}
+
+	manifest, err := testutil.MakeSchema1Manifest(getKeys(layers))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = manifests.Put(ctx, manifest)
+	if err != nil {
+		t.Fatalf("manifest upload failed: %v", err)
+	}
+
+}
+
+func TestCatalog(t *testing.T) {
+	env := setupFS(t)
+
+	p := make([]string,
50) + + numFilled, err := env.registry.Repositories(env.ctx, p, "") + if numFilled != len(env.expected) { + t.Errorf("missing items in catalog") + } + + if !testEq(p, env.expected, len(env.expected)) { + t.Errorf("Expected catalog repos err") + } + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } +} + +func TestCatalogInParts(t *testing.T) { + env := setupFS(t) + + chunkLen := 3 + p := make([]string, chunkLen) + + numFilled, err := env.registry.Repositories(env.ctx, p, "") + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[0:chunkLen], numFilled) { + t.Errorf("Expected catalog first chunk err") + } + + lastRepo := p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { + t.Errorf("Expected catalog second chunk err") + } + + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err != io.EOF || numFilled != len(p) { + t.Errorf("Expected end of catalog") + } + + if !testEq(p, env.expected[chunkLen*2:chunkLen*3], numFilled) { + t.Errorf("Expected catalog third chunk err") + } + + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } + + if numFilled != 0 { + t.Errorf("Expected catalog fourth chunk err") + } +} + +func TestCatalogEnumerate(t *testing.T) { + env := setupFS(t) + + var repos []string + repositoryEnumerator := env.registry.(distribution.RepositoryEnumerator) + err := repositoryEnumerator.Enumerate(env.ctx, func(repoName string) error { + repos = append(repos, repoName) + return nil + }) + if err != nil { + t.Errorf("Expected catalog enumerate err") + } + + if len(repos) != len(env.expected) { + t.Errorf("Expected catalog enumerate doesn't have correct number of values") + } + + if !testEq(repos, env.expected, len(env.expected)) { + t.Errorf("Expected catalog enumerate not over all values") + } +} + +func testEq(a, b []string, size int) bool { + for cnt := 0; cnt < size-1; cnt++ { + if a[cnt] != b[cnt] { + return false + } + } + return true +} + +func setupBadWalkEnv(t *testing.T) *setupEnv { + d := newBadListDriver() + ctx := context.Background() + registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + return &setupEnv{ + ctx: ctx, + driver: d, + registry: registry, + } +} + +type badListDriver struct { + driver.StorageDriver +} + +var _ driver.StorageDriver = &badListDriver{} + +func newBadListDriver() *badListDriver { + return &badListDriver{StorageDriver: inmemory.New()} +} + +func (d *badListDriver) List(ctx context.Context, path string) ([]string, error) { + return nil, fmt.Errorf("List error") +} + +func TestCatalogWalkError(t *testing.T) { + env := setupBadWalkEnv(t) + p := make([]string, 1) + + _, err := env.registry.Repositories(env.ctx, p, "") + if err == io.EOF { + t.Errorf("Expected catalog driver list error") + } +} + +func BenchmarkPathCompareEqual(B *testing.B) { + B.StopTimer() + pp := randomPath(100) + // make a real copy + ppb := append([]byte{}, []byte(pp)...) 
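+	// Copying into a fresh backing array keeps the two strings equal in
+	// content but distinct in memory, so the comparison below cannot be
+	// short-circuited by an identical string header (see the TODO in
+	// compareReplaceInline).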
+ a, b := pp, string(ppb) + + B.StartTimer() + for i := 0; i < B.N; i++ { + lessPath(a, b) + } +} + +func BenchmarkPathCompareNotEqual(B *testing.B) { + B.StopTimer() + a, b := randomPath(100), randomPath(100) + B.StartTimer() + + for i := 0; i < B.N; i++ { + lessPath(a, b) + } +} + +func BenchmarkPathCompareNative(B *testing.B) { + B.StopTimer() + a, b := randomPath(100), randomPath(100) + B.StartTimer() + + for i := 0; i < B.N; i++ { + c := a < b + c = c && false + } +} + +func BenchmarkPathCompareNativeEqual(B *testing.B) { + B.StopTimer() + pp := randomPath(100) + a, b := pp, pp + B.StartTimer() + + for i := 0; i < B.N; i++ { + c := a < b + c = c && false + } +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := make([]byte, length) + wasSeparator := true + for i := range b { + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } + } + return string(b) +} diff --git a/registry/storage/digester_resumable_test.go b/registry/storage/digester_resumable_test.go new file mode 100644 index 00000000..54ece3c4 --- /dev/null +++ b/registry/storage/digester_resumable_test.go @@ -0,0 +1,22 @@ +// +build !noresumabledigest + +package storage + +import ( + "testing" + + digest "github.com/opencontainers/go-digest" + "github.com/stevvooe/resumable" + _ "github.com/stevvooe/resumable/sha256" +) + +// TestResumableDetection just ensures that the resumable capability of a hash +// is exposed through the digester type, which is just a hash plus a Digest +// method. +func TestResumableDetection(t *testing.T) { + d := digest.Canonical.Digester() + + if _, ok := d.Hash().(resumable.Hash); !ok { + t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) + } +} diff --git a/registry/storage/doc.go b/registry/storage/doc.go new file mode 100644 index 00000000..387d9234 --- /dev/null +++ b/registry/storage/doc.go @@ -0,0 +1,3 @@ +// Package storage contains storage services for use in the registry +// application. It should be considered an internal package, as of Go 1.4. +package storage diff --git a/registry/storage/driver/azure/azure.go b/registry/storage/driver/azure/azure.go new file mode 100644 index 00000000..728d8a85 --- /dev/null +++ b/registry/storage/driver/azure/azure.go @@ -0,0 +1,507 @@ +// Package azure provides a storagedriver.StorageDriver implementation to +// store blobs in Microsoft Azure Blob Storage Service. 
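+//
+// A registry configuration sketch for this driver (the parameter names match
+// the constants below; values are placeholders):
+//
+//	storage:
+//	  azure:
+//	    accountname: <account-name>
+//	    accountkey: <base64-account-key>
+//	    container: registry
+//	    realm: core.windows.net # optional; defaults to the SDK base URL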
+package azure + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +const driverName = "azure" + +const ( + paramAccountName = "accountname" + paramAccountKey = "accountkey" + paramContainer = "container" + paramRealm = "realm" + maxChunkSize = 4 * 1024 * 1024 +) + +type driver struct { + client azure.BlobStorageClient + container string +} + +type baseEmbed struct{ base.Base } + +// Driver is a storagedriver.StorageDriver implementation backed by +// Microsoft Azure Blob Storage Service. +type Driver struct{ baseEmbed } + +func init() { + factory.Register(driverName, &azureDriverFactory{}) +} + +type azureDriverFactory struct{} + +func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// FromParameters constructs a new Driver with a given parameters map. +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + accountName, ok := parameters[paramAccountName] + if !ok || fmt.Sprint(accountName) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountName) + } + + accountKey, ok := parameters[paramAccountKey] + if !ok || fmt.Sprint(accountKey) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) + } + + container, ok := parameters[paramContainer] + if !ok || fmt.Sprint(container) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramContainer) + } + + realm, ok := parameters[paramRealm] + if !ok || fmt.Sprint(realm) == "" { + realm = azure.DefaultBaseURL + } + + return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) +} + +// New constructs a new Driver with the given Azure Storage Account credentials +func New(accountName, accountKey, container, realm string) (*Driver, error) { + api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) + if err != nil { + return nil, err + } + + blobClient := api.GetBlobService() + + // Create registry container + containerRef := blobClient.GetContainerReference(container) + if _, err = containerRef.CreateIfNotExists(); err != nil { + return nil, err + } + + d := &driver{ + client: blobClient, + container: container} + return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil +} + +// Implement the storagedriver.StorageDriver interface. +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + blob, err := d.client.GetBlob(d.container, path) + if err != nil { + if is404(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + defer blob.Close() + return ioutil.ReadAll(blob) +} + +// PutContent stores the []byte content at a location designated by "path". 
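+// Content larger than 64 MB is rejected, since the blob is written with a
+// single "Put Blob" operation (see the size check below).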
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if limit := 64 * 1024 * 1024; len(contents) > limit { // max size for block blobs uploaded via single "Put Blob" + return fmt.Errorf("uploading %d bytes with PutContent is not supported; limit: %d bytes", len(contents), limit) + } + + // Historically, blobs uploaded via PutContent used to be of type AppendBlob + // (https://github.com/docker/distribution/pull/1438). We can't replace + // these blobs atomically via a single "Put Blob" operation without + // deleting them first. Once we detect they are BlockBlob type, we can + // overwrite them with an atomically "Put Blob" operation. + // + // While we delete the blob and create a new one, there will be a small + // window of inconsistency and if the Put Blob fails, we may end up with + // losing the existing data while migrating it to BlockBlob type. However, + // expectation is the clients pushing will be retrying when they get an error + // response. + props, err := d.client.GetBlobProperties(d.container, path) + if err != nil && !is404(err) { + return fmt.Errorf("failed to get blob properties: %v", err) + } + if err == nil && props.BlobType != azure.BlobTypeBlock { + if err := d.client.DeleteBlob(d.container, path, nil); err != nil { + return fmt.Errorf("failed to delete legacy blob (%s): %v", props.BlobType, err) + } + } + + r := bytes.NewReader(contents) + return d.client.CreateBlockBlobFromReader(d.container, path, uint64(len(contents)), r, nil) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if !ok { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + info, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + size := int64(info.ContentLength) + if offset >= size { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + bytesRange := fmt.Sprintf("%v-", offset) + resp, err := d.client.GetBlobRange(d.container, path, bytesRange, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + blobExists, err := d.client.BlobExists(d.container, path) + if err != nil { + return nil, err + } + var size int64 + if blobExists { + if append { + blobProperties, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + size = blobProperties.ContentLength + } else { + err := d.client.DeleteBlob(d.container, path, nil) + if err != nil { + return nil, err + } + } + } else { + if append { + return nil, storagedriver.PathNotFoundError{Path: path} + } + err := d.client.PutAppendBlob(d.container, path, nil) + if err != nil { + return nil, err + } + } + + return d.newWriter(path, size), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
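+// A path that is not itself a blob but has blobs beneath it is reported as a
+// directory (the "virtual container" check below).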
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // Check if the path is a blob + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if ok { + blob, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + mtim, err := time.Parse(http.TimeFormat, blob.LastModified) + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(blob.ContentLength), + ModTime: mtim, + IsDir: false, + }}, nil + } + + // Check if path is a virtual container + virtContainerPath := path + if !strings.HasSuffix(virtContainerPath, "/") { + virtContainerPath += "/" + } + + containerRef := d.client.GetContainerReference(d.container) + blobs, err := containerRef.ListBlobs(azure.ListBlobsParameters{ + Prefix: virtContainerPath, + MaxResults: 1, + }) + if err != nil { + return nil, err + } + if len(blobs.Blobs) > 0 { + // path is a virtual container + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + }}, nil + } + + // path is not a blob or virtual container + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path == "/" { + path = "" + } + + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return blobs, err + } + + list := directDescendants(blobs, path) + if path != "" && len(list) == 0 { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) + err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) + if err != nil { + if is404(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err + } + + return d.client.DeleteBlob(d.container, sourcePath, nil) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + ok, err := d.client.DeleteBlobIfExists(d.container, path, nil) + if err != nil { + return err + } + if ok { + return nil // was a blob and deleted, return + } + + // Not a blob, see if path is a virtual container with blobs + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return err + } + + for _, b := range blobs { + if err = d.client.DeleteBlob(d.container, b, nil); err != nil { + return err + } + } + + if len(blobs) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + return nil +} + +// URLFor returns a publicly accessible URL for the blob stored at given path +// for specified duration by making use of Azure Storage Shared Access Signatures (SAS). +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. 
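+//
+// The default expiry is 20 minutes; callers can override it through the
+// "expiry" option (a sketch with a hypothetical path):
+//
+//	url, err := d.URLFor(ctx, "/some/blob/path", map[string]interface{}{
+//		"expiry": time.Now().Add(time.Hour),
+//	})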
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration + expires, ok := options["expiry"] + if ok { + t, ok := expires.(time.Time) + if ok { + expiresTime = t + } + } + return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") +} + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + +// directDescendants will find direct descendants (blobs or virtual containers) +// of from list of blob paths and will return their full paths. Elements in blobs +// list must be prefixed with a "/" and +// +// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is +// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"} +func directDescendants(blobs []string, prefix string) []string { + if !strings.HasPrefix(prefix, "/") { // add trailing '/' + prefix = "/" + prefix + } + if !strings.HasSuffix(prefix, "/") { // containerify the path + prefix += "/" + } + + out := make(map[string]bool) + for _, b := range blobs { + if strings.HasPrefix(b, prefix) { + rel := b[len(prefix):] + c := strings.Count(rel, "/") + if c == 0 { + out[b] = true + } else { + out[prefix+rel[:strings.Index(rel, "/")]] = true + } + } + } + + var keys []string + for k := range out { + keys = append(keys, k) + } + return keys +} + +func (d *driver) listBlobs(container, virtPath string) ([]string, error) { + if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path + virtPath += "/" + } + + out := []string{} + marker := "" + containerRef := d.client.GetContainerReference(d.container) + for { + resp, err := containerRef.ListBlobs(azure.ListBlobsParameters{ + Marker: marker, + Prefix: virtPath, + }) + + if err != nil { + return out, err + } + + for _, b := range resp.Blobs { + out = append(out, b.Name) + } + + if len(resp.Blobs) == 0 || resp.NextMarker == "" { + break + } + marker = resp.NextMarker + } + return out, nil +} + +func is404(err error) bool { + statusCodeErr, ok := err.(azure.AzureStorageServiceError) + return ok && statusCodeErr.StatusCode == http.StatusNotFound +} + +type writer struct { + driver *driver + path string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path string, size int64) storagedriver.FileWriter { + return &writer{ + driver: d, + path: path, + size: size, + bw: bufio.NewWriterSize(&blockWriter{ + client: d.client, + container: d.container, + path: path, + }, maxChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.bw.Flush() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.client.DeleteBlob(w.driver.container, w.path, nil) +} + +func (w *writer) 
Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return w.bw.Flush() +} + +type blockWriter struct { + client azure.BlobStorageClient + container string + path string +} + +func (bw *blockWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += maxChunkSize { + chunkSize := maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize], nil) + if err != nil { + return n, err + } + + n += chunkSize + } + + return n, nil +} diff --git a/registry/storage/driver/azure/azure_test.go b/registry/storage/driver/azure/azure_test.go new file mode 100644 index 00000000..4a0661b3 --- /dev/null +++ b/registry/storage/driver/azure/azure_test.go @@ -0,0 +1,63 @@ +package azure + +import ( + "fmt" + "os" + "strings" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + . "gopkg.in/check.v1" +) + +const ( + envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" + envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" + envContainer = "AZURE_STORAGE_CONTAINER" + envRealm = "AZURE_STORAGE_REALM" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func init() { + var ( + accountName string + accountKey string + container string + realm string + ) + + config := []struct { + env string + value *string + }{ + {envAccountName, &accountName}, + {envAccountKey, &accountKey}, + {envContainer, &container}, + {envRealm, &realm}, + } + + missing := []string{} + for _, v := range config { + *v.value = os.Getenv(v.env) + if *v.value == "" { + missing = append(missing, v.env) + } + } + + azureDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(accountName, accountKey, container, realm) + } + + // Skip Azure storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if len(missing) > 0 { + return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) + } + return "" + } + + testsuites.RegisterSuite(azureDriverConstructor, skipCheck) +} diff --git a/registry/storage/driver/base/base.go b/registry/storage/driver/base/base.go new file mode 100644 index 00000000..ab55b022 --- /dev/null +++ b/registry/storage/driver/base/base.go @@ -0,0 +1,240 @@ +// Package base provides a base implementation of the storage driver that can +// be used to implement common checks. The goal is to increase the amount of +// code sharing. +// +// The canonical approach to use this class is to embed in the exported driver +// struct such that calls are proxied through this implementation. First, +// declare the internal driver, as follows: +// +// type driver struct { ... internal ...} +// +// The resulting type should implement StorageDriver such that it can be the +// target of a Base struct. The exported type can then be declared as follows: +// +// type Driver struct { +// Base +// } +// +// Because Driver embeds Base, it effectively implements Base. If the driver +// needs to intercept a call, before going to base, Driver should implement +// that method. Effectively, Driver can intercept calls before coming in and +// driver implements the actual logic. 
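+//
+// For example, a driver needing custom behavior for a single method can
+// override just that method and delegate the rest (a sketch, not code from
+// this package):
+//
+//	func (d *Driver) Delete(ctx context.Context, path string) error {
+//		// custom pre-checks here, then defer to the embedded Base
+//		return d.Base.Delete(ctx, path)
+//	}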
+// +// To further shield the embed from other packages, it is recommended to +// employ a private embed struct: +// +// type baseEmbed struct { +// base.Base +// } +// +// Then, declare driver to embed baseEmbed, rather than Base directly: +// +// type Driver struct { +// baseEmbed +// } +// +// The type now implements StorageDriver, proxying through Base, without +// exporting an unnecessary field. +package base + +import ( + "context" + "io" + + dcontext "github.com/docker/distribution/context" + prometheus "github.com/docker/distribution/metrics" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/go-metrics" + "time" +) + +var ( + // storageAction is the metrics of blob related operations + storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action") +) + +func init() { + metrics.Register(prometheus.StorageNamespace) +} + +// Base provides a wrapper around a storagedriver implementation that provides +// common path and bounds checking. +type Base struct { + storagedriver.StorageDriver +} + +// Format errors received from the storage driver +func (base *Base) setDriverName(e error) error { + switch actual := e.(type) { + case nil: + return nil + case storagedriver.ErrUnsupportedMethod: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.PathNotFoundError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidPathError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidOffsetError: + actual.DriverName = base.StorageDriver.Name() + return actual + default: + storageError := storagedriver.Error{ + DriverName: base.StorageDriver.Name(), + Enclosed: e, + } + + return storageError + } +} + +// GetContent wraps GetContent of underlying storage driver. +func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.GetContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + start := time.Now() + b, e := base.StorageDriver.GetContent(ctx, path) + storageAction.WithValues(base.Name(), "GetContent").UpdateSince(start) + return b, base.setDriverName(e) +} + +// PutContent wraps PutContent of underlying storage driver. +func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.PutContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + start := time.Now() + err := base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) + storageAction.WithValues(base.Name(), "PutContent").UpdateSince(start) + return err +} + +// Reader wraps Reader of underlying storage driver. 
+func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.Reader(%q, %d)", base.Name(), path, offset)
+
+	if offset < 0 {
+		return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()}
+	}
+
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	rc, e := base.StorageDriver.Reader(ctx, path, offset)
+	return rc, base.setDriverName(e)
+}
+
+// Writer wraps Writer of underlying storage driver.
+func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.Writer(%q, %v)", base.Name(), path, append)
+
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	writer, e := base.StorageDriver.Writer(ctx, path, append)
+	return writer, base.setDriverName(e)
+}
+
+// Stat wraps Stat of underlying storage driver.
+func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.Stat(%q)", base.Name(), path)
+
+	if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
+		return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	start := time.Now()
+	fi, e := base.StorageDriver.Stat(ctx, path)
+	storageAction.WithValues(base.Name(), "Stat").UpdateSince(start)
+	return fi, base.setDriverName(e)
+}
+
+// List wraps List of underlying storage driver.
+func (base *Base) List(ctx context.Context, path string) ([]string, error) {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.List(%q)", base.Name(), path)
+
+	if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
+		return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
+	}
+
+	start := time.Now()
+	str, e := base.StorageDriver.List(ctx, path)
+	storageAction.WithValues(base.Name(), "List").UpdateSince(start)
+	return str, base.setDriverName(e)
+}
+
+// Move wraps Move of underlying storage driver.
+func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error {
+	ctx, done := dcontext.WithTrace(ctx)
+	defer done("%s.Move(%q, %q)", base.Name(), sourcePath, destPath)
+
+	if !storagedriver.PathRegexp.MatchString(sourcePath) {
+		return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()}
+	} else if !storagedriver.PathRegexp.MatchString(destPath) {
+		return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()}
+	}
+
+	start := time.Now()
+	err := base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath))
+	storageAction.WithValues(base.Name(), "Move").UpdateSince(start)
+	return err
+}
+
+// Delete wraps Delete of underlying storage driver.
+func (base *Base) Delete(ctx context.Context, path string) error { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.Delete(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + start := time.Now() + err := base.setDriverName(base.StorageDriver.Delete(ctx, path)) + storageAction.WithValues(base.Name(), "Delete").UpdateSince(start) + return err +} + +// URLFor wraps URLFor of underlying storage driver. +func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.URLFor(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + start := time.Now() + str, e := base.StorageDriver.URLFor(ctx, path, options) + storageAction.WithValues(base.Name(), "URLFor").UpdateSince(start) + return str, base.setDriverName(e) +} + +// Walk wraps Walk of underlying storage driver. +func (base *Base) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.Walk(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Walk(ctx, path, f)) +} diff --git a/registry/storage/driver/base/regulator.go b/registry/storage/driver/base/regulator.go new file mode 100644 index 00000000..97a30ae4 --- /dev/null +++ b/registry/storage/driver/base/regulator.go @@ -0,0 +1,141 @@ +package base + +import ( + "context" + "io" + "sync" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +type regulator struct { + storagedriver.StorageDriver + *sync.Cond + + available uint64 +} + +// NewRegulator wraps the given driver and is used to regulate concurrent calls +// to the given storage driver to a maximum of the given limit. This is useful +// for storage drivers that would otherwise create an unbounded number of OS +// threads if allowed to be called unregulated. +func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver { + return ®ulator{ + StorageDriver: driver, + Cond: sync.NewCond(&sync.Mutex{}), + available: limit, + } +} + +func (r *regulator) enter() { + r.L.Lock() + for r.available == 0 { + r.Wait() + } + r.available-- + r.L.Unlock() +} + +func (r *regulator) exit() { + r.L.Lock() + r.Signal() + r.available++ + r.L.Unlock() +} + +// Name returns the human-readable "name" of the driver, useful in error +// messages and logging. By convention, this will just be the registration +// name, but drivers may provide other information here. +func (r *regulator) Name() string { + r.enter() + defer r.exit() + + return r.StorageDriver.Name() +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. +func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.GetContent(ctx, path) +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. 
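+//
+// As with every method on this wrapper, the call holds one of the
+// regulator's limited slots for its full duration (see enter and exit
+// above), so at most limit driver operations run concurrently.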
+func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error { + r.enter() + defer r.exit() + + return r.StorageDriver.PutContent(ctx, path, content) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Reader(ctx, path, offset) +} + +// Writer stores the contents of the provided io.ReadCloser at a +// location designated by the given path. +// May be used to resume writing a stream by providing a nonzero offset. +// The offset must be no larger than the CurrentSize for this path. +func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Writer(ctx, path, append) +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Stat(ctx, path) +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (r *regulator) List(ctx context.Context, path string) ([]string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.List(ctx, path) +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +// Note: This may be no more efficient than a copy followed by a delete for +// many implementations. +func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Move(ctx, sourcePath, destPath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (r *regulator) Delete(ctx context.Context, path string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Delete(ctx, path) +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. +// May return an ErrUnsupportedMethod in certain StorageDriver +// implementations. 
+func (r *regulator) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.URLFor(ctx, path, options) +} diff --git a/registry/storage/driver/base/regulator_test.go b/registry/storage/driver/base/regulator_test.go new file mode 100644 index 00000000..e4c0ad58 --- /dev/null +++ b/registry/storage/driver/base/regulator_test.go @@ -0,0 +1,67 @@ +package base + +import ( + "sync" + "testing" + "time" +) + +func TestRegulatorEnterExit(t *testing.T) { + const limit = 500 + + r := NewRegulator(nil, limit).(*regulator) + + for try := 0; try < 50; try++ { + run := make(chan struct{}) + + var firstGroupReady sync.WaitGroup + var firstGroupDone sync.WaitGroup + firstGroupReady.Add(limit) + firstGroupDone.Add(limit) + for i := 0; i < limit; i++ { + go func() { + r.enter() + firstGroupReady.Done() + <-run + r.exit() + firstGroupDone.Done() + }() + } + firstGroupReady.Wait() + + // now we exhausted all the limit, let's run a little bit more + var secondGroupReady sync.WaitGroup + var secondGroupDone sync.WaitGroup + for i := 0; i < 50; i++ { + secondGroupReady.Add(1) + secondGroupDone.Add(1) + go func() { + secondGroupReady.Done() + r.enter() + r.exit() + secondGroupDone.Done() + }() + } + secondGroupReady.Wait() + + // allow the first group to return resources + close(run) + + done := make(chan struct{}) + go func() { + secondGroupDone.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("some r.enter() are still locked") + } + + firstGroupDone.Wait() + + if r.available != limit { + t.Fatalf("r.available: got %d, want %d", r.available, limit) + } + } +} diff --git a/registry/storage/driver/factory/factory.go b/registry/storage/driver/factory/factory.go new file mode 100644 index 00000000..a9c04ec5 --- /dev/null +++ b/registry/storage/driver/factory/factory.go @@ -0,0 +1,64 @@ +package factory + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// driverFactories stores an internal mapping between storage driver names and their respective +// factories +var driverFactories = make(map[string]StorageDriverFactory) + +// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces +// Storage drivers should call Register() with a factory to make the driver available by name. +// Individual StorageDriver implementations generally register with the factory via the Register +// func (below) in their init() funcs, and as such they should be imported anonymously before use. +// See below for an example of how to register and get a StorageDriver for S3 +// +// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws" +// s3Driver, err = factory.Create("s3", storageParams) +// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams +type StorageDriverFactory interface { + // Create returns a new storagedriver.StorageDriver with the given parameters + // Parameters will vary by driver and may be ignored + // Each parameter key must only consist of lowercase letters and numbers + Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) +} + +// Register makes a storage driver available by the provided name. +// If Register is called twice with the same name or if driver factory is nil, it panics. +// Additionally, it is not concurrency safe. 
Most Storage Drivers call this function +// in their init() functions. See the documentation for StorageDriverFactory for more. +func Register(name string, factory StorageDriverFactory) { + if factory == nil { + panic("Must not provide nil StorageDriverFactory") + } + _, registered := driverFactories[name] + if registered { + panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) + } + + driverFactories[name] = factory +} + +// Create a new storagedriver.StorageDriver with the given name and +// parameters. To use a driver, the StorageDriverFactory must first be +// registered with the given name. If no drivers are found, an +// InvalidStorageDriverError is returned +func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + driverFactory, ok := driverFactories[name] + if !ok { + return nil, InvalidStorageDriverError{name} + } + return driverFactory.Create(parameters) +} + +// InvalidStorageDriverError records an attempt to construct an unregistered storage driver +type InvalidStorageDriverError struct { + Name string +} + +func (err InvalidStorageDriverError) Error() string { + return fmt.Sprintf("StorageDriver not registered: %s", err.Name) +} diff --git a/registry/storage/driver/fileinfo.go b/registry/storage/driver/fileinfo.go new file mode 100644 index 00000000..e5064029 --- /dev/null +++ b/registry/storage/driver/fileinfo.go @@ -0,0 +1,79 @@ +package driver + +import "time" + +// FileInfo returns information about a given path. Inspired by os.FileInfo, +// it elides the base name method for a full path instead. +type FileInfo interface { + // Path provides the full path of the target of this file info. + Path() string + + // Size returns current length in bytes of the file. The return value can + // be used to write to the end of the file at path. The value is + // meaningless if IsDir returns true. + Size() int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime() time.Time + + // IsDir returns true if the path is a directory. + IsDir() bool +} + +// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal +// should only be used by storagedriver implementations. They should moved to +// a "driver" package, similar to database/sql. + +// FileInfoFields provides the exported fields for implementing FileInfo +// interface in storagedriver implementations. It should be used with +// InternalFileInfo. +type FileInfoFields struct { + // Path provides the full path of the target of this file info. + Path string + + // Size is current length in bytes of the file. The value of this field + // can be used to write to the end of the file at path. The value is + // meaningless if IsDir is set to true. + Size int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime time.Time + + // IsDir returns true if the path is a directory. + IsDir bool +} + +// FileInfoInternal implements the FileInfo interface. This should only be +// used by storagedriver implementations that don't have a specialized +// FileInfo type. +type FileInfoInternal struct { + FileInfoFields +} + +var _ FileInfo = FileInfoInternal{} +var _ FileInfo = &FileInfoInternal{} + +// Path provides the full path of the target of this file info. 
+func (fi FileInfoInternal) Path() string { + return fi.FileInfoFields.Path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi FileInfoInternal) Size() int64 { + return fi.FileInfoFields.Size +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi FileInfoInternal) ModTime() time.Time { + return fi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory. +func (fi FileInfoInternal) IsDir() bool { + return fi.FileInfoFields.IsDir +} diff --git a/registry/storage/driver/filesystem/driver.go b/registry/storage/driver/filesystem/driver.go new file mode 100644 index 00000000..9a9c062a --- /dev/null +++ b/registry/storage/driver/filesystem/driver.go @@ -0,0 +1,446 @@ +package filesystem + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "reflect" + "strconv" + "time" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const ( + driverName = "filesystem" + defaultRootDirectory = "/var/lib/registry" + defaultMaxThreads = uint64(100) + + // minThreads is the minimum value for the maxthreads configuration + // parameter. If the driver's parameters are less than this we set + // the parameters to minThreads + minThreads = uint64(25) +) + +// DriverParameters represents all configuration options available for the +// filesystem driver +type DriverParameters struct { + RootDirectory string + MaxThreads uint64 +} + +func init() { + factory.Register(driverName, &filesystemDriverFactory{}) +} + +// filesystemDriverFactory implements the factory.StorageDriverFactory interface +type filesystemDriverFactory struct{} + +func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + rootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local +// filesystem. All provided paths will be subpaths of the RootDirectory. 
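+//
+// A minimal construction sketch (editorial; both parameters are optional and
+// the values shown are illustrative, see FromParameters below):
+//
+//	d, err := FromParameters(map[string]interface{}{
+//		"rootdirectory": "/var/lib/registry",
+//		"maxthreads":    100,
+//	})
+//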
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Optional Parameters: +// - rootdirectory +// - maxthreads +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params, err := fromParametersImpl(parameters) + if err != nil || params == nil { + return nil, err + } + return New(*params), nil +} + +func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) { + var ( + err error + maxThreads = defaultMaxThreads + rootDirectory = defaultRootDirectory + ) + + if parameters != nil { + if rootDir, ok := parameters["rootdirectory"]; ok { + rootDirectory = fmt.Sprint(rootDir) + } + + // Get maximum number of threads for blocking filesystem operations, + // if specified + threads := parameters["maxthreads"] + switch v := threads.(type) { + case string: + if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil { + return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads) + } + case uint64: + maxThreads = v + case int, int32, int64: + val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int() + // If threads is negative casting to uint64 will wrap around and + // give you the hugest thread limit ever. Let's be sensible, here + if val > 0 { + maxThreads = uint64(val) + } + case uint, uint32: + maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads) + } + + if maxThreads < minThreads { + maxThreads = minThreads + } + } + + params := &DriverParameters{ + RootDirectory: rootDirectory, + MaxThreads: maxThreads, + } + return params, nil +} + +// New constructs a new Driver with a given rootDirectory +func New(params DriverParameters) *Driver { + fsDriver := &driver{rootDirectory: params.RootDirectory} + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads), + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { + writer, err := d.Writer(ctx, subPath, false) + if err != nil { + return err + } + defer writer.Close() + _, err = io.Copy(writer, bytes.NewReader(contents)) + if err != nil { + writer.Cancel() + return err + } + return writer.Commit() +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
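+//
+// For example (editorial sketch; the path and offset are hypothetical), a
+// caller resuming a partial read might do:
+//
+//	rc, err := d.Reader(ctx, "/some/path", bytesAlreadyRead)
+//	if err == nil {
+//		defer rc.Close()
+//		// continue reading from bytesAlreadyRead onward
+//	}
+//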
+func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return nil, err + } + + seekPos, err := file.Seek(int64(offset), os.SEEK_SET) + if err != nil { + file.Close() + return nil, err + } else if seekPos < int64(offset) { + file.Close() + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + return file, nil +} + +func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) { + fullPath := d.fullPath(subPath) + parentDir := path.Dir(fullPath) + if err := os.MkdirAll(parentDir, 0777); err != nil { + return nil, err + } + + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + + var offset int64 + + if !append { + err := fp.Truncate(0) + if err != nil { + fp.Close() + return nil, err + } + } else { + n, err := fp.Seek(0, os.SEEK_END) + if err != nil { + fp.Close() + return nil, err + } + offset = int64(n) + } + + return newFileWriter(fp, offset), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { + fullPath := d.fullPath(subPath) + + fi, err := os.Stat(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + + return nil, err + } + + return fileInfo{ + path: subPath, + FileInfo: fi, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { + fullPath := d.fullPath(subPath) + + dir, err := os.Open(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + return nil, err + } + + defer dir.Close() + + fileNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(fileNames)) + for _, fileName := range fileNames { + keys = append(keys, path.Join(subPath, fileName)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + source := d.fullPath(sourcePath) + dest := d.fullPath(destPath) + + if _, err := os.Stat(source); os.IsNotExist(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + + if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { + return err + } + + err := os.Rename(source, dest) + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, subPath string) error { + fullPath := d.fullPath(subPath) + + _, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + return storagedriver.PathNotFoundError{Path: subPath} + } + + err = os.RemoveAll(fullPath) + return err +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
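+//
+// A caller can detect the unsupported case and fall back to serving the
+// content itself; as an editorial sketch:
+//
+//	url, err := d.URLFor(ctx, path, nil)
+//	if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok {
+//		// stream the blob directly instead of redirecting
+//	}
+//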
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod{} +} + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + +// fullPath returns the absolute path of a key within the Driver's storage. +func (d *driver) fullPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +type fileInfo struct { + os.FileInfo + path string +} + +var _ storagedriver.FileInfo = fileInfo{} + +// Path provides the full path of the target of this file info. +func (fi fileInfo) Path() string { + return fi.path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi fileInfo) Size() int64 { + if fi.IsDir() { + return 0 + } + + return fi.FileInfo.Size() +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi fileInfo) ModTime() time.Time { + return fi.FileInfo.ModTime() +} + +// IsDir returns true if the path is a directory. +func (fi fileInfo) IsDir() bool { + return fi.FileInfo.IsDir() +} + +type fileWriter struct { + file *os.File + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func newFileWriter(file *os.File, size int64) *fileWriter { + return &fileWriter{ + file: file, + size: size, + bw: bufio.NewWriter(file), + } +} + +func (fw *fileWriter) Write(p []byte) (int, error) { + if fw.closed { + return 0, fmt.Errorf("already closed") + } else if fw.committed { + return 0, fmt.Errorf("already committed") + } else if fw.cancelled { + return 0, fmt.Errorf("already cancelled") + } + n, err := fw.bw.Write(p) + fw.size += int64(n) + return n, err +} + +func (fw *fileWriter) Size() int64 { + return fw.size +} + +func (fw *fileWriter) Close() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + if err := fw.file.Close(); err != nil { + return err + } + fw.closed = true + return nil +} + +func (fw *fileWriter) Cancel() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + fw.cancelled = true + fw.file.Close() + return os.Remove(fw.file.Name()) +} + +func (fw *fileWriter) Commit() error { + if fw.closed { + return fmt.Errorf("already closed") + } else if fw.committed { + return fmt.Errorf("already committed") + } else if fw.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + fw.committed = true + return nil +} diff --git a/registry/storage/driver/filesystem/driver_test.go b/registry/storage/driver/filesystem/driver_test.go new file mode 100644 index 00000000..3be85923 --- /dev/null +++ b/registry/storage/driver/filesystem/driver_test.go @@ -0,0 +1,113 @@ +package filesystem + +import ( + "io/ioutil" + "os" + "reflect" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + . 
"gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func init() { + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + driver, err := FromParameters(map[string]interface{}{ + "rootdirectory": root, + }) + if err != nil { + panic(err) + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return driver, nil + }, testsuites.NeverSkip) +} + +func TestFromParametersImpl(t *testing.T) { + + tests := []struct { + params map[string]interface{} // techincally the yaml can contain anything + expected DriverParameters + pass bool + }{ + // check we use default threads and root dirs + { + params: map[string]interface{}{}, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: defaultMaxThreads, + }, + pass: true, + }, + // Testing initiation with a string maxThreads which can't be parsed + { + params: map[string]interface{}{ + "maxthreads": "fail", + }, + expected: DriverParameters{}, + pass: false, + }, + { + params: map[string]interface{}{ + "maxthreads": "100", + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: uint64(100), + }, + pass: true, + }, + { + params: map[string]interface{}{ + "maxthreads": 100, + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: uint64(100), + }, + pass: true, + }, + // check that we use minimum thread counts + { + params: map[string]interface{}{ + "maxthreads": 1, + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: minThreads, + }, + pass: true, + }, + } + + for _, item := range tests { + params, err := fromParametersImpl(item.params) + + if !item.pass { + // We only need to assert that expected failures have an error + if err == nil { + t.Fatalf("expected error configuring filesystem driver with invalid param: %+v", item.params) + } + continue + } + + if err != nil { + t.Fatalf("unexpected error creating filesystem driver: %s", err) + } + // Note that we get a pointer to params back + if !reflect.DeepEqual(*params, item.expected) { + t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params) + } + } + +} diff --git a/registry/storage/driver/gcs/doc.go b/registry/storage/driver/gcs/doc.go new file mode 100644 index 00000000..0f23ea78 --- /dev/null +++ b/registry/storage/driver/gcs/doc.go @@ -0,0 +1,3 @@ +// Package gcs implements the Google Cloud Storage driver backend. Support can be +// enabled by including the "include_gcs" build tag. +package gcs diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go new file mode 100644 index 00000000..d129bee7 --- /dev/null +++ b/registry/storage/driver/gcs/gcs.go @@ -0,0 +1,876 @@ +// Package gcs provides a storagedriver.StorageDriver implementation to +// store blobs in Google cloud storage. +// +// This package leverages the google.golang.org/cloud/storage client library +//for interfacing with gcs. 
+//
+// Because GCS is a key-value store, the Stat call does not support last
+// modification time for directories (directories are an abstraction for
+// key-value stores).
+//
+// Note that the contents of incomplete uploads are not accessible even though
+// Stat returns their length.
+//
+// +build include_gcs
+
+package gcs
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/base"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"golang.org/x/oauth2/jwt"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/cloud"
+	"google.golang.org/cloud/storage"
+)
+
+const (
+	driverName     = "gcs"
+	dummyProjectID = ""
+
+	uploadSessionContentType = "application/x-docker-upload-session"
+	minChunkSize             = 256 * 1024
+	defaultChunkSize         = 20 * minChunkSize
+
+	maxTries = 5
+)
+
+// rangeHeader matches the Range header of a resumable-upload status
+// response, for example "bytes=0-1234".
+var rangeHeader = regexp.MustCompile(`^bytes=([0-9]+)-([0-9]+)$`)
+
+// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set
+type driverParameters struct {
+	bucket        string
+	config        *jwt.Config
+	email         string
+	privateKey    []byte
+	client        *http.Client
+	rootDirectory string
+	chunkSize     int
+}
+
+func init() {
+	factory.Register(driverName, &gcsDriverFactory{})
+}
+
+// gcsDriverFactory implements the factory.StorageDriverFactory interface
+type gcsDriverFactory struct{}
+
+// Create StorageDriver from parameters
+func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
+// driver is a storagedriver.StorageDriver implementation backed by GCS.
+// Objects are stored at absolute keys in the provided bucket. 
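+//
+// As an editorial example (the path is hypothetical): with rootDirectory
+// "registry", the path "/docker/registry/v2/blobs/x" maps to the bucket key
+//
+//	registry/docker/registry/v2/blobs/x
+//
+// as computed by pathToKey near the bottom of this file.
+//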
+type driver struct {
+	client        *http.Client
+	bucket        string
+	email         string
+	privateKey    []byte
+	rootDirectory string
+	chunkSize     int
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - bucket
+func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	bucket, ok := parameters["bucket"]
+	if !ok || fmt.Sprint(bucket) == "" {
+		return nil, fmt.Errorf("no bucket parameter provided")
+	}
+
+	rootDirectory, ok := parameters["rootdirectory"]
+	if !ok {
+		rootDirectory = ""
+	}
+
+	chunkSize := defaultChunkSize
+	chunkSizeParam, ok := parameters["chunksize"]
+	if ok {
+		switch v := chunkSizeParam.(type) {
+		case string:
+			vv, err := strconv.Atoi(v)
+			if err != nil {
+				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+			}
+			chunkSize = vv
+		case int, uint, int32, uint32, uint64, int64:
+			chunkSize = int(reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int())
+		default:
+			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+		}
+
+		if chunkSize < minChunkSize {
+			return nil, fmt.Errorf("the chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
+		}
+
+		if chunkSize%minChunkSize != 0 {
+			return nil, fmt.Errorf("chunksize should be a multiple of %d", minChunkSize)
+		}
+	}
+
+	var ts oauth2.TokenSource
+	jwtConf := new(jwt.Config)
+	if keyfile, ok := parameters["keyfile"]; ok {
+		jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile))
+		if err != nil {
+			return nil, err
+		}
+		jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl)
+		if err != nil {
+			return nil, err
+		}
+		ts = jwtConf.TokenSource(context.Background())
+	} else {
+		var err error
+		ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	params := driverParameters{
+		bucket:        fmt.Sprint(bucket),
+		rootDirectory: fmt.Sprint(rootDirectory),
+		email:         jwtConf.Email,
+		privateKey:    jwtConf.PrivateKey,
+		client:        oauth2.NewClient(context.Background(), ts),
+		chunkSize:     chunkSize,
+	}
+
+	return New(params)
+}
+
+// New constructs a new driver
+func New(params driverParameters) (storagedriver.StorageDriver, error) {
+	rootDirectory := strings.Trim(params.rootDirectory, "/")
+	if rootDirectory != "" {
+		rootDirectory += "/"
+	}
+	if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 {
+		return nil, fmt.Errorf("invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize)
+	}
+	d := &driver{
+		bucket:        params.bucket,
+		rootDirectory: rootDirectory,
+		email:         params.email,
+		privateKey:    params.privateKey,
+		client:        params.client,
+		chunkSize:     params.chunkSize,
+	}
+
+	return &base.Base{
+		StorageDriver: d,
+	}, nil
+}
+
+// Implement the storagedriver.StorageDriver interface
+
+func (d *driver) Name() string {
+	return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+// This should primarily be used for small objects. 
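+//
+// An editorial usage sketch (the path is hypothetical):
+//
+//	contents, err := d.GetContent(ctx, "/example/path")
+//	if err != nil {
+//		// a storagedriver.PathNotFoundError is returned for missing objects
+//	}
+//
+// Like the other GCS calls in this file, the read below is wrapped in
+// retry(), which retries 429 and 5xx googleapi errors with exponential
+// backoff.
+//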
+func (d *driver) GetContent(context context.Context, path string) ([]byte, error) { + gcsContext := d.context(context) + name := d.pathToKey(path) + var rc io.ReadCloser + err := retry(func() error { + var err error + rc, err = storage.NewReader(gcsContext, d.bucket, name) + return err + }) + if err == storage.ErrObjectNotExist { + return nil, storagedriver.PathNotFoundError{Path: path} + } + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (d *driver) PutContent(context context.Context, path string, contents []byte) error { + return retry(func() error { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + return putContentsClose(wc, contents) + }) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (d *driver) Reader(context context.Context, path string, offset int64) (io.ReadCloser, error) { + res, err := getObject(d.client, d.bucket, d.pathToKey(path), offset) + if err != nil { + if res != nil { + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { + res.Body.Close() + obj, err := storageStatObject(d.context(context), d.bucket, d.pathToKey(path)) + if err != nil { + return nil, err + } + if offset == int64(obj.Size) { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + } + return nil, err + } + if res.Header.Get("Content-Type") == uploadSessionContentType { + defer res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + return res.Body, nil +} + +func getObject(client *http.Client, bucket string, name string, offset int64) (*http.Response, error) { + // copied from google.golang.org/cloud/storage#NewReader : + // to set the additional "Range" header + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + if offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) + } + var res *http.Response + err = retry(func() error { + var err error + res, err = client.Do(req) + return err + }) + if err != nil { + return nil, err + } + return res, googleapi.CheckMediaResponse(res) +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(context context.Context, path string, append bool) (storagedriver.FileWriter, error) { + writer := &writer{ + client: d.client, + bucket: d.bucket, + name: d.pathToKey(path), + buffer: make([]byte, d.chunkSize), + } + + if append { + err := writer.init(path) + if err != nil { + return nil, err + } + } + return writer, nil +} + +type writer struct { + client *http.Client + bucket string + name string + size int64 + offset int64 + closed bool + sessionURI string + buffer []byte + buffSize int +} + +// Cancel removes any written content from this FileWriter. 
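+//
+// Editorial note on the FileWriter lifecycle: bytes accumulate via Write,
+// become permanent with Commit, and are discarded with Cancel; Close persists
+// the in-progress session so a later Writer(..., true) can resume it. A
+// typical caller (sketch, with a hypothetical path and payload):
+//
+//	fw, _ := d.Writer(ctx, "/some/path", false)
+//	fw.Write(payload)
+//	fw.Commit()
+//	fw.Close()
+//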
+func (w *writer) Cancel() error {
+	w.closed = true
+	err := storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
+	if err != nil {
+		if status, ok := err.(*googleapi.Error); ok {
+			if status.Code == http.StatusNotFound {
+				err = nil
+			}
+		}
+	}
+	return err
+}
+
+func (w *writer) Close() error {
+	if w.closed {
+		return nil
+	}
+	w.closed = true
+
+	err := w.writeChunk()
+	if err != nil {
+		return err
+	}
+
+	// Copy the remaining bytes from the buffer to the upload session
+	// Normally buffSize will be smaller than minChunkSize. However, in the
+	// unlikely event that the upload session failed to start, this number could be higher.
+	// In this case we can safely clip the remaining bytes to the minChunkSize
+	if w.buffSize > minChunkSize {
+		w.buffSize = minChunkSize
+	}
+
+	// commit the writes by updating the upload session
+	err = retry(func() error {
+		wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
+		wc.ContentType = uploadSessionContentType
+		wc.Metadata = map[string]string{
+			"Session-URI": w.sessionURI,
+			"Offset":      strconv.FormatInt(w.offset, 10),
+		}
+		return putContentsClose(wc, w.buffer[0:w.buffSize])
+	})
+	if err != nil {
+		return err
+	}
+	w.size = w.offset + int64(w.buffSize)
+	w.buffSize = 0
+	return nil
+}
+
+func putContentsClose(wc *storage.Writer, contents []byte) error {
+	size := len(contents)
+	var nn int
+	var err error
+	for nn < size {
+		// assign to the outer err so a failed write is detected after the loop
+		var n int
+		n, err = wc.Write(contents[nn:size])
+		nn += n
+		if err != nil {
+			break
+		}
+	}
+	if err != nil {
+		wc.CloseWithError(err)
+		return err
+	}
+	return wc.Close()
+}
+
+// Commit flushes all content written to this FileWriter and makes it
+// available for future calls to StorageDriver.GetContent and
+// StorageDriver.Reader. 
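+//
+// Editorial note: two cases are handled below. If no resumable session was
+// started (all data still fits in the buffer), the object is written with a
+// single simple upload. Otherwise the remaining buffered bytes are sent to
+// the session via putChunk with the final total size, which finalizes the
+// resumable upload.
+//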
+func (w *writer) Commit() error {
+	if err := w.checkClosed(); err != nil {
+		return err
+	}
+	w.closed = true
+
+	// no session started yet, just perform a simple upload
+	if w.sessionURI == "" {
+		err := retry(func() error {
+			wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
+			wc.ContentType = "application/octet-stream"
+			return putContentsClose(wc, w.buffer[0:w.buffSize])
+		})
+		if err != nil {
+			return err
+		}
+		w.size = w.offset + int64(w.buffSize)
+		w.buffSize = 0
+		return nil
+	}
+	size := w.offset + int64(w.buffSize)
+	var nn int
+	// loop must be performed at least once to ensure the file is committed even when
+	// the buffer is empty
+	for {
+		n, err := putChunk(w.client, w.sessionURI, w.buffer[nn:w.buffSize], w.offset, size)
+		nn += int(n)
+		w.offset += n
+		w.size = w.offset
+		if err != nil {
+			w.buffSize = copy(w.buffer, w.buffer[nn:w.buffSize])
+			return err
+		}
+		if nn == w.buffSize {
+			break
+		}
+	}
+	w.buffSize = 0
+	return nil
+}
+
+func (w *writer) checkClosed() error {
+	if w.closed {
+		return fmt.Errorf("Writer already closed")
+	}
+	return nil
+}
+
+func (w *writer) writeChunk() error {
+	var err error
+	// chunks can be uploaded only in multiples of minChunkSize
+	// chunkSize is a multiple of minChunkSize less than or equal to buffSize
+	chunkSize := w.buffSize - (w.buffSize % minChunkSize)
+	if chunkSize == 0 {
+		return nil
+	}
+	// if there is no sessionURI yet, obtain one by starting the session
+	if w.sessionURI == "" {
+		w.sessionURI, err = startSession(w.client, w.bucket, w.name)
+	}
+	if err != nil {
+		return err
+	}
+	nn, err := putChunk(w.client, w.sessionURI, w.buffer[0:chunkSize], w.offset, -1)
+	w.offset += nn
+	if w.offset > w.size {
+		w.size = w.offset
+	}
+	// shift the remaining bytes to the start of the buffer
+	w.buffSize = copy(w.buffer, w.buffer[int(nn):w.buffSize])
+
+	return err
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+	err := w.checkClosed()
+	if err != nil {
+		return 0, err
+	}
+
+	var nn int
+	for nn < len(p) {
+		n := copy(w.buffer[w.buffSize:], p[nn:])
+		w.buffSize += n
+		if w.buffSize == cap(w.buffer) {
+			err = w.writeChunk()
+			if err != nil {
+				break
+			}
+		}
+		nn += n
+	}
+	return nn, err
+}
+
+// Size returns the number of bytes written to this FileWriter. 
+func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) init(path string) error { + res, err := getObject(w.client, w.bucket, w.name, 0) + if err != nil { + return err + } + defer res.Body.Close() + if res.Header.Get("Content-Type") != uploadSessionContentType { + return storagedriver.PathNotFoundError{Path: path} + } + offset, err := strconv.ParseInt(res.Header.Get("X-Goog-Meta-Offset"), 10, 64) + if err != nil { + return err + } + buffer, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + w.sessionURI = res.Header.Get("X-Goog-Meta-Session-URI") + w.buffSize = copy(w.buffer, buffer) + w.offset = offset + w.size = offset + int64(w.buffSize) + return nil +} + +type request func() error + +func retry(req request) error { + backoff := time.Second + var err error + for i := 0; i < maxTries; i++ { + err = req() + if err == nil { + return nil + } + + status, ok := err.(*googleapi.Error) + if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { + return err + } + + time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) + if i <= 4 { + backoff = backoff * 2 + } + } + return err +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (d *driver) Stat(context context.Context, path string) (storagedriver.FileInfo, error) { + var fi storagedriver.FileInfoFields + //try to get as file + gcsContext := d.context(context) + obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) + if err == nil { + if obj.ContentType == uploadSessionContentType { + return nil, storagedriver.PathNotFoundError{Path: path} + } + fi = storagedriver.FileInfoFields{ + Path: path, + Size: obj.Size, + ModTime: obj.Updated, + IsDir: false, + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + //try to get as folder + dirpath := d.pathToDirKey(path) + + var query *storage.Query + query = &storage.Query{} + query.Prefix = dirpath + query.MaxResults = 1 + + objects, err := storageListObjects(gcsContext, d.bucket, query) + if err != nil { + return nil, err + } + if len(objects.Results) < 1 { + return nil, storagedriver.PathNotFoundError{Path: path} + } + fi = storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + } + obj = objects.Results[0] + if obj.Name == dirpath { + fi.Size = obj.Size + fi.ModTime = obj.Updated + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (d *driver) List(context context.Context, path string) ([]string, error) { + var query *storage.Query + query = &storage.Query{} + query.Delimiter = "/" + query.Prefix = d.pathToDirKey(path) + list := make([]string, 0, 64) + for { + objects, err := storageListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, object := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operations. 
Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted + if object.Deleted.IsZero() && object.ContentType != uploadSessionContentType { + list = append(list, d.keyToPath(object.Name)) + } + } + for _, subpath := range objects.Prefixes { + subpath = d.keyToPath(subpath) + list = append(list, subpath) + } + query = objects.Next + if query == nil { + break + } + } + if path != "/" && len(list) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in Google Cloud Storage. + return nil, storagedriver.PathNotFoundError{Path: path} + } + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +func (d *driver) Move(context context.Context, sourcePath string, destPath string) error { + gcsContext := d.context(context) + _, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + } + return err + } + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) + // if deleting the file fails, log the error, but do not fail; the file was successfully copied, + // and the original should eventually be cleaned when purging the uploads folder. + if err != nil { + logrus.Infof("error deleting file: %v due to %v", sourcePath, err) + } + return nil +} + +// listAll recursively lists all names of objects stored at "prefix" and its subpaths. +func (d *driver) listAll(context context.Context, prefix string) ([]string, error) { + list := make([]string, 0, 64) + query := &storage.Query{} + query.Prefix = prefix + query.Versions = false + for { + objects, err := storageListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, obj := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted + if obj.Deleted.IsZero() { + list = append(list, obj.Name) + } + } + query = objects.Next + if query == nil { + break + } + } + return list, nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(context context.Context, path string) error { + prefix := d.pathToDirKey(path) + gcsContext := d.context(context) + keys, err := d.listAll(gcsContext, prefix) + if err != nil { + return err + } + if len(keys) > 0 { + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + for _, key := range keys { + err := storageDeleteObject(gcsContext, d.bucket, key) + // GCS only guarantees eventual consistency, so listAll might return + // paths that no longer exist. 
If this happens, just ignore any not + // found error + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + if err != nil { + return err + } + } + return nil + } + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + } + } + return err +} + +func storageDeleteObject(context context.Context, bucket string, name string) error { + return retry(func() error { + return storage.DeleteObject(context, bucket, name) + }) +} + +func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { + var obj *storage.Object + err := retry(func() error { + var err error + obj, err = storage.StatObject(context, bucket, name) + return err + }) + return obj, err +} + +func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { + var objs *storage.Objects + err := retry(func() error { + var err error + objs, err = storage.ListObjects(context, bucket, q) + return err + }) + return objs, err +} + +func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { + var obj *storage.Object + err := retry(func() error { + var err error + obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs) + return err + }) + return obj, err +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. +// Returns ErrUnsupportedMethod if this driver has no privateKey +func (d *driver) URLFor(context context.Context, path string, options map[string]interface{}) (string, error) { + if d.privateKey == nil { + return "", storagedriver.ErrUnsupportedMethod{} + } + + name := d.pathToKey(path) + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + opts := &storage.SignedURLOptions{ + GoogleAccessID: d.email, + PrivateKey: d.privateKey, + Method: methodString, + Expires: expiresTime, + } + return storage.SignedURL(d.bucket, name, opts) +} + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + +func startSession(client *http.Client, bucket string, name string) (uri string, err error) { + u := &url.URL{ + Scheme: "https", + Host: "www.googleapis.com", + Path: fmt.Sprintf("/upload/storage/v1/b/%v/o", bucket), + RawQuery: fmt.Sprintf("uploadType=resumable&name=%v", name), + } + err = retry(func() error { + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return err + } + req.Header.Set("X-Upload-Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", "0") + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + uri = 
resp.Header.Get("Location") + return nil + }) + return uri, err +} + +func putChunk(client *http.Client, sessionURI string, chunk []byte, from int64, totalSize int64) (int64, error) { + bytesPut := int64(0) + err := retry(func() error { + req, err := http.NewRequest("PUT", sessionURI, bytes.NewReader(chunk)) + if err != nil { + return err + } + length := int64(len(chunk)) + to := from + length - 1 + size := "*" + if totalSize >= 0 { + size = strconv.FormatInt(totalSize, 10) + } + req.Header.Set("Content-Type", "application/octet-stream") + if from == to+1 { + req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", size)) + } else { + req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", from, to, size)) + } + req.Header.Set("Content-Length", strconv.FormatInt(length, 10)) + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if totalSize < 0 && resp.StatusCode == 308 { + groups := rangeHeader.FindStringSubmatch(resp.Header.Get("Range")) + end, err := strconv.ParseInt(groups[2], 10, 64) + if err != nil { + return err + } + bytesPut = end - from + 1 + return nil + } + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + bytesPut = to - from + 1 + return nil + }) + return bytesPut, err +} + +func (d *driver) context(context context.Context) context.Context { + return cloud.WithContext(context, dummyProjectID, d.client) +} + +func (d *driver) pathToKey(path string) string { + return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/") +} + +func (d *driver) pathToDirKey(path string) string { + return d.pathToKey(path) + "/" +} + +func (d *driver) keyToPath(key string) string { + return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/") +} diff --git a/registry/storage/driver/gcs/gcs_test.go b/registry/storage/driver/gcs/gcs_test.go new file mode 100644 index 00000000..e58216be --- /dev/null +++ b/registry/storage/driver/gcs/gcs_test.go @@ -0,0 +1,311 @@ +// +build include_gcs + +package gcs + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + dcontext "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/cloud/storage" + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
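+//
+// Editorial note: this suite only runs when credentials are supplied and the
+// driver is compiled in; an illustrative invocation:
+//
+//	REGISTRY_STORAGE_GCS_BUCKET=my-bucket \
+//	GOOGLE_APPLICATION_CREDENTIALS=key.json \
+//	go test -tags include_gcs ./registry/storage/driver/gcs
+//
+// Otherwise skipGCS below causes every test to be skipped.
+//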
+func Test(t *testing.T) { check.TestingT(t) } + +var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) +var skipGCS func() string + +func init() { + bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") + credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + + // Skip GCS storage driver tests if environment variable parameters are not provided + skipGCS = func() string { + if bucket == "" || credentials == "" { + return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS" + } + return "" + } + + if skipGCS() != "" { + return + } + + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + var ts oauth2.TokenSource + var email string + var privateKey []byte + + ts, err = google.DefaultTokenSource(dcontext.Background(), storage.ScopeFullControl) + if err != nil { + // Assume that the file contents are within the environment variable since it exists + // but does not contain a valid file path + jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) + if err != nil { + panic(fmt.Sprintf("Error reading JWT config : %s", err)) + } + email = jwtConfig.Email + privateKey = []byte(jwtConfig.PrivateKey) + if len(privateKey) == 0 { + panic("Error reading JWT config : missing private_key property") + } + if email == "" { + panic("Error reading JWT config : missing client_email property") + } + ts = jwtConfig.TokenSource(dcontext.Background()) + } + + gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { + parameters := driverParameters{ + bucket: bucket, + rootDirectory: root, + email: email, + privateKey: privateKey, + client: oauth2.NewClient(dcontext.Background(), ts), + chunkSize: defaultChunkSize, + } + + return New(parameters) + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return gcsDriverConstructor(root) + }, skipGCS) +} + +// Test Committing a FileWriter without having called Write +func TestCommitEmpty(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := dcontext.Background() + + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != 0 { + t.Fatalf("writer.Size: %d != 0", writer.Size()) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != 0 { + t.Fatalf("len(driver.GetContent(..)): %d != 0", len(readContents)) + } +} + +// Test Committing a FileWriter after having written exactly +// defaultChunksize bytes. 
+func TestCommit(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := dcontext.Background() + + contents := make([]byte, defaultChunkSize) + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + _, err = writer.Write(contents) + if err != nil { + t.Fatalf("writer.Write: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != int64(len(contents)) { + t.Fatalf("writer.Size: %d != %d", writer.Size(), len(contents)) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != len(contents) { + t.Fatalf("len(driver.GetContent(..)): %d != %d", len(readContents), len(contents)) + } +} + +func TestRetry(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + assertError := func(expected string, observed error) { + observedMsg := "" + if observed != nil { + observedMsg = observed.Error() + } + if observedMsg != expected { + t.Fatalf("expected %v, observed %v\n", expected, observedMsg) + } + } + + err := retry(func() error { + return &googleapi.Error{ + Code: 503, + Message: "google api error", + } + }) + assertError("googleapi: Error 503: google api error", err) + + err = retry(func() error { + return &googleapi.Error{ + Code: 404, + Message: "google api error", + } + }) + assertError("googleapi: Error 404: google api error", err) + + err = retry(func() error { + return fmt.Errorf("error") + }) + assertError("error", err) +} + +func TestEmptyRootList(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := gcsDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := gcsDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := dcontext.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer func() { + err := rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to remove %v due to %v\n", filename, err) + } + }() + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string 
in path: %q != %q", path, storagedriver.PathRegexp)
+		}
+	}
+}
+
+// TestMoveDirectory checks that moving a directory returns an error.
+func TestMoveDirectory(t *testing.T) {
+	if skipGCS() != "" {
+		t.Skip(skipGCS())
+	}
+
+	validRoot, err := ioutil.TempDir("", "driver-")
+	if err != nil {
+		t.Fatalf("unexpected error creating temporary directory: %v", err)
+	}
+	defer os.Remove(validRoot)
+
+	driver, err := gcsDriverConstructor(validRoot)
+	if err != nil {
+		t.Fatalf("unexpected error creating rooted driver: %v", err)
+	}
+
+	ctx := dcontext.Background()
+	contents := []byte("contents")
+	// Create a regular file.
+	err = driver.PutContent(ctx, "/parent/dir/foo", contents)
+	if err != nil {
+		t.Fatalf("unexpected error creating content: %v", err)
+	}
+	defer func() {
+		err := driver.Delete(ctx, "/parent")
+		if err != nil {
+			t.Fatalf("failed to remove /parent due to %v\n", err)
+		}
+	}()
+
+	err = driver.Move(ctx, "/parent/dir", "/parent/other")
+	if err == nil {
+		t.Fatalf("moving directory /parent/dir to /parent/other should have returned a non-nil error\n")
+	}
+}
diff --git a/registry/storage/driver/inmemory/driver.go b/registry/storage/driver/inmemory/driver.go
new file mode 100644
index 00000000..e18e2933
--- /dev/null
+++ b/registry/storage/driver/inmemory/driver.go
@@ -0,0 +1,318 @@
+package inmemory
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+	"time"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/base"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+)
+
+const driverName = "inmemory"
+
+func init() {
+	factory.Register(driverName, &inMemoryDriverFactory{})
+}
+
+// inMemoryDriverFactory implements the factory.StorageDriverFactory interface.
+type inMemoryDriverFactory struct{}
+
+func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return New(), nil
+}
+
+type driver struct {
+	root  *dir
+	mutex sync.RWMutex
+}
+
+// baseEmbed allows us to hide the Base embed.
+type baseEmbed struct {
+	base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by a local map.
+// Intended solely for example and testing purposes.
+type Driver struct {
+	baseEmbed // embedded, hidden base driver.
+}
+
+var _ storagedriver.StorageDriver = &Driver{}
+
+// New constructs a new Driver.
+func New() *Driver {
+	return &Driver{
+		baseEmbed: baseEmbed{
+			Base: base.Base{
+				StorageDriver: &driver{
+					root: &dir{
+						common: common{
+							p:   "/",
+							mod: time.Now(),
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// Implement the storagedriver.StorageDriver interface.
+
+func (d *driver) Name() string {
+	return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
+	d.mutex.RLock()
+	defer d.mutex.RUnlock()
+
+	rc, err := d.Reader(ctx, path, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer rc.Close()
+
+	return ioutil.ReadAll(rc)
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error {
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+
+	normalized := normalize(p)
+
+	f, err := d.root.mkfile(normalized)
+	if err != nil {
+		// TODO(stevvooe): Again, we need to clarify when this is not a
+		// directory in StorageDriver API. 
+ return fmt.Errorf("not a file") + } + + f.truncate() + f.WriteAt(contents, 0) + + return nil +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + normalized := normalize(path) + found := d.root.find(normalized) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if found.isdir() { + return nil, fmt.Errorf("%q is a directory", path) + } + + return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + f, err := d.root.mkfile(normalized) + if err != nil { + return nil, fmt.Errorf("not a file") + } + + if !append { + f.truncate() + } + + return d.newWriter(f), nil +} + +// Stat returns info about the provided path. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + found := d.root.find(normalized) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + fi := storagedriver.FileInfoFields{ + Path: path, + IsDir: found.isdir(), + ModTime: found.modtime(), + } + + if !fi.IsDir { + fi.Size = int64(len(found.(*file).data)) + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + + found := d.root.find(normalized) + + if !found.isdir() { + return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... + } + + entries, err := found.(*dir).list(normalized) + + if err != nil { + switch err { + case errNotExists: + return nil, storagedriver.PathNotFoundError{Path: path} + case errIsNotDir: + return nil, fmt.Errorf("not a directory") + default: + return nil, err + } + } + + return entries, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) + + err := d.root.move(normalizedSrc, normalizedDst) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: destPath} + default: + return err + } +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + err := d.root.delete(normalized) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: path} + default: + return err + } +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. 
+// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod{} +} + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + +type writer struct { + d *driver + f *file + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(f *file) storagedriver.FileWriter { + return &writer{ + d: d, + f: f, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.f.WriteAt(p, int64(len(w.f.data))) +} + +func (w *writer) Size() int64 { + w.d.mutex.RLock() + defer w.d.mutex.RUnlock() + + return int64(len(w.f.data)) +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.d.root.delete(w.f.path()) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return nil +} diff --git a/registry/storage/driver/inmemory/driver_test.go b/registry/storage/driver/inmemory/driver_test.go new file mode 100644 index 00000000..dbc1916f --- /dev/null +++ b/registry/storage/driver/inmemory/driver_test.go @@ -0,0 +1,19 @@ +package inmemory + +import ( + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +func init() { + inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(), nil + } + testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) +} diff --git a/registry/storage/driver/inmemory/mfs.go b/registry/storage/driver/inmemory/mfs.go new file mode 100644 index 00000000..cdefacfd --- /dev/null +++ b/registry/storage/driver/inmemory/mfs.go @@ -0,0 +1,338 @@ +package inmemory + +import ( + "fmt" + "io" + "path" + "sort" + "strings" + "time" +) + +var ( + errExists = fmt.Errorf("exists") + errNotExists = fmt.Errorf("notexists") + errIsNotDir = fmt.Errorf("notdir") + errIsDir = fmt.Errorf("isdir") +) + +type node interface { + name() string + path() string + isdir() bool + modtime() time.Time +} + +// dir is the central type for the memory-based storagedriver. All operations +// are dispatched from a root dir. +type dir struct { + common + + // TODO(stevvooe): Use sorted slice + search. + children map[string]node +} + +var _ node = &dir{} + +func (d *dir) isdir() bool { + return true +} + +// add places the node n into dir d. 
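+//
+// Editorial sketch of the tree these helpers maintain: storing "/a/b/c"
+// yields
+//
+//	/        (dir)
+//	  a      (dir)
+//	    b    (dir)
+//	      c  (file)
+//
+// where each dir's children map is keyed by base name.
+//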
+func (d *dir) add(n node) {
+	if d.children == nil {
+		d.children = make(map[string]node)
+	}
+
+	d.children[n.name()] = n
+	d.mod = time.Now()
+}
+
+// find searches for the node at path q in dir. If the node is found, it is
+// returned and the returned (node).path() will match q. If the node is not
+// found, the closest existing parent node is returned instead.
+func (d *dir) find(q string) node {
+	q = strings.Trim(q, "/")
+	i := strings.Index(q, "/")
+
+	if q == "" {
+		return d
+	}
+
+	if i == 0 {
+		panic("shouldn't happen, no root paths")
+	}
+
+	var component string
+	if i < 0 {
+		// No more path components
+		component = q
+	} else {
+		component = q[:i]
+	}
+
+	child, ok := d.children[component]
+	if !ok {
+		// Node was not found. Return the current node as the closest parent.
+		return d
+	}
+
+	if child.isdir() {
+		// traverse down!
+		q = q[i+1:]
+		return child.(*dir).find(q)
+	}
+
+	return child
+}
+
+func (d *dir) list(p string) ([]string, error) {
+	n := d.find(p)
+
+	if n.path() != p {
+		return nil, errNotExists
+	}
+
+	if !n.isdir() {
+		return nil, errIsNotDir
+	}
+
+	var children []string
+	for _, child := range n.(*dir).children {
+		children = append(children, child.path())
+	}
+
+	sort.Strings(children)
+	return children, nil
+}
+
+// mkfile creates the file at path p or returns the existing one. It returns
+// an error if a directory already exists at p. Essentially, this is open or
+// create.
+func (d *dir) mkfile(p string) (*file, error) {
+	n := d.find(p)
+	if n.path() == p {
+		if n.isdir() {
+			return nil, errIsDir
+		}
+
+		return n.(*file), nil
+	}
+
+	dirpath, filename := path.Split(p)
+	// Make any non-existent directories
+	n, err := d.mkdirs(dirpath)
+	if err != nil {
+		return nil, err
+	}
+
+	dd := n.(*dir)
+	n = &file{
+		common: common{
+			p:   path.Join(dd.path(), filename),
+			mod: time.Now(),
+		},
+	}
+
+	dd.add(n)
+	return n.(*file), nil
+}
+
+// mkdirs creates any missing directory entries along p and returns the
+// deepest directory.
+func (d *dir) mkdirs(p string) (*dir, error) {
+	p = normalize(p)
+
+	n := d.find(p)
+
+	if !n.isdir() {
+		// A file already occupies part of the requested path.
+		return nil, errIsNotDir
+	}
+
+	if n.path() == p {
+		return n.(*dir), nil
+	}
+
+	dd := n.(*dir)
+
+	relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/")
+
+	if relative == "" {
+		return dd, nil
+	}
+
+	components := strings.Split(relative, "/")
+	for _, component := range components {
+		d, err := dd.mkdir(component)
+
+		if err != nil {
+			// This should never happen: each newly created directory starts
+			// empty, so the next component cannot already exist in it.
+			return nil, err
+		}
+		dd = d
+	}
+
+	return dd, nil
+}
+
+// mkdir creates a child directory under d with the given name.
+func (d *dir) mkdir(name string) (*dir, error) {
+	if name == "" {
+		return nil, fmt.Errorf("invalid dirname")
+	}
+
+	_, ok := d.children[name]
+	if ok {
+		return nil, errExists
+	}
+
+	child := &dir{
+		common: common{
+			p:   path.Join(d.path(), name),
+			mod: time.Now(),
+		},
+	}
+	d.add(child)
+	d.mod = time.Now()
+
+	return child, nil
+}
+
+func (d *dir) move(src, dst string) error {
+	dstDirname, _ := path.Split(dst)
+
+	dp, err := d.mkdirs(dstDirname)
+	if err != nil {
+		return err
+	}
+
+	srcDirname, srcFilename := path.Split(src)
+	sp := d.find(srcDirname)
+
+	if normalize(srcDirname) != normalize(sp.path()) {
+		return errNotExists
+	}
+
+	spd, ok := sp.(*dir)
+	if !ok {
+		return errIsNotDir // paranoid.
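+		// (reachable when a file occupies the source's parent path, e.g. the
+		// source is "/a/b" but "/a" was created as a file rather than a dir)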
+ } + + s, ok := spd.children[srcFilename] + if !ok { + return errNotExists + } + + delete(spd.children, srcFilename) + + switch n := s.(type) { + case *dir: + n.p = dst + case *file: + n.p = dst + } + + dp.add(s) + + return nil +} + +func (d *dir) delete(p string) error { + dirname, filename := path.Split(p) + parent := d.find(dirname) + + if normalize(dirname) != normalize(parent.path()) { + return errNotExists + } + + if _, ok := parent.(*dir).children[filename]; !ok { + return errNotExists + } + + delete(parent.(*dir).children, filename) + return nil +} + +// dump outputs a primitive directory structure to stdout. +func (d *dir) dump(indent string) { + fmt.Println(indent, d.name()+"/") + + for _, child := range d.children { + if child.isdir() { + child.(*dir).dump(indent + "\t") + } else { + fmt.Println(indent, child.name()) + } + + } +} + +func (d *dir) String() string { + return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) +} + +// file stores actual data in the fs tree. It acts like an open, seekable file +// where operations are conducted through ReadAt and WriteAt. Use it with +// SectionReader for the best effect. +type file struct { + common + data []byte +} + +var _ node = &file{} + +func (f *file) isdir() bool { + return false +} + +func (f *file) truncate() { + f.data = f.data[:0] +} + +func (f *file) sectionReader(offset int64) io.Reader { + return io.NewSectionReader(f, offset, int64(len(f.data))-offset) +} + +func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { + return copy(p, f.data[offset:]), nil +} + +func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { + off := int(offset) + if cap(f.data) < off+len(p) { + data := make([]byte, len(f.data), off+len(p)) + copy(data, f.data) + f.data = data + } + + f.mod = time.Now() + f.data = f.data[:off+len(p)] + + return copy(f.data[off:off+len(p)], p), nil +} + +func (f *file) String() string { + return fmt.Sprintf("&file{path: %q}", f.p) +} + +// common provides shared fields and methods for node implementations. +type common struct { + p string + mod time.Time +} + +func (c *common) name() string { + _, name := path.Split(c.p) + return name +} + +func (c *common) path() string { + return c.p +} + +func (c *common) modtime() time.Time { + return c.mod +} + +func normalize(p string) string { + return "/" + strings.Trim(p, "/") +} diff --git a/registry/storage/driver/middleware/cloudfront/middleware.go b/registry/storage/driver/middleware/cloudfront/middleware.go new file mode 100644 index 00000000..5dc3ee41 --- /dev/null +++ b/registry/storage/driver/middleware/cloudfront/middleware.go @@ -0,0 +1,204 @@ +// Package middleware - cloudfront wrapper for storage libs +// N.B. currently only works with S3, not arbitrary sites +// +package middleware + +import ( + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/service/cloudfront/sign" + dcontext "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/middleware" +) + +// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that +// constructs temporary signed CloudFront URLs from the storagedriver layer URL, +// then issues HTTP Temporary Redirects to this CloudFront content URL. 
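+//
+// A registry configuration enabling this middleware might look as follows; a
+// sketch only, with illustrative values, but the keys mirror the options
+// parsed in newCloudFrontStorageMiddleware below:
+//
+//	middleware:
+//	  storage:
+//	    - name: cloudfront
+//	      options:
+//	        baseurl: https://d111111abcdef8.cloudfront.net/
+//	        privatekey: /path/to/pk-cloudfront.pem
+//	        keypairid: MYKEYPAIRID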
+type cloudFrontStorageMiddleware struct {
+	storagedriver.StorageDriver
+	awsIPs    *awsIPs
+	urlSigner *sign.URLSigner
+	baseURL   string
+	duration  time.Duration
+}
+
+var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{}
+
+// newCloudFrontStorageMiddleware constructs and returns a new CloudFront
+// StorageDriver middleware implementation.
+//
+// Required options: baseurl, privatekey, keypairid
+//
+// Optional options: ipfilteredby, awsregion
+// ipfilteredby: one of "none" (do not filter any IP; the default), "aws"
+// (any AWS IP goes to S3 directly) or "awsregion" (only IPs from the regions
+// listed in the awsregion option go to S3 directly).
+// awsregion: a comma-separated string of AWS regions.
+func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
+	// parse baseurl
+	base, ok := options["baseurl"]
+	if !ok {
+		return nil, fmt.Errorf("no baseurl provided")
+	}
+	baseURL, ok := base.(string)
+	if !ok {
+		return nil, fmt.Errorf("baseurl must be a string")
+	}
+	if !strings.Contains(baseURL, "://") {
+		baseURL = "https://" + baseURL
+	}
+	if !strings.HasSuffix(baseURL, "/") {
+		baseURL += "/"
+	}
+	if _, err := url.Parse(baseURL); err != nil {
+		return nil, fmt.Errorf("invalid baseurl: %v", err)
+	}
+
+	// parse privatekey to get pkPath
+	pk, ok := options["privatekey"]
+	if !ok {
+		return nil, fmt.Errorf("no privatekey provided")
+	}
+	pkPath, ok := pk.(string)
+	if !ok {
+		return nil, fmt.Errorf("privatekey must be a string")
+	}
+
+	// parse keypairid
+	kpid, ok := options["keypairid"]
+	if !ok {
+		return nil, fmt.Errorf("no keypairid provided")
+	}
+	keypairID, ok := kpid.(string)
+	if !ok {
+		return nil, fmt.Errorf("keypairid must be a string")
+	}
+
+	// get urlSigner from the file specified in pkPath
+	pkBytes, err := ioutil.ReadFile(pkPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read privatekey file: %s", err)
+	}
+
+	block, _ := pem.Decode([]byte(pkBytes))
+	if block == nil {
+		return nil, fmt.Errorf("failed to decode private key as an rsa private key")
+	}
+	privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+	urlSigner := sign.NewURLSigner(keypairID, privateKey)
+
+	// parse duration
+	duration := 20 * time.Minute
+	if d, ok := options["duration"]; ok {
+		switch d := d.(type) {
+		case time.Duration:
+			duration = d
+		case string:
+			dur, err := time.ParseDuration(d)
+			if err != nil {
+				return nil, fmt.Errorf("invalid duration: %s", err)
+			}
+			duration = dur
+		}
+	}
+
+	// parse updatefrenquency (the option key preserves this spelling)
+	updateFrequency := defaultUpdateFrequency
+	if u, ok := options["updatefrenquency"]; ok {
+		switch u := u.(type) {
+		case time.Duration:
+			updateFrequency = u
+		case string:
+			updateFreq, err := time.ParseDuration(u)
+			if err != nil {
+				return nil, fmt.Errorf("invalid updatefrenquency: %s", err)
+			}
+			updateFrequency = updateFreq
+		}
+	}
+
+	// parse iprangesurl
+	ipRangesURL := defaultIPRangesURL
+	if i, ok := options["iprangesurl"]; ok {
+		if iprangeurl, ok := i.(string); ok {
+			ipRangesURL = iprangeurl
+		} else {
+			return nil, fmt.Errorf("iprangesurl must be a string")
+		}
+	}
+
+	// parse ipfilteredby
+	var awsIPs *awsIPs
+	if ipFilteredBy, ok := options["ipfilteredby"].(string); ok {
+		switch strings.ToLower(strings.TrimSpace(ipFilteredBy)) {
+		case "", "none":
+			awsIPs = nil
+		case "aws":
+			awsIPs = newAWSIPs(ipRangesURL, updateFrequency, nil)
+		case "awsregion":
+			var awsRegion []string
+			if regions, ok := options["awsregion"].(string); ok {
+				for _, awsRegions := range strings.Split(regions, ",") {
+					awsRegion = append(awsRegion, strings.ToLower(strings.TrimSpace(awsRegions)))
+				}
+				awsIPs = newAWSIPs(ipRangesURL, updateFrequency, awsRegion)
+			} else {
+				return nil, fmt.Errorf("awsregion must be a comma-separated string of valid aws regions")
+			}
+		default:
+			return nil, fmt.Errorf("ipfilteredby only allows one of the following values: none|aws|awsregion")
+		}
+	} else {
+		return nil, fmt.Errorf("ipfilteredby must be a string with one of the following values: none|aws|awsregion")
+	}
+
+	return &cloudFrontStorageMiddleware{
+		StorageDriver: storageDriver,
+		urlSigner:     urlSigner,
+		baseURL:       baseURL,
+		duration:      duration,
+		awsIPs:        awsIPs,
+	}, nil
+}
+
+// S3BucketKeyer is any type that is capable of returning the S3 bucket key
+// which should be cached by AWS CloudFront.
+type S3BucketKeyer interface {
+	S3BucketKey(path string) string
+}
+
+// URLFor attempts to find a url which may be used to retrieve the file at the given path.
+// Returns an error if the file cannot be found.
+func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	// TODO(endophage): currently only supports S3
+	keyer, ok := lh.StorageDriver.(S3BucketKeyer)
+	if !ok {
+		dcontext.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver")
+		return lh.StorageDriver.URLFor(ctx, path, options)
+	}
+
+	if eligibleForS3(ctx, lh.awsIPs) {
+		return lh.StorageDriver.URLFor(ctx, path, options)
+	}
+
+	// Get signed cloudfront url.
+	cfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration))
+	if err != nil {
+		return "", err
+	}
+	return cfURL, nil
+}
+
+// init registers the cloudfront storage middleware backend.
+func init() {
+	storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware))
+}
diff --git a/registry/storage/driver/middleware/cloudfront/s3filter.go b/registry/storage/driver/middleware/cloudfront/s3filter.go
new file mode 100644
index 00000000..c8c7f570
--- /dev/null
+++ b/registry/storage/driver/middleware/cloudfront/s3filter.go
@@ -0,0 +1,223 @@
+package middleware
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	dcontext "github.com/docker/distribution/context"
+)
+
+const (
+	// defaultIPRangesURL is the URL of the published definition of AWS IPs
+	defaultIPRangesURL = "https://ip-ranges.amazonaws.com/ip-ranges.json"
+	// defaultUpdateFrequency is how frequently the AWS IPs are refreshed
+	defaultUpdateFrequency = time.Hour * 12
+)
+
+// newAWSIPs returns a new awsIPs object.
+// If awsRegion is nil, it accepts any region; otherwise it only allows the
+// regions specified.
+func newAWSIPs(host string, updateFrequency time.Duration, awsRegion []string) *awsIPs {
+	ips := &awsIPs{
+		host:            host,
+		updateFrequency: updateFrequency,
+		awsRegion:       awsRegion,
+		updaterStopChan: make(chan bool),
+	}
+	if err := ips.tryUpdate(); err != nil {
+		dcontext.GetLogger(context.Background()).WithError(err).Warn("failed to update AWS IP")
+	}
+	go ips.updater()
+	return ips
+}
+
+// awsIPs tracks a list of AWS IP networks, filtered by awsRegion
+type awsIPs struct {
+	host            string
+	updateFrequency time.Duration
+	ipv4            []net.IPNet
+	ipv6            []net.IPNet
+	mutex           sync.RWMutex
+	awsRegion       []string
+	updaterStopChan chan bool
+	initialized     bool
+}
+
+type awsIPResponse struct {
+	Prefixes   []prefixEntry `json:"prefixes"`
+	V6Prefixes []prefixEntry `json:"ipv6_prefixes"`
+}
+
+type prefixEntry struct {
+	IPV4Prefix string `json:"ip_prefix"`
+	IPV6Prefix string `json:"ipv6_prefix"`
+	Region     string `json:"region"`
+	Service    string `json:"service"`
+}
+
+func fetchAWSIPs(url string) (awsIPResponse, error) {
+	var response awsIPResponse
+	resp, err := http.Get(url)
+	if err != nil {
+		return response, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		body, _ := ioutil.ReadAll(resp.Body)
+		return response, fmt.Errorf("failed to fetch network data. response = %s", body)
+	}
+	decoder := json.NewDecoder(resp.Body)
+	err = decoder.Decode(&response)
+	if err != nil {
+		return response, err
+	}
+	return response, nil
+}
+
+// tryUpdate attempts to download a fresh set of ip addresses.
+// tryUpdate must remain safe to call concurrently with contains.
+func (s *awsIPs) tryUpdate() error {
+	response, err := fetchAWSIPs(s.host)
+	if err != nil {
+		return err
+	}
+
+	var ipv4 []net.IPNet
+	var ipv6 []net.IPNet
+
+	processAddress := func(output *[]net.IPNet, prefix string, region string) {
+		regionAllowed := false
+		if len(s.awsRegion) > 0 {
+			for _, ar := range s.awsRegion {
+				if strings.ToLower(region) == ar {
+					regionAllowed = true
+					break
+				}
+			}
+		} else {
+			regionAllowed = true
+		}
+
+		_, network, err := net.ParseCIDR(prefix)
+		if err != nil {
+			dcontext.GetLoggerWithFields(dcontext.Background(), map[interface{}]interface{}{
+				"cidr": prefix,
+			}).Error("unparseable cidr")
+			return
+		}
+		if regionAllowed {
+			*output = append(*output, *network)
+		}
+	}
+
+	for _, prefix := range response.Prefixes {
+		processAddress(&ipv4, prefix.IPV4Prefix, prefix.Region)
+	}
+	for _, prefix := range response.V6Prefixes {
+		processAddress(&ipv6, prefix.IPV6Prefix, prefix.Region)
+	}
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	// Update each attr of awsips atomically.
+	s.ipv4 = ipv4
+	s.ipv6 = ipv6
+	s.initialized = true
+	return nil
+}
+
+// updater is meant to be run in a background goroutine.
+// It periodically refreshes the ips from aws.
+func (s *awsIPs) updater() {
+	defer close(s.updaterStopChan)
+	for {
+		time.Sleep(s.updateFrequency)
+		select {
+		case <-s.updaterStopChan:
+			dcontext.GetLogger(context.Background()).Info("aws ip updater received stop signal")
+			return
+		default:
+			err := s.tryUpdate()
+			if err != nil {
+				dcontext.GetLogger(context.Background()).WithError(err).Error("failed to update AWS IP ranges")
+			}
+		}
+	}
+}
+
+// getCandidateNetworks returns either the ipv4 or ipv6 networks that were
+// last read from aws. The networks returned have the same type as the ip
+// address provided.
+func (s *awsIPs) getCandidateNetworks(ip net.IP) []net.IPNet { + s.mutex.RLock() + defer s.mutex.RUnlock() + if ip.To4() != nil { + return s.ipv4 + } else if ip.To16() != nil { + return s.ipv6 + } else { + dcontext.GetLoggerWithFields(dcontext.Background(), map[interface{}]interface{}{ + "ip": ip, + }).Error("unknown ip address format") + // assume mismatch, pass through cloudfront + return nil + } +} + +// Contains determines whether the host is within aws. +func (s *awsIPs) contains(ip net.IP) bool { + networks := s.getCandidateNetworks(ip) + for _, network := range networks { + if network.Contains(ip) { + return true + } + } + return false +} + +// parseIPFromRequest attempts to extract the ip address of the +// client that made the request +func parseIPFromRequest(ctx context.Context) (net.IP, error) { + request, err := dcontext.GetRequest(ctx) + if err != nil { + return nil, err + } + ipStr := dcontext.RemoteIP(request) + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("invalid ip address from requester: %s", ipStr) + } + + return ip, nil +} + +// eligibleForS3 checks if a request is eligible for using S3 directly +// Return true only when the IP belongs to a specific aws region and user-agent is docker +func eligibleForS3(ctx context.Context, awsIPs *awsIPs) bool { + if awsIPs != nil && awsIPs.initialized { + if addr, err := parseIPFromRequest(ctx); err == nil { + request, err := dcontext.GetRequest(ctx) + if err != nil { + dcontext.GetLogger(ctx).Warnf("the CloudFront middleware cannot parse the request: %s", err) + } else { + loggerField := map[interface{}]interface{}{ + "user-client": request.UserAgent(), + "ip": dcontext.RemoteIP(request), + } + if awsIPs.contains(addr) { + dcontext.GetLoggerWithFields(ctx, loggerField).Info("request from the allowed AWS region, skipping CloudFront") + return true + } + dcontext.GetLoggerWithFields(ctx, loggerField).Warn("request not from the allowed AWS region, fallback to CloudFront") + } + } else { + dcontext.GetLogger(ctx).WithError(err).Warn("failed to parse ip address from context, fallback to CloudFront") + } + } + return false +} diff --git a/registry/storage/driver/middleware/cloudfront/s3filter_test.go b/registry/storage/driver/middleware/cloudfront/s3filter_test.go new file mode 100644 index 00000000..6aca3abc --- /dev/null +++ b/registry/storage/driver/middleware/cloudfront/s3filter_test.go @@ -0,0 +1,401 @@ +package middleware + +import ( + "context" + "crypto/rand" + "encoding/json" + "fmt" + dcontext "github.com/docker/distribution/context" + "net" + "net/http" + "net/http/httptest" + "testing" + "time" + + "reflect" // used as a replacement for testify +) + +// Rather than pull in all of testify +func assertEqual(t *testing.T, x, y interface{}) { + if !reflect.DeepEqual(x, y) { + t.Errorf("%s: Not equal! Expected='%v', Actual='%v'\n", t.Name(), x, y) + t.FailNow() + } +} + +type mockIPRangeHandler struct { + data awsIPResponse +} + +func (m mockIPRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + bytes, err := json.Marshal(m.data) + if err != nil { + w.WriteHeader(500) + return + } + w.Write(bytes) + +} + +func newTestHandler(data awsIPResponse) *httptest.Server { + return httptest.NewServer(mockIPRangeHandler{ + data: data, + }) +} + +func serverIPRanges(server *httptest.Server) string { + return fmt.Sprintf("%s/", server.URL) +} + +func setupTest(data awsIPResponse) *httptest.Server { + // This is a basic schema which only claims the exact ip + // is in aws. 
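+	// The mock handler serves a document shaped like the real
+	// https://ip-ranges.amazonaws.com/ip-ranges.json, for example
+	// (illustrative values):
+	//
+	//	{"prefixes": [{"ip_prefix": "10.0.0.0/8", "region": "us-east-1", "service": "S3"}]}
+	//
+	// so newAWSIPs fetches and parses it exactly as it would the real endpoint.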
+ server := newTestHandler(data) + return server +} + +func TestS3TryUpdate(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + {IPV4Prefix: "123.231.123.231/32"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + + assertEqual(t, 1, len(ips.ipv4)) + assertEqual(t, 0, len(ips.ipv6)) + +} + +func TestMatchIPV6(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + V6Prefixes: []prefixEntry{ + {IPV6Prefix: "ff00::/16"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("ff00::"))) + assertEqual(t, 1, len(ips.ipv6)) + assertEqual(t, 0, len(ips.ipv4)) +} + +func TestMatchIPV4(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + {IPV4Prefix: "192.168.0.0/24"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestMatchIPV4_2(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + { + IPV4Prefix: "192.168.0.0/24", + Region: "us-east-1", + }, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestMatchIPV4WithRegionMatched(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + { + IPV4Prefix: "192.168.0.0/24", + Region: "us-east-1", + }, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, []string{"us-east-1"}) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestMatchIPV4WithRegionMatch_2(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + { + IPV4Prefix: "192.168.0.0/24", + Region: "us-east-1", + }, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, []string{"us-west-2", "us-east-1"}) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestMatchIPV4WithRegionNotMatched(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + { + IPV4Prefix: "192.168.0.0/24", + Region: "us-east-1", + }, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, []string{"us-west-2"}) + ips.tryUpdate() + assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestInvalidData(t *testing.T) { + t.Parallel() + // Invalid entries from aws should be ignored. 
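+	// "9000" fails net.ParseCIDR inside tryUpdate's processAddress, so only
+	// the well-formed 192.168.0.0/24 entry should survive into ips.ipv4.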
+ server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + {IPV4Prefix: "9000"}, + {IPV4Prefix: "192.168.0.0/24"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + ips.tryUpdate() + assertEqual(t, 1, len(ips.ipv4)) +} + +func TestInvalidNetworkType(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + {IPV4Prefix: "192.168.0.0/24"}, + }, + V6Prefixes: []prefixEntry{ + {IPV6Prefix: "ff00::/8"}, + {IPV6Prefix: "fe00::/8"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + assertEqual(t, 0, len(ips.getCandidateNetworks(make([]byte, 17)))) // 17 bytes does not correspond to any net type + assertEqual(t, 1, len(ips.getCandidateNetworks(make([]byte, 4)))) // netv4 networks + assertEqual(t, 2, len(ips.getCandidateNetworks(make([]byte, 16)))) // netv6 networks +} + +func TestParsing(t *testing.T) { + var data = `{ + "prefixes": [{ + "ip_prefix": "192.168.0.0", + "region": "someregion", + "service": "s3"}], + "ipv6_prefixes": [{ + "ipv6_prefix": "2001:4860:4860::8888", + "region": "anotherregion", + "service": "ec2"}] + }` + rawMockHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(data)) }) + t.Parallel() + server := httptest.NewServer(rawMockHandler) + defer server.Close() + schema, err := fetchAWSIPs(server.URL) + + assertEqual(t, nil, err) + assertEqual(t, 1, len(schema.Prefixes)) + assertEqual(t, prefixEntry{ + IPV4Prefix: "192.168.0.0", + Region: "someregion", + Service: "s3", + }, schema.Prefixes[0]) + assertEqual(t, 1, len(schema.V6Prefixes)) + assertEqual(t, prefixEntry{ + IPV6Prefix: "2001:4860:4860::8888", + Region: "anotherregion", + Service: "ec2", + }, schema.V6Prefixes[0]) +} + +func TestUpdateCalledRegularly(t *testing.T) { + t.Parallel() + + updateCount := 0 + server := httptest.NewServer(http.HandlerFunc( + func(rw http.ResponseWriter, req *http.Request) { + updateCount++ + rw.Write([]byte("ok")) + })) + defer server.Close() + newAWSIPs(fmt.Sprintf("%s/", server.URL), time.Second, nil) + time.Sleep(time.Second*4 + time.Millisecond*500) + if updateCount < 4 { + t.Errorf("Update should have been called at least 4 times, actual=%d", updateCount) + } +} + +func TestEligibleForS3(t *testing.T) { + awsIPs := &awsIPs{ + ipv4: []net.IPNet{{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.IPv4Mask(255, 255, 255, 0), + }}, + initialized: true, + } + empty := context.TODO() + makeContext := func(ip string) context.Context { + req := &http.Request{ + RemoteAddr: ip, + } + + return dcontext.WithRequest(empty, req) + } + + cases := []struct { + Context context.Context + Expected bool + }{ + {Context: empty, Expected: false}, + {Context: makeContext("192.168.1.2"), Expected: true}, + {Context: makeContext("192.168.0.2"), Expected: false}, + } + + for _, testCase := range cases { + name := fmt.Sprintf("Client IP = %v", + testCase.Context.Value("http.request.ip")) + t.Run(name, func(t *testing.T) { + assertEqual(t, testCase.Expected, eligibleForS3(testCase.Context, awsIPs)) + }) + } +} + +func TestEligibleForS3WithAWSIPNotInitialized(t *testing.T) { + awsIPs := &awsIPs{ + ipv4: []net.IPNet{{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.IPv4Mask(255, 255, 255, 0), + }}, + initialized: false, + } + empty := context.TODO() + makeContext := func(ip string) context.Context { + req := &http.Request{ + RemoteAddr: ip, + } + + return dcontext.WithRequest(empty, req) + } + + cases := []struct { + Context context.Context + 
Expected bool + }{ + {Context: empty, Expected: false}, + {Context: makeContext("192.168.1.2"), Expected: false}, + {Context: makeContext("192.168.0.2"), Expected: false}, + } + + for _, testCase := range cases { + name := fmt.Sprintf("Client IP = %v", + testCase.Context.Value("http.request.ip")) + t.Run(name, func(t *testing.T) { + assertEqual(t, testCase.Expected, eligibleForS3(testCase.Context, awsIPs)) + }) + } +} + +// populate ips with a number of different ipv4 and ipv6 networks, for the purposes +// of benchmarking contains() performance. +func populateRandomNetworks(b *testing.B, ips *awsIPs, ipv4Count, ipv6Count int) { + generateNetworks := func(dest *[]net.IPNet, bytes int, count int) { + for i := 0; i < count; i++ { + ip := make([]byte, bytes) + _, err := rand.Read(ip) + if err != nil { + b.Fatalf("failed to generate network for test : %s", err.Error()) + } + mask := make([]byte, bytes) + for i := 0; i < bytes; i++ { + mask[i] = 0xff + } + *dest = append(*dest, net.IPNet{ + IP: ip, + Mask: mask, + }) + } + } + + generateNetworks(&ips.ipv4, 4, ipv4Count) + generateNetworks(&ips.ipv6, 16, ipv6Count) +} + +func BenchmarkContainsRandom(b *testing.B) { + // Generate a random network configuration, of size comparable to + // aws official networks list + // curl -s https://ip-ranges.amazonaws.com/ip-ranges.json | jq '.prefixes | length' + // 941 + numNetworksPerType := 1000 // keep in sync with the above + // intentionally skip constructor when creating awsIPs, to avoid updater routine. + // This benchmark is only concerned with contains() performance. + awsIPs := awsIPs{} + populateRandomNetworks(b, &awsIPs, numNetworksPerType, numNetworksPerType) + + ipv4 := make([][]byte, b.N) + ipv6 := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + ipv4[i] = make([]byte, 4) + ipv6[i] = make([]byte, 16) + rand.Read(ipv4[i]) + rand.Read(ipv6[i]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + awsIPs.contains(ipv4[i]) + awsIPs.contains(ipv6[i]) + } +} + +func BenchmarkContainsProd(b *testing.B) { + awsIPs := newAWSIPs(defaultIPRangesURL, defaultUpdateFrequency, nil) + ipv4 := make([][]byte, b.N) + ipv6 := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + ipv4[i] = make([]byte, 4) + ipv6[i] = make([]byte, 16) + rand.Read(ipv4[i]) + rand.Read(ipv6[i]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + awsIPs.contains(ipv4[i]) + awsIPs.contains(ipv6[i]) + } +} diff --git a/registry/storage/driver/middleware/redirect/middleware.go b/registry/storage/driver/middleware/redirect/middleware.go new file mode 100644 index 00000000..8f63674c --- /dev/null +++ b/registry/storage/driver/middleware/redirect/middleware.go @@ -0,0 +1,50 @@ +package middleware + +import ( + "context" + "fmt" + "net/url" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" +) + +type redirectStorageMiddleware struct { + storagedriver.StorageDriver + scheme string + host string +} + +var _ storagedriver.StorageDriver = &redirectStorageMiddleware{} + +func newRedirectStorageMiddleware(sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { + o, ok := options["baseurl"] + if !ok { + return nil, fmt.Errorf("no baseurl provided") + } + b, ok := o.(string) + if !ok { + return nil, fmt.Errorf("baseurl must be a string") + } + u, err := url.Parse(b) + if err != nil { + return nil, fmt.Errorf("unable to parse redirect baseurl: %s", b) + } + if u.Scheme == "" { + return nil, 
fmt.Errorf("no scheme specified for redirect baseurl") + } + if u.Host == "" { + return nil, fmt.Errorf("no host specified for redirect baseurl") + } + + return &redirectStorageMiddleware{StorageDriver: sd, scheme: u.Scheme, host: u.Host}, nil +} + +func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + u := &url.URL{Scheme: r.scheme, Host: r.host, Path: path} + return u.String(), nil +} + +func init() { + storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware)) +} diff --git a/registry/storage/driver/middleware/redirect/middleware_test.go b/registry/storage/driver/middleware/redirect/middleware_test.go new file mode 100644 index 00000000..1eb6309f --- /dev/null +++ b/registry/storage/driver/middleware/redirect/middleware_test.go @@ -0,0 +1,58 @@ +package middleware + +import ( + "testing" + + check "gopkg.in/check.v1" +) + +func Test(t *testing.T) { check.TestingT(t) } + +type MiddlewareSuite struct{} + +var _ = check.Suite(&MiddlewareSuite{}) + +func (s *MiddlewareSuite) TestNoConfig(c *check.C) { + options := make(map[string]interface{}) + _, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.ErrorMatches, "no baseurl provided") +} + +func (s *MiddlewareSuite) TestMissingScheme(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "example.com" + _, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl") +} + +func (s *MiddlewareSuite) TestHttpsPort(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "https://example.com:5443" + middleware, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.Equals, nil) + + m, ok := middleware.(*redirectStorageMiddleware) + c.Assert(ok, check.Equals, true) + c.Assert(m.scheme, check.Equals, "https") + c.Assert(m.host, check.Equals, "example.com:5443") + + url, err := middleware.URLFor(nil, "/rick/data", nil) + c.Assert(err, check.Equals, nil) + c.Assert(url, check.Equals, "https://example.com:5443/rick/data") +} + +func (s *MiddlewareSuite) TestHTTP(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "http://example.com" + middleware, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.Equals, nil) + + m, ok := middleware.(*redirectStorageMiddleware) + c.Assert(ok, check.Equals, true) + c.Assert(m.scheme, check.Equals, "http") + c.Assert(m.host, check.Equals, "example.com") + + url, err := middleware.URLFor(nil, "morty/data", nil) + c.Assert(err, check.Equals, nil) + c.Assert(url, check.Equals, "http://example.com/morty/data") +} diff --git a/registry/storage/driver/middleware/storagemiddleware.go b/registry/storage/driver/middleware/storagemiddleware.go new file mode 100644 index 00000000..7e40a8dd --- /dev/null +++ b/registry/storage/driver/middleware/storagemiddleware.go @@ -0,0 +1,39 @@ +package storagemiddleware + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// InitFunc is the type of a StorageMiddleware factory function and is +// used to register the constructor for different StorageMiddleware backends. 
+type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) + +var storageMiddlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a StorageMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if storageMiddlewares == nil { + storageMiddlewares = make(map[string]InitFunc) + } + if _, exists := storageMiddlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + storageMiddlewares[name] = initFunc + + return nil +} + +// Get constructs a StorageMiddleware with the given options using the named backend. +func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { + if storageMiddlewares != nil { + if initFunc, exists := storageMiddlewares[name]; exists { + return initFunc(storageDriver, options) + } + } + + return nil, fmt.Errorf("no storage middleware registered with name: %s", name) +} diff --git a/registry/storage/driver/oss/doc.go b/registry/storage/driver/oss/doc.go new file mode 100644 index 00000000..d1bc932f --- /dev/null +++ b/registry/storage/driver/oss/doc.go @@ -0,0 +1,3 @@ +// Package oss implements the Aliyun OSS Storage driver backend. Support can be +// enabled by including the "include_oss" build tag. +package oss diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go new file mode 100644 index 00000000..1dcf42b8 --- /dev/null +++ b/registry/storage/driver/oss/oss.go @@ -0,0 +1,694 @@ +// Package oss provides a storagedriver.StorageDriver implementation to +// store blobs in Aliyun OSS cloud storage. +// +// This package leverages the denverdino/aliyungo client library for interfacing with +// oss. 
+// +// Because OSS is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// +build include_oss + +package oss + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/denverdino/aliyungo/oss" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/sirupsen/logrus" +) + +const driverName = "oss" + +// minChunkSize defines the minimum multipart upload chunk size +// OSS API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize +const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk + +// listMax is the largest amount of objects you can request from OSS in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKeyID string + AccessKeySecret string + Bucket string + Region oss.Region + Internal bool + Encrypt bool + Secure bool + ChunkSize int64 + RootDirectory string + Endpoint string +} + +func init() { + factory.Register(driverName, &ossDriverFactory{}) +} + +// ossDriverFactory implements the factory.StorageDriverFactory interface +type ossDriverFactory struct{} + +func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Client *oss.Client + Bucket *oss.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS +// Objects are stored at absolute keys in the provided bucket. 
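+//
+// Construction normally goes through FromParameters below; a sketch with
+// illustrative values:
+//
+//	d, err := FromParameters(map[string]interface{}{
+//		"accesskeyid":     "my-key-id",
+//		"accesskeysecret": "my-secret",
+//		"region":          "oss-cn-hangzhou",
+//		"bucket":          "my-bucket",
+//	})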
+type Driver struct {
+	baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map.
+// Required parameters:
+// - accesskeyid
+// - accesskeysecret
+// - region
+// - bucket
+// Optional parameters include internal, encrypt, secure, chunksize,
+// rootdirectory and endpoint.
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+	// Providing no values for these is valid in case the user is authenticating
+
+	accessKey, ok := parameters["accesskeyid"]
+	if !ok {
+		return nil, fmt.Errorf("No accesskeyid parameter provided")
+	}
+	secretKey, ok := parameters["accesskeysecret"]
+	if !ok {
+		return nil, fmt.Errorf("No accesskeysecret parameter provided")
+	}
+
+	regionName, ok := parameters["region"]
+	if !ok || fmt.Sprint(regionName) == "" {
+		return nil, fmt.Errorf("No region parameter provided")
+	}
+
+	bucket, ok := parameters["bucket"]
+	if !ok || fmt.Sprint(bucket) == "" {
+		return nil, fmt.Errorf("No bucket parameter provided")
+	}
+
+	internalBool := false
+	internal, ok := parameters["internal"]
+	if ok {
+		internalBool, ok = internal.(bool)
+		if !ok {
+			return nil, fmt.Errorf("The internal parameter should be a boolean")
+		}
+	}
+
+	encryptBool := false
+	encrypt, ok := parameters["encrypt"]
+	if ok {
+		encryptBool, ok = encrypt.(bool)
+		if !ok {
+			return nil, fmt.Errorf("The encrypt parameter should be a boolean")
+		}
+	}
+
+	secureBool := true
+	secure, ok := parameters["secure"]
+	if ok {
+		secureBool, ok = secure.(bool)
+		if !ok {
+			return nil, fmt.Errorf("The secure parameter should be a boolean")
+		}
+	}
+
+	chunkSize := int64(defaultChunkSize)
+	chunkSizeParam, ok := parameters["chunksize"]
+	if ok {
+		switch v := chunkSizeParam.(type) {
+		case string:
+			vv, err := strconv.ParseInt(v, 0, 64)
+			if err != nil {
+				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+			}
+			chunkSize = vv
+		case int64:
+			chunkSize = v
+		case int, uint, int32, uint32, uint64:
+			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
+		default:
+			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+		}
+
+		if chunkSize < minChunkSize {
+			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
+		}
+	}
+
+	rootDirectory, ok := parameters["rootdirectory"]
+	if !ok {
+		rootDirectory = ""
+	}
+
+	endpoint, ok := parameters["endpoint"]
+	if !ok {
+		endpoint = ""
+	}
+
+	params := DriverParameters{
+		AccessKeyID:     fmt.Sprint(accessKey),
+		AccessKeySecret: fmt.Sprint(secretKey),
+		Bucket:          fmt.Sprint(bucket),
+		Region:          oss.Region(fmt.Sprint(regionName)),
+		ChunkSize:       chunkSize,
+		RootDirectory:   fmt.Sprint(rootDirectory),
+		Encrypt:         encryptBool,
+		Secure:          secureBool,
+		Internal:        internalBool,
+		Endpoint:        fmt.Sprint(endpoint),
+	}
+
+	return New(params)
+}
+
+// New constructs a new Driver with the given Aliyun credentials, region,
+// encryption flag, and bucket name.
+func New(params DriverParameters) (*Driver, error) {
+
+	client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
+	client.SetEndpoint(params.Endpoint)
+	bucket := client.Bucket(params.Bucket)
+	client.SetDebug(false)
+
+	// Validate that the given credentials have at least read permissions in the
+	// given bucket scope.
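+	// (The one-key List doubles as a cheap credentials probe: an auth or
+	// bucket-policy failure surfaces here rather than on first use.)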
+	if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil {
+		return nil, err
+	}
+
+	// TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise
+	// if you initiated a new OSS client while another one is running on the same bucket.
+
+	d := &driver{
+		Client:        client,
+		Bucket:        bucket,
+		ChunkSize:     params.ChunkSize,
+		Encrypt:       params.Encrypt,
+		RootDirectory: params.RootDirectory,
+	}
+
+	return &Driver{
+		baseEmbed: baseEmbed{
+			Base: base.Base{
+				StorageDriver: d,
+			},
+		},
+	}, nil
+}
+
+// Implement the storagedriver.StorageDriver interface
+
+func (d *driver) Name() string {
+	return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
+	content, err := d.Bucket.Get(d.ossPath(path))
+	if err != nil {
+		return nil, parseError(path, err)
+	}
+	return content, nil
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
+	return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions()))
+}
+
+// Reader retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
+func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+	headers := make(http.Header)
+	headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")
+
+	resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers)
+	if err != nil {
+		return nil, parseError(path, err)
+	}
+
+	// Due to the Aliyun OSS API, a 200 status and the whole object are
+	// returned instead of an InvalidRange error when the range is invalid.
+	//
+	// The OSS server will always return http.StatusPartialContent when the
+	// range is acceptable.
+	if resp.StatusCode != http.StatusPartialContent {
+		resp.Body.Close()
+		return ioutil.NopCloser(bytes.NewReader(nil)), nil
+	}
+
+	return resp.Body, nil
+}
+
+// Writer returns a FileWriter which will store the content written to it
+// at the location designated by "path" after the call to Commit.
+func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
+	key := d.ossPath(path)
+	if !append {
+		// TODO (brianbland): cancel other uploads at this path
+		multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions())
+		if err != nil {
+			return nil, err
+		}
+		return d.newWriter(key, multi, nil), nil
+	}
+	multis, _, err := d.Bucket.ListMulti(key, "")
+	if err != nil {
+		return nil, parseError(path, err)
+	}
+	for _, multi := range multis {
+		if key != multi.Key {
+			continue
+		}
+		parts, err := multi.ListParts()
+		if err != nil {
+			return nil, parseError(path, err)
+		}
+		var multiSize int64
+		for _, part := range parts {
+			multiSize += part.Size
+		}
+		return d.newWriter(key, multi, parts), nil
+	}
+	return nil, storagedriver.PathNotFoundError{Path: path}
+}
+
+// Stat retrieves the FileInfo for the given path, including the current size
+// in bytes and the modification time.
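+//
+// Because OSS has no real directories, Stat infers IsDir from a one-key
+// listing: a common prefix, or a key deeper than the queried path, marks a
+// directory, and directories carry no size or modification time. For example,
+// if only "/a/b" exists, Stat on "/a" reports a directory.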
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.ossPath(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && opath[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.ossPath("") == "" { + prefix = "/" + } + + ossPath := d.ossPath(path) + listResponse, err := d.Bucket.List(ossPath, "/", "", listMax) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(ossPath, "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + // This is to cover for the cases when the first key equal to ossPath. + if len(files) > 0 && files[0] == strings.Replace(ossPath, d.ossPath(""), prefix, 1) { + files = files[1:] + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +const maxConcurrency = 10 + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) + err := d.Bucket.CopyLargeFileInParallel(d.ossPath(sourcePath), d.ossPath(destPath), + d.getContentType(), + getPermissions(), + oss.Options{}, + maxConcurrency) + if err != nil { + logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
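+// Keys are removed in batches of up to listMax (1000) via DelMulti, and the
+// subpath check below keeps a delete of "/a" from also removing a sibling
+// key such as "/ab".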
+func (d *driver) Delete(ctx context.Context, path string) error {
+	ossPath := d.ossPath(path)
+	listResponse, err := d.Bucket.List(ossPath, "", "", listMax)
+	if err != nil || len(listResponse.Contents) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	ossObjects := make([]oss.Object, listMax)
+
+	for len(listResponse.Contents) > 0 {
+		numOssObjects := len(listResponse.Contents)
+		for index, key := range listResponse.Contents {
+			// Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab").
+			if len(key.Key) > len(ossPath) && (key.Key)[len(ossPath)] != '/' {
+				numOssObjects = index
+				break
+			}
+			ossObjects[index].Key = key.Key
+		}
+
+		err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:numOssObjects]})
+		if err != nil {
+			return err
+		}
+
+		if numOssObjects < len(listResponse.Contents) {
+			return nil
+		}
+
+		listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	methodString := "GET"
+	method, ok := options["method"]
+	if ok {
+		methodString, ok = method.(string)
+		if !ok || (methodString != "GET") {
+			return "", storagedriver.ErrUnsupportedMethod{}
+		}
+	}
+
+	expiresTime := time.Now().Add(20 * time.Minute)
+
+	expires, ok := options["expiry"]
+	if ok {
+		et, ok := expires.(time.Time)
+		if ok {
+			expiresTime = et
+		}
+	}
+	logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime)
+	signedURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil)
+	logrus.Infof("signed URL: %s", signedURL)
+	return signedURL, nil
+}
+
+// Walk traverses a filesystem defined within driver, starting
+// from the given path, calling f on each file
+func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error {
+	return storagedriver.WalkFallback(ctx, d, path, f)
+}
+
+func (d *driver) ossPath(path string) string {
+	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
+}
+
+func parseError(path string, err error) error {
+	if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	return err
+}
+
+func hasCode(err error, code string) bool {
+	ossErr, ok := err.(*oss.Error)
+	return ok && ossErr.Code == code
+}
+
+func (d *driver) getOptions() oss.Options {
+	return oss.Options{ServerSideEncryption: d.Encrypt}
+}
+
+func getPermissions() oss.ACL {
+	return oss.Private
+}
+
+func (d *driver) getContentType() string {
+	return "application/octet-stream"
+}
+
+// writer attempts to upload parts to OSS in a buffered fashion where the last
+// part is at least as large as the chunksize, so the multipart upload could be
+// cleanly resumed in the future. This is violated if Close is called after less
+// than a full chunk is written.
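+//
+// A sketch of the intended call sequence (illustration only):
+//
+//	fw, _ := d.Writer(ctx, "/some/path", false)
+//	fw.Write(buf) // buffered into readyPart/pendingPart chunks
+//	fw.Commit()   // flushes the buffers and completes the multipart upload
+//	fw.Close()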
+type writer struct { + driver *driver + key string + multi *oss.Multi + parts []oss.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key string, multi *oss.Multi, parts []oss.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, oss.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []oss.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. 
+// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/registry/storage/driver/oss/oss_test.go b/registry/storage/driver/oss/oss_test.go new file mode 100644 index 00000000..438d9a48 --- /dev/null +++ b/registry/storage/driver/oss/oss_test.go @@ -0,0 +1,142 @@ +// +build include_oss + +package oss + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + alioss "github.com/denverdino/aliyungo/oss" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var ossDriverConstructor func(rootDirectory string) (*Driver, error) + +var skipCheck func() string + +func init() { + accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") + secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") + bucket := os.Getenv("OSS_BUCKET") + region := os.Getenv("OSS_REGION") + internal := os.Getenv("OSS_INTERNAL") + encrypt := os.Getenv("OSS_ENCRYPT") + secure := os.Getenv("OSS_SECURE") + endpoint := os.Getenv("OSS_ENDPOINT") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + ossDriverConstructor = func(rootDirectory string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := false + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + internalBool := false + if internal != "" { + internalBool, err = strconv.ParseBool(internal) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + AccessKeyID: accessKey, + AccessKeySecret: secretKey, + Bucket: bucket, + Region: alioss.Region(region), + Internal: internalBool, + ChunkSize: minChunkSize, + RootDirectory: rootDirectory, + Encrypt: encryptBool, + Secure: secureBool, + Endpoint: endpoint, + } + + return New(parameters) + } + + // Skip OSS storage driver tests if environment variable parameters are not provided + skipCheck = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return ossDriverConstructor(root) + }, skipCheck) +} + +func TestEmptyRootList(t *testing.T) { + if skipCheck() != "" { + t.Skip(skipCheck()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := ossDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error 
creating rooted driver: %v", err) + } + + emptyRootDriver, err := ossDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := ossDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go new file mode 100644 index 00000000..0c6f7e5e --- /dev/null +++ b/registry/storage/driver/s3-aws/s3.go @@ -0,0 +1,1324 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the official aws client library for interfacing with +// S3. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. +package s3 + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/transport" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3aws" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +// maxChunkSize defines the maximum multipart upload chunk size allowed by S3. +const maxChunkSize = 5 << 30 + +const defaultChunkSize = 2 * minChunkSize + +const ( + // defaultMultipartCopyChunkSize defines the default chunk size for all + // but the last Upload Part - Copy operation of a multipart copy. + // Empirically, 32 MB is optimal. + defaultMultipartCopyChunkSize = 32 << 20 + + // defaultMultipartCopyMaxConcurrency defines the default maximum number + // of concurrent Upload Part - Copy operations for a multipart copy. + defaultMultipartCopyMaxConcurrency = 100 + + // defaultMultipartCopyThresholdSize defines the default object size + // above which multipart copy will be used. (PUT Object - Copy is used + // for objects at or below this size.) 
Empirically, 32 MB is optimal. + defaultMultipartCopyThresholdSize = 32 << 20 +) + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +// noStorageClass defines the value to be used if storage class is not supported by the S3 endpoint +const noStorageClass = "NONE" + +// validRegions maps known s3 region identifiers to region descriptors +var validRegions = map[string]struct{}{} + +// validObjectACLs contains known s3 object Acls +var validObjectACLs = map[string]struct{}{} + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region string + RegionEndpoint string + Encrypt bool + KeyID string + Secure bool + V4Auth bool + ChunkSize int64 + MultipartCopyChunkSize int64 + MultipartCopyMaxConcurrency int64 + MultipartCopyThresholdSize int64 + RootDirectory string + StorageClass string + UserAgent string + ObjectACL string + SessionToken string +} + +func init() { + for _, region := range []string{ + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-west-2", + "eu-central-1", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "sa-east-1", + "cn-north-1", + "us-gov-west-1", + "ca-central-1", + } { + validRegions[region] = struct{}{} + } + + for _, objectACL := range []string{ + s3.ObjectCannedACLPrivate, + s3.ObjectCannedACLPublicRead, + s3.ObjectCannedACLPublicReadWrite, + s3.ObjectCannedACLAuthenticatedRead, + s3.ObjectCannedACLAwsExecRead, + s3.ObjectCannedACLBucketOwnerRead, + s3.ObjectCannedACLBucketOwnerFullControl, + } { + validObjectACLs[objectACL] = struct{}{} + } + + // Register this as the default s3 driver in addition to s3aws + factory.Register("s3", &s3DriverFactory{}) + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket string + ChunkSize int64 + Encrypt bool + KeyID string + MultipartCopyChunkSize int64 + MultipartCopyMaxConcurrency int64 + MultipartCopyThresholdSize int64 + RootDirectory string + StorageClass string + ObjectACL string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. 
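+// A minimal construction sketch (parameter names per FromParameters below;
+// the values are placeholders, not recommendations):
+//
+//	d, err := FromParameters(map[string]interface{}{
+//		"accesskey": "AKIAEXAMPLE",
+//		"secretkey": "secret",
+//		"region":    "us-east-1",
+//		"bucket":    "my-registry-bucket",
+//		"encrypt":   "false",
+//	})
+//	if err != nil {
+//		// configuration error: bad region, bucket, or option value
+//	}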
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey := parameters["accesskey"] + if accessKey == nil { + accessKey = "" + } + secretKey := parameters["secretkey"] + if secretKey == nil { + secretKey = "" + } + + regionEndpoint := parameters["regionendpoint"] + if regionEndpoint == nil { + regionEndpoint = "" + } + + regionName, ok := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := fmt.Sprint(regionName) + // Don't check the region value if a custom endpoint is provided. + if regionEndpoint == "" { + if _, ok = validRegions[region]; !ok { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + } + + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + + secureBool := true + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + + v4Bool := true + v4auth := parameters["v4auth"] + switch v4auth := v4auth.(type) { + case string: + b, err := strconv.ParseBool(v4auth) + if err != nil { + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + v4Bool = b + case bool: + v4Bool = v4auth + case nil: + // do nothing + default: + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + + keyID := parameters["keyid"] + if keyID == nil { + keyID = "" + } + + chunkSize, err := getParameterAsInt64(parameters, "chunksize", defaultChunkSize, minChunkSize, maxChunkSize) + if err != nil { + return nil, err + } + + multipartCopyChunkSize, err := getParameterAsInt64(parameters, "multipartcopychunksize", defaultMultipartCopyChunkSize, minChunkSize, maxChunkSize) + if err != nil { + return nil, err + } + + multipartCopyMaxConcurrency, err := getParameterAsInt64(parameters, "multipartcopymaxconcurrency", defaultMultipartCopyMaxConcurrency, 1, math.MaxInt64) + if err != nil { + return nil, err + } + + multipartCopyThresholdSize, err := getParameterAsInt64(parameters, "multipartcopythresholdsize", defaultMultipartCopyThresholdSize, 0, maxChunkSize) + if err != nil { + return nil, err + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { + rootDirectory = "" + } + + storageClass := s3.StorageClassStandard + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { + 
storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", + []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) + } + // All valid storage class parameters are UPPERCASE, so be a bit more flexible here + storageClassString = strings.ToUpper(storageClassString) + if storageClassString != noStorageClass && + storageClassString != s3.StorageClassStandard && + storageClassString != s3.StorageClassReducedRedundancy { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", + []string{noStorageClass, s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) + } + storageClass = storageClassString + } + + userAgent := parameters["useragent"] + if userAgent == nil { + userAgent = "" + } + + objectACL := s3.ObjectCannedACLPrivate + objectACLParam := parameters["objectacl"] + if objectACLParam != nil { + objectACLString, ok := objectACLParam.(string) + if !ok { + return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + } + + if _, ok = validObjectACLs[objectACLString]; !ok { + return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + } + objectACL = objectACLString + } + + sessionToken := "" + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + fmt.Sprint(regionEndpoint), + encryptBool, + fmt.Sprint(keyID), + secureBool, + v4Bool, + chunkSize, + multipartCopyChunkSize, + multipartCopyMaxConcurrency, + multipartCopyThresholdSize, + fmt.Sprint(rootDirectory), + storageClass, + fmt.Sprint(userAgent), + objectACL, + fmt.Sprint(sessionToken), + } + + return New(params) +} + +// getParameterAsInt64 converts paramaters[name] to an int64 value (using +// defaultt if nil), verifies it is no smaller than min, and returns it. 
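+// It also rejects values larger than max, giving callers a single inclusive
+// range check. Usage sketch, mirroring the chunksize handling above:
+//
+//	cs, err := getParameterAsInt64(parameters, "chunksize", defaultChunkSize, minChunkSize, maxChunkSize)
+//	if err != nil {
+//		// non-integer or out-of-range value
+//	}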
+func getParameterAsInt64(parameters map[string]interface{}, name string, defaultt int64, min int64, max int64) (int64, error) { + rv := defaultt + param := parameters[name] + switch v := param.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return 0, fmt.Errorf("%s parameter must be an integer, %v invalid", name, param) + } + rv = vv + case int64: + rv = v + case int, uint, int32, uint32, uint64: + rv = reflect.ValueOf(v).Convert(reflect.TypeOf(rv)).Int() + case nil: + // do nothing + default: + return 0, fmt.Errorf("invalid value for %s: %#v", name, param) + } + + if rv < min || rv > max { + return 0, fmt.Errorf("The %s %#v parameter should be a number between %d and %d (inclusive)", name, rv, min, max) + } + + return rv, nil +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + if !params.V4Auth && + (params.RegionEndpoint == "" || + strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) { + return nil, fmt.Errorf("On Amazon S3 this storage driver can only be used with v4 authentication") + } + + awsConfig := aws.NewConfig() + creds := credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + SessionToken: params.SessionToken, + }, + }, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + }) + + if params.RegionEndpoint != "" { + awsConfig.WithS3ForcePathStyle(true) + awsConfig.WithEndpoint(params.RegionEndpoint) + } + + awsConfig.WithCredentials(creds) + awsConfig.WithRegion(params.Region) + awsConfig.WithDisableSSL(!params.Secure) + + if params.UserAgent != "" { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), + }) + } + + s3obj := s3.New(session.New(awsConfig)) + + // enable S3 compatible signature v2 signing instead + if !params.V4Auth { + setv2Handlers(s3obj) + } + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + KeyID: params.KeyID, + MultipartCopyChunkSize: params.MultipartCopyChunkSize, + MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency, + MultipartCopyThresholdSize: params.MultipartCopyThresholdSize, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + ObjectACL: params.ObjectACL, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. 
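+// It is built on Reader with a zero offset and buffers the entire object in
+// memory, so large streams are better served by Reader directly.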
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + reader, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + return ioutil.ReadAll(reader) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + _, err := d.S3.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + Body: bytes.NewReader(contents), + }) + return parseError(path, err) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + resp, err := d.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), + }) + + if err != nil { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return nil, err + } + return d.newWriter(key, *resp.UploadId, nil), nil + } + resp, err := d.S3.ListMultipartUploads(&s3.ListMultipartUploadsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(key), + }) + if err != nil { + return nil, parseError(path, err) + } + + for _, multi := range resp.Uploads { + if key != *multi.Key { + continue + } + resp, err := d.S3.ListParts(&s3.ListPartsInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + UploadId: multi.UploadId, + }) + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range resp.Parts { + multiSize += *part.Size + } + return d.newWriter(key, *multi.UploadId, resp.Parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
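+// S3 has no real directories, so Stat lists at most one key under the path
+// prefix: an exact key match is a file (size and modification time come from
+// the listing entry); any other match under the prefix marks the path as a
+// directory, and an empty listing means the path does not exist.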
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + MaxKeys: aws.Int64(1), + }) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(resp.Contents) == 1 { + if *resp.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = *resp.Contents[0].Size + fi.ModTime = *resp.Contents[0].LastModified + } + } else if len(resp.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + }) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range resp.Contents { + files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range resp.CommonPrefixes { + commonPrefix := *commonPrefix.Prefix + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if *resp.IsTruncated { + resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + Marker: resp.NextMarker, + }) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + if err := d.copy(ctx, sourcePath, destPath); err != nil { + return err + } + return d.Delete(ctx, sourcePath) +} + +// copy copies an object stored at sourcePath to destPath. +func (d *driver) copy(ctx context.Context, sourcePath string, destPath string) error { + // S3 can copy objects up to 5 GB in size with a single PUT Object - Copy + // operation. For larger objects, the multipart upload API must be used. + // + // Empirically, multipart copy is fastest with 32 MB parts and is faster + // than PUT Object - Copy for objects larger than 32 MB. 
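+	// Sketch of the arithmetic below: a 1 GiB source with the default 32 MiB
+	// MultipartCopyChunkSize needs ceil(1024/32) = 32 Upload Part - Copy
+	// calls, of which at most MultipartCopyMaxConcurrency run at once.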
+ + fileInfo, err := d.Stat(ctx, sourcePath) + if err != nil { + return parseError(sourcePath, err) + } + + if fileInfo.Size() <= d.MultipartCopyThresholdSize { + _, err := d.S3.CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + }) + if err != nil { + return parseError(sourcePath, err) + } + return nil + } + + createResp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return err + } + + numParts := (fileInfo.Size() + d.MultipartCopyChunkSize - 1) / d.MultipartCopyChunkSize + completedParts := make([]*s3.CompletedPart, numParts) + errChan := make(chan error, numParts) + limiter := make(chan struct{}, d.MultipartCopyMaxConcurrency) + + for i := range completedParts { + i := int64(i) + go func() { + limiter <- struct{}{} + firstByte := i * d.MultipartCopyChunkSize + lastByte := firstByte + d.MultipartCopyChunkSize - 1 + if lastByte >= fileInfo.Size() { + lastByte = fileInfo.Size() - 1 + } + uploadResp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(d.Bucket), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + Key: aws.String(d.s3Path(destPath)), + PartNumber: aws.Int64(i + 1), + UploadId: createResp.UploadId, + CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", firstByte, lastByte)), + }) + if err == nil { + completedParts[i] = &s3.CompletedPart{ + ETag: uploadResp.CopyPartResult.ETag, + PartNumber: aws.Int64(i + 1), + } + } + errChan <- err + <-limiter + }() + } + + for range completedParts { + err := <-errChan + if err != nil { + return err + } + } + + _, err = d.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + UploadId: createResp.UploadId, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts}, + }) + return err +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +// We must be careful since S3 does not guarantee read after delete consistency +func (d *driver) Delete(ctx context.Context, path string) error { + s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) + s3Path := d.s3Path(path) + listObjectsInput := &s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(s3Path), + } +ListLoop: + for { + // list all the objects + resp, err := d.S3.ListObjects(listObjectsInput) + + // resp.Contents can only be empty on the first call + // if there were no more results to return after the first call, resp.IsTruncated would have been false + // and the loop would be exited without recalling ListObjects + if err != nil || len(resp.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + for _, key := range resp.Contents { + // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). 
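+			// For example, with s3Path "/a" the key "/ab" has 'b' rather than
+			// '/' at index len(s3Path), so it belongs to a sibling path and
+			// ends the scan.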
+ if len(*key.Key) > len(s3Path) && (*key.Key)[len(s3Path)] != '/' { + break ListLoop + } + s3Objects = append(s3Objects, &s3.ObjectIdentifier{ + Key: key.Key, + }) + } + + // resp.Contents must have at least one element or we would have returned not found + listObjectsInput.Marker = resp.Contents[len(resp.Contents)-1].Key + + // from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned" + // if everything has been returned, break + if resp.IsTruncated == nil || !*resp.IsTruncated { + break + } + } + + // need to chunk objects into groups of 1000 per s3 restrictions + total := len(s3Objects) + for i := 0; i < total; i += 1000 { + _, err := d.S3.DeleteObjects(&s3.DeleteObjectsInput{ + Bucket: aws.String(d.Bucket), + Delete: &s3.Delete{ + Objects: s3Objects[i:min(i+1000, total)], + Quiet: aws.Bool(false), + }, + }) + if err != nil { + return err + } + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresIn := 20 * time.Minute + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresIn = et.Sub(time.Now()) + } + } + + var req *request.Request + + switch methodString { + case "GET": + req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + case "HEAD": + req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + default: + panic("unreachable") + } + + return req.Presign(expiresIn) +} + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, from string, f storagedriver.WalkFn) error { + path := from + if !strings.HasSuffix(path, "/") { + path = path + "/" + } + + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + var objectCount int64 + if err := d.doWalk(ctx, &objectCount, d.s3Path(path), prefix, f); err != nil { + return err + } + + // S3 doesn't have the concept of empty directories, so it'll return path not found if there are no objects + if objectCount == 0 { + return storagedriver.PathNotFoundError{Path: from} + } + + return nil +} + +type walkInfoContainer struct { + storagedriver.FileInfoFields + prefix *string +} + +// Path provides the full path of the target of this file info. +func (wi walkInfoContainer) Path() string { + return wi.FileInfoFields.Path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (wi walkInfoContainer) Size() int64 { + return wi.FileInfoFields.Size +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (wi walkInfoContainer) ModTime() time.Time { + return wi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory. 
+func (wi walkInfoContainer) IsDir() bool { + return wi.FileInfoFields.IsDir +} + +func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, prefix string, f storagedriver.WalkFn) error { + var retError error + + listObjectsInput := &s3.ListObjectsV2Input{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(path), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + } + + ctx, done := dcontext.WithTrace(parentCtx) + defer done("s3aws.ListObjectsV2Pages(%s)", path) + listObjectErr := d.S3.ListObjectsV2PagesWithContext(ctx, listObjectsInput, func(objects *s3.ListObjectsV2Output, lastPage bool) bool { + + *objectCount += *objects.KeyCount + walkInfos := make([]walkInfoContainer, 0, *objects.KeyCount) + + for _, dir := range objects.CommonPrefixes { + commonPrefix := *dir.Prefix + walkInfos = append(walkInfos, walkInfoContainer{ + prefix: dir.Prefix, + FileInfoFields: storagedriver.FileInfoFields{ + IsDir: true, + Path: strings.Replace(commonPrefix[:len(commonPrefix)-1], d.s3Path(""), prefix, 1), + }, + }) + } + + for _, file := range objects.Contents { + walkInfos = append(walkInfos, walkInfoContainer{ + FileInfoFields: storagedriver.FileInfoFields{ + IsDir: false, + Size: *file.Size, + ModTime: *file.LastModified, + Path: strings.Replace(*file.Key, d.s3Path(""), prefix, 1), + }, + }) + } + + sort.SliceStable(walkInfos, func(i, j int) bool { return walkInfos[i].FileInfoFields.Path < walkInfos[j].FileInfoFields.Path }) + + for _, walkInfo := range walkInfos { + err := f(walkInfo) + + if err == storagedriver.ErrSkipDir { + if walkInfo.IsDir() { + continue + } else { + break + } + } else if err != nil { + retError = err + return false + } + + if walkInfo.IsDir() { + if err := d.doWalk(ctx, objectCount, *walkInfo.prefix, prefix, f); err != nil { + retError = err + return false + } + } + } + return true + }) + + if retError != nil { + return retError + } + + if listObjectErr != nil { + return listObjectErr + } + + return nil +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func (d *driver) getEncryptionMode() *string { + if !d.Encrypt { + return nil + } + if d.KeyID == "" { + return aws.String("AES256") + } + return aws.String("aws:kms") +} + +func (d *driver) getSSEKMSKeyID() *string { + if d.KeyID != "" { + return aws.String(d.KeyID) + } + return nil +} + +func (d *driver) getContentType() *string { + return aws.String("application/octet-stream") +} + +func (d *driver) getACL() *string { + return aws.String(d.ObjectACL) +} + +func (d *driver) getStorageClass() *string { + if d.StorageClass == noStorageClass { + return nil + } + return aws.String(d.StorageClass) +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. 
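+// Buffering sketch: Write fills readyPart up to ChunkSize, then pendingPart;
+// once both are full, flushPart uploads readyPart as the next part and
+// promotes pendingPart to readyPart. Holding one full chunk in reserve is
+// what keeps the last uploaded part at least ChunkSize bytes, as described
+// above.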
+type writer struct { + driver *driver + key string + uploadID string + parts []*s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += *part.Size + } + return &writer{ + driver: d, + key: key, + uploadID: uploadID, + parts: parts, + size: size, + } +} + +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize { + var completedUploadedParts completedParts + for _, part := range w.parts { + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + sort.Sort(completedUploadedParts) + + _, err := w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return 0, err + } + + resp, err := w.driver.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + ContentType: w.driver.getContentType(), + ACL: w.driver.getACL(), + ServerSideEncryption: w.driver.getEncryptionMode(), + StorageClass: w.driver.getStorageClass(), + }) + if err != nil { + return 0, err + } + w.uploadID = *resp.UploadId + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + resp, err := w.driver.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + }) + defer resp.Body.Close() + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart, err = ioutil.ReadAll(resp.Body) + if err != nil { + return 0, err + } + } else { + // Otherwise we can use the old file as the new first part + copyPartResp, err := w.driver.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(w.driver.Bucket), + CopySource: aws.String(w.driver.Bucket + "/" + w.key), + Key: aws.String(w.key), + PartNumber: aws.Int64(1), + UploadId: resp.UploadId, + }) + if err != nil { + return 0, err + } + w.parts = []*s3.Part{ + { + ETag: copyPartResp.CopyPartResult.ETag, + PartNumber: aws.Int64(1), + Size: aws.Int64(w.size), + }, + } + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) 
+ n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + _, err := w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + + var completedUploadedParts completedParts + for _, part := range w.parts { + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + sort.Sort(completedUploadedParts) + + _, err = w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) 
+ w.pendingPart = nil + } + + partNumber := aws.Int64(int64(len(w.parts) + 1)) + resp, err := w.driver.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + PartNumber: partNumber, + UploadId: aws.String(w.uploadID), + Body: bytes.NewReader(w.readyPart), + }) + if err != nil { + return err + } + w.parts = append(w.parts, &s3.Part{ + ETag: resp.ETag, + PartNumber: partNumber, + Size: aws.Int64(int64(len(w.readyPart))), + }) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/registry/storage/driver/s3-aws/s3_test.go b/registry/storage/driver/s3-aws/s3_test.go new file mode 100644 index 00000000..363a22eb --- /dev/null +++ b/registry/storage/driver/s3-aws/s3_test.go @@ -0,0 +1,315 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "math/rand" + "os" + "strconv" + "testing" + + "gopkg.in/check.v1" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + keyID := os.Getenv("S3_KEY_ID") + secure := os.Getenv("S3_SECURE") + v4Auth := os.Getenv("S3_V4_AUTH") + region := os.Getenv("AWS_REGION") + objectACL := os.Getenv("S3_OBJECT_ACL") + root, err := ioutil.TempDir("", "driver-") + regionEndpoint := os.Getenv("REGION_ENDPOINT") + sessionToken := os.Getenv("AWS_SESSION_TOKEN") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + v4Bool := true + if v4Auth != "" { + v4Bool, err = strconv.ParseBool(v4Auth) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + region, + regionEndpoint, + encryptBool, + keyID, + secureBool, + v4Bool, + minChunkSize, + defaultMultipartCopyChunkSize, + defaultMultipartCopyMaxConcurrency, + defaultMultipartCopyThresholdSize, + rootDirectory, + storageClass, + driverName + "-test", + objectACL, + sessionToken, + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root, s3.StorageClassStandard) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, 
err := s3DriverConstructor(validRoot, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.StorageClassReducedRedundancy) + if err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + if _, err = s3DriverConstructor(rootDir, noStorageClass); err != nil { + t.Fatalf("unexpected error creating driver without storage class: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(standardDriverUnwrapped.Bucket), + Key: aws.String(standardDriverUnwrapped.s3Path(standardFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if resp.StorageClass != nil { + t.Fatalf("unexpected storage class for standard file: %v", resp.StorageClass) + } + + rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(rrDriverUnwrapped.Bucket), + Key: aws.String(rrDriverUnwrapped.s3Path(rrFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) + } + defer resp.Body.Close() + if resp.StorageClass == nil { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", 
s3.StorageClassStandard) + } else if *resp.StorageClass != s3.StorageClassReducedRedundancy { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", *resp.StorageClass) + } + +} + +func TestOverThousandBlobs(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + ctx := context.Background() + for i := 0; i < 1005; i++ { + filename := "/thousandfiletest/file" + strconv.Itoa(i) + contents := []byte("contents") + err = standardDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + } + + // cant actually verify deletion because read-after-delete is inconsistent, but can ensure no errors + err = standardDriver.Delete(ctx, "/thousandfiletest") + if err != nil { + t.Fatalf("unexpected error deleting thousand files: %v", err) + } +} + +func TestMoveWithMultipartCopy(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + d, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver: %v", err) + } + + ctx := context.Background() + sourcePath := "/source" + destPath := "/dest" + + defer d.Delete(ctx, sourcePath) + defer d.Delete(ctx, destPath) + + // An object larger than d's MultipartCopyThresholdSize will cause d.Move() to perform a multipart copy. 
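+	// contents is sized at twice the threshold so the copy below cannot take
+	// the single PUT Object - Copy path.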
+ multipartCopyThresholdSize := d.baseEmbed.Base.StorageDriver.(*driver).MultipartCopyThresholdSize + contents := make([]byte, 2*multipartCopyThresholdSize) + rand.Read(contents) + + err = d.PutContent(ctx, sourcePath, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + err = d.Move(ctx, sourcePath, destPath) + if err != nil { + t.Fatalf("unexpected error moving file: %v", err) + } + + received, err := d.GetContent(ctx, destPath) + if err != nil { + t.Fatalf("unexpected error getting content: %v", err) + } + if !bytes.Equal(contents, received) { + t.Fatal("content differs") + } + + _, err = d.GetContent(ctx, sourcePath) + switch err.(type) { + case storagedriver.PathNotFoundError: + default: + t.Fatalf("unexpected error getting content: %v", err) + } +} diff --git a/registry/storage/driver/s3-aws/s3_v2_signer.go b/registry/storage/driver/s3-aws/s3_v2_signer.go new file mode 100644 index 00000000..703660cf --- /dev/null +++ b/registry/storage/driver/s3-aws/s3_v2_signer.go @@ -0,0 +1,222 @@ +package s3 + +// Source: https://github.com/pivotal-golang/s3cli + +// Copyright (c) 2013 Damien Le Berrigaud and Nick Wade + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + log "github.com/sirupsen/logrus" +) + +const ( + signatureVersion = "2" + signatureMethod = "HmacSHA1" + timeFormat = "2006-01-02T15:04:05Z" +) + +type signer struct { + // Values that must be populated from the request + Request *http.Request + Time time.Time + Credentials *credentials.Credentials + Query url.Values + stringToSign string + signature string +} + +var s3ParamsToSign = map[string]bool{ + "acl": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "website": true, + "delete": true, +} + +// setv2Handlers will setup v2 signature signing on the S3 driver +func setv2Handlers(svc *s3.S3) { + svc.Handlers.Build.PushBack(func(r *request.Request) { + parsedURL, err := url.Parse(r.HTTPRequest.URL.String()) + if err != nil { + log.Fatalf("Failed to parse URL: %v", err) + } + r.HTTPRequest.URL.Opaque = parsedURL.Path + }) + + svc.Handlers.Sign.Clear() + svc.Handlers.Sign.PushBack(Sign) + svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) +} + +// Sign requests with signature version 2. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
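+	// For reference, the v2 string to sign assembled in (*signer).Sign below
+	// joins these fields with newlines:
+	//
+	//	HTTP-Verb
+	//	Content-MD5
+	//	Content-Type
+	//	Date
+	//	CanonicalizedAmzHeaders + CanonicalizedResource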
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + v2 := signer{ + Request: req.HTTPRequest, + Time: req.Time, + Credentials: req.Config.Credentials, + } + v2.Sign() +} + +func (v2 *signer) Sign() error { + credValue, err := v2.Credentials.Get() + if err != nil { + return err + } + accessKey := credValue.AccessKeyID + var ( + md5, ctype, date, xamz string + xamzDate bool + sarray []string + smap map[string]string + sharray []string + ) + + headers := v2.Request.Header + params := v2.Request.URL.Query() + parsedURL, err := url.Parse(v2.Request.URL.String()) + if err != nil { + return err + } + host, canonicalPath := parsedURL.Host, parsedURL.Path + v2.Request.Header["Host"] = []string{host} + v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)} + if credValue.SessionToken != "" { + v2.Request.Header["x-amz-security-token"] = []string{credValue.SessionToken} + } + + smap = make(map[string]string) + for k, v := range headers { + k = strings.ToLower(k) + switch k { + case "content-md5": + md5 = v[0] + case "content-type": + ctype = v[0] + case "date": + if !xamzDate { + date = v[0] + } + default: + if strings.HasPrefix(k, "x-amz-") { + vall := strings.Join(v, ",") + smap[k] = k + ":" + vall + if k == "x-amz-date" { + xamzDate = true + date = "" + } + sharray = append(sharray, k) + } + } + } + if len(sharray) > 0 { + sort.StringSlice(sharray).Sort() + for _, h := range sharray { + sarray = append(sarray, smap[h]) + } + xamz = strings.Join(sarray, "\n") + "\n" + } + + expires := false + if v, ok := params["Expires"]; ok { + expires = true + date = v[0] + params["AWSAccessKeyId"] = []string{accessKey} + } + + sarray = sarray[0:0] + for k, v := range params { + if s3ParamsToSign[k] { + for _, vi := range v { + if vi == "" { + sarray = append(sarray, k) + } else { + sarray = append(sarray, k+"="+vi) + } + } + } + } + if len(sarray) > 0 { + sort.StringSlice(sarray).Sort() + canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") + } + + v2.stringToSign = strings.Join([]string{ + v2.Request.Method, + md5, + ctype, + date, + xamz + canonicalPath, + }, "\n") + hash := hmac.New(sha1.New, []byte(credValue.SecretAccessKey)) + hash.Write([]byte(v2.stringToSign)) + v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + + if expires { + params["Signature"] = []string{string(v2.signature)} + } else { + headers["Authorization"] = []string{"AWS " + accessKey + ":" + string(v2.signature)} + } + + log.WithFields(log.Fields{ + "string-to-sign": v2.stringToSign, + "signature": v2.signature, + }).Debugln("request signature") + return nil +} diff --git a/registry/storage/driver/s3-goamz/s3.go b/registry/storage/driver/s3-goamz/s3.go new file mode 100644 index 00000000..4bee38d7 --- /dev/null +++ b/registry/storage/driver/s3-goamz/s3.go @@ -0,0 +1,766 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the docker/goamz client library for interfacing with +// S3. It is intended to be deprecated in favor of the s3-aws driver +// implementation. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. 
+package s3 + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/docker/distribution/registry/client/transport" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" +) + +const driverName = "s3goamz" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region aws.Region + Encrypt bool + Secure bool + V4Auth bool + ChunkSize int64 + RootDirectory string + StorageClass s3.StorageClass + UserAgent string +} + +func init() { + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket *s3.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + StorageClass s3.StorageClass +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. 
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey := parameters["accesskey"] + if accessKey == nil { + accessKey = "" + } + + secretKey := parameters["secretkey"] + if secretKey == nil { + secretKey = "" + } + + regionName := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := aws.GetRegion(fmt.Sprint(regionName)) + if region.Name == "" { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + + secureBool := true + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + + v4AuthBool := false + v4Auth := parameters["v4auth"] + switch v4Auth := v4Auth.(type) { + case string: + b, err := strconv.ParseBool(v4Auth) + if err != nil { + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + v4AuthBool = b + case bool: + v4AuthBool = v4Auth + case nil: + // do nothing + default: + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam := parameters["chunksize"] + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { + rootDirectory = "" + } + + storageClass := s3.StandardStorage + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { + storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) + } + // All valid storage class 
parameters are UPPERCASE, so be a bit more flexible here + storageClassCasted := s3.StorageClass(strings.ToUpper(storageClassString)) + if storageClassCasted != s3.StandardStorage && storageClassCasted != s3.ReducedRedundancy { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) + } + storageClass = storageClassCasted + } + + userAgent := parameters["useragent"] + if userAgent == nil { + userAgent = "" + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + encryptBool, + secureBool, + v4AuthBool, + chunkSize, + fmt.Sprint(rootDirectory), + storageClass, + fmt.Sprint(userAgent), + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) + if err != nil { + return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) + } + + if !params.Secure { + params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) + } + + s3obj := s3.New(auth, params.Region) + + if params.UserAgent != "" { + s3obj.Client = &http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, + transport.NewHeaderRequestModifier(http.Header{ + http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}, + }), + ), + } + } + + if params.V4Auth { + s3obj.Signature = aws.V4Signature + } else if mustV4Auth(params.Region.Name) { + return nil, fmt.Errorf("The %s region only works with v4 authentication", params.Region.Name) + } + + bucket := s3obj.Bucket(params.Bucket) + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.s3Path(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
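+//
+// An offset at or beyond the end of the object yields an empty reader rather
+// than an error (S3 reports this case as InvalidRange). A usage sketch with
+// an illustrative path:
+//
+//    rc, err := d.Reader(ctx, "/some/path", 128)
+//    if err == nil {
+//        defer rc.Close()
+//    }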
+func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) + if err != nil { + if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return nil, err + } + return d.newWriter(key, multi, nil), nil + } + multis, _, err := d.Bucket.ListMulti(key, "") + if err != nil { + return nil, parseError(path, err) + } + for _, multi := range multis { + if key != multi.Key { + continue + } + parts, err := multi.ListParts() + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range parts { + multiSize += part.Size + } + return d.newWriter(key, multi, parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
+    // In those cases, there is no root prefix to replace and we must actually add a "/" to all
+    // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
+    prefix := ""
+    if d.s3Path("") == "" {
+        prefix = "/"
+    }
+
+    listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax)
+    if err != nil {
+        return nil, parseError(opath, err)
+    }
+
+    files := []string{}
+    directories := []string{}
+
+    for {
+        for _, key := range listResponse.Contents {
+            files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1))
+        }
+
+        for _, commonPrefix := range listResponse.CommonPrefixes {
+            directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
+        }
+
+        if !listResponse.IsTruncated {
+            break
+        }
+
+        listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    if opath != "/" {
+        if len(files) == 0 && len(directories) == 0 {
+            // Treat empty response as missing directory, since we don't actually
+            // have directories in s3.
+            return nil, storagedriver.PathNotFoundError{Path: opath}
+        }
+    }
+
+    return append(files, directories...), nil
+}
+
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
+    /* This is terrible, but aws doesn't have an actual move. */
+    _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(),
+        s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath))
+    if err != nil {
+        return parseError(sourcePath, err)
+    }
+
+    return d.Delete(ctx, sourcePath)
+}
+
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *driver) Delete(ctx context.Context, path string) error {
+    s3Path := d.s3Path(path)
+    listResponse, err := d.Bucket.List(s3Path, "", "", listMax)
+    if err != nil || len(listResponse.Contents) == 0 {
+        return storagedriver.PathNotFoundError{Path: path}
+    }
+
+    s3Objects := make([]s3.Object, listMax)
+
+    for len(listResponse.Contents) > 0 {
+        numS3Objects := len(listResponse.Contents)
+        for index, key := range listResponse.Contents {
+            // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab").
+            if len(key.Key) > len(s3Path) && (key.Key)[len(s3Path)] != '/' {
+                numS3Objects = index
+                break
+            }
+            s3Objects[index].Key = key.Key
+        }
+
+        // Surface bulk-delete failures instead of silently reporting success.
+        err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:numS3Objects]})
+        if err != nil {
+            return err
+        }
+
+        if numS3Objects < len(listResponse.Contents) {
+            return nil
+        }
+
+        listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax)
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
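+//
+// A sketch of the recognized options (both are optional; values shown are
+// illustrative):
+//
+//    url, err := d.URLFor(ctx, "/some/path", map[string]interface{}{
+//        "method": "GET",                            // "GET" or "HEAD"
+//        "expiry": time.Now().Add(15 * time.Minute), // defaults to 20 minutes from now
+//    })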
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+    methodString := "GET"
+    method, ok := options["method"]
+    if ok {
+        methodString, ok = method.(string)
+        if !ok || (methodString != "GET" && methodString != "HEAD") {
+            return "", storagedriver.ErrUnsupportedMethod{}
+        }
+    }
+
+    expiresTime := time.Now().Add(20 * time.Minute)
+    expires, ok := options["expiry"]
+    if ok {
+        et, ok := expires.(time.Time)
+        if ok {
+            expiresTime = et
+        }
+    }
+
+    return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil
+}
+
+func (d *driver) s3Path(path string) string {
+    return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
+}
+
+// S3BucketKey returns the s3 bucket key for the given storage driver path.
+func (d *Driver) S3BucketKey(path string) string {
+    return d.StorageDriver.(*driver).s3Path(path)
+}
+
+// Walk traverses a filesystem defined within driver, starting
+// from the given path, calling f on each file.
+func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error {
+    return storagedriver.WalkFallback(ctx, d, path, f)
+}
+
+func parseError(path string, err error) error {
+    if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
+        return storagedriver.PathNotFoundError{Path: path}
+    }
+
+    return err
+}
+
+func (d *driver) getOptions() s3.Options {
+    return s3.Options{
+        SSE:          d.Encrypt,
+        StorageClass: d.StorageClass,
+    }
+}
+
+func getPermissions() s3.ACL {
+    return s3.Private
+}
+
+// mustV4Auth reports whether the given region requires v4 authentication.
+// Please see documentation at http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
+func mustV4Auth(region string) bool {
+    switch region {
+    case "eu-central-1", "cn-north-1", "us-east-2",
+        "ca-central-1", "ap-south-1", "ap-northeast-2", "eu-west-2":
+        return true
+    }
+    return false
+}
+
+func (d *driver) getContentType() string {
+    return "application/octet-stream"
+}
+
+// writer attempts to upload parts to S3 in a buffered fashion where the last
+// part is at least as large as the chunksize, so the multipart upload could be
+// cleanly resumed in the future. This is violated if Close is called after less
+// than a full chunk is written.
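+//
+// Internally the writer fills two buffers, readyPart and pendingPart, of up
+// to ChunkSize bytes each; readyPart is only uploaded once pendingPart is
+// also full, so the buffer flushed on Close or Commit can absorb a short
+// final chunk (see flushPart).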
+type writer struct { + driver *driver + key string + multi *s3.Multi + parts []s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key string, multi *s3.Multi, parts []s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []s3.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. 
+// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/registry/storage/driver/s3-goamz/s3_test.go b/registry/storage/driver/s3-goamz/s3_test.go new file mode 100644 index 00000000..352ec3f5 --- /dev/null +++ b/registry/storage/driver/s3-goamz/s3_test.go @@ -0,0 +1,201 @@ +package s3 + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + secure := os.Getenv("S3_SECURE") + v4auth := os.Getenv("S3_USE_V4_AUTH") + region := os.Getenv("AWS_REGION") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + v4AuthBool := false + if v4auth != "" { + v4AuthBool, err = strconv.ParseBool(v4auth) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + aws.GetRegion(region), + encryptBool, + secureBool, + v4AuthBool, + minChunkSize, + rootDirectory, + storageClass, + driverName + "-test", + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root, s3.StandardStorage) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := s3DriverConstructor(validRoot, s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("", 
s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/", s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.ReducedRedundancy) + if err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.Bucket.GetResponse(standardDriverUnwrapped.s3Path(standardFilename)) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != "" { + t.Fatalf("unexpected storage class for standard file: %v", storageClass) + } + + rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.Bucket.GetResponse(rrDriverUnwrapped.s3Path(rrFilename)) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) + } + defer resp.Body.Close() + if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != string(s3.ReducedRedundancy) { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", storageClass) + } +} diff --git a/registry/storage/driver/storagedriver.go b/registry/storage/driver/storagedriver.go new file mode 100644 index 00000000..b220713f --- /dev/null +++ b/registry/storage/driver/storagedriver.go @@ -0,0 +1,171 @@ +package driver + +import ( + "context" + "fmt" + "io" + "regexp" + "strconv" + "strings" +) + +// Version is a string representing the storage driver version, of the form +// Major.Minor. 
+// The registry must accept storage drivers with equal major version and greater
+// minor version, but may not be compatible with older storage driver versions.
+type Version string
+
+// Major returns the major (primary) component of a version.
+func (version Version) Major() uint {
+    majorPart := strings.Split(string(version), ".")[0]
+    major, _ := strconv.ParseUint(majorPart, 10, 0)
+    return uint(major)
+}
+
+// Minor returns the minor (secondary) component of a version.
+func (version Version) Minor() uint {
+    minorPart := strings.Split(string(version), ".")[1]
+    minor, _ := strconv.ParseUint(minorPart, 10, 0)
+    return uint(minor)
+}
+
+// CurrentVersion is the current storage driver Version.
+const CurrentVersion Version = "0.1"
+
+// StorageDriver defines methods that a Storage Driver must implement for a
+// filesystem-like key/value object storage. Storage Drivers are automatically
+// registered via an internal registration mechanism, and generally created
+// via the StorageDriverFactory interface (https://godoc.org/github.com/docker/distribution/registry/storage/driver/factory).
+// Please see the aforementioned factory package for example code showing how to get an instance
+// of a StorageDriver.
+type StorageDriver interface {
+    // Name returns the human-readable "name" of the driver, useful in error
+    // messages and logging. By convention, this will just be the registration
+    // name, but drivers may provide other information here.
+    Name() string
+
+    // GetContent retrieves the content stored at "path" as a []byte.
+    // This should primarily be used for small objects.
+    GetContent(ctx context.Context, path string) ([]byte, error)
+
+    // PutContent stores the []byte content at a location designated by "path".
+    // This should primarily be used for small objects.
+    PutContent(ctx context.Context, path string, content []byte) error
+
+    // Reader retrieves an io.ReadCloser for the content stored at "path"
+    // with a given byte offset.
+    // May be used to resume reading a stream by providing a nonzero offset.
+    Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
+
+    // Writer returns a FileWriter which will store the content written to it
+    // at the location designated by "path" after the call to Commit.
+    Writer(ctx context.Context, path string, append bool) (FileWriter, error)
+
+    // Stat retrieves the FileInfo for the given path, including the current
+    // size in bytes and the creation time.
+    Stat(ctx context.Context, path string) (FileInfo, error)
+
+    // List returns a list of the objects that are direct descendants of the
+    // given path.
+    List(ctx context.Context, path string) ([]string, error)
+
+    // Move moves an object stored at sourcePath to destPath, removing the
+    // original object.
+    // Note: This may be no more efficient than a copy followed by a delete for
+    // many implementations.
+    Move(ctx context.Context, sourcePath string, destPath string) error
+
+    // Delete recursively deletes all objects stored at "path" and its subpaths.
+    Delete(ctx context.Context, path string) error
+
+    // URLFor returns a URL which may be used to retrieve the content stored at
+    // the given path, possibly using the given options.
+    // May return an ErrUnsupportedMethod in certain StorageDriver
+    // implementations.
+    URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error)
+
+    // Walk traverses a filesystem defined within driver, starting
+    // from the given path, calling f on each file.
+ // If the returned error from the WalkFn is ErrSkipDir and fileInfo refers + // to a directory, the directory will not be entered and Walk + // will continue the traversal. If fileInfo refers to a normal file, processing stops + Walk(ctx context.Context, path string, f WalkFn) error +} + +// FileWriter provides an abstraction for an opened writable file-like object in +// the storage backend. The FileWriter must flush all content written to it on +// the call to Close, but is only required to make its content readable on a +// call to Commit. +type FileWriter interface { + io.WriteCloser + + // Size returns the number of bytes written to this FileWriter. + Size() int64 + + // Cancel removes any written content from this FileWriter. + Cancel() error + + // Commit flushes all content written to this FileWriter and makes it + // available for future calls to StorageDriver.GetContent and + // StorageDriver.Reader. + Commit() error +} + +// PathRegexp is the regular expression which each file path must match. A +// file path is absolute, beginning with a slash and containing a positive +// number of path components separated by slashes, where each component is +// restricted to alphanumeric characters or a period, underscore, or +// hyphen. +var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) + +// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. +type ErrUnsupportedMethod struct { + DriverName string +} + +func (err ErrUnsupportedMethod) Error() string { + return fmt.Sprintf("%s: unsupported method", err.DriverName) +} + +// PathNotFoundError is returned when operating on a nonexistent path. +type PathNotFoundError struct { + Path string + DriverName string +} + +func (err PathNotFoundError) Error() string { + return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path) +} + +// InvalidPathError is returned when the provided path is malformed. +type InvalidPathError struct { + Path string + DriverName string +} + +func (err InvalidPathError) Error() string { + return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path) +} + +// InvalidOffsetError is returned when attempting to read or write from an +// invalid offset. +type InvalidOffsetError struct { + Path string + Offset int64 + DriverName string +} + +func (err InvalidOffsetError) Error() string { + return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path) +} + +// Error is a catch-all error type which captures an error string and +// the driver type on which it occurred. +type Error struct { + DriverName string + Enclosed error +} + +func (err Error) Error() string { + return fmt.Sprintf("%s: %s", err.DriverName, err.Enclosed) +} diff --git a/registry/storage/driver/swift/swift.go b/registry/storage/driver/swift/swift.go new file mode 100644 index 00000000..46201ee7 --- /dev/null +++ b/registry/storage/driver/swift/swift.go @@ -0,0 +1,934 @@ +// Package swift provides a storagedriver.StorageDriver implementation to +// store blobs in Openstack Swift object storage. +// +// This package leverages the ncw/swift client library for interfacing with +// Swift. +// +// It supports both TempAuth authentication and Keystone authentication +// (up to version 3). +// +// As Swift has a limit on the size of a single uploaded object (by default +// this is 5GB), the driver makes use of the Swift Large Object Support +// (http://docs.openstack.org/developer/swift/overview_large_objects.html). 
+// Only one container is used for both manifests and data objects. Manifests
+// are stored in the 'files' pseudo directory, data objects are stored under
+// 'segments'.
+package swift
+
+import (
+    "bufio"
+    "bytes"
+    "context"
+    "crypto/rand"
+    "crypto/sha1"
+    "crypto/tls"
+    "encoding/hex"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "strconv"
+    "strings"
+    "time"
+
+    "github.com/mitchellh/mapstructure"
+    "github.com/ncw/swift"
+
+    storagedriver "github.com/docker/distribution/registry/storage/driver"
+    "github.com/docker/distribution/registry/storage/driver/base"
+    "github.com/docker/distribution/registry/storage/driver/factory"
+    "github.com/docker/distribution/version"
+)
+
+const driverName = "swift"
+
+// defaultChunkSize defines the default size of a segment
+const defaultChunkSize = 20 * 1024 * 1024
+
+// minChunkSize defines the minimum size of a segment
+const minChunkSize = 1 << 20
+
+// contentType defines the Content-Type header associated with stored segments
+const contentType = "application/octet-stream"
+
+// readAfterWriteTimeout defines how long we wait for an object to appear after it has been uploaded
+var readAfterWriteTimeout = 15 * time.Second
+
+// readAfterWriteWait defines the time to sleep between two retries
+var readAfterWriteWait = 200 * time.Millisecond
+
+// Parameters encapsulates all of the driver parameters after all values have been set.
+type Parameters struct {
+    Username            string
+    Password            string
+    AuthURL             string
+    Tenant              string
+    TenantID            string
+    Domain              string
+    DomainID            string
+    TenantDomain        string
+    TenantDomainID      string
+    TrustID             string
+    Region              string
+    AuthVersion         int
+    Container           string
+    Prefix              string
+    EndpointType        string
+    InsecureSkipVerify  bool
+    ChunkSize           int
+    SecretKey           string
+    AccessKey           string
+    TempURLContainerKey bool
+    TempURLMethods      []string
+}
+
+// swiftInfo maps the JSON structure returned by the Swift /info endpoint
+type swiftInfo struct {
+    Swift struct {
+        Version string `mapstructure:"version"`
+    }
+    Tempurl struct {
+        Methods []string `mapstructure:"methods"`
+    }
+    BulkDelete struct {
+        MaxDeletesPerRequest int `mapstructure:"max_deletes_per_request"`
+    } `mapstructure:"bulk_delete"`
+}
+
+func init() {
+    factory.Register(driverName, &swiftDriverFactory{})
+}
+
+// swiftDriverFactory implements the factory.StorageDriverFactory interface
+type swiftDriverFactory struct{}
+
+func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+    return FromParameters(parameters)
+}
+
+type driver struct {
+    Conn                 *swift.Connection
+    Container            string
+    Prefix               string
+    BulkDeleteSupport    bool
+    BulkDeleteMaxDeletes int
+    ChunkSize            int
+    SecretKey            string
+    AccessKey            string
+    TempURLContainerKey  bool
+    TempURLMethods       []string
+}
+
+type baseEmbed struct {
+    base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift.
+// Objects are stored at absolute keys in the provided container.
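+//
+// A minimal construction sketch (the values are illustrative placeholders):
+//
+//    d, err := FromParameters(map[string]interface{}{
+//        "username":  "demo",
+//        "password":  "secret",
+//        "authurl":   "https://auth.example.com/v2.0",
+//        "container": "registry",
+//    })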
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - username +// - password +// - authurl +// - container +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params := Parameters{ + ChunkSize: defaultChunkSize, + InsecureSkipVerify: false, + } + + // Sanitize some entries before trying to decode parameters with mapstructure + // TenantID and Tenant when integers only and passed as ENV variables + // are considered as integer and not string. The parser fails in this + // case. + _, ok := parameters["tenant"] + if ok { + parameters["tenant"] = fmt.Sprint(parameters["tenant"]) + } + _, ok = parameters["tenantid"] + if ok { + parameters["tenantid"] = fmt.Sprint(parameters["tenantid"]) + } + + if err := mapstructure.Decode(parameters, ¶ms); err != nil { + return nil, err + } + + if params.Username == "" { + return nil, fmt.Errorf("No username parameter provided") + } + + if params.Password == "" { + return nil, fmt.Errorf("No password parameter provided") + } + + if params.AuthURL == "" { + return nil, fmt.Errorf("No authurl parameter provided") + } + + if params.Container == "" { + return nil, fmt.Errorf("No container parameter provided") + } + + if params.ChunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) + } + + return New(params) +} + +// New constructs a new Driver with the given Openstack Swift credentials and container name +func New(params Parameters) (*Driver, error) { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, + } + + ct := &swift.Connection{ + UserName: params.Username, + ApiKey: params.Password, + AuthUrl: params.AuthURL, + Region: params.Region, + AuthVersion: params.AuthVersion, + UserAgent: "distribution/" + version.Version, + Tenant: params.Tenant, + TenantId: params.TenantID, + Domain: params.Domain, + DomainId: params.DomainID, + TenantDomain: params.TenantDomain, + TenantDomainId: params.TenantDomainID, + TrustId: params.TrustID, + EndpointType: swift.EndpointType(params.EndpointType), + Transport: transport, + ConnectTimeout: 60 * time.Second, + Timeout: 15 * 60 * time.Second, + } + err := ct.Authenticate() + if err != nil { + return nil, fmt.Errorf("Swift authentication failed: %s", err) + } + + if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound { + if err := ct.ContainerCreate(params.Container, nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) + } + } else if err != nil { + return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err) + } + + d := &driver{ + Conn: ct, + Container: params.Container, + Prefix: params.Prefix, + ChunkSize: params.ChunkSize, + TempURLMethods: make([]string, 0), + AccessKey: params.AccessKey, + } + + info := swiftInfo{} + if config, err := d.Conn.QueryInfo(); err == nil { + _, d.BulkDeleteSupport = config["bulk_delete"] + + if err := mapstructure.Decode(config, &info); err == nil { + d.TempURLContainerKey = info.Swift.Version >= "2.3.0" + d.TempURLMethods = info.Tempurl.Methods + if d.BulkDeleteSupport { + d.BulkDeleteMaxDeletes = info.BulkDelete.MaxDeletesPerRequest + } + } + } else { + d.TempURLContainerKey = params.TempURLContainerKey + d.TempURLMethods = 
params.TempURLMethods
+    }
+
+    if len(d.TempURLMethods) > 0 {
+        secretKey := params.SecretKey
+        if secretKey == "" {
+            secretKey, _ = generateSecret()
+        }
+
+        // Since Swift 2.2.2, we can now set secret keys on containers
+        // in addition to the account secret keys. Use them in preference.
+        if d.TempURLContainerKey {
+            _, containerHeaders, err := d.Conn.Container(d.Container)
+            if err != nil {
+                return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err)
+            }
+
+            d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"]
+            if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) {
+                m := swift.Metadata{}
+                m["temp-url-key"] = secretKey
+                // Check the error from ContainerUpdate itself rather than the
+                // stale err from the earlier Container call.
+                if err := d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil {
+                    d.SecretKey = secretKey
+                }
+            }
+        } else {
+            // Use the account secret key
+            _, accountHeaders, err := d.Conn.Account()
+            if err != nil {
+                return nil, fmt.Errorf("Failed to fetch account info (%s)", err)
+            }
+
+            d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"]
+            if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) {
+                m := swift.Metadata{}
+                m["temp-url-key"] = secretKey
+                if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil {
+                    d.SecretKey = secretKey
+                }
+            }
+        }
+    }
+
+    return &Driver{
+        baseEmbed: baseEmbed{
+            Base: base.Base{
+                StorageDriver: d,
+            },
+        },
+    }, nil
+}
+
+// Implement the storagedriver.StorageDriver interface
+
+func (d *driver) Name() string {
+    return driverName
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
+    content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path))
+    if err == swift.ObjectNotFound {
+        return nil, storagedriver.PathNotFoundError{Path: path}
+    }
+    return content, err
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
+    err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, contentType)
+    if err == swift.ObjectNotFound {
+        return storagedriver.PathNotFoundError{Path: path}
+    }
+    return err
+}
+
+// Reader retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
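+//
+// For dynamic large objects whose segments have not all appeared yet, the
+// read is retried with exponential backoff for up to readAfterWriteTimeout
+// before giving up, since Swift listings are only eventually consistent.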
+func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" + + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + file, headers, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + return file, err + } + + //if this is a DLO and it is clear that segments are still missing, + //wait until they show up + _, isDLO := headers["X-Object-Manifest"] + size, err := file.Length() + if err != nil { + return file, err + } + if isDLO && size == 0 { + if time.Now().Add(waitingTime).After(endTime) { + return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + } + time.Sleep(waitingTime) + waitingTime *= 2 + continue + } + + //if not, then this reader will be fine + return file, nil + } +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + var ( + segments []swift.Object + segmentsPath string + err error + ) + + if !append { + segmentsPath, err = d.swiftSegmentPath(path) + if err != nil { + return nil, err + } + } else { + info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } else if err != nil { + return nil, err + } + manifest, ok := headers["X-Object-Manifest"] + if !ok { + segmentsPath, err = d.swiftSegmentPath(path) + if err != nil { + return nil, err + } + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegmentPath(segmentsPath, len(segments))); err != nil { + return nil, err + } + segments = []swift.Object{info} + } else { + _, segmentsPath = parseManifest(manifest) + if segments, err = d.getAllSegments(segmentsPath); err != nil { + return nil, err + } + } + } + + return d.newWriter(path, segmentsPath, segments), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + swiftPath := d.swiftPath(path) + opts := &swift.ObjectsOpts{ + Prefix: swiftPath, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), + } + + for _, obj := range objects { + if obj.PseudoDirectory && obj.Name == swiftPath+"/" { + fi.IsDir = true + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } else if obj.Name == swiftPath { + // The file exists. But on Swift 1.12, the 'bytes' field is always 0 so + // we need to do a separate HEAD request. + break + } + } + + //Don't trust an empty `objects` slice. A container listing can be + //outdated. 
For files, we can make a HEAD request on the object which + //reports existence (at least) much more reliably. + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + info, headers, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + //if this is a DLO and it is clear that segments are still missing, + //wait until they show up + _, isDLO := headers["X-Object-Manifest"] + if isDLO && info.Bytes == 0 { + if time.Now().Add(waitingTime).After(endTime) { + return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + } + time.Sleep(waitingTime) + waitingTime *= 2 + continue + } + + //otherwise, accept the result + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + var files []string + + prefix := d.swiftPath(path) + if prefix != "" { + prefix += "/" + } + + opts := &swift.ObjectsOpts{ + Prefix: prefix, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + for _, obj := range objects { + files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) + } + + if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") { + return files, storagedriver.PathNotFoundError{Path: path} + } + return files, err +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) + if err == nil { + if manifest, ok := headers["X-Object-Manifest"]; ok { + if err = d.createManifest(destPath, manifest); err != nil { + return err + } + err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) + } else { + err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) + } + } + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + opts := swift.ObjectsOpts{ + Prefix: d.swiftPath(path) + "/", + } + + objects, err := d.Conn.ObjectsAll(d.Container, &opts) + if err != nil { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + + for _, obj := range objects { + if obj.PseudoDirectory { + continue + } + if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { + manifest, ok := headers["X-Object-Manifest"] + if ok { + _, prefix := parseManifest(manifest) + segments, err := d.getAllSegments(prefix) + if err != nil { + return err + } + objects = append(objects, segments...) 
+ } + } else { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + + if d.BulkDeleteSupport && len(objects) > 0 && d.BulkDeleteMaxDeletes > 0 { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj.Name + } + + chunks, err := chunkFilenames(filenames, d.BulkDeleteMaxDeletes) + if err != nil { + return err + } + for _, chunk := range chunks { + _, err := d.Conn.BulkDelete(d.Container, chunk) + // Don't fail on ObjectNotFound because eventual consistency + // makes this situation normal. + if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } + } else { + for _, obj := range objects { + if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + } + + _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } else if err == swift.ObjectNotFound { + if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + } else { + return err + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + if d.SecretKey == "" { + return "", storagedriver.ErrUnsupportedMethod{} + } + + methodString := "GET" + method, ok := options["method"] + if ok { + if methodString, ok = method.(string); !ok { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + if methodString == "HEAD" { + // A "HEAD" request on a temporary URL is allowed if the + // signature was generated with "GET", "POST" or "PUT" + methodString = "GET" + } + + supported := false + for _, method := range d.TempURLMethods { + if method == methodString { + supported = true + break + } + } + + if !supported { + return "", storagedriver.ErrUnsupportedMethod{} + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime) + + if d.AccessKey != "" { + // On HP Cloud, the signature must be in the form of tenant_id:access_key:signature + url, _ := url.Parse(tempURL) + query := url.Query() + query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig"))) + url.RawQuery = query.Encode() + tempURL = url.String() + } + + return tempURL, nil +} + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + +func (d *driver) swiftPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") +} + +func (d *driver) swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = 
hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + //a simple container listing works 99.9% of the time + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + //build a lookup table by object name + hasObjectName := make(map[string]struct{}) + for _, segment := range segments { + hasObjectName[segment.Name] = struct{}{} + } + + //The container listing might be outdated (i.e. not contain all existing + //segment objects yet) because of temporary inconsistency (Swift is only + //eventually consistent!). Check its completeness. + segmentNumber := 0 + for { + segmentNumber++ + segmentPath := getSegmentPath(path, segmentNumber) + + if _, seen := hasObjectName[segmentPath]; seen { + continue + } + + //This segment is missing in the container listing. Use a more reliable + //request to check its existence. (HEAD requests on segments are + //guaranteed to return the correct metadata, except for the pathological + //case of an outage of large parts of the Swift cluster or its network, + //since every segment is only written once.) + segment, _, err := d.Conn.Object(d.Container, segmentPath) + switch err { + case nil: + //found new segment -> keep going, more might be missing + segments = append(segments, segment) + continue + case swift.ObjectNotFound: + //This segment is missing. Since we upload segments sequentially, + //there won't be any more segments after it. + return segments, nil + default: + return nil, err //unexpected error + } + } +} + +func (d *driver) createManifest(path string, segments string) error { + headers := make(swift.Headers) + headers["X-Object-Manifest"] = segments + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", contentType, headers) + if err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + if err := manifest.Close(); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + return nil +} + +func chunkFilenames(slice []string, maxSize int) (chunks [][]string, err error) { + if maxSize > 0 { + for offset := 0; offset < len(slice); offset += maxSize { + chunkSize := maxSize + if offset+chunkSize > len(slice) { + chunkSize = len(slice) - offset + } + chunks = append(chunks, slice[offset:offset+chunkSize]) + } + } else { + return nil, fmt.Errorf("Max chunk size must be > 0") + } + return +} + +func parseManifest(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} + +func generateSecret() (string, error) { + var secretBytes [32]byte + if _, err := rand.Read(secretBytes[:]); err != nil { + return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err) + } + return hex.EncodeToString(secretBytes[:]), nil +} + +func getSegmentPath(segmentsPath string, partNumber int) string { + return fmt.Sprintf("%s/%016d", segmentsPath, partNumber) +} + +type writer struct { + driver *driver + path string + segmentsPath string + size int64 + bw *bufio.Writer + closed bool + 
committed bool + cancelled bool +} + +func (d *driver) newWriter(path, segmentsPath string, segments []swift.Object) storagedriver.FileWriter { + var size int64 + for _, segment := range segments { + size += segment.Bytes + } + return &writer{ + driver: d, + path: path, + segmentsPath: segmentsPath, + size: size, + bw: bufio.NewWriterSize(&segmentWriter{ + conn: d.Conn, + container: d.Container, + segmentsPath: segmentsPath, + segmentNumber: len(segments) + 1, + maxChunkSize: d.ChunkSize, + }, d.ChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if !w.committed && !w.cancelled { + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + if err := w.waitForSegmentsToShowUp(); err != nil { + return err + } + } + w.closed = true + + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.Delete(context.Background(), w.path) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + + w.committed = true + return w.waitForSegmentsToShowUp() +} + +func (w *writer) waitForSegmentsToShowUp() error { + var err error + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + var info swift.Object + if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil { + if info.Bytes == w.size { + break + } + err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", w.path) + } + if time.Now().Add(waitingTime).After(endTime) { + break + } + time.Sleep(waitingTime) + waitingTime *= 2 + } + + return err +} + +type segmentWriter struct { + conn *swift.Connection + container string + segmentsPath string + segmentNumber int + maxChunkSize int +} + +func (sw *segmentWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += sw.maxChunkSize { + chunkSize := sw.maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + _, err := sw.conn.ObjectPut(sw.container, getSegmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil) + if err != nil { + return n, err + } + + sw.segmentNumber++ + n += chunkSize + } + + return n, nil +} diff --git a/registry/storage/driver/swift/swift_test.go b/registry/storage/driver/swift/swift_test.go new file mode 100644 index 00000000..dcd5e4ff --- /dev/null +++ b/registry/storage/driver/swift/swift_test.go @@ -0,0 +1,245 @@ +package swift + +import ( + "io/ioutil" + "os" + "reflect" + "strconv" + "strings" + "testing" + + 
"github.com/ncw/swift/swifttest" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var swiftDriverConstructor func(prefix string) (*Driver, error) + +func init() { + var ( + username string + password string + authURL string + tenant string + tenantID string + domain string + domainID string + tenantDomain string + tenantDomainID string + trustID string + container string + region string + AuthVersion int + endpointType string + insecureSkipVerify bool + secretKey string + accessKey string + containerKey bool + tempURLMethods []string + + swiftServer *swifttest.SwiftServer + err error + ) + username = os.Getenv("SWIFT_USERNAME") + password = os.Getenv("SWIFT_PASSWORD") + authURL = os.Getenv("SWIFT_AUTH_URL") + tenant = os.Getenv("SWIFT_TENANT_NAME") + tenantID = os.Getenv("SWIFT_TENANT_ID") + domain = os.Getenv("SWIFT_DOMAIN_NAME") + domainID = os.Getenv("SWIFT_DOMAIN_ID") + tenantDomain = os.Getenv("SWIFT_DOMAIN_NAME") + tenantDomainID = os.Getenv("SWIFT_DOMAIN_ID") + trustID = os.Getenv("SWIFT_TRUST_ID") + container = os.Getenv("SWIFT_CONTAINER_NAME") + region = os.Getenv("SWIFT_REGION_NAME") + AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) + endpointType = os.Getenv("SWIFT_ENDPOINT_TYPE") + insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) + secretKey = os.Getenv("SWIFT_SECRET_KEY") + accessKey = os.Getenv("SWIFT_ACCESS_KEY") + containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) + tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") + + if username == "" || password == "" || authURL == "" || container == "" { + if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { + panic(err) + } + username = "swifttest" + password = "swifttest" + authURL = swiftServer.AuthURL + container = "test" + } + + prefix, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(prefix) + + swiftDriverConstructor = func(root string) (*Driver, error) { + parameters := Parameters{ + username, + password, + authURL, + tenant, + tenantID, + domain, + domainID, + tenantDomain, + tenantDomainID, + trustID, + region, + AuthVersion, + container, + root, + endpointType, + insecureSkipVerify, + defaultChunkSize, + secretKey, + accessKey, + containerKey, + tempURLMethods, + } + + return New(parameters) + } + + driverConstructor := func() (storagedriver.StorageDriver, error) { + return swiftDriverConstructor(prefix) + } + + testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) +} + +func TestEmptyRootList(t *testing.T) { + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := swiftDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := swiftDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := swiftDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = 
rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + // Create an object with a path nested under the existing object + err = rootedDriver.PutContent(ctx, filename+"/file1", contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + err = rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to delete: %v", err) + } + + keys, err = rootedDriver.List(ctx, "/") + if err != nil { + t.Fatalf("failed to list objects after deletion: %v", err) + } + + if len(keys) != 0 { + t.Fatal("delete did not remove nested objects") + } +} + +func TestFilenameChunking(t *testing.T) { + // Test valid input and sizes + input := []string{"a", "b", "c", "d", "e"} + expecteds := [][][]string{ + { + {"a"}, + {"b"}, + {"c"}, + {"d"}, + {"e"}, + }, + { + {"a", "b"}, + {"c", "d"}, + {"e"}, + }, + { + {"a", "b", "c"}, + {"d", "e"}, + }, + { + {"a", "b", "c", "d"}, + {"e"}, + }, + { + {"a", "b", "c", "d", "e"}, + }, + { + {"a", "b", "c", "d", "e"}, + }, + } + for i, expected := range expecteds { + actual, err := chunkFilenames(input, i+1) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("chunk %v didn't match expected value %v", actual, expected) + } + if err != nil { + t.Fatalf("unexpected error chunking filenames: %v", err) + } + } + + // Test nil input + actual, err := chunkFilenames(nil, 5) + if len(actual) != 0 { + t.Fatal("chunks were returned when passed nil") + } + if err != nil { + t.Fatalf("unexpected error chunking filenames: %v", err) + } + + // Test 0 and < 0 sizes + actual, err = chunkFilenames(nil, 0) + if err == nil { + t.Fatal("expected error for size = 0") + } + actual, err = chunkFilenames(nil, -1) + if err == nil { + t.Fatal("expected error for size = -1") + } +} diff --git a/registry/storage/driver/testdriver/testdriver.go b/registry/storage/driver/testdriver/testdriver.go new file mode 100644 index 00000000..91254627 --- /dev/null +++ b/registry/storage/driver/testdriver/testdriver.go @@ -0,0 +1,72 @@ +package testdriver + +import ( + "context" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +const driverName = "testdriver" + +func init() { + factory.Register(driverName, &testDriverFactory{}) +} + +// testDriverFactory implements the factory.StorageDriverFactory interface. +type testDriverFactory struct{} + +func (factory *testDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +// TestDriver is a StorageDriver for testing purposes. The Writer returned by this driver +// simulates the case where Write operations are buffered. This causes the value returned by Size to lag +// behind until Close (or Commit, or Cancel) is called. 
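+// As a rough sketch of the observable behaviour (assuming the inmemory
+// driver underneath), each Write is held back until the following call:
+//
+//	w, _ := New().Writer(ctx, "/some/path", false)
+//	w.Write([]byte("abc")) // held back; not yet flushed downstream
+//	w.Write([]byte("def")) // flushes "abc", holds back "def"
+//	w.Close()              // flushes the final held-back chunk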
+type TestDriver struct { + storagedriver.StorageDriver +} + +type testFileWriter struct { + storagedriver.FileWriter + prevchunk []byte +} + +var _ storagedriver.StorageDriver = &TestDriver{} + +// New constructs a new StorageDriver for testing purposes. The Writer returned by this driver +// simulates the case where Write operations are buffered. This causes the value returned by Size to lag +// behind until Close (or Commit, or Cancel) is called. +func New() *TestDriver { + return &TestDriver{StorageDriver: inmemory.New()} +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (td *TestDriver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + fw, err := td.StorageDriver.Writer(ctx, path, append) + return &testFileWriter{FileWriter: fw}, err +} + +func (tfw *testFileWriter) Write(p []byte) (int, error) { + _, err := tfw.FileWriter.Write(tfw.prevchunk) + tfw.prevchunk = make([]byte, len(p)) + copy(tfw.prevchunk, p) + return len(p), err +} + +func (tfw *testFileWriter) Close() error { + tfw.Write(nil) + return tfw.FileWriter.Close() +} + +func (tfw *testFileWriter) Cancel() error { + tfw.Write(nil) + return tfw.FileWriter.Cancel() +} + +func (tfw *testFileWriter) Commit() error { + tfw.Write(nil) + return tfw.FileWriter.Commit() +} diff --git a/registry/storage/driver/testsuites/testsuites.go b/registry/storage/driver/testsuites/testsuites.go new file mode 100644 index 00000000..4f2f1ab0 --- /dev/null +++ b/registry/storage/driver/testsuites/testsuites.go @@ -0,0 +1,1272 @@ +package testsuites + +import ( + "bytes" + "context" + "crypto/sha1" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path" + "sort" + "strings" + "sync" + "testing" + "time" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "gopkg.in/check.v1" +) + +// Test hooks up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +// RegisterSuite registers an in-process storage driver test suite with +// the go test runner. +func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { + check.Suite(&DriverSuite{ + Constructor: driverConstructor, + SkipCheck: skipCheck, + ctx: context.Background(), + }) +} + +// SkipCheck is a function used to determine if a test suite should be skipped. +// If a SkipCheck returns a non-empty skip reason, the suite is skipped with +// the given reason. +type SkipCheck func() (reason string) + +// NeverSkip is a default SkipCheck which never skips the suite. +var NeverSkip SkipCheck = func() string { return "" } + +// DriverConstructor is a function which returns a new +// storagedriver.StorageDriver. +type DriverConstructor func() (storagedriver.StorageDriver, error) + +// DriverTeardown is a function which cleans up a suite's +// storagedriver.StorageDriver. +type DriverTeardown func() error + +// DriverSuite is a gocheck test suite designed to test a +// storagedriver.StorageDriver. The intended way to create a DriverSuite is +// with RegisterSuite. +type DriverSuite struct { + Constructor DriverConstructor + Teardown DriverTeardown + SkipCheck + storagedriver.StorageDriver + ctx context.Context +} + +// SetUpSuite sets up the gocheck test suite. 
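+// It consults the suite's SkipCheck before constructing the driver, so an
+// unconfigured environment skips the whole suite cleanly rather than
+// failing in the constructor.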
+func (suite *DriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } + d, err := suite.Constructor() + c.Assert(err, check.IsNil) + suite.StorageDriver = d +} + +// TearDownSuite tears down the gocheck test suite. +func (suite *DriverSuite) TearDownSuite(c *check.C) { + if suite.Teardown != nil { + err := suite.Teardown() + c.Assert(err, check.IsNil) + } +} + +// TearDownTest tears down the gocheck test. +// This causes the suite to abort if any files are left around in the storage +// driver. +func (suite *DriverSuite) TearDownTest(c *check.C) { + files, _ := suite.StorageDriver.List(suite.ctx, "/") + if len(files) > 0 { + c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) + } +} + +// TestRootExists ensures that all storage drivers have a root path by default. +func (suite *DriverSuite) TestRootExists(c *check.C) { + _, err := suite.StorageDriver.List(suite.ctx, "/") + if err != nil { + c.Fatalf(`the root path "/" should always exist: %v`, err) + } +} + +// TestValidPaths checks that various valid file paths are accepted by the +// storage driver. +func (suite *DriverSuite) TestValidPaths(c *check.C) { + contents := randomContents(64) + validFiles := []string{ + "/a", + "/2", + "/aa", + "/a.a", + "/0-9/abcdefg", + "/abcdefg/z.75", + "/abc/1.2.3.4.5-6_zyx/123.z/4", + "/docker/docker-registry", + "/123.abc", + "/abc./abc", + "/.abc", + "/a--b", + "/a-.b", + "/_.abc", + "/Docker/docker-registry", + "/Abc/Cba"} + + for _, filename := range validFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.deletePath(c, firstPart(filename)) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + } +} + +func (suite *DriverSuite) deletePath(c *check.C, path string) { + for tries := 2; tries > 0; tries-- { + err := suite.StorageDriver.Delete(suite.ctx, path) + if _, ok := err.(storagedriver.PathNotFoundError); ok { + err = nil + } + c.Assert(err, check.IsNil) + paths, err := suite.StorageDriver.List(suite.ctx, path) + if len(paths) == 0 { + break + } + time.Sleep(time.Second * 2) + } +} + +// TestInvalidPaths checks that various invalid file paths are rejected by the +// storage driver. +func (suite *DriverSuite) TestInvalidPaths(c *check.C) { + contents := randomContents(64) + invalidFiles := []string{ + "", + "/", + "abc", + "123.abc", + "//bcd", + "/abc_123/"} + + for _, filename := range invalidFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + // only delete if file was successfully written + if err == nil { + defer suite.deletePath(c, firstPart(filename)) + } + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + } +} + +// TestWriteRead1 tests a simple write-read workflow. +func (suite *DriverSuite) TestWriteRead1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead2 tests a simple write-read workflow with unicode data. 
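+// (The two bytes 0xc3 0x9f are the UTF-8 encoding of 'ß', U+00DF.)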
+func (suite *DriverSuite) TestWriteRead2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead3 tests a simple write-read workflow with a small string. +func (suite *DriverSuite) TestWriteRead3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead4 tests a simple write-read workflow with 1MB of data. +func (suite *DriverSuite) TestWriteRead4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompare(c, filename, contents) +} + +// TestTruncate tests that putting smaller contents than an original file does +// remove the excess contents. +func (suite *DriverSuite) TestTruncate(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) + + contents = randomContents(1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestReadNonexistent tests reading content from an empty path. +func (suite *DriverSuite) TestReadNonexistent(c *check.C) { + filename := randomPath(32) + _, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestWriteReadStreams1 tests a simple write-read streaming workflow. +func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams2 tests a simple write-read streaming workflow with +// unicode data. +func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams3 tests a simple write-read streaming workflow with a +// small amount of data. +func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB +// of data. +func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the +// storage driver safely. +func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage +// driver safely. 
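+// The payload is streamed rather than buffered: io.TeeReader feeds the
+// writer while a sha1 checksum accumulates on the side, so the full 5GB is
+// never held in memory. Condensed, the pattern is:
+//
+//	checksum := sha1.New()
+//	written, err := io.Copy(writer, io.TeeReader(contents, checksum))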
+func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { + if testing.Short() { + c.Skip("Skipping test in short mode") + } + + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + checksum := sha1.New() + var fileSize int64 = 5 * 1024 * 1024 * 1024 + + contents := newRandReader(fileSize) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + written, err := io.Copy(writer, io.TeeReader(contents, checksum)) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, fileSize) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + writtenChecksum := sha1.New() + io.Copy(writtenChecksum, reader) + + c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) +} + +// TestReaderWithOffset tests that the appropriate data is streamed when +// reading with a given offset. +func (suite *DriverSuite) TestReaderWithOffset(c *check.C) { + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + chunkSize := int64(32) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) + + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*2) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contentsChunk3) + + // Ensure we get an invalid offset error for negative offsets. + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, -1) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) + c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) + c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) + c.Assert(reader, check.IsNil) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + // Read past the end of the content and make sure we get a reader that + // returns 0 bytes and io.EOF + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3) + c.Assert(err, check.IsNil) + defer reader.Close() + + buf := make([]byte, chunkSize) + n, err := reader.Read(buf) + c.Assert(err, check.Equals, io.EOF) + c.Assert(n, check.Equals, 0) + + // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
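+ // (With chunkSize = 32 the file is 96 bytes, so this reads at offset 95:
+ // exactly one byte remains before the end.)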
+ reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3-1) + c.Assert(err, check.IsNil) + defer reader.Close() + + n, err = reader.Read(buf) + c.Assert(n, check.Equals, 1) + + // We don't care whether the io.EOF comes on this read or the first + // zero read, but the only error acceptable here is io.EOF. + if err != nil { + c.Assert(err, check.Equals, io.EOF) + } + + // Any more reads should result in zero bytes and io.EOF + n, err = reader.Read(buf) + c.Assert(n, check.Equals, 0) + c.Assert(err, check.Equals, io.EOF) +} + +// TestContinueStreamAppendLarge tests that a stream write can be appended to without +// corrupting the data with a large chunk size. +func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) { + suite.testContinueStreamAppend(c, int64(10*1024*1024)) +} + +// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only +// with a tiny chunk size in order to test corner cases for some cloud storage drivers. +func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { + suite.testContinueStreamAppend(c, int64(32)) +} + +func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + + fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, bytes.NewReader(contentsChunk1)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk1))) + + err = writer.Close() + c.Assert(err, check.IsNil) + + curSize := writer.Size() + c.Assert(curSize, check.Equals, int64(len(contentsChunk1))) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + c.Assert(err, check.IsNil) + c.Assert(writer.Size(), check.Equals, curSize) + + nn, err = io.Copy(writer, bytes.NewReader(contentsChunk2)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk2))) + + err = writer.Close() + c.Assert(err, check.IsNil) + + curSize = writer.Size() + c.Assert(curSize, check.Equals, 2*chunkSize) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + c.Assert(err, check.IsNil) + c.Assert(writer.Size(), check.Equals, curSize) + + nn, err = io.Copy(writer, bytes.NewReader(fullContents[curSize:])) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(fullContents[curSize:]))) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, fullContents) +} + +// TestReadNonexistentStream tests that reading a stream for a nonexistent path +// fails.
+func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { + filename := randomPath(32) + + _, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.Reader(suite.ctx, filename, 64) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestList checks the returned list of keys after populating a directory tree. +func (suite *DriverSuite) TestList(c *check.C) { + rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) + defer suite.deletePath(c, rootDirectory) + + doesnotexist := path.Join(rootDirectory, "nonexistent") + _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) + c.Assert(err, check.Equals, storagedriver.PathNotFoundError{ + Path: doesnotexist, + DriverName: suite.StorageDriver.Name(), + }) + + parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles := make([]string, 50) + for i := 0; i < len(childFiles); i++ { + childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles[i] = childFile + err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) + c.Assert(err, check.IsNil) + } + sort.Strings(childFiles) + + keys, err := suite.StorageDriver.List(suite.ctx, "/") + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{rootDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{parentDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) + c.Assert(err, check.IsNil) + + sort.Strings(keys) + c.Assert(keys, check.DeepEquals, childFiles) + + // A few checks to add here (check out #819 for more discussion on this): + // 1. Ensure that all paths are absolute. + // 2. Ensure that listings only include direct children. + // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). +} + +// TestMove checks that a moved object no longer exists at the source path and +// does exist at the destination. +func (suite *DriverSuite) TestMove(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.deletePath(c, firstPart(sourcePath)) + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestMoveOverwrite checks that a moved object no longer exists at the source +// path and overwrites the contents at the destination. 
+func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { + sourcePath := randomPath(32) + destPath := randomPath(32) + sourceContents := randomContents(32) + destContents := randomContents(64) + + defer suite.deletePath(c, firstPart(sourcePath)) + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, sourceContents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestMoveNonexistent checks that moving a nonexistent key fails and does not +// delete the data at the destination path. +func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) +} + +// TestMoveInvalid provides various checks for invalid moves. +func (suite *DriverSuite) TestMoveInvalid(c *check.C) { + contents := randomContents(32) + + // Create a regular file. + err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) + c.Assert(err, check.IsNil) + defer suite.deletePath(c, "/notadir") + + // Now try to move a non-existent file under it. 
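+ // ("/notadir" was written as a regular file, so "/notadir/foo" cannot
+ // exist; which concrete error a driver returns here varies, hence only a
+ // non-nil result is asserted below.)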
+ err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") + c.Assert(err, check.NotNil) // non-nil error +} + +// TestDelete checks that the delete operation removes data from the storage +// driver +func (suite *DriverSuite) TestDelete(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, filename) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestURLFor checks that the URLFor method functions properly, but only if it +// is implemented +func (suite *DriverSuite) TestURLFor(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) + if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { + return + } + c.Assert(err, check.IsNil) + + response, err := http.Get(url) + c.Assert(err, check.IsNil) + defer response.Body.Close() + + read, err := ioutil.ReadAll(response.Body) + c.Assert(err, check.IsNil) + c.Assert(read, check.DeepEquals, contents) + + url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) + if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { + return + } + c.Assert(err, check.IsNil) + + response, err = http.Head(url) + c.Assert(response.StatusCode, check.Equals, 200) + c.Assert(response.ContentLength, check.Equals, int64(32)) +} + +// TestDeleteNonexistent checks that removing a nonexistent key fails. +func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { + filename := randomPath(32) + err := suite.StorageDriver.Delete(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestDeleteFolder checks that deleting a folder removes all child elements. 
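+// A "folder" in storage-driver terms is just a common path prefix, so a
+// Delete on the prefix is expected to remove every object beneath it.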
+func (suite *DriverSuite) TestDeleteFolder(c *check.C) { + dirname := randomPath(32) + filename1 := randomPath(32) + filename2 := randomPath(32) + filename3 := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(dirname)) + + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, dirname) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestDeleteOnlyDeletesSubpaths checks that deleting path A does not +// delete path B when A is a prefix of B but B is not a subpath of A (so that +// deleting "/a" does not delete "/ab"). This matters for services like S3 that +// do not implement directories. 
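+// A prefix-listing backend must therefore treat the "/" delimiter as
+// significant: only keys under "a/" are children of "/a", while "ab" merely
+// shares a byte prefix with it.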
+func (suite *DriverSuite) TestDeleteOnlyDeletesSubpaths(c *check.C) { + dirname := randomPath(32) + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(dirname)) + + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename+"suffix"), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname, filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename+"suffix")) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, dirname)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname, filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename)) + c.Assert(err, check.IsNil) +} + +// TestStatCall verifies the implementation of the storagedriver's Stat call. +func (suite *DriverSuite) TestStatCall(c *check.C) { + content := randomContents(4096) + dirPath := randomPath(32) + fileName := randomFilename(32) + filePath := path.Join(dirPath, fileName) + + defer suite.deletePath(c, firstPart(dirPath)) + + // Call on non-existent file/dir, check error. + fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + c.Assert(fi, check.IsNil) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + c.Assert(fi, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) + c.Assert(err, check.IsNil) + + // Call on regular file, check results + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Path(), check.Equals, filePath) + c.Assert(fi.Size(), check.Equals, int64(len(content))) + c.Assert(fi.IsDir(), check.Equals, false) + createdTime := fi.ModTime() + + // Sleep and modify the file + time.Sleep(time.Second * 10) + content = randomContents(4096) + err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) + c.Assert(err, check.IsNil) + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency) + + // Check if the modification time is after the creation time.
+ // In case of cloud storage services, storage frontend nodes might have + // time drift between them; however, the sleep before the update above + // should absorb that drift. + modTime := fi.ModTime() + if !modTime.After(createdTime) { + c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime) + } + + // Call on directory (do not check ModTime as dirs don't need to support it) + fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Path(), check.Equals, dirPath) + c.Assert(fi.Size(), check.Equals, int64(0)) + c.Assert(fi.IsDir(), check.Equals, true) +} + +// TestPutContentMultipleTimes checks that the storage driver can overwrite content +// on subsequent puts. Validates that PutContent does not have to work +// with an offset like Writer does and overwrites the file entirely +// rather than writing the data to the [0,len(data)) range of the file. +func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { + filename := randomPath(32) + contents := randomContents(4096) + + defer suite.deletePath(c, firstPart(filename)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + contents = randomContents(2048) // upload a different, smaller file + err = suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contents) +} + +// TestConcurrentStreamReads checks that multiple clients can safely read from +// the same file simultaneously with various offsets. +func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { + var filesize int64 = 128 * 1024 * 1024 + + if testing.Short() { + filesize = 10 * 1024 * 1024 + c.Log("Reducing file size to 10MB for short mode") + } + + filename := randomPath(32) + contents := randomContents(filesize) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + var wg sync.WaitGroup + + readContents := func() { + defer wg.Done() + offset := rand.Int63n(int64(len(contents))) + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) + c.Assert(err, check.IsNil) + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contents[offset:]) + } + + wg.Add(10) + for i := 0; i < 10; i++ { + go readContents() + } + wg.Wait() +} + +// TestConcurrentFileStreams checks that multiple *os.File objects can be passed +// in to Writer concurrently without hanging.
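+// Each goroutine drives a complete write-then-read cycle through
+// testFileStreams; the sync.WaitGroup is what holds the test open until
+// every stream has finished.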
+func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { + numStreams := 32 + + if testing.Short() { + numStreams = 8 + c.Log("Reducing number of streams to 8 for short mode") + } + + var wg sync.WaitGroup + + testStream := func(size int64) { + defer wg.Done() + suite.testFileStreams(c, size) + } + + wg.Add(numStreams) + for i := numStreams; i > 0; i-- { + go testStream(int64(numStreams) * 1024 * 1024) + } + + wg.Wait() +} + +// TODO (brianbland): evaluate the relevancy of this test +// TestEventualConsistency checks that if stat says that a file is a certain size, then +// you can freely read from the file (this is the only guarantee that the driver needs to provide) +// func (suite *DriverSuite) TestEventualConsistency(c *check.C) { +// if testing.Short() { +// c.Skip("Skipping test in short mode") +// } +// +// filename := randomPath(32) +// defer suite.deletePath(c, firstPart(filename)) +// +// var offset int64 +// var misswrites int +// var chunkSize int64 = 32 +// +// for i := 0; i < 1024; i++ { +// contents := randomContents(chunkSize) +// read, err := suite.StorageDriver.Writer(suite.ctx, filename, offset, bytes.NewReader(contents)) +// c.Assert(err, check.IsNil) +// +// fi, err := suite.StorageDriver.Stat(suite.ctx, filename) +// c.Assert(err, check.IsNil) +// +// // We are most concerned with being able to read data as soon as Stat declares +// // it is uploaded. This is the strongest guarantee that some drivers (that guarantee +// // at best eventual consistency) absolutely need to provide. +// if fi.Size() == offset+chunkSize { +// reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) +// c.Assert(err, check.IsNil) +// +// readContents, err := ioutil.ReadAll(reader) +// c.Assert(err, check.IsNil) +// +// c.Assert(readContents, check.DeepEquals, contents) +// +// reader.Close() +// offset += read +// } else { +// misswrites++ +// } +// } +// +// if misswrites > 0 { +// c.Log("There were " + string(misswrites) + " occurrences of a write not being instantly available.") +// } +// +// c.Assert(misswrites, check.Not(check.Equals), 1024) +// } + +// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files +func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 0) +} + +// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files +func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024) +} + +// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files +func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024*1024) +} + +// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files +func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + } +} + +// BenchmarkStreamEmptyFiles benchmarks Writer/Reader for 0B files +func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 0) +} 
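+
+// The Benchmark* methods here run under gocheck rather than the stock
+// testing benchmark runner, so (assuming a typical gocheck setup) they are
+// selected with gocheck's own flags, along the lines of:
+//
+//	go test -check.b -check.f 'BenchmarkStream.*' ./registry/storage/driver/...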
+ +// BenchmarkStream1KBFiles benchmarks Writer/Reader for 1KB files +func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024) +} + +// BenchmarkStream1MBFiles benchmarks Writer/Reader for 1MB files +func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024) +} + +// BenchmarkStream1GBFiles benchmarks Writer/Reader for 1GB files +func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + written, err := io.Copy(writer, bytes.NewReader(randomContents(size))) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, size) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + rc, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + rc.Close() + } +} + +// BenchmarkList5Files benchmarks List for 5 small files +func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { + suite.benchmarkListFiles(c, 5) +} + +// BenchmarkList50Files benchmarks List for 50 small files +func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { + suite.benchmarkListFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := int64(0); i < numFiles; i++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + + c.ResetTimer() + for i := 0; i < c.N; i++ { + files, err := suite.StorageDriver.List(suite.ctx, parentDir) + c.Assert(err, check.IsNil) + c.Assert(int64(len(files)), check.Equals, numFiles) + } +} + +// BenchmarkDelete5Files benchmarks Delete for 5 small files +func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 5) +} + +// BenchmarkDelete50Files benchmarks Delete for 50 small files +func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { + for i := 0; i < c.N; i++ { + parentDir := randomPath(8) + defer suite.deletePath(c, firstPart(parentDir)) + + c.StopTimer() + for j := int64(0); j < numFiles; j++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + c.StartTimer() + + // This is the operation we're benchmarking + err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + c.Assert(err, check.IsNil) + } +} + +func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { + tf, err := ioutil.TempFile("", "tf") + c.Assert(err, check.IsNil) + defer os.Remove(tf.Name()) + defer tf.Close() + + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + contents := randomContents(size) + + _, err = tf.Write(contents) + c.Assert(err, check.IsNil) + + tf.Sync() + tf.Seek(0, os.SEEK_SET) + + writer, err := suite.StorageDriver.Writer(suite.ctx, 
filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, tf) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, size) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { + defer suite.deletePath(c, firstPart(filename)) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, bytes.NewReader(contents)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contents))) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := make([]byte, length) + wasSeparator := true + for i := range b { + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } + } + return string(b) +} + +// randomBytes pre-allocates all of the memory sizes needed for the test. If +// anything panics while accessing randomBytes, just make this number bigger. 
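+// (128 << 20 is 128MiB; note that the 1GB benchmarks above ask
+// randomContents for more than that and would trip this panic unless the
+// buffer is enlarged first.)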
+var randomBytes = make([]byte, 128<<20) + +func init() { + _, _ = rand.Read(randomBytes) // always returns len(randomBytes) and nil error +} + +func randomContents(length int64) []byte { + return randomBytes[:length] +} + +type randReader struct { + r int64 + m sync.Mutex +} + +func (rr *randReader) Read(p []byte) (n int, err error) { + rr.m.Lock() + defer rr.m.Unlock() + + toread := int64(len(p)) + if toread > rr.r { + toread = rr.r + } + n = copy(p, randomContents(toread)) + rr.r -= int64(n) + + if rr.r <= 0 { + err = io.EOF + } + + return +} + +func newRandReader(n int64) *randReader { + return &randReader{r: n} +} + +func firstPart(filePath string) string { + if filePath == "" { + return "/" + } + for { + if filePath[len(filePath)-1] == '/' { + filePath = filePath[:len(filePath)-1] + } + + dir, file := path.Split(filePath) + if dir == "" && file == "" { + return "/" + } + if dir == "/" || dir == "" { + return "/" + file + } + if file == "" { + return dir + } + filePath = dir + } +} diff --git a/registry/storage/driver/walk.go b/registry/storage/driver/walk.go new file mode 100644 index 00000000..2a5bc51f --- /dev/null +++ b/registry/storage/driver/walk.go @@ -0,0 +1,52 @@ +package driver + +import ( + "context" + "errors" + "sort" +) + +// ErrSkipDir is used as a return value from onFileFunc to indicate that +// the directory named in the call is to be skipped. It is not returned +// as an error by any function. +var ErrSkipDir = errors.New("skip this directory") + +// WalkFn is called once per file by Walk +type WalkFn func(fileInfo FileInfo) error + +// WalkFallback traverses a filesystem defined within driver, starting +// from the given path, calling f on each file. It uses the List method and Stat to drive itself. +// If the returned error from the WalkFn is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. If fileInfo refers to a normal file, processing stops +func WalkFallback(ctx context.Context, driver StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) + if err != nil { + return err + } + sort.Stable(sort.StringSlice(children)) + for _, child := range children { + // TODO(stevvooe): Calling driver.Stat for every entry is quite + // expensive when running against backends with a slow Stat + // implementation, such as s3. This is very likely a serious + // performance bottleneck. 
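+		// (A driver whose List returns full FileInfo could skip this
+		// per-entry Stat; this fallback stays generic at that cost.)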
+ fileInfo, err := driver.Stat(ctx, child) + if err != nil { + return err + } + err = f(fileInfo) + if err == nil && fileInfo.IsDir() { + if err := WalkFallback(ctx, driver, child, f); err != nil { + return err + } + } else if err == ErrSkipDir { + // Stop iteration if it's a file, otherwise noop if it's a directory + if !fileInfo.IsDir() { + return nil + } + } else if err != nil { + return err + } + } + return nil +} diff --git a/registry/storage/error.go b/registry/storage/error.go new file mode 100644 index 00000000..10913658 --- /dev/null +++ b/registry/storage/error.go @@ -0,0 +1,9 @@ +package storage + +import "fmt" + +// pushError formats an error type given a path and an error +// and pushes it to a slice of errors +func pushError(errors []error, path string, err error) []error { + return append(errors, fmt.Errorf("%s: %s", path, err)) +} diff --git a/registry/storage/filereader.go b/registry/storage/filereader.go new file mode 100644 index 00000000..2050e43d --- /dev/null +++ b/registry/storage/filereader.go @@ -0,0 +1,177 @@ +package storage + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// TODO(stevvooe): Set an optimal buffer size here. We'll have to +// understand the latency characteristics of the underlying network to +// set this correctly, so we may want to leave it to the driver. For +// out of process drivers, we'll have to optimize this buffer size for +// local communication. +const fileReaderBufferSize = 4 << 20 + +// fileReader provides a read seeker interface to files stored in +// storagedriver. Used to implement part of layer interface and will be used +// to implement read side of LayerUpload. +type fileReader struct { + driver storagedriver.StorageDriver + + ctx context.Context + + // identifying fields + path string + size int64 // size is the total size, must be set. + + // mutable fields + rc io.ReadCloser // remote read closer + brd *bufio.Reader // internal buffered io + offset int64 // offset is the current read offset + err error // terminal error, if set, reader is closed +} + +// newFileReader initializes a file reader for the remote file. The reader +// takes on the size and path that must be determined externally with a stat +// call. The reader operates optimistically, assuming that the file is already +// there. +func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { + return &fileReader{ + ctx: ctx, + driver: driver, + path: path, + size: size, + }, nil +} + +func (fr *fileReader) Read(p []byte) (n int, err error) { + if fr.err != nil { + return 0, fr.err + } + + rd, err := fr.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + fr.offset += int64(n) + + // Simulate io.EOF error if we reach filesize. + if err == nil && fr.offset >= fr.size { + err = io.EOF + } + + return n, err +} + +func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { + if fr.err != nil { + return 0, fr.err + } + + var err error + newOffset := fr.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = fr.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = fmt.Errorf("cannot seek to negative position") + } else { + if fr.offset != newOffset { + fr.reset() + } + + // No problems, set the offset.
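+		// (The reset above merely drops the remote reader; the next Read
+		// reopens it lazily at this new offset.)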
+ fr.offset = newOffset + } + + return fr.offset, err +} + +func (fr *fileReader) Close() error { + return fr.closeWithErr(fmt.Errorf("fileReader: closed")) +} + +// reader prepares the underlying reader at the current read offset, ensuring +// it's buffered and ready to go. +func (fr *fileReader) reader() (io.Reader, error) { + if fr.err != nil { + return nil, fr.err + } + + if fr.rc != nil { + return fr.brd, nil + } + + // If we don't have a reader, open one up. + rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // NOTE(stevvooe): If the path is not found, we simply return a + // reader that returns io.EOF. However, we do not set fr.rc, + // allowing future attempts at getting a reader to possibly + // succeed if the file turns up later. + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + default: + return nil, err + } + } + + fr.rc = rc + + if fr.brd == nil { + fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) + } else { + fr.brd.Reset(fr.rc) + } + + return fr.brd, nil +} + +// resetReader resets the reader, forcing the read method to open up a new +// connection and rebuild the buffered reader. This should be called when the +// offset and the reader will become out of sync, such as during a seek +// operation. +func (fr *fileReader) reset() { + if fr.err != nil { + return + } + if fr.rc != nil { + fr.rc.Close() + fr.rc = nil + } +} + +func (fr *fileReader) closeWithErr(err error) error { + if fr.err != nil { + return fr.err + } + + fr.err = err + + // close and release reader chain + if fr.rc != nil { + fr.rc.Close() + } + + fr.rc = nil + fr.brd = nil + + return fr.err +} diff --git a/registry/storage/filereader_test.go b/registry/storage/filereader_test.go new file mode 100644 index 00000000..e522d605 --- /dev/null +++ b/registry/storage/filereader_test.go @@ -0,0 +1,194 @@ +package storage + +import ( + "bytes" + "io" + mrand "math/rand" + "os" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/opencontainers/go-digest" +) + +func TestSimpleRead(t *testing.T) { + ctx := context.Background() + content := make([]byte, 1<<20) + n, err := mrand.Read(content) + if err != nil { + t.Fatalf("unexpected error building random data: %v", err) + } + + if n != len(content) { + t.Fatalf("random read didn't fill buffer") + } + + dgst, err := digest.FromReader(bytes.NewReader(content)) + if err != nil { + t.Fatalf("unexpected error digesting random content: %v", err) + } + + driver := inmemory.New() + path := "/random" + + if err := driver.PutContent(ctx, path, content); err != nil { + t.Fatalf("error putting random content: %v", err) + } + + fr, err := newFileReader(ctx, driver, path, int64(len(content))) + if err != nil { + t.Fatalf("error allocating file reader: %v", err) + } + + verifier := dgst.Verifier() + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify read data") + } +} + +func TestFileReaderSeek(t *testing.T) { + driver := inmemory.New() + pattern := "01234567890ab" // prime length block + repetitions := 1024 + path := "/patterned" + content := bytes.Repeat([]byte(pattern), repetitions) + ctx := context.Background() + + if err := driver.PutContent(ctx, path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(ctx, driver, path, int64(len(content))) + + if err != nil { + t.Fatalf("unexpected error creating file reader: %v", err)
+ } + + // Seek all over the place, in blocks of pattern size and make sure we get + // the right data. + for _, repetition := range mrand.Perm(repetitions - 1) { + targetOffset := int64(len(pattern) * repetition) + // Seek to a multiple of pattern size and read pattern size bytes + offset, err := fr.Seek(targetOffset, os.SEEK_SET) + if err != nil { + t.Fatalf("unexpected error seeking: %v", err) + } + + if offset != targetOffset { + t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) + } + + p := make([]byte, len(pattern)) + + n, err := fr.Read(p) + if err != nil { + t.Fatalf("error reading pattern: %v", err) + } + + if n != len(pattern) { + t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) + } + + if string(p) != pattern { + t.Fatalf("incorrect read content: %q != %q", p, pattern) + } + + // Check offset + current, err := fr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if current != targetOffset+int64(len(pattern)) { + t.Fatalf("unexpected offset after read: %d != %d", current, targetOffset+int64(len(pattern))) + } + } + + start, err := fr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking to start: %v", err) + } + + if start != 0 { + t.Fatalf("expected to seek to start: %v != 0", start) + } + + end, err := fr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatalf("error seeking to end: %v", err) + } + + if end != int64(len(content)) { + t.Fatalf("expected to seek to end: %v != %v", end, len(content)) + } + + // Seek before start, ensure error. + before, err := fr.Seek(-1, os.SEEK_SET) + if err == nil { + t.Fatalf("error expected, returned offset=%v", before) + } + + // Seek after end, ensure no error and an io.EOF read. + after, err := fr.Seek(1, os.SEEK_END) + if err != nil { + t.Fatalf("unexpected error seeking past end, returned offset=%v", after) + } + + p := make([]byte, 16) + n, err := fr.Read(p) + + if n != 0 { + t.Fatalf("bytes read %d != %d", n, 0) + } + + if err != io.EOF { + t.Fatalf("expected io.EOF, got %v", err) + } +} + +// TestFileReaderNonExistentFile ensures the reader behaves as expected with a +// missing or zero-length remote file. While the file may not exist, the +// reader should not error out on creation and should return 0-bytes from the +// read method, with an io.EOF error. +func TestFileReaderNonExistentFile(t *testing.T) { + driver := inmemory.New() + fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10) + if err != nil { + t.Fatalf("unexpected error initializing reader: %v", err) + } + + var buf [1024]byte + + n, err := fr.Read(buf[:]) + if n != 0 { + t.Fatalf("non-zero byte read reported: %d != 0", n) + } + + if err != io.EOF { + t.Fatalf("read on missing file should return io.EOF, got %v", err) + } +} + +// TestFileReaderErrors covers the various error return types for different +// conditions that can arise when reading a layer. +func TestFileReaderErrors(t *testing.T) { + // TODO(stevvooe): We need to cover error return types, driven by the + // errors returned via the HTTP API. For now, here is an incomplete list: + // + // 1. Layer Not Found: returned when layer is not found or access is + // denied. + // 2. Layer Unavailable: returned when link references are unresolved, + // but layer is known to the registry. + // 3. Layer Invalid: This may be split into more errors, but should be + // returned when name or tarsum does not reference a valid layer. We + // may also need something to communicate layer verification errors + // for the inline tarsum check.
+ // 4. Timeout: timeouts to backend. Need to better understand these + // failure cases and how the storage driver propagates these errors + // up the stack. +} diff --git a/registry/storage/garbagecollect.go b/registry/storage/garbagecollect.go new file mode 100644 index 00000000..48d428a8 --- /dev/null +++ b/registry/storage/garbagecollect.go @@ -0,0 +1,154 @@ +package storage + +import ( + "context" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +func emit(format string, a ...interface{}) { + fmt.Printf(format+"\n", a...) +} + +// GCOpts contains options for garbage collector +type GCOpts struct { + DryRun bool + RemoveUntagged bool +} + +// ManifestDel contains manifest structure which will be deleted +type ManifestDel struct { + Name string + Digest digest.Digest + Tags []string +} + +// MarkAndSweep performs a mark and sweep of registry data +func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, opts GCOpts) error { + repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) + if !ok { + return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") + } + + // mark + markSet := make(map[digest.Digest]struct{}) + manifestArr := make([]ManifestDel, 0) + err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { + emit(repoName) + + var err error + named, err := reference.WithName(repoName) + if err != nil { + return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) + } + repository, err := registry.Repository(ctx, named) + if err != nil { + return fmt.Errorf("failed to construct repository: %v", err) + } + + manifestService, err := repository.Manifests(ctx) + if err != nil { + return fmt.Errorf("failed to construct manifest service: %v", err) + } + + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator") + } + + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + if opts.RemoveUntagged { + // fetch all tags where this manifest is the latest one + tags, err := repository.Tags(ctx).Lookup(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + return fmt.Errorf("failed to retrieve tags for digest %v: %v", dgst, err) + } + if len(tags) == 0 { + emit("manifest eligible for deletion: %s", dgst) + // fetch all tags from repository + // all of these tags could contain manifest in history + // which means that we need check (and delete) those references when deleting manifest + allTags, err := repository.Tags(ctx).All(ctx) + if err != nil { + return fmt.Errorf("failed to retrieve tags %v", err) + } + manifestArr = append(manifestArr, ManifestDel{Name: repoName, Digest: dgst, Tags: allTags}) + return nil + } + } + // Mark the manifest's blob + emit("%s: marking manifest %s ", repoName, dgst) + markSet[dgst] = struct{}{} + + manifest, err := manifestService.Get(ctx, dgst) + if err != nil { + return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err) + } + + descriptors := manifest.References() + for _, descriptor := range descriptors { + markSet[descriptor.Digest] = struct{}{} + emit("%s: marking blob %s", repoName, descriptor.Digest) + } + + return nil + }) + + if err != nil { + // In certain situations such as unfinished uploads, deleting all + // tags in S3 or removing the _manifests 
folder manually, this + // error may be of type PathNotFound. + // + // In these cases we can continue marking other manifests safely. + if _, ok := err.(driver.PathNotFoundError); ok { + return nil + } + } + + return err + }) + + if err != nil { + return fmt.Errorf("failed to mark: %v", err) + } + + // sweep + vacuum := NewVacuum(ctx, storageDriver) + if !opts.DryRun { + for _, obj := range manifestArr { + err = vacuum.RemoveManifest(obj.Name, obj.Digest, obj.Tags) + if err != nil { + return fmt.Errorf("failed to delete manifest %s: %v", obj.Digest, err) + } + } + } + blobService := registry.Blobs() + deleteSet := make(map[digest.Digest]struct{}) + err = blobService.Enumerate(ctx, func(dgst digest.Digest) error { + // check if digest is in markSet. If not, delete it! + if _, ok := markSet[dgst]; !ok { + deleteSet[dgst] = struct{}{} + } + return nil + }) + if err != nil { + return fmt.Errorf("error enumerating blobs: %v", err) + } + emit("\n%d blobs marked, %d blobs and %d manifests eligible for deletion", len(markSet), len(deleteSet), len(manifestArr)) + for dgst := range deleteSet { + emit("blob eligible for deletion: %s", dgst) + if opts.DryRun { + continue + } + err = vacuum.RemoveBlob(string(dgst)) + if err != nil { + return fmt.Errorf("failed to delete blob %s: %v", dgst, err) + } + } + + return err +} diff --git a/registry/storage/garbagecollect_test.go b/registry/storage/garbagecollect_test.go new file mode 100644 index 00000000..1694f891 --- /dev/null +++ b/registry/storage/garbagecollect_test.go @@ -0,0 +1,502 @@ +package storage + +import ( + "io" + "path" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +type image struct { + manifest distribution.Manifest + manifestDigest digest.Digest + layers map[digest.Digest]io.ReadSeeker +} + +func createRegistry(t *testing.T, driver driver.StorageDriver, options ...RegistryOption) distribution.Namespace { + ctx := context.Background() + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + options = append([]RegistryOption{EnableDelete, Schema1SigningKey(k)}, options...) + registry, err := NewRegistry(ctx, driver, options...) 
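+	// EnableDelete and Schema1SigningKey(k) are prepended above so that
+	// every test registry supports manifest deletion and can accept signed
+	// schema1 manifests; callers may still pass further options.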
+ if err != nil { + t.Fatalf("Failed to construct namespace") + } + return registry +} + +func makeRepository(t *testing.T, registry distribution.Namespace, name string) distribution.Repository { + ctx := context.Background() + + // Initialize a dummy repository + named, err := reference.WithName(name) + if err != nil { + t.Fatalf("Failed to parse name %s: %v", name, err) + } + + repo, err := registry.Repository(ctx, named) + if err != nil { + t.Fatalf("Failed to construct repository: %v", err) + } + return repo +} + +func makeManifestService(t *testing.T, repository distribution.Repository) distribution.ManifestService { + ctx := context.Background() + + manifestService, err := repository.Manifests(ctx) + if err != nil { + t.Fatalf("Failed to construct manifest store: %v", err) + } + return manifestService +} + +func allManifests(t *testing.T, manifestService distribution.ManifestService) map[digest.Digest]struct{} { + ctx := context.Background() + allManMap := make(map[digest.Digest]struct{}) + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + t.Fatalf("unable to convert ManifestService into ManifestEnumerator") + } + err := manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + allManMap[dgst] = struct{}{} + return nil + }) + if err != nil { + t.Fatalf("Error getting all manifests: %v", err) + } + return allManMap +} + +func allBlobs(t *testing.T, registry distribution.Namespace) map[digest.Digest]struct{} { + ctx := context.Background() + blobService := registry.Blobs() + allBlobsMap := make(map[digest.Digest]struct{}) + err := blobService.Enumerate(ctx, func(dgst digest.Digest) error { + allBlobsMap[dgst] = struct{}{} + return nil + }) + if err != nil { + t.Fatalf("Error getting all blobs: %v", err) + } + return allBlobsMap +} + +func uploadImage(t *testing.T, repository distribution.Repository, im image) digest.Digest { + // upload layers + err := testutil.UploadBlobs(repository, im.layers) + if err != nil { + t.Fatalf("layer upload failed: %v", err) + } + + // upload manifest + ctx := context.Background() + manifestService := makeManifestService(t, repository) + manifestDigest, err := manifestService.Put(ctx, im.manifest) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + return manifestDigest +} + +func uploadRandomSchema1Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema1Manifest(digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func uploadRandomSchema2Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema2Manifest(repository, digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func 
TestNoDeletionNoEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "palailogos") + manifestService, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + uploadRandomSchema2Image(t, repo) + + // construct manifestlist for fun. + blobstatter := registry.BlobStatter() + manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ + image1.manifestDigest, image2.manifestDigest}) + if err != nil { + t.Fatalf("Failed to make manifest list: %v", err) + } + + _, err = manifestService.Put(ctx, manifestList) + if err != nil { + t.Fatalf("Failed to add manifest list: %v", err) + } + + before := allBlobs(t, registry) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ + DryRun: false, + RemoveUntagged: false, + }) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + after := allBlobs(t, registry) + if len(before) != len(after) { + t.Fatalf("Garbage collection affected storage: %d != %d", len(before), len(after)) + } +} + +func TestDeleteManifestIfTagNotFound(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "deletemanifests") + manifestService, err := repo.Manifests(ctx) + + // Create random layers + randomLayers1, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + randomLayers2, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + // Upload all layers + err = testutil.UploadBlobs(repo, randomLayers1) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + err = testutil.UploadBlobs(repo, randomLayers2) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + // Construct manifests + manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + manifest2, err := testutil.MakeSchema1Manifest(getKeys(randomLayers2)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + _, err = manifestService.Put(ctx, manifest1) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + _, err = manifestService.Put(ctx, manifest2) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + manifestEnumerator, _ := manifestService.(distribution.ManifestEnumerator) + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + repo.Tags(ctx).Tag(ctx, "test", distribution.Descriptor{Digest: dgst}) + return nil + }) + + before1 := allBlobs(t, registry) + before2 := allManifests(t, manifestService) + + // run GC with dry-run (should not remove anything) + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ + DryRun: true, + RemoveUntagged: true, + }) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + afterDry1 := allBlobs(t, registry) + afterDry2 := allManifests(t, manifestService) + if len(before1) != len(afterDry1) { + t.Fatalf("Garbage collection affected blobs storage: %d != %d", len(before1), len(afterDry1)) + } + if len(before2) != len(afterDry2) { + t.Fatalf("Garbage collection affected manifest storage: %d != %d", len(before2), len(afterDry2)) + } + + // Run GC (removes everything because no manifests with tags 
exist) + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ + DryRun: false, + RemoveUntagged: true, + }) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + after1 := allBlobs(t, registry) + after2 := allManifests(t, manifestService) + if len(before1) == len(after1) { + t.Fatalf("Garbage collection affected blobs storage: %d == %d", len(before1), len(after1)) + } + if len(before2) == len(after2) { + t.Fatalf("Garbage collection affected manifest storage: %d == %d", len(before2), len(after2)) + } +} + +func TestGCWithMissingManifests(t *testing.T) { + ctx := context.Background() + d := inmemory.New() + + registry := createRegistry(t, d) + repo := makeRepository(t, registry, "testrepo") + uploadRandomSchema1Image(t, repo) + + // Simulate a missing _manifests directory + revPath, err := pathFor(manifestRevisionsPathSpec{"testrepo"}) + if err != nil { + t.Fatal(err) + } + + _manifestsPath := path.Dir(revPath) + err = d.Delete(ctx, _manifestsPath) + if err != nil { + t.Fatal(err) + } + + err = MarkAndSweep(context.Background(), d, registry, GCOpts{ + DryRun: false, + RemoveUntagged: false, + }) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + if len(blobs) > 0 { + t.Errorf("unexpected blobs after gc") + } +} + +func TestDeletionHasEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "komnenos") + manifests, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + image3 := uploadRandomSchema2Image(t, repo) + + manifests.Delete(ctx, image2.manifestDigest) + manifests.Delete(ctx, image3.manifestDigest) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ + DryRun: false, + RemoveUntagged: false, + }) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that the image1 manifest and all the layers are still in blobs + if _, ok := blobs[image1.manifestDigest]; !ok { + t.Fatalf("First manifest is missing") + } + + for layer := range image1.layers { + if _, ok := blobs[layer]; !ok { + t.Fatalf("manifest 1 layer is missing: %v", layer) + } + } + + // check that image2 and image3 layers are not still around + for layer := range image2.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 2 layer is present: %v", layer) + } + } + + for layer := range image3.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 3 layer is present: %v", layer) + } + } +} + +func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) { + for d = range digests { + break + } + return +} + +func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) { + for d := range digests { + ds = append(ds, d) + } + return +} + +func TestDeletionWithSharedLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "tzimiskes") + + // Create random layers + randomLayers1, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + randomLayers2, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + // Upload all layers + err = testutil.UploadBlobs(repo, randomLayers1) + if err != nil { + 
t.Fatalf("failed to upload layers: %v", err) + } + + err = testutil.UploadBlobs(repo, randomLayers2) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + // Construct manifests + manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + sharedKey := getAnyKey(randomLayers1) + manifest2, err := testutil.MakeSchema2Manifest(repo, append(getKeys(randomLayers2), sharedKey)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + manifestService := makeManifestService(t, repo) + + // Upload manifests + _, err = manifestService.Put(ctx, manifest1) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + manifestDigest2, err := manifestService.Put(ctx, manifest2) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + // delete + err = manifestService.Delete(ctx, manifestDigest2) + if err != nil { + t.Fatalf("manifest deletion failed: %v", err) + } + + // check that all of the layers in layer 1 are still there + blobs := allBlobs(t, registry) + for dgst := range randomLayers1 { + if _, ok := blobs[dgst]; !ok { + t.Fatalf("random layer 1 blob missing: %v", dgst) + } + } +} + +func TestOrphanBlobDeleted(t *testing.T) { + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "michael_z_doukas") + + digests, err := testutil.CreateRandomLayers(1) + if err != nil { + t.Fatalf("Failed to create random digest: %v", err) + } + + if err = testutil.UploadBlobs(repo, digests); err != nil { + t.Fatalf("Failed to upload blob: %v", err) + } + + // formality to create the necessary directories + uploadRandomSchema2Image(t, repo) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ + DryRun: false, + RemoveUntagged: false, + }) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that orphan blob layers are not still around + for dgst := range digests { + if _, ok := blobs[dgst]; ok { + t.Fatalf("Orphan layer is present: %v", dgst) + } + } +} diff --git a/registry/storage/io.go b/registry/storage/io.go new file mode 100644 index 00000000..f79e7a6f --- /dev/null +++ b/registry/storage/io.go @@ -0,0 +1,71 @@ +package storage + +import ( + "context" + "errors" + "io" + "io/ioutil" + + "github.com/docker/distribution/registry/storage/driver" +) + +const ( + maxBlobGetSize = 4 << 20 +) + +func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) { + r, err := driver.Reader(ctx, p, 0) + if err != nil { + return nil, err + } + + return readAllLimited(r, maxBlobGetSize) +} + +func readAllLimited(r io.Reader, limit int64) ([]byte, error) { + r = limitReader(r, limit) + return ioutil.ReadAll(r) +} + +// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader, +// this returns an error when the limit reached. +func limitReader(r io.Reader, n int64) io.Reader { + return &limitedReader{r: r, n: n} +} + +// limitedReader implements a reader that errors when the limit is reached. +// +// Partially cribbed from net/http.MaxBytesReader. 
+type limitedReader struct { + r io.Reader // underlying reader + n int64 // max bytes remaining + err error // sticky error +} + +func (l *limitedReader) Read(p []byte) (n int, err error) { + if l.err != nil { + return 0, l.err + } + if len(p) == 0 { + return 0, nil + } + // If they asked for a 32KB byte read but only 5 bytes are + // remaining, no need to read 32KB. 6 bytes will answer the + // question of the whether we hit the limit or go past it. + if int64(len(p)) > l.n+1 { + p = p[:l.n+1] + } + n, err = l.r.Read(p) + + if int64(n) <= l.n { + l.n -= int64(n) + l.err = err + return n, err + } + + n = int(l.n) + l.n = 0 + + l.err = errors.New("storage: read exceeds limit") + return n, l.err +} diff --git a/registry/storage/linkedblobstore.go b/registry/storage/linkedblobstore.go new file mode 100644 index 00000000..3fb1da26 --- /dev/null +++ b/registry/storage/linkedblobstore.go @@ -0,0 +1,465 @@ +package storage + +import ( + "context" + "fmt" + "net/http" + "path" + "time" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" + "github.com/opencontainers/go-digest" +) + +// linkPathFunc describes a function that can resolve a link based on the +// repository name and digest. +type linkPathFunc func(name string, dgst digest.Digest) (string, error) + +// linkedBlobStore provides a full BlobService that namespaces the blobs to a +// given repository. Effectively, it manages the links in a given repository +// that grant access to the global blob store. +type linkedBlobStore struct { + *blobStore + registry *registry + blobServer distribution.BlobServer + blobAccessController distribution.BlobDescriptorService + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + deleteEnabled bool + resumableDigestEnabled bool + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. 
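+	// Reads (via the linked blob statter) try every entry in order until
+	// one resolves.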
+ linkPathFns []linkPathFunc + + // linkDirectoryPathSpec locates the root directories in which one might find links + linkDirectoryPathSpec pathSpec +} + +var _ distribution.BlobStore = &linkedBlobStore{} + +func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return lbs.blobAccessController.Stat(ctx, dgst) +} + +func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Get(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Open(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return err + } + + if canonical.MediaType != "" { + // Set the repository local content type. + w.Header().Set("Content-Type", canonical.MediaType) + } + + return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) +} + +func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst := digest.FromBytes(p) + // Place the data in the blob store first. + desc, err := lbs.blobStore.Put(ctx, mediaType, p) + if err != nil { + dcontext.GetLogger(ctx).Errorf("error putting into main store: %v", err) + return distribution.Descriptor{}, err + } + + if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype if incoming differs from what is + // returned by Put above. Note that we should allow updates for a given + // repository. + + return desc, lbs.linkBlob(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +// Writer begins a blob write session, returning a handle. 
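+//
+// A sketch of the cross-repo mount path (hypothetical repo and canonical
+// reference; when the mount succeeds no writer is returned):
+//
+//	_, err := repo.Blobs(ctx).Create(ctx, WithMountFrom(canonicalRef))
+//	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
+//		// blob is already linked; ebm.Descriptor describes it
+//	}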
+func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + dcontext.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + if opts.Mount.ShouldMount { + desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest(), opts.Mount.Stat) + if err == nil { + // Mount successful, no need to initiate an upload session + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + } + } + + uuid := uuid.Generate().String() + startedAt := time.Now().UTC() + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Named().Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Named().Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, uuid, path, startedAt, false) +} + +func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + dcontext.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Named().Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Named().Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, id, path, startedAt, true) +} + +func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + if !lbs.deleteEnabled { + return distribution.ErrUnsupported + } + + // Ensure the blob is available for deletion + _, err := lbs.blobAccessController.Stat(ctx, dgst) + if err != nil { + return err + } + + err = lbs.blobAccessController.Clear(ctx, dgst) + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error { + rootPath, err := pathFor(lbs.linkDirectoryPathSpec) + if err != nil { + return err + } + return lbs.driver.Walk(ctx, rootPath, func(fileInfo driver.FileInfo) error { + // exit early if directory... 
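+		// the Walk visits every path under the repository's link root;
+		// only files whose basename is exactly "link" hold digests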
+ if fileInfo.IsDir() { + return nil + } + filePath := fileInfo.Path() + + // check if it's a link + _, fileName := path.Split(filePath) + if fileName != "link" { + return nil + } + + // read the digest found in link + digest, err := lbs.blobStore.readlink(ctx, filePath) + if err != nil { + return err + } + + // ensure this conforms to the linkPathFns + _, err = lbs.Stat(ctx, digest) + if err != nil { + // we expect this error to occur so we move on + if err == distribution.ErrBlobUnknown { + return nil + } + return err + } + + err = ingestor(digest) + if err != nil { + return err + } + + return nil + }) +} + +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *distribution.Descriptor) (distribution.Descriptor, error) { + var stat distribution.Descriptor + if sourceStat == nil { + // look up the blob info from the sourceRepo if not already provided + repo, err := lbs.registry.Repository(ctx, sourceRepo) + if err != nil { + return distribution.Descriptor{}, err + } + stat, err = repo.Blobs(ctx).Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + } else { + // use the provided blob info + stat = *sourceStat + } + + desc := distribution.Descriptor{ + Size: stat.Size, + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + } + return desc, lbs.linkBlob(ctx, desc) +} + +// newBlobUpload allocates a new upload controller with the given state. +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) { + fw, err := lbs.driver.Writer(ctx, path, append) + if err != nil { + return nil, err + } + + bw := &blobWriter{ + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.Digester(), + fileWriter: fw, + driver: lbs.driver, + path: path, + resumableDigestEnabled: lbs.resumableDigestEnabled, + } + + return bw, nil +} + +// linkBlob links a valid, written blob into the registry under the named +// repository for the upload controller. +func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { + dgsts := append([]digest.Digest{canonical.Digest}, aliases...) + + // TODO(stevvooe): Need to write out mediatype for only canonical hash + // since we don't care about the aliases. They are generally unused except + // for tarsum but those versions don't care about mediatype. + + // Don't make duplicate links. + seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) + + // only use the first link + linkPathFn := lbs.linkPathFns[0] + + for _, dgst := range dgsts { + if _, seen := seenDigests[dgst]; seen { + continue + } + seenDigests[dgst] = struct{}{} + + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return err + } + + if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { + return err + } + } + + return nil +} + +type linkedBlobStatter struct { + *blobStore + repository distribution.Repository + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. This is required because manifest and layer blobs have not + // yet been fully merged. 
At some point, this functionality should be + // removed an the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. + linkPathFns []linkPathFunc +} + +var _ distribution.BlobDescriptorService = &linkedBlobStatter{} + +func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + var ( + found bool + target digest.Digest + ) + + // try the many link path functions until we get success or an error that + // is not PathNotFoundError. + for _, linkPathFn := range lbs.linkPathFns { + var err error + target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) + + if err == nil { + found = true + break // success! + } + + switch err := err.(type) { + case driver.PathNotFoundError: + // do nothing, just move to the next linkPathFn + default: + return distribution.Descriptor{}, err + } + } + + if !found { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + if target != dgst { + // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. + dcontext.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) + } + + // TODO(stevvooe): Look up repository local mediatype and replace that on + // the returned descriptor. + + return lbs.blobStore.statter.Stat(ctx, target) +} + +func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { + // clear any possible existence of a link described in linkPathFns + for _, linkPathFn := range lbs.linkPathFns { + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return err + } + + err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + continue // just ignore this error and continue + default: + return err + } + } + } + + return nil +} + +// resolveTargetWithFunc allows us to read a link to a resource with different +// linkPathFuncs to let us try a few different paths before returning not +// found. +func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return "", err + } + + return lbs.blobStore.readlink(ctx, blobLinkPath) +} + +func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + // The canonical descriptor for a blob is set at the commit phase of upload + return nil +} + +// blobLinkPath provides the path to the blob link, also known as layers. +func blobLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(layerLinkPathSpec{name: name, digest: dgst}) +} + +// manifestRevisionLinkPath provides the path to the manifest revision link. 
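+//
+// For example (layout as verified by TestLinkPathFuncs):
+//
+//	manifestRevisionLinkPath("foo/bar", "sha256:<hex>")
+//	// -> /docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/<hex>/link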
+func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) +} diff --git a/registry/storage/linkedblobstore_test.go b/registry/storage/linkedblobstore_test.go new file mode 100644 index 00000000..85376f71 --- /dev/null +++ b/registry/storage/linkedblobstore_test.go @@ -0,0 +1,216 @@ +package storage + +import ( + "context" + "fmt" + "io" + "reflect" + "strconv" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/testutil" + "github.com/opencontainers/go-digest" +) + +func TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) { + fooRepoName, _ := reference.WithName("nm/foo") + fooEnv := newManifestStoreTestEnv(t, fooRepoName, "thetag") + ctx := context.Background() + stats, err := mockRegistry(t, fooEnv.registry) + if err != nil { + t.Fatal(err) + } + + // Build up some test layers and add them to the manifest, saving the + // readseekers for upload later. + testLayers := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ds) + + testLayers[digest.Digest(dgst)] = rs + } + + // upload the layers to foo/bar + for dgst, rs := range testLayers { + wr, err := fooEnv.repository.Blobs(fooEnv.ctx).Create(fooEnv.ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(fooEnv.ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + // create another repository nm/bar + barRepoName, _ := reference.WithName("nm/bar") + barRepo, err := fooEnv.registry.Repository(ctx, barRepoName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + // cross-repo mount the test layers into a nm/bar + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + option := WithMountFrom(fooCanonical) + // ensure we can instrospect it + createOpts := distribution.CreateOptions{} + if err := option.Apply(&createOpts); err != nil { + t.Fatalf("failed to apply MountFrom option: %v", err) + } + if !createOpts.Mount.ShouldMount || createOpts.Mount.From.String() != fooCanonical.String() { + t.Fatalf("unexpected create options: %#+v", createOpts.Mount) + } + + _, err := barRepo.Blobs(ctx).Create(ctx, WithMountFrom(fooCanonical)) + if err == nil { + t.Fatalf("unexpected non-error while mounting from %q: %v", fooRepoName.String(), err) + } + if _, ok := err.(distribution.ErrBlobMounted); !ok { + t.Fatalf("expected ErrMountFrom error, not %T: %v", err, err) + } + } + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + count, exists := stats[fooCanonical.String()] + if !exists { + t.Errorf("expected entry %q not found among handled stat calls", fooCanonical.String()) + } else if count != 1 { + t.Errorf("expected exactly one stat call for entry %q, not %d", fooCanonical.String(), count) + } + } + + clearStats(stats) + + // create yet another repository nm/baz + bazRepoName, _ := reference.WithName("nm/baz") + bazRepo, err := fooEnv.registry.Repository(ctx, bazRepoName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + // cross-repo mount them into 
a nm/baz and provide a prepopulated blob descriptor + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + size, err := strconv.ParseInt("0x"+dgst.Hex()[:8], 0, 64) + if err != nil { + t.Fatal(err) + } + prepolutatedDescriptor := distribution.Descriptor{ + Digest: dgst, + Size: size, + MediaType: "application/octet-stream", + } + _, err = bazRepo.Blobs(ctx).Create(ctx, WithMountFrom(fooCanonical), &statCrossMountCreateOption{ + desc: prepolutatedDescriptor, + }) + blobMounted, ok := err.(distribution.ErrBlobMounted) + if !ok { + t.Errorf("expected ErrMountFrom error, not %T: %v", err, err) + continue + } + if !reflect.DeepEqual(blobMounted.Descriptor, prepolutatedDescriptor) { + t.Errorf("unexpected descriptor: %#+v != %#+v", blobMounted.Descriptor, prepolutatedDescriptor) + } + } + // this time no stat calls will be made + if len(stats) != 0 { + t.Errorf("unexpected number of stats made: %d != %d", len(stats), len(testLayers)) + } +} + +func clearStats(stats map[string]int) { + for k := range stats { + delete(stats, k) + } +} + +// mockRegistry sets a mock blob descriptor service factory that overrides +// statter's Stat method to note each attempt to stat a blob in any repository. +// Returned stats map contains canonical references to blobs with a number of +// attempts. +func mockRegistry(t *testing.T, nm distribution.Namespace) (map[string]int, error) { + registry, ok := nm.(*registry) + if !ok { + return nil, fmt.Errorf("not an expected type of registry: %T", nm) + } + stats := make(map[string]int) + + registry.blobDescriptorServiceFactory = &mockBlobDescriptorServiceFactory{ + t: t, + stats: stats, + } + + return stats, nil +} + +type mockBlobDescriptorServiceFactory struct { + t *testing.T + stats map[string]int +} + +func (f *mockBlobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &mockBlobDescriptorService{ + BlobDescriptorService: svc, + t: f.t, + stats: f.stats, + } +} + +type mockBlobDescriptorService struct { + distribution.BlobDescriptorService + t *testing.T + stats map[string]int +} + +var _ distribution.BlobDescriptorService = &mockBlobDescriptorService{} + +func (bs *mockBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + statter, ok := bs.BlobDescriptorService.(*linkedBlobStatter) + if !ok { + return distribution.Descriptor{}, fmt.Errorf("unexpected blob descriptor service: %T", bs.BlobDescriptorService) + } + + name := statter.repository.Named() + canonical, err := reference.WithDigest(name, dgst) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("failed to make canonical reference: %v", err) + } + + bs.stats[canonical.String()]++ + bs.t.Logf("calling Stat on %s", canonical.String()) + + return bs.BlobDescriptorService.Stat(ctx, dgst) +} + +// statCrossMountCreateOptions ensures the expected options type is passed, and optionally pre-fills the cross-mount stat info +type statCrossMountCreateOption struct { + desc distribution.Descriptor +} + +var _ distribution.BlobCreateOption = statCrossMountCreateOption{} + +func (f statCrossMountCreateOption) Apply(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("Unexpected create options: %#v", v) + } + + if !opts.Mount.ShouldMount { + return nil + } + + opts.Mount.Stat = &f.desc + + return nil +} diff --git a/registry/storage/manifestlisthandler.go 
b/registry/storage/manifestlisthandler.go new file mode 100644 index 00000000..085ccccc --- /dev/null +++ b/registry/storage/manifestlisthandler.go @@ -0,0 +1,93 @@ +package storage + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/opencontainers/go-digest" +) + +// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. +type manifestListHandler struct { + repository distribution.Repository + blobStore distribution.BlobStore + ctx context.Context +} + +var _ ManifestHandler = &manifestListHandler{} + +func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + dcontext.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + + var m manifestlist.DeserializedManifestList + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + dcontext.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + + m, ok := manifestList.(*manifestlist.DeserializedManifestList) + if !ok { + return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to +// store valid content, leaving trust policies of that content up to +// consumers. +func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + // This manifest service is different from the blob service + // returned by Blob. It uses a linked blob store to ensure that + // only manifests are accessible. + + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + for _, manifestDescriptor := range mnfst.References() { + exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) + if err != nil && err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + if err != nil || !exists { + // On error here, we always append unknown blob errors. 
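+				// Any unexpected stat error was already recorded above, so
+				// the client sees both the underlying failure and the digest
+				// that could not be verified.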
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/registry/storage/manifeststore.go b/registry/storage/manifeststore.go new file mode 100644 index 00000000..20e34eb0 --- /dev/null +++ b/registry/storage/manifeststore.go @@ -0,0 +1,142 @@ +package storage + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/opencontainers/go-digest" +) + +// A ManifestHandler gets and puts manifests of a particular type. +type ManifestHandler interface { + // Unmarshal unmarshals the manifest from a byte slice. + Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest. + Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) +} + +// SkipLayerVerification allows a manifest to be Put before its +// layers are on the filesystem +func SkipLayerVerification() distribution.ManifestServiceOption { + return skipLayerOption{} +} + +type skipLayerOption struct{} + +func (o skipLayerOption) Apply(m distribution.ManifestService) error { + if ms, ok := m.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifestStore") +} + +type manifestStore struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context + + skipDependencyVerification bool + + schema1Handler ManifestHandler + schema2Handler ManifestHandler + manifestListHandler ManifestHandler +} + +var _ distribution.ManifestService = &manifestStore{} + +func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") + + _, err := ms.blobStore.Stat(ms.ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return false, nil + } + + return false, err + } + + return true, nil +} + +func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Get") + + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. 
+ + content, err := ms.blobStore.Get(ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: ms.repository.Named().Name(), + Revision: dgst, + } + } + + return nil, err + } + + var versioned manifest.Versioned + if err = json.Unmarshal(content, &versioned); err != nil { + return nil, err + } + + switch versioned.SchemaVersion { + case 1: + return ms.schema1Handler.Unmarshal(ctx, dgst, content) + case 2: + // This can be an image manifest or a manifest list + switch versioned.MediaType { + case schema2.MediaTypeManifest: + return ms.schema2Handler.Unmarshal(ctx, dgst, content) + case manifestlist.MediaTypeManifestList: + return ms.manifestListHandler.Unmarshal(ctx, dgst, content) + default: + return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} + } + } + + return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) +} + +func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Put") + + switch manifest.(type) { + case *schema1.SignedManifest: + return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *schema2.DeserializedManifest: + return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *manifestlist.DeserializedManifestList: + return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) + } + + return "", fmt.Errorf("unrecognized manifest type %T", manifest) +} + +// Delete removes the revision of the specified manifest. +func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") + return ms.blobStore.Delete(ctx, dgst) +} + +func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { + err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error { + err := ingester(dgst) + if err != nil { + return err + } + return nil + }) + return err +} diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go new file mode 100644 index 00000000..2fad7611 --- /dev/null +++ b/registry/storage/manifeststore_test.go @@ -0,0 +1,391 @@ +package storage + +import ( + "bytes" + "context" + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +type manifestStoreTestEnv struct { + ctx context.Context + driver driver.StorageDriver + registry distribution.Namespace + repository distribution.Repository + name reference.Named + tag string +} + +func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string, options ...RegistryOption) *manifestStoreTestEnv { + ctx := context.Background() + driver := inmemory.New() + registry, err := NewRegistry(ctx, driver, options...) 
+ if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repo, err := registry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + return &manifestStoreTestEnv{ + ctx: ctx, + driver: driver, + registry: registry, + repository: repo, + name: name, + tag: tag, + } +} + +func TestManifestStorage(t *testing.T) { + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k)) +} + +func testManifestStorage(t *testing.T, options ...RegistryOption) { + repoName, _ := reference.WithName("foo/bar") + env := newManifestStoreTestEnv(t, repoName, "thetag", options...) + ctx := context.Background() + ms, err := env.repository.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + m := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: env.name.Name(), + Tag: env.tag, + } + + // Build up some test layers and add them to the manifest, saving the + // readseekers for upload later. + testLayers := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ds) + + testLayers[digest.Digest(dgst)] = rs + m.FSLayers = append(m.FSLayers, schema1.FSLayer{ + BlobSum: dgst, + }) + m.History = append(m.History, schema1.History{ + V1Compatibility: "", + }) + + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm, merr := schema1.Sign(&m, pk) + if merr != nil { + t.Fatalf("error signing manifest: %v", err) + } + + _, err = ms.Put(ctx, sm) + if err == nil { + t.Fatalf("expected errors putting manifest with full verification") + } + + switch err := err.(type) { + case distribution.ErrManifestVerification: + if len(err) != 2 { + t.Fatalf("expected 2 verification errors: %#v", err) + } + + for _, err := range err { + if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok { + t.Fatalf("unexpected error type: %v", err) + } + } + default: + t.Fatalf("unexpected error verifying manifest: %v", err) + } + + // Now, upload the layers that were missing! 
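+	// The flow is Create (open an upload session), io.Copy (stream the
+	// layer), then Commit with the expected digest so the store can verify
+	// and link the blob.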
+ for dgst, rs := range testLayers { + wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + var manifestDigest digest.Digest + if manifestDigest, err = ms.Put(ctx, sm); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + exists, err := ms.Exists(ctx, manifestDigest) + if err != nil { + t.Fatalf("unexpected error checking manifest existence: %#v", err) + } + + if !exists { + t.Fatalf("manifest should exist") + } + + fromStore, err := ms.Get(ctx, manifestDigest) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + fetchedManifest, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + + if !bytes.Equal(fetchedManifest.Canonical, sm.Canonical) { + t.Fatalf("fetched payload does not match original payload: %q != %q", fetchedManifest.Canonical, sm.Canonical) + } + + _, pl, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("unexpected error parsing jws: %v", err) + } + + payload, err := fetchedJWS.Payload() + if err != nil { + t.Fatalf("unexpected error extracting payload: %v", err) + } + + // Now that we have a payload, take a moment to check that the manifest is + // return by the payload digest. + + dgst := digest.FromBytes(payload) + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("error checking manifest existence by digest: %v", err) + } + + if !exists { + t.Fatalf("manifest %s should exist", dgst) + } + + fetchedByDigest, err := ms.Get(ctx, dgst) + if err != nil { + t.Fatalf("unexpected error fetching manifest by digest: %v", err) + } + + byDigestManifest, ok := fetchedByDigest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + + if !bytes.Equal(byDigestManifest.Canonical, fetchedManifest.Canonical) { + t.Fatalf("fetched manifest not equal: %q != %q", byDigestManifest.Canonical, fetchedManifest.Canonical) + } + + sigs, err := fetchedJWS.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) + } + + // Now, push the same manifest with a different key + pk2, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm2, err := schema1.Sign(&m, pk2) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + _, pl, err = sm2.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + jws2, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("error parsing signature: %v", err) + } + + sigs2, err := jws2.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs2) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) + } + + if manifestDigest, err = ms.Put(ctx, sm2); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + fromStore, err = ms.Get(ctx, 
manifestDigest) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + fetched, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected type from signed manifeststore : %T", fetched) + } + + if _, err := schema1.Verify(fetched); err != nil { + t.Fatalf("unexpected error verifying manifest: %v", err) + } + + _, pl, err = fetched.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("unexpected error parsing jws: %v", err) + } + + receivedPayload, err := receivedJWS.Payload() + if err != nil { + t.Fatalf("unexpected error extracting received payload: %v", err) + } + + if !bytes.Equal(receivedPayload, payload) { + t.Fatalf("payloads are not equal") + } + + // Test deleting manifests + err = ms.Delete(ctx, dgst) + if err != nil { + t.Fatalf("unexpected an error deleting manifest by digest: %v", err) + } + + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if exists { + t.Errorf("Deleted manifest should not exist") + } + + deletedManifest, err := ms.Get(ctx, dgst) + if err == nil { + t.Errorf("Unexpected success getting deleted manifest") + } + switch err.(type) { + case distribution.ErrManifestUnknownRevision: + break + default: + t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type()) + } + + if deletedManifest != nil { + t.Errorf("Deleted manifest get returned non-nil") + } + + // Re-upload should restore manifest to a good state + _, err = ms.Put(ctx, sm) + if err != nil { + t.Errorf("Error re-uploading deleted manifest") + } + + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if !exists { + t.Errorf("Restored manifest should exist") + } + + deletedManifest, err = ms.Get(ctx, dgst) + if err != nil { + t.Errorf("Unexpected error getting manifest") + } + if deletedManifest == nil { + t.Errorf("Deleted manifest get returned non-nil") + } + + r, err := NewRegistry(ctx, env.driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repo, err := r.Repository(ctx, env.name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + ms, err = repo.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + err = ms.Delete(ctx, dgst) + if err == nil { + t.Errorf("Unexpected success deleting while disabled") + } +} + +// TestLinkPathFuncs ensures that the link path functions behavior are locked +// down and implemented as expected. 
+func TestLinkPathFuncs(t *testing.T) { + for _, testcase := range []struct { + repo string + digest digest.Digest + linkPathFn linkPathFunc + expected string + }{ + { + repo: "foo/bar", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + linkPathFn: blobLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", + }, + { + repo: "foo/bar", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + linkPathFn: manifestRevisionLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", + }, + } { + p, err := testcase.linkPathFn(testcase.repo, testcase.digest) + if err != nil { + t.Fatalf("unexpected error calling linkPathFn(%q, %q): %v", testcase.repo, testcase.digest, err) + } + + if p != testcase.expected { + t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) + } + } +} diff --git a/registry/storage/paths.go b/registry/storage/paths.go new file mode 100644 index 00000000..b6d9b9b5 --- /dev/null +++ b/registry/storage/paths.go @@ -0,0 +1,490 @@ +package storage + +import ( + "fmt" + "path" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + storagePathVersion = "v2" // fixed storage layout version + storagePathRoot = "/docker/registry/" // all driver paths have a prefix + + // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought + // the storage path root would be configurable for all drivers through this + // package. In reality, we've found it simpler to do this on a per driver + // basis. +) + +// pathFor maps paths based on "object names" and their ids. The "object +// names" mapped are internal to the storage system. +// +// The path layout in the storage backend is roughly as follows: +// +// <root>/v2 +// -> repositories/ +// -> <name>/ +// -> _manifests/ +// revisions +// -> <manifest digest path> +// -> link +// tags/ +// -> <tag> +// -> current/link +// -> index +// -> <algorithm>/<hex digest>/link +// -> _layers/ +// <layer links to blob store> +// -> _uploads/<id> +// data +// startedat +// hashstates/<algorithm>/<offset> +// -> blob/<algorithm> +// <split directory content addressable storage> +// +// The storage backend layout is broken up into a content-addressable blob +// store and repositories. The content-addressable blob store holds most data +// throughout the backend, keyed by algorithm and digests of the underlying +// content. Access to the blob store is controlled through links from the +// repository to the blob store. +// +// A repository is made up of layers, manifests and tags. The layers component +// is just a directory of layers which are "linked" into a repository. A layer +// can only be accessed through a qualified repository name if it is linked in +// the repository. Uploads of layers are managed in the uploads directory, +// which is keyed by upload id. When all data for an upload is received, the +// data is moved into the blob store and the upload directory is deleted. +// Abandoned uploads can be garbage collected by reading the startedat file +// and removing uploads that have been active for longer than a certain time. +// +// The third component of the repository directory is the manifests store, +// which is made up of a revision store and tag store. Manifests are stored in +// the blob store and linked into the revision store. +// While the registry can save all revisions of a manifest, no relationship is +// implied as to the ordering of changes to a manifest.
The tag store provides +// support for name and tag lookups of manifests, using "current/link" under a +// named tag directory. An index is maintained to support deletions of all +// revisions of a given manifest tag. +// +// We cover the path formats implemented by this path mapper below. +// +// Manifests: +// +// manifestRevisionsPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/ +// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/ +// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link +// +// Tags: +// +// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/ +// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/ +// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link +// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/ +// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/ +// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link +// +// Blobs: +// +// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link +// +// Uploads: +// +// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/data +// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat +// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset> +// +// Blob Store: +// +// blobsPathSpec: <root>/v2/blobs/ +// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest> +// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data +// blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data +// +// For more information on the semantic meaning of each path and their +// contents, please see the path spec documentation. +func pathFor(spec pathSpec) (string, error) { + + // Switch on the path object type and return the appropriate path. At + // first glance, one may wonder why we don't use an interface to + // accomplish this. By keeping the formatting separate from the pathSpec, + // we keep the path generation componentized. These specs could be + // passed to a completely different mapper implementation and generate a + // different set of paths. + // + // For example, imagine migrating from one backend to the other: one could + // build a filesystem walker that converts a string path in one version, + // to an intermediate path object, that can be consumed and mapped by the + // other version.
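+ // + // As an illustration only (a hypothetical call; pathFor is unexported and + // the repository name and digest below are made up), mapping a manifest + // revision link might look like: + // + // p, err := pathFor(manifestRevisionLinkPathSpec{ + // name: "library/redis", + // revision: "sha256:abc...", + // }) + // // p == "/docker/registry/v2/repositories/library/redis/_manifests/revisions/sha256/abc.../link"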
+ + rootPrefix := []string{storagePathRoot, storagePathVersion} + repoPrefix := append(rootPrefix, "repositories") + + switch v := spec.(type) { + + case manifestRevisionsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil + + case manifestRevisionPathSpec: + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil + case manifestRevisionLinkPathSpec: + root, err := pathFor(manifestRevisionPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil + case manifestTagPathSpec: + root, err := pathFor(manifestTagsPathSpec{ + name: v.name, + }) + + if err != nil { + return "", err + } + + return path.Join(root, v.tag), nil + case manifestTagCurrentPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "current", "link"), nil + case manifestTagIndexPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "index"), nil + case manifestTagIndexEntryLinkPathSpec: + root, err := pathFor(manifestTagIndexEntryPathSpec{ + name: v.name, + tag: v.tag, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagIndexEntryPathSpec: + root, err := pathFor(manifestTagIndexPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(root, path.Join(components...)), nil + case layerLinkPathSpec: + components, err := digestPathComponents(v.digest, false) + if err != nil { + return "", err + } + + // TODO(stevvooe): Right now, all blobs are linked under "_layers". If + // we have future migrations, we may want to rename this to "_blobs". + // A migration strategy would simply leave existing items in place and + // write the new paths, commit a file then delete the old files. + + blobLinkPathComponents := append(repoPrefix, v.name, "_layers") + + return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil + case blobsPathSpec: + blobsPathPrefix := append(rootPrefix, "blobs") + return path.Join(blobsPathPrefix...), nil + case blobPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + case blobDataPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + components = append(components, "data") + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + + case uploadDataPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil + case uploadStartedAtPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. 
+ } + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil + case repositoriesRootPathSpec: + return path.Join(repoPrefix...), nil + default: + // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). + return "", fmt.Errorf("unknown path spec: %#v", v) + } +} + +// pathSpec is a type to mark structs as path specs. There is no +// implementation because we'd like to keep the specs and the mappers +// decoupled. +type pathSpec interface { + pathSpec() +} + +// manifestRevisionsPathSpec describes the directory path for +// a repository's manifest revisions. +type manifestRevisionsPathSpec struct { + name string +} + +func (manifestRevisionsPathSpec) pathSpec() {} + +// manifestRevisionPathSpec describes the components of the directory path for +// a manifest revision. +type manifestRevisionPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionPathSpec) pathSpec() {} + +// manifestRevisionLinkPathSpec describes the path components required to look +// up the data link for a revision of a manifest. If this file is not present, +// the manifest blob is not available in the given repo. The contents of this +// file should just be the digest. +type manifestRevisionLinkPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionLinkPathSpec) pathSpec() {} + +// manifestTagsPathSpec describes the path elements required to point to the +// manifest tags directory. +type manifestTagsPathSpec struct { + name string +} + +func (manifestTagsPathSpec) pathSpec() {} + +// manifestTagPathSpec describes the path elements required to point to the +// manifest tag link files under a repository. These contain a blob id that +// can be used to look up the data and signatures. +type manifestTagPathSpec struct { + name string + tag string +} + +func (manifestTagPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the current revision for a +// given tag. +type manifestTagCurrentPathSpec struct { + name string + tag string +} + +func (manifestTagCurrentPathSpec) pathSpec() {} + +// manifestTagIndexPathSpec describes the link to the index of revisions +// with the given tag. +type manifestTagIndexPathSpec struct { + name string + tag string +} + +func (manifestTagIndexPathSpec) pathSpec() {} + +// manifestTagIndexEntryPathSpec contains the entries of the index by revision. +type manifestTagIndexEntryPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryPathSpec) pathSpec() {} + +// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a +// manifest with the given tag within the index. +type manifestTagIndexEntryLinkPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} + +// layerLinkPathSpec specifies a path for a blob link, which is a file with a +// blob id. The blob link will contain a content addressable blob id reference +// into the blob store. The format of the contents is as follows: +// +// <algorithm>:<hex digest of layer data> +// +// The following example of the file contents is more illustrative: +// +// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 +// +// This indicates that there is a blob with the id/digest, calculated via +// sha256 that can be fetched from the blob store.
+type layerLinkPathSpec struct { + name string + digest digest.Digest +} + +func (layerLinkPathSpec) pathSpec() {} + +// blobAlgorithmReplacer does some very simple path sanitization for user +// input. Paths should be "safe" before getting this far due to strict digest +// requirements but we can add further path conversion here, if needed. +var blobAlgorithmReplacer = strings.NewReplacer( + "+", "/", + ".", "/", + ";", "/", +) + +// blobsPathSpec contains the path for the blobs directory +type blobsPathSpec struct{} + +func (blobsPathSpec) pathSpec() {} + +// blobPathSpec contains the path for the registry global blob store. +type blobPathSpec struct { + digest digest.Digest +} + +func (blobPathSpec) pathSpec() {} + +// blobDataPathSpec contains the path for the registry global blob store. For +// now, this contains layer data, exclusively. +type blobDataPathSpec struct { + digest digest.Digest +} + +func (blobDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters of the data file for +// uploads. +type uploadDataPathSpec struct { + name string + id string +} + +func (uploadDataPathSpec) pathSpec() {} + +// uploadStartedAtPathSpec defines the path parameters for the file that stores +// the start time of an upload. If it is missing, the upload is considered +// unknown. Admittedly, the presence of this file is an ugly hack to make sure +// we have a way to cleanup old or stalled uploads that doesn't rely on driver +// FileInfo behavior. If we come up with a more clever way to do this, we +// should remove this file immediately and rely on the startedAt field from +// the client to enforce timeout policies. +type uploadStartedAtPathSpec struct { + name string + id string +} + +func (uploadStartedAtPathSpec) pathSpec() {} + +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, id, and alg. +type uploadHashStatePathSpec struct { + name string + id string + alg digest.Algorithm + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + +// repositoriesRootPathSpec describes the root of repositories +type repositoriesRootPathSpec struct { +} + +func (repositoriesRootPathSpec) pathSpec() {} + +// digestPathComponents provides a consistent path breakdown for a given +// digest. For a generic digest, it will be as follows: +// +// <algorithm>/<hex digest> +// +// If multilevel is true, the first two hex characters of the digest will +// separate groups of digest folders.
It will be as follows: +// +// <algorithm>/<first two hex characters of digest>/<full digest> +// +func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { + if err := dgst.Validate(); err != nil { + return nil, err + } + + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) + hex := dgst.Hex() + prefix := []string{algorithm} + + var suffix []string + + if multilevel { + suffix = append(suffix, hex[:2]) + } + + suffix = append(suffix, hex) + + return append(prefix, suffix...), nil +} + +// digestFromPath reconstructs a digest from a path. +func digestFromPath(digestPath string) (digest.Digest, error) { + + digestPath = strings.TrimSuffix(digestPath, "/data") + dir, hex := path.Split(digestPath) + dir = path.Dir(dir) + dir, next := path.Split(dir) + + // next is either the algorithm OR the first two characters in the hex string + var algo string + if next == hex[:2] { + algo = path.Base(dir) + } else { + algo = next + } + + dgst := digest.NewDigestFromHex(algo, hex) + return dgst, dgst.Validate() +} diff --git a/registry/storage/paths_test.go b/registry/storage/paths_test.go new file mode 100644 index 00000000..677a34b9 --- /dev/null +++ b/registry/storage/paths_test.go @@ -0,0 +1,135 @@ +package storage + +import ( + "testing" + + "github.com/opencontainers/go-digest" +) + +func TestPathMapper(t *testing.T) { + for _, testcase := range []struct { + spec pathSpec + expected string + err error + }{ + { + spec: manifestRevisionPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + { + spec: manifestRevisionLinkPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", + }, + { + spec: manifestTagsPathSpec{ + name: "foo/bar", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags", + }, + { + spec: manifestTagPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag", + }, + { + spec: manifestTagCurrentPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link", + }, + { + spec: manifestTagIndexPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index", + }, + { + spec: manifestTagIndexEntryPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + { + spec: manifestTagIndexEntryLinkPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", + }, + + { + spec: uploadDataPathSpec{ + name: "foo/bar", + id: "asdf-asdf-asdf-adsf", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", + }, + { + spec: uploadStartedAtPathSpec{ + name: "foo/bar",
+ id: "asdf-asdf-asdf-adsf", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", + }, + } { + p, err := pathFor(testcase.spec) + if err != nil { + t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) + } + + if p != testcase.expected { + t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected) + } + } + + // Add a few test cases to ensure we cover some errors + + // Specify a path that requires a revision and get a digest validation error. + badpath, err := pathFor(manifestRevisionPathSpec{ + name: "foo/bar", + }) + + if err == nil { + t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) + } + +} + +func TestDigestFromPath(t *testing.T) { + for _, testcase := range []struct { + path string + expected digest.Digest + multilevel bool + err error + }{ + { + path: "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data", + multilevel: true, + expected: "sha256:9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86", + err: nil, + }, + } { + result, err := digestFromPath(testcase.path) + if err != testcase.err { + t.Fatalf("Unexpected error value %v when we wanted %v", err, testcase.err) + } + + if result != testcase.expected { + t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) + + } + } +} diff --git a/registry/storage/purgeuploads.go b/registry/storage/purgeuploads.go new file mode 100644 index 00000000..a82107a7 --- /dev/null +++ b/registry/storage/purgeuploads.go @@ -0,0 +1,139 @@ +package storage + +import ( + "context" + "path" + "strings" + "time" + + storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" + "github.com/sirupsen/logrus" +) + +// uploadData stored the location of temporary files created during a layer upload +// along with the date the upload was started +type uploadData struct { + containingDir string + startedAt time.Time +} + +func newUploadData() uploadData { + return uploadData{ + containingDir: "", + // default to far in future to protect against missing startedat + startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + } +} + +// PurgeUploads deletes files from the upload directory +// created before olderThan. The list of files deleted and errors +// encountered are returned +func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { + logrus.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + uploadData, errors := getOutstandingUploads(ctx, driver) + var deleted []string + for _, uploadData := range uploadData { + if uploadData.startedAt.Before(olderThan) { + var err error + logrus.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", + uploadData.containingDir, uploadData.startedAt, olderThan) + if actuallyDelete { + err = driver.Delete(ctx, uploadData.containingDir) + } + if err == nil { + deleted = append(deleted, uploadData.containingDir) + } else { + errors = append(errors, err) + } + } + } + + logrus.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) + return deleted, errors +} + +// getOutstandingUploads walks the upload directory, collecting files +// which could be eligible for deletion. 
The only reliable way to +// classify the age of a file is with the date stored in the startedAt +// file, so gather files by UUID with a date from startedAt. +func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { + var errors []error + uploads := make(map[string]uploadData, 0) + + inUploadDir := false + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return uploads, append(errors, err) + } + + err = driver.Walk(ctx, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + if file[0] == '_' { + // Reserved directory + inUploadDir = (file == "_uploads") + + if fileInfo.IsDir() && !inUploadDir { + return storageDriver.ErrSkipDir + } + + } + + uuid, isContainingDir := uuidFromPath(filePath) + if uuid == "" { + // Cannot reliably delete + return nil + } + ud, ok := uploads[uuid] + if !ok { + ud = newUploadData() + } + if isContainingDir { + ud.containingDir = filePath + } + if file == "startedat" { + if t, err := readStartedAtFile(driver, filePath); err == nil { + ud.startedAt = t + } else { + errors = pushError(errors, filePath, err) + } + + } + + uploads[uuid] = ud + return nil + }) + + if err != nil { + errors = pushError(errors, root, err) + } + return uploads, errors +} + +// uuidFromPath extracts the upload UUID from a given path +// If the UUID is the last path component, this is the containing +// directory for all upload files +func uuidFromPath(path string) (string, bool) { + components := strings.Split(path, "/") + for i := len(components) - 1; i >= 0; i-- { + if u, err := uuid.Parse(components[i]); err == nil { + return u.String(), i == len(components)-1 + } + } + return "", false +} + +// readStartedAtFile reads the date from an upload's startedAtFile +func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { + // todo:(richardscothern) - pass in a context + startedAtBytes, err := driver.GetContent(context.Background(), path) + if err != nil { + return time.Now(), err + } + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return time.Now(), err + } + return startedAt, nil +} diff --git a/registry/storage/purgeuploads_test.go b/registry/storage/purgeuploads_test.go new file mode 100644 index 00000000..23c553ba --- /dev/null +++ b/registry/storage/purgeuploads_test.go @@ -0,0 +1,166 @@ +package storage + +import ( + "context" + "path" + "strings" + "testing" + "time" + + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/uuid" +) + +func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { + d := inmemory.New() + ctx := context.Background() + for i := 0; i < numUploads; i++ { + addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt) + } + return d, ctx +} + +func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { + dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + if err := d.PutContent(ctx, dataPath, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + + if d.PutContent(ctx, 
startedAtPath, []byte(startedAt.Format(time.RFC3339))) != nil { + t.Fatalf("Unable to write startedAt file") + } + +} + +func TestPurgeGather(t *testing.T) { + uploadCount := 5 + fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now()) + uploadData, errs := getOutstandingUploads(ctx, fs) + if len(errs) != 0 { + t.Errorf("Unexpected errors: %q", errs) + } + if len(uploadData) != uploadCount { + t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData)) + } +} + +func TestPurgeNone(t *testing.T) { + fs, ctx := testUploadFS(t, 10, "test-repo", time.Now()) + oneHourAgo := time.Now().Add(-1 * time.Hour) + deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + if len(deleted) != 0 { + t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo) + } +} + +func TestPurgeAll(t *testing.T) { + uploadCount := 10 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) + + // Ensure > 1 repos are purged + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo) + uploadCount++ + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + fileCount := uploadCount + if len(deleted) != fileCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), fileCount) + } +} + +func TestPurgeSome(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) + + newUploadCount := 4 + + for i := 0; i < newUploadCount; i++ { + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour)) + } + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + if len(deleted) != oldUploadCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), oldUploadCount) + } +} + +func TestPurgeOnlyUploads(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) + + // Create a directory tree outside _uploads and ensure + // these files aren't deleted.
+ dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) + if err != nil { + t.Fatalf(err.Error()) + } + nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) + if strings.Index(nonUploadPath, "_upload") != -1 { + t.Fatalf("Non-upload path not created correctly") + } + + nonUploadFile := path.Join(nonUploadPath, "file") + if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + for _, file := range deleted { + if strings.Index(file, "_upload") == -1 { + t.Errorf("Non-upload file deleted") + } + } +} + +func TestPurgeMissingStartedAt(t *testing.T) { + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) + + err := fs.Walk(ctx, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + + if file == "startedat" { + if err := fs.Delete(ctx, filePath); err != nil { + t.Fatalf("Unable to delete startedat file: %s", filePath) + } + } + return nil + }) + if err != nil { + t.Fatalf("Unexpected error during Walk: %s ", err.Error()) + } + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) > 0 { + t.Errorf("Unexpected errors") + } + if len(deleted) > 0 { + t.Errorf("Files unexpectedly deleted: %s", deleted) + } +} diff --git a/registry/storage/registry.go b/registry/storage/registry.go new file mode 100644 index 00000000..46b96853 --- /dev/null +++ b/registry/storage/registry.go @@ -0,0 +1,306 @@ +package storage + +import ( + "context" + "regexp" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/libtrust" +) + +// registry is the top-level implementation of Registry for use in the storage +// package. All instances should descend from this object. +type registry struct { + blobStore *blobStore + blobServer *blobServer + statter *blobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool + schema1SigningKey libtrust.PrivateKey + blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory + manifestURLs manifestURLs +} + +// manifestURLs holds regular expressions for controlling manifest URL whitelisting +type manifestURLs struct { + allow *regexp.Regexp + deny *regexp.Regexp +} + +// RegistryOption is the type used for functional options for NewRegistry. +type RegistryOption func(*registry) error + +// EnableRedirect is a functional option for NewRegistry. It causes the backend +// blob server to attempt using (StorageDriver).URLFor to serve all blobs. +func EnableRedirect(registry *registry) error { + registry.blobServer.redirect = true + return nil +} + +// EnableDelete is a functional option for NewRegistry. It enables deletion on +// the registry. +func EnableDelete(registry *registry) error { + registry.deleteEnabled = true + return nil +} + +// DisableDigestResumption is a functional option for NewRegistry. It should be +// used if the registry is acting as a caching proxy. +func DisableDigestResumption(registry *registry) error { + registry.resumableDigestEnabled = false + return nil +} + +// ManifestURLsAllowRegexp is a functional option for NewRegistry. 
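+// The regexp is matched against each layer URL in schema2 manifests; for +// example (an illustrative policy value, not a recommendation): +// +// ManifestURLsAllowRegexp(regexp.MustCompile(`^https://`))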
+func ManifestURLsAllowRegexp(r *regexp.Regexp) RegistryOption { + return func(registry *registry) error { + registry.manifestURLs.allow = r + return nil + } +} + +// ManifestURLsDenyRegexp is a functional option for NewRegistry. +func ManifestURLsDenyRegexp(r *regexp.Regexp) RegistryOption { + return func(registry *registry) error { + registry.manifestURLs.deny = r + return nil + } +} + +// Schema1SigningKey returns a functional option for NewRegistry. It sets the +// key for signing all schema1 manifests. +func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { + return func(registry *registry) error { + registry.schema1SigningKey = key + return nil + } +} + +// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the +// factory to create BlobDescriptorServiceFactory middleware. +func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption { + return func(registry *registry) error { + registry.blobDescriptorServiceFactory = factory + return nil + } +} + +// BlobDescriptorCacheProvider returns a functional option for +// NewRegistry. It creates a cached blob statter for use by the +// registry. +func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { + // TODO(aaronl): The duplication of statter across several objects is + // ugly, and prevents us from using interface types in the registry + // struct. Ideally, blobStore and blobServer should be lazily + // initialized, and use the current value of + // blobDescriptorCacheProvider. + return func(registry *registry) error { + if blobDescriptorCacheProvider != nil { + statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) + registry.blobStore.statter = statter + registry.blobServer.statter = statter + registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider + } + return nil + } +} + +// NewRegistry creates a new registry instance from the provided driver. The +// resulting registry may be shared by multiple goroutines but is cheap to +// allocate. If the Redirect option is specified, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. +func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { + // create global statter + statter := &blobStatter{ + driver: driver, + } + + bs := &blobStore{ + driver: driver, + statter: statter, + } + + registry := ®istry{ + blobStore: bs, + blobServer: &blobServer{ + driver: driver, + statter: statter, + pathFn: bs.path, + }, + statter: statter, + resumableDigestEnabled: true, + } + + for _, option := range options { + if err := option(registry); err != nil { + return nil, err + } + } + + return registry, nil +} + +// Scope returns the namespace scope for a registry. The registry +// will only serve repositories contained within this scope. +func (reg *registry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +// Repository returns an instance of the repository tied to the registry. +// Instances should not be shared between goroutines but are cheap to +// allocate. In general, they should be request scoped. 
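+// +// A minimal usage sketch (the driver variable and repository name here are +// hypothetical): +// +// reg, err := NewRegistry(ctx, driver, EnableDelete, EnableRedirect) +// if err != nil { +// // handle error +// } +// named, _ := reference.WithName("library/hello") +// repo, err := reg.Repository(ctx, named)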
+func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { + var descriptorCache distribution.BlobDescriptorService + if reg.blobDescriptorCacheProvider != nil { + var err error + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) + if err != nil { + return nil, err + } + } + + return &repository{ + ctx: ctx, + registry: reg, + name: canonicalName, + descriptorCache: descriptorCache, + }, nil +} + +func (reg *registry) Blobs() distribution.BlobEnumerator { + return reg.blobStore +} + +func (reg *registry) BlobStatter() distribution.BlobStatter { + return reg.statter +} + +// repository provides name-scoped access to various services. +type repository struct { + *registry + ctx context.Context + name reference.Named + descriptorCache distribution.BlobDescriptorService +} + +// Name returns the name of the repository. +func (repo *repository) Named() reference.Named { + return repo.name +} + +func (repo *repository) Tags(ctx context.Context) distribution.TagService { + tags := &tagStore{ + repository: repo, + blobStore: repo.registry.blobStore, + } + + return tags +} + +// Manifests returns an instance of ManifestService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifestLinkPathFns := []linkPathFunc{ + // NOTE(stevvooe): Need to search through multiple locations since + // 2.1.0 unintentionally linked into _layers. + manifestRevisionLinkPath, + blobLinkPath, + } + + manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()} + + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + blobStore := &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: statter, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPathFns: manifestLinkPathFns, + linkDirectoryPathSpec: manifestDirectoryPathSpec, + } + + ms := &manifestStore{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + schema1Handler: &signedManifestHandler{ + ctx: ctx, + schema1SigningKey: repo.schema1SigningKey, + repository: repo, + blobStore: blobStore, + }, + schema2Handler: &schema2ManifestHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + manifestURLs: repo.registry.manifestURLs, + }, + manifestListHandler: &manifestListHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + }, + } + + // Apply options + for _, option := range options { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + + return ms, nil +} + +// Blobs returns an instance of the BlobStore. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. 
+func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: []linkPathFunc{blobLinkPath}, + } + + if repo.descriptorCache != nil { + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + return &linkedBlobStore{ + registry: repo.registry, + blobStore: repo.blobStore, + blobServer: repo.blobServer, + blobAccessController: statter, + repository: repo, + ctx: ctx, + + // TODO(stevvooe): linkPath limits this blob store to only layers. + // This instance cannot be used for manifest checks. + linkPathFns: []linkPathFunc{blobLinkPath}, + deleteEnabled: repo.registry.deleteEnabled, + resumableDigestEnabled: repo.resumableDigestEnabled, + } +} diff --git a/registry/storage/schema2manifesthandler.go b/registry/storage/schema2manifesthandler.go new file mode 100644 index 00000000..b0ae01ae --- /dev/null +++ b/registry/storage/schema2manifesthandler.go @@ -0,0 +1,137 @@ +package storage + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/opencontainers/go-digest" +) + +var ( + errUnexpectedURL = errors.New("unexpected URL on layer") + errMissingURL = errors.New("missing URL on layer") + errInvalidURL = errors.New("invalid URL on layer") +) + +//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. +type schema2ManifestHandler struct { + repository distribution.Repository + blobStore distribution.BlobStore + ctx context.Context + manifestURLs manifestURLs +} + +var _ ManifestHandler = &schema2ManifestHandler{} + +func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + dcontext.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + + var m schema2.DeserializedManifest + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + dcontext.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + + m, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to store +// valid content, leaving trust policies of that content up to consumers. 
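+// +// For foreign layers, every URL must parse, use an http or https scheme, +// carry no fragment, and pass the configured allow/deny regexps. For +// instance, with allow `^https?://foo` and deny `^https?://foo/nope` (the +// values exercised by the test file below), "http://foo/bar" is accepted +// while "file:///local/file" and "http://foo/nope" fail with errInvalidURL.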
+func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if skipDependencyVerification { + return nil + } + + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + blobsService := ms.repository.Blobs(ctx) + + for _, descriptor := range mnfst.References() { + var err error + + switch descriptor.MediaType { + case schema2.MediaTypeForeignLayer: + // Clients download this layer from an external URL, so do not check for + // its presence. + if len(descriptor.URLs) == 0 { + err = errMissingURL + } + allow := ms.manifestURLs.allow + deny := ms.manifestURLs.deny + for _, u := range descriptor.URLs { + var pu *url.URL + pu, err = url.Parse(u) + if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { + err = errInvalidURL + break + } + } + case schema2.MediaTypeManifest, schema1.MediaTypeManifest: + var exists bool + exists, err = manifestService.Exists(ctx, descriptor.Digest) + if err != nil || !exists { + err = distribution.ErrBlobUnknown // just coerce to unknown. + } + + fallthrough // double check the blob store. + default: + // forward all else to blob storage + if len(descriptor.URLs) == 0 { + _, err = blobsService.Stat(ctx, descriptor.Digest) + } + } + + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest}) + } + } + + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/registry/storage/schema2manifesthandler_test.go b/registry/storage/schema2manifesthandler_test.go new file mode 100644 index 00000000..6536f9d3 --- /dev/null +++ b/registry/storage/schema2manifesthandler_test.go @@ -0,0 +1,136 @@ +package storage + +import ( + "regexp" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestVerifyManifestForeignLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + registry := createRegistry(t, inmemoryDriver, + ManifestURLsAllowRegexp(regexp.MustCompile("^https?://foo")), + ManifestURLsDenyRegexp(regexp.MustCompile("^https?://foo/nope"))) + repo := makeRepository(t, registry, "test") + manifestService := makeManifestService(t, repo) + + config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeImageConfig, nil) + if err != nil { + t.Fatal(err) + } + + layer, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeLayer, nil) + if err != nil { + t.Fatal(err) + } + + foreignLayer := distribution.Descriptor{ + Digest: "sha256:463435349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeForeignLayer, + } + + template := schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: config, + } + + type testcase struct { + BaseLayer distribution.Descriptor + URLs []string + Err error + } + + cases := []testcase{ + { + foreignLayer, + nil, + errMissingURL, + }, + { + // regular layers may have foreign URLs + layer, + []string{"http://foo/bar"}, + nil, + }, + {
foreignLayer, + []string{"file:///local/file"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar#baz"}, + errInvalidURL, + }, + { + foreignLayer, + []string{""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"https://foo/bar", ""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"", "https://foo/bar"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://nope/bar"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/nope"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"https://foo/bar"}, + nil, + }, + } + + for _, c := range cases { + m := template + l := c.BaseLayer + l.URLs = c.URLs + m.Layers = []distribution.Descriptor{l} + dm, err := schema2.FromStruct(m) + if err != nil { + t.Error(err) + continue + } + + _, err = manifestService.Put(ctx, dm) + if verr, ok := err.(distribution.ErrManifestVerification); ok { + // Extract the first error + if len(verr) == 2 { + if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok { + err = verr[0] + } + } + } + if err != c.Err { + t.Errorf("%#v: expected %v, got %v", l, c.Err, err) + } + } +} diff --git a/registry/storage/signedmanifesthandler.go b/registry/storage/signedmanifesthandler.go new file mode 100644 index 00000000..f94ecbea --- /dev/null +++ b/registry/storage/signedmanifesthandler.go @@ -0,0 +1,142 @@ +package storage + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It +// can unmarshal and put schema1 manifests that have been signed by libtrust. +type signedManifestHandler struct { + repository distribution.Repository + schema1SigningKey libtrust.PrivateKey + blobStore distribution.BlobStore + ctx context.Context +} + +var _ ManifestHandler = &signedManifestHandler{} + +func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + dcontext.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + + var ( + signatures [][]byte + err error + ) + + jsig, err := libtrust.NewJSONSignature(content, signatures...) 
+ if err != nil { + return nil, err + } + + if ms.schema1SigningKey != nil { + if err := jsig.Sign(ms.schema1SigningKey); err != nil { + return nil, err + } + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm schema1.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + return &sm, nil +} + +func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + dcontext.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + + sm, ok := manifest.(*schema1.SignedManifest) + if !ok { + return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { + return "", err + } + + mt := schema1.MediaTypeManifest + payload := sm.Canonical + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumers. +func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) + } + + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) + } + + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + + if _, err := schema1.Verify(&mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !skipDependencyVerification { + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors.
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/registry/storage/tagstore.go b/registry/storage/tagstore.go new file mode 100644 index 00000000..ab5f54a6 --- /dev/null +++ b/registry/storage/tagstore.go @@ -0,0 +1,198 @@ +package storage + +import ( + "context" + "path" + + "github.com/docker/distribution" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +var _ distribution.TagService = &tagStore{} + +// tagStore provides methods to manage manifest tags in a backend storage driver. +// This implementation uses the same on-disk layout as the (now deleted) tag +// store. This provides backward compatibility with current registry deployments +// which only make use of the Digest field of the returned distribution.Descriptor +// but does not enable full roundtripping of Descriptor objects. +type tagStore struct { + repository *repository + blobStore *blobStore +} + +// All returns all tags +func (ts *tagStore) All(ctx context.Context) ([]string, error) { + var tags []string + + pathSpec, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + }) + if err != nil { + return tags, err + } + + entries, err := ts.blobStore.driver.List(ctx, pathSpec) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()} + default: + return tags, err + } + } + + for _, entry := range entries { + _, filename := path.Split(entry) + tags = append(tags, filename) + } + + return tags, nil +} + +// exists returns true if the specified manifest tag exists in the repository. +func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { + tagPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return false, err + } + + exists, err := exists(ctx, ts.blobStore.driver, tagPath) + if err != nil { + return false, err + } + + return exists, nil +} + +// Tag tags the digest with the given tag, updating the store to point at +// the current tag. The digest must point to a manifest. +func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return err + } + + lbs := ts.linkedBlobStore(ctx, tag) + + // Link into the index + if err := lbs.linkBlob(ctx, desc); err != nil { + return err + } + + // Overwrite the current link + return ts.blobStore.link(ctx, currentPath, desc.Digest) +} + +// Get resolves the current revision for the given tag.
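+// It reads the link under <repo>/_manifests/tags/<tag>/current/link, whose +// content is a plain digest string (for example, +// "sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36").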
+func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + revision, err := ts.blobStore.readlink(ctx, currentPath) + if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} + } + + return distribution.Descriptor{}, err + } + + return distribution.Descriptor{Digest: revision}, nil +} + +// Untag removes the tag association +func (ts *tagStore) Untag(ctx context.Context, tag string) error { + tagPath, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + if err != nil { + return err + } + + if err := ts.blobStore.driver.Delete(ctx, tagPath); err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return nil // Untag is idempotent, we don't care if it didn't exist + default: + return err + } + } + + return nil +} + +// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one +// to index manifest blobs by tag name. While the tag store doesn't map +// precisely to the linked blob store, using this ensures the links are +// managed via the same code path. +func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { + return &linkedBlobStore{ + blobStore: ts.blobStore, + repository: ts.repository, + ctx: ctx, + linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestTagIndexEntryLinkPathSpec{ + name: name, + tag: tag, + revision: dgst, + }) + + }}, + } +} + +// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by +// digest, tag entries which point to it need to be recovered to avoid dangling tags. 
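+// +// A sketch of the intended cleanup flow (hypothetical caller code): +// +// tags, err := ts.Lookup(ctx, desc) +// if err != nil { +// return err +// } +// for _, tag := range tags { +// _ = ts.Untag(ctx, tag) +// }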
+func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + allTags, err := ts.All(ctx) + switch err.(type) { + case distribution.ErrRepositoryUnknown: + // This tag store has been initialized but not yet populated + break + case nil: + break + default: + return nil, err + } + + var tags []string + for _, tag := range allTags { + tagLinkPathSpec := manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + } + + tagLinkPath, err := pathFor(tagLinkPathSpec) + if err != nil { + return nil, err + } + tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) + if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + continue + } + return nil, err + } + + if tagDigest == desc.Digest { + tags = append(tags, tag) + } + } + + return tags, nil +} diff --git a/registry/storage/tagstore_test.go b/registry/storage/tagstore_test.go new file mode 100644 index 00000000..314fa433 --- /dev/null +++ b/registry/storage/tagstore_test.go @@ -0,0 +1,209 @@ +package storage + +import ( + "context" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type tagsTestEnv struct { + ts distribution.TagService + ctx context.Context +} + +func testTagStore(t *testing.T) *tagsTestEnv { + ctx := context.Background() + d := inmemory.New() + reg, err := NewRegistry(ctx, d) + if err != nil { + t.Fatal(err) + } + + repoRef, _ := reference.WithName("a/b") + repo, err := reg.Repository(ctx, repoRef) + if err != nil { + t.Fatal(err) + } + + return &tagsTestEnv{ + ctx: ctx, + ts: repo.Tags(ctx), + } +} + +func TestTagStoreTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + + d := distribution.Descriptor{} + err := tags.Tag(ctx, "latest", d) + if err == nil { + t.Errorf("expected error putting malformed descriptor") + } + + d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err := tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } + + // Overwrite existing + d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err = tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } +} + +func TestTagStoreUnTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} + + err := tags.Untag(ctx, "latest") + if err != nil { + t.Error(err) + } + + err = tags.Tag(ctx, "latest", desc) + if err != nil { + t.Error(err) + } + + err = tags.Untag(ctx, "latest") + if err != nil { + t.Error(err) + } + + errExpect := distribution.ErrTagUnknown{Tag: "latest"}.Error() + _, err = tags.Get(ctx, "latest") + if err == nil || err.Error() != errExpect { + t.Error("Expected error getting untagged tag") + } +} + +func TestTagStoreAll(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + alpha := "abcdefghijklmnopqrstuvwxyz" + for i := 0; i < len(alpha); i++ { + tag := alpha[i] + desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"} + err := tagStore.Tag(ctx,
string(tag), desc) + if err != nil { + t.Error(err) + } + } + + all, err := tagStore.All(ctx) + if err != nil { + t.Error(err) + } + if len(all) != len(alpha) { + t.Errorf("Unexpected tag count from All: got %d, expected %d", len(all), len(alpha)) + } + + for i, c := range all { + if c != string(alpha[i]) { + t.Errorf("unexpected tag in enumerate %s", c) + } + } + + removed := "a" + err = tagStore.Untag(ctx, removed) + if err != nil { + t.Error(err) + } + + all, err = tagStore.All(ctx) + if err != nil { + t.Error(err) + } + for _, tag := range all { + if tag == removed { + t.Errorf("unexpected tag in enumerate %s", removed) + } + } + +} + +func TestTagLookup(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} + desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"} + + tags, err := tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + if len(tags) != 0 { + t.Fatalf("Lookup returned > 0 tags from empty store") + } + + err = tagStore.Tag(ctx, "a", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "b", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "0", desc0) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "1", desc0) + if err != nil { + t.Fatal(err) + } + + tags, err = tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags)) + } + + tags, err = tagStore.Lookup(ctx, desc0) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of desc0 returned %d tags, expected 2", len(tags)) + } + +} diff --git a/registry/storage/util.go b/registry/storage/util.go new file mode 100644 index 00000000..8ab235e2 --- /dev/null +++ b/registry/storage/util.go @@ -0,0 +1,22 @@ +package storage + +import ( + "context" + + "github.com/docker/distribution/registry/storage/driver" +) + +// exists provides a utility method to test whether or not a path exists in +// the given driver. +func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { + if _, err := drv.Stat(ctx, path); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return false, nil + default: + return false, err + } + } + + return true, nil +} diff --git a/registry/storage/vacuum.go b/registry/storage/vacuum.go new file mode 100644 index 00000000..a43db17a --- /dev/null +++ b/registry/storage/vacuum.go @@ -0,0 +1,102 @@ +package storage + +import ( + "context" + "path" + + dcontext "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +// vacuum contains functions for cleaning up repositories and blobs. +// These functions will only reliably work on strongly consistent +// storage systems.
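+// (A local filesystem qualifies; an eventually consistent object store may +// briefly continue to serve content that has been deleted.) For background on +// consistency models, see: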
+// https://en.wikipedia.org/wiki/Consistency_model + +// NewVacuum creates a new Vacuum +func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { + return Vacuum{ + ctx: ctx, + driver: driver, + } +} + +// Vacuum removes content from the filesystem +type Vacuum struct { + driver driver.StorageDriver + ctx context.Context +} + +// RemoveBlob removes a blob from the filesystem +func (v Vacuum) RemoveBlob(dgst string) error { + d, err := digest.Parse(dgst) + if err != nil { + return err + } + + blobPath, err := pathFor(blobPathSpec{digest: d}) + if err != nil { + return err + } + + dcontext.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + + err = v.driver.Delete(v.ctx, blobPath) + if err != nil { + return err + } + + return nil +} + +// RemoveManifest removes a manifest from the filesystem +func (v Vacuum) RemoveManifest(name string, dgst digest.Digest, tags []string) error { + // Remove the tag manifest references; if one is not found, continue to the next. + for _, tag := range tags { + + tagsPath, err := pathFor(manifestTagIndexEntryPathSpec{name: name, revision: dgst, tag: tag}) + if err != nil { + return err + } + + _, err = v.driver.Stat(v.ctx, tagsPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + continue + default: + return err + } + } + dcontext.GetLogger(v.ctx).Infof("deleting manifest tag reference: %s", tagsPath) + err = v.driver.Delete(v.ctx, tagsPath) + if err != nil { + return err + } + } + + manifestPath, err := pathFor(manifestRevisionPathSpec{name: name, revision: dgst}) + if err != nil { + return err + } + dcontext.GetLogger(v.ctx).Infof("deleting manifest: %s", manifestPath) + return v.driver.Delete(v.ctx, manifestPath) +} + +// RemoveRepository removes a repository directory from the +// filesystem +func (v Vacuum) RemoveRepository(repoName string) error { + rootForRepository, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + repoDir := path.Join(rootForRepository, repoName) + dcontext.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) + err = v.driver.Delete(v.ctx, repoDir) + if err != nil { + return err + } + + return nil +} diff --git a/tags.go b/tags.go new file mode 100644 index 00000000..f22df2b8 --- /dev/null +++ b/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest.
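+ // For example, after a successful Tag(ctx, "latest", desc), Lookup(ctx, desc) + // is expected to include "latest" in the returned slice.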
+ Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/testutil/handler.go b/testutil/handler.go new file mode 100644 index 00000000..00cd8a6a --- /dev/null +++ b/testutil/handler.go @@ -0,0 +1,148 @@ +package testutil + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strings" +) + +// RequestResponseMap is an ordered mapping from Requests to Responses +type RequestResponseMap []RequestResponseMapping + +// RequestResponseMapping defines a Response to be sent in response to a given +// Request +type RequestResponseMapping struct { + Request Request + Response Response +} + +// Request is a simplified http.Request object +type Request struct { + // Method is the http method of the request, for example GET + Method string + + // Route is the http route of this request + Route string + + // QueryParams are the query parameters of this request + QueryParams map[string][]string + + // Body is the byte contents of the http request + Body []byte + + // Headers are the headers for this request + Headers http.Header +} + +func (r Request) String() string { + queryString := "" + if len(r.QueryParams) > 0 { + keys := make([]string, 0, len(r.QueryParams)) + queryParts := make([]string, 0, len(r.QueryParams)) + for k := range r.QueryParams { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + for _, val := range r.QueryParams[k] { + queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) + } + } + queryString = "?" + strings.Join(queryParts, "&") + } + var headers []string + if len(r.Headers) > 0 { + var headerKeys []string + for k := range r.Headers { + headerKeys = append(headerKeys, k) + } + sort.Strings(headerKeys) + + for _, k := range headerKeys { + for _, val := range r.Headers[k] { + headers = append(headers, fmt.Sprintf("%s:%s", k, val)) + } + } + + } + return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) +} + +// Response is a simplified http.Response object +type Response struct { + // StatusCode is the http status code of the Response + StatusCode int + + // Headers are the http headers of this Response + Headers http.Header + + // Body is the response body + Body []byte +} + +// testHandler is an http.Handler with a defined mapping from Request to an +// ordered list of Response objects +type testHandler struct { + responseMap map[string][]Response +} + +// NewHandler returns a new test handler that responds to defined requests +// with specified responses. +// Each time a Request is received, the next Response is returned in the +// mapping, until no Responses are defined, at which point a 404 is sent back. +func NewHandler(requestResponseMap RequestResponseMap) http.Handler { + responseMap := make(map[string][]Response) + for _, mapping := range requestResponseMap { + responses, ok := responseMap[mapping.Request.String()] + if ok { + responseMap[mapping.Request.String()] = append(responses, mapping.Response) + } else { + responseMap[mapping.Request.String()] = []Response{mapping.Response} + } + } + return &testHandler{responseMap: responseMap} +} + +func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + requestBody, _ := ioutil.ReadAll(r.Body) + request := Request{ + Method: r.Method, + Route: r.URL.Path, + QueryParams: r.URL.Query(), + Body: requestBody, + Headers: make(map[string][]string), + } + + // Add headers of interest here + for k, v := range r.Header { + if k == "If-None-Match" { +
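// Request.String() folds headers into the matching key, so only headers + // copied here (currently just If-None-Match) influence which canned + // response is served. +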
request.Headers[k] = v + } + } + + responses, ok := app.responseMap[request.String()] + + if !ok || len(responses) == 0 { + http.NotFound(w, r) + return + } + + response := responses[0] + app.responseMap[request.String()] = responses[1:] + + responseHeader := w.Header() + for k, v := range response.Headers { + responseHeader[k] = v + } + + w.WriteHeader(response.StatusCode) + + io.Copy(w, bytes.NewReader(response.Body)) +} diff --git a/testutil/manifests.go b/testutil/manifests.go new file mode 100644 index 00000000..8afe82e4 --- /dev/null +++ b/testutil/manifests.go @@ -0,0 +1,87 @@ +package testutil + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +// MakeManifestList constructs a manifest list out of a list of manifest digests +func MakeManifestList(blobstatter distribution.BlobStatter, manifestDigests []digest.Digest) (*manifestlist.DeserializedManifestList, error) { + ctx := context.Background() + + var manifestDescriptors []manifestlist.ManifestDescriptor + for _, manifestDigest := range manifestDigests { + descriptor, err := blobstatter.Stat(ctx, manifestDigest) + if err != nil { + return nil, err + } + platformSpec := manifestlist.PlatformSpec{ + Architecture: "atari2600", + OS: "CP/M", + Variant: "ternary", + Features: []string{"VLIW", "superscalaroutoforderdevnull"}, + } + manifestDescriptor := manifestlist.ManifestDescriptor{ + Descriptor: descriptor, + Platform: platformSpec, + } + manifestDescriptors = append(manifestDescriptors, manifestDescriptor) + } + + return manifestlist.FromDescriptors(manifestDescriptors) +} + +// MakeSchema1Manifest constructs and signs a schema 1 manifest from a given list of digests and +// returns the signed manifest +func MakeSchema1Manifest(digests []digest.Digest) (distribution.Manifest, error) { + manifest := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: "who", + Tag: "cares", + } + + for _, digest := range digests { + manifest.FSLayers = append(manifest.FSLayers, schema1.FSLayer{BlobSum: digest}) + manifest.History = append(manifest.History, schema1.History{V1Compatibility: ""}) + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("unexpected error generating private key: %v", err) + } + + signedManifest, err := schema1.Sign(&manifest, pk) + if err != nil { + return nil, fmt.Errorf("error signing manifest: %v", err) + } + + return signedManifest, nil +} + +// MakeSchema2Manifest constructs a schema 2 manifest from a given list of digests and returns +// the built manifest +func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) { + ctx := context.Background() + blobStore := repository.Blobs(ctx) + builder := schema2.NewManifestBuilder(blobStore, schema2.MediaTypeImageConfig, []byte{}) + for _, digest := range digests { + builder.AppendReference(distribution.Descriptor{Digest: digest}) + } + + manifest, err := builder.Build(ctx) + if err != nil { + return nil, fmt.Errorf("unexpected error generating manifest: %v", err) + } + + return manifest, nil +} diff --git a/testutil/tarfile.go b/testutil/tarfile.go new file mode 100644 index 00000000..cb93602f --- /dev/null +++
b/testutil/tarfile.go @@ -0,0 +1,115 @@ +package testutil + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + mrand "math/rand" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/opencontainers/go-digest" +) + +// CreateRandomTarFile creates a random tarfile, returning it as an +// io.ReadSeeker along with its digest. An error is returned if there is a +// problem generating valid content. +func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) { + nFiles := mrand.Intn(10) + 10 + target := &bytes.Buffer{} + wr := tar.NewWriter(target) + + // Perturb this on each iteration of the loop below. + header := &tar.Header{ + Mode: 0644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Uname: "randocalrissian", + Gname: "cloudcity", + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + + for fileNumber := 0; fileNumber < nFiles; fileNumber++ { + fileSize := mrand.Int63n(1<<20) + 1<<20 + + header.Name = fmt.Sprint(fileNumber) + header.Size = fileSize + + if err := wr.WriteHeader(header); err != nil { + return nil, "", err + } + + randomData := make([]byte, fileSize) + + // Fill up the buffer with some random data. + n, err := mrand.Read(randomData) + + if n != len(randomData) { + return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) + } + + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(wr, bytes.NewReader(randomData)) + if nn != fileSize { + return nil, "", fmt.Errorf("short copy writing random file to tar") + } + + if err != nil { + return nil, "", err + } + + if err := wr.Flush(); err != nil { + return nil, "", err + } + } + + if err := wr.Close(); err != nil { + return nil, "", err + } + + dgst = digest.FromBytes(target.Bytes()) + + return bytes.NewReader(target.Bytes()), dgst, nil +} + +// CreateRandomLayers returns a map of n digests. We don't particularly care +// about the order of said digests (since they're all random anyway). +func CreateRandomLayers(n int) (map[digest.Digest]io.ReadSeeker, error) { + digestMap := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < n; i++ { + rs, ds, err := CreateRandomTarFile() + if err != nil { + return nil, fmt.Errorf("unexpected error generating test layer file: %v", err) + } + + dgst := digest.Digest(ds) + digestMap[dgst] = rs + } + return digestMap, nil +} + +// UploadBlobs lets you upload blobs to a repository +func UploadBlobs(repository distribution.Repository, layers map[digest.Digest]io.ReadSeeker) error { + ctx := context.Background() + for digest, rs := range layers { + wr, err := repository.Blobs(ctx).Create(ctx) + if err != nil { + return fmt.Errorf("unexpected error creating upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + return fmt.Errorf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: digest}); err != nil { + return fmt.Errorf("unexpected error committing upload: %v", err) + } + } + return nil +} diff --git a/uuid/uuid.go b/uuid/uuid.go new file mode 100644 index 00000000..d433ccaf --- /dev/null +++ b/uuid/uuid.go @@ -0,0 +1,126 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
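+// +// A version 4 UUID carries 122 random bits; Generate fixes the remaining six +// bits to the version (0100) and variant (10) values required by RFC 4122.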
+package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we back off for less than 450ms total. Use the following to + // select a new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version nibble to 4 + u[8] = (u[8] & 0x3f) | 0x80 // set variant bits to 0b10, so the high nibble reads 8, 9, a or b + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. +func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. + p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we back off and retry.
+ return true + } + } + + return false +} diff --git a/uuid/uuid_test.go b/uuid/uuid_test.go new file mode 100644 index 00000000..09c3a7bb --- /dev/null +++ b/uuid/uuid_test.go @@ -0,0 +1,48 @@ +package uuid + +import ( + "testing" +) + +const iterations = 1000 + +func TestUUID4Generation(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + if u[6]&0xf0 != 0x40 { + t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) + } + + if u[8]&0xc0 != 0x80 { + t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) + } + } +} + +func TestParseAndEquality(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + parsed, err := Parse(u.String()) + if err != nil { + t.Fatalf("error parsing uuid %v: %v", u, err) + } + + if parsed != u { + t.Fatalf("parsing round trip failed: %v != %v", parsed, u) + } + } + + for _, c := range []string{ + "bad", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format + " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space + "20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space + "00000000-0000-0000-0000-x00000000000", // out of range character + } { + if _, err := Parse(c); err == nil { + t.Fatalf("parsing %q should have failed", c) + } + } +} diff --git a/vendor.conf b/vendor.conf new file mode 100644 index 00000000..d0ebadf8 --- /dev/null +++ b/vendor.conf @@ -0,0 +1,49 @@ +github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e +github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 +github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab +github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 +github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 +github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/spf13/cobra 
312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 +github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 00000000..298f0e26 --- /dev/null +++ b/vendor/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md new file mode 100644 index 00000000..b1fb0889 --- /dev/null +++ b/vendor/github.com/spf13/cobra/README.md @@ -0,0 +1,485 @@ +# Cobra + +A Commander for modern go CLI interactions + +[![Build Status](https://travis-ci.org/spf13/cobra.svg)](https://travis-ci.org/spf13/cobra) + +## Overview + +Cobra is a commander providing a simple interface to create powerful modern CLI +interfaces similar to git & go tools. In addition to providing an interface, Cobra +simultaneously provides a controller to organize your application code. + +Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by +providing **fully posix compliant flags** (including short & long versions), +**nesting commands**, and the ability to **define your own help and usage** for any or +all commands. + +Cobra has an exceptionally clean interface and simple design without needless +constructors or initialization methods. + +Applications built with Cobra commands are designed to be as user friendly as +possible. Flags can be placed before or after the command (as long as a +confusing space isn’t provided). Both short and long flags can be used. A +command need not even be fully typed. The shortest unambiguous string will +suffice. Help is automatically generated and available for the application or +for a specific command using either the help command or the --help flag. + +## Concepts + +Cobra is built on a structure of commands & flags. + +**Commands** represent actions and **Flags** are modifiers for those actions. + +In the following example 'server' is a command and 'port' is a flag. + + hugo server --port=1313 + +### Commands + +Command is the central point of the application. Each interaction that +the application supports will be contained in a Command. A command can +have children commands and optionally run an action. + +In the example above 'server' is the command + +A Command has the following structure: + + type Command struct { + Use string // The one-line usage message. + Short string // The short description shown in the 'help' output. 
+ Long string // The long message shown in the 'help <this-command>' output. + Run func(cmd *Command, args []string) // Run runs the command. + } + +### Flags + +A Flag is a way to modify the behavior of a command. Cobra supports +fully posix compliant flags as well as the go flag package. +A Cobra command can define flags that persist through to child commands +and flags that are only available to that command. + +In the example above 'port' is the flag. + +Flag functionality is provided by the [pflag +library](https://github.com/ogier/pflag), a fork of the flag standard library +which maintains the same interface while adding posix compliance. + +## Usage + +Cobra works by creating a set of commands and then organizing them into a tree. +The tree defines the structure of the application. + +Once each command is defined with its corresponding flags, the +tree is assigned to the commander, which is finally executed. + +### Installing +Using Cobra is easy. First use go get to install the latest version +of the library. + + $ go get github.com/spf13/cobra + +Next include cobra in your application. + + import "github.com/spf13/cobra" + +### Create the root command + +The root command represents your binary itself. + +Cobra doesn't require any special constructors. Simply create your commands. + + var HugoCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, + } + +### Create additional commands + +Additional commands can be defined. + + var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, + } + +### Attach command to its parent +In this example we are attaching it to the root, but commands can be attached at any level. + + HugoCmd.AddCommand(versionCmd) + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + + var Verbose bool + var Source string + +There are two different approaches to assign a flag. + +#### Persistent Flags + +A flag can be 'persistent', meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + + HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") + +#### Local Flags + +A flag can also be assigned locally, which will only apply to that specific command. + + HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") + +### Remove a command from its parent + +Removing a command is not a common action in simple programs, but it allows 3rd parties to customize an existing command tree. + +In this example, we remove the existing `VersionCmd` command of an existing root command, and we replace it with our own version. + + mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) + mainlib.RootCmd.AddCommand(versionCmd) + +### Once all commands and flags are defined, Execute the commands + +Execute should be run on the root for clarity, though it can be called on any command.
+ + HugoCmd.Execute() + +## Example + +In the example below we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case, the root +is not executable, meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + + import ( + "github.com/spf13/cobra" + "fmt" + "strings" + ) + + func main() { + + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. + For many years people have printed back to the screen. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. + Echo works a lot like print, except it has a child command. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing + a count and a string.`, + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() + } + +For a more complete example of a larger application, please check out [Hugo](http://hugo.spf13.com) + +## The Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create'; without any additional configuration, Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by cobra. Nothing beyond the +command and flag definitions is needed. + + > hugo help + + A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + + Complete documentation is available at http://hugo.spf13.com + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server :: Hugo runs its own webserver to render the files + version :: Print the version number of Hugo + check :: Check content in the source directory + benchmark :: Benchmark hugo by building a site a number of times + help [command] :: Help about any command + + Available Flags: + -b, --base-url="": hostname (and path) to the root eg.
http://spf13.com/ + -D, --build-drafts=false: include content marked as draft + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + -s, --source="": filesystem path to read files relative from + --stepAnalysis=false: display memory and timing of different steps of the program + --uglyurls=false: if true, use /filename.html instead of /filename/ + -v, --verbose=false: verbose output + -w, --watch=false: watch filesystem for changes and recreate as needed + + Use "hugo help [command]" for more information about that command. + + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use. + +The default help command is + + func (c *Command) initHelp() { + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + Run: c.HelpFunc(), + } + } + c.AddCommand(c.helpCommand) + } + +You can provide your own command, function or template through the following methods. + + command.SetHelpCommand(cmd *Command) + + command.SetHelpFunc(f func(*Command, []string)) + + command.SetHelpTemplate(s string) + +The latter two will also apply to any child commands. + +## Usage + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times + help [command] Help about any command + + Available Flags: + -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/ + -D, --build-drafts=false: include content marked as draft + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + -s, --source="": filesystem path to read files relative from + --stepAnalysis=false: display memory and timing of different steps of the program + --uglyurls=false: if true, use /filename.html instead of /filename/ + -v, --verbose=false: verbose output + -w, --watch=false: watch filesystem for changes and recreate as needed + +### Defining your own usage +You can provide your own usage function or template for cobra to use. + +The default usage function is + + return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + return err + } + +Like help, the function and template are overridable through public methods. + + command.SetUsageFunc(f func(*Command) error) + + command.SetUsageTemplate(s string) + +## PreRun or PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own.
These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`. + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My sub command", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + _ = rootCmd.Execute() + fmt.Print("\n") + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + _ = rootCmd.Execute() +} +``` + +## Generating markdown formatted documentation for your command + +Cobra can generate a markdown formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md) + +## Generating bash completions for your command + +Cobra can generate a bash completions file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about [Bash Completions](bash_completions.md) + +## Debugging + +Cobra provides a ‘DebugFlags’ method on a command which, when called, will print +out everything Cobra knows about the flags for each command. + +### Example + + command.DebugFlags() + +## Release Notes +* **0.9.0** June 17, 2014 + * flags can appear anywhere in the args (provided they are unambiguous) + * --help prints usage screen for app or command + * Prefix matching for commands + * Cleaner looking help and usage output + * Extensive test suite +* **0.8.0** Nov 5, 2013 + * Reworked interface to remove commander completely + * Command now primary structure + * No initialization needed + * Usage & Help templates & functions definable at any level + * Updated Readme +* **0.7.0** Sept 24, 2013 + * Needs more eyes + * Test suite + * Support for automatic error messages + * Support for help command + * Support for printing to any io.Writer instead of os.Stderr + * Support for persistent flags which cascade down tree + * Ready for integration into Hugo +* **0.1.0** Sept 3, 2013 + * Implement first draft + +## ToDo +* Launch proper documentation site + +## Contributing + +1. Fork it +2.
Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Contributors + +Names in no particular order: + +* [spf13](https://github.com/spf13) + +## License + +Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") + diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 00000000..735d2f54 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,357 @@ +package cobra + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" +) + +func preamble(out *bytes.Buffer) { + fmt.Fprintf(out, `#!/bin/bash + + +__debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +__index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__handle_reply() +{ + __debug "${FUNCNAME}" + case $cur in + -*) + compopt -o nospace + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + [[ $COMPREPLY == *= ]] || compopt +o nospace + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions=("${must_have_one_flag[@]}") + elif [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + else + completions=("${commands[@]}") + fi + COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + declare -F __custom_func >/dev/null && __custom_func + fi +} + +# The arguments should be in the form "ext1|ext2|extn" +__handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__handle_flag() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __debug "${FUNCNAME}: looking for ${flagname}" + if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # skip the argument to a two word flag + if __contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq 
$cword ]]; then + commands=() + fi + fi + + # skip the flag itself + c=$((c+1)) + +} + +__handle_noun() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__handle_command() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]}" + else + next_command="_${words[c]}" + fi + c=$((c+1)) + __debug "${FUNCNAME}: looking for ${next_command}" + declare -F $next_command >/dev/null && $next_command +} + +__handle_word() +{ + if [[ $c -ge $cword ]]; then + __handle_reply + return + fi + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __handle_flag + elif __contains_word "${words[c]}" "${commands[@]}"; then + __handle_command + else + __handle_noun + fi + __handle_word +} + +`) +} + +func postscript(out *bytes.Buffer, name string) { + fmt.Fprintf(out, "__start_%s()\n", name) + fmt.Fprintf(out, `{ + local cur prev words cword + _init_completion -s || return + + local c=0 + local flags=() + local two_word_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%s") + local must_have_one_flag=() + local must_have_one_noun=() + local last_command + local nouns=() + + __handle_word +} + +`, name) + fmt.Fprintf(out, "complete -F __start_%s %s\n", name, name) + fmt.Fprintf(out, "# ex: ts=4 sw=4 et filetype=sh\n") +} + +func writeCommands(cmd *Command, out *bytes.Buffer) { + fmt.Fprintf(out, " commands=()\n") + for _, c := range cmd.Commands() { + if len(c.Deprecated) > 0 { + continue + } + fmt.Fprintf(out, " commands+=(%q)\n", c.Name()) + } + fmt.Fprintf(out, "\n") +} + +func writeFlagHandler(name string, annotations map[string][]string, out *bytes.Buffer) { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + fmt.Fprintf(out, " flags_with_completion+=(%q)\n", name) + + ext := strings.Join(value, "|") + ext = "__handle_filename_extension_flag " + ext + fmt.Fprintf(out, " flags_completion+=(%q)\n", ext) + } + } +} + +func writeShortFlag(flag *pflag.Flag, out *bytes.Buffer) { + b := (flag.Value.Type() == "bool") + name := flag.Shorthand + format := " " + if !b { + format += "two_word_" + } + format += "flags+=(\"-%s\")\n" + fmt.Fprintf(out, format, name) + writeFlagHandler("-"+name, flag.Annotations, out) +} + +func writeFlag(flag *pflag.Flag, out *bytes.Buffer) { + b := (flag.Value.Type() == "bool") + name := flag.Name + format := " flags+=(\"--%s" + if !b { + format += "=" + } + format += "\")\n" + fmt.Fprintf(out, format, name) + writeFlagHandler("--"+name, flag.Annotations, out) +} + +func writeFlags(cmd *Command, out *bytes.Buffer) { + fmt.Fprintf(out, ` flags=() + two_word_flags=() + flags_with_completion=() + flags_completion=() + +`) + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + writeFlag(flag, out) + if len(flag.Shorthand) > 0 { + writeShortFlag(flag, out) + } + }) + + fmt.Fprintf(out, "\n") +} + +func writeRequiredFlag(cmd *Command, out *bytes.Buffer) { + fmt.Fprintf(out, " must_have_one_flag=()\n") + flags := cmd.NonInheritedFlags() + flags.VisitAll(func(flag *pflag.Flag) { + for key, _ := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + b := (flag.Value.Type() == "bool") + if !b { + format += "=" + } + format += "\")\n" + fmt.Fprintf(out, format, 
flag.Name) + + if len(flag.Shorthand) > 0 { + fmt.Fprintf(out, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand) + } + } + } + }) +} + +func writeRequiredNoun(cmd *Command, out *bytes.Buffer) { + fmt.Fprintf(out, " must_have_one_noun=()\n") + sort.Sort(sort.StringSlice(cmd.ValidArgs)) + for _, value := range cmd.ValidArgs { + fmt.Fprintf(out, " must_have_one_noun+=(%q)\n", value) + } +} + +func gen(cmd *Command, out *bytes.Buffer) { + for _, c := range cmd.Commands() { + if len(c.Deprecated) > 0 { + continue + } + gen(c, out) + } + commandName := cmd.CommandPath() + commandName = strings.Replace(commandName, " ", "_", -1) + fmt.Fprintf(out, "_%s()\n{\n", commandName) + fmt.Fprintf(out, " last_command=%q\n", commandName) + writeCommands(cmd, out) + writeFlags(cmd, out) + writeRequiredFlag(cmd, out) + writeRequiredNoun(cmd, out) + fmt.Fprintf(out, "}\n\n") +} + +func (cmd *Command) GenBashCompletion(out *bytes.Buffer) { + preamble(out) + if len(cmd.BashCompletionFunction) > 0 { + fmt.Fprintf(out, "%s\n", cmd.BashCompletionFunction) + } + gen(cmd, out) + postscript(out, cmd.Name()) +} + +func (cmd *Command) GenBashCompletionFile(filename string) error { + out := new(bytes.Buffer) + + cmd.GenBashCompletion(out) + + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + _, err = outFile.Write(out.Bytes()) + if err != nil { + return err + } + return nil +} + +func (cmd *Command) MarkFlagRequired(name string) { + flag := cmd.Flags().Lookup(name) + if flag == nil { + return + } + if flag.Annotations == nil { + flag.Annotations = make(map[string][]string) + } + annotation := make([]string, 1) + annotation[0] = "true" + flag.Annotations[BashCompOneRequiredFlag] = annotation +} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go new file mode 100644 index 00000000..78b92b0a --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -0,0 +1,112 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Commands similar to git, go tools and other modern CLI tools +// inspired by go, go-Commander, gh and subcommand + +package cobra + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + "text/template" +) + +var initializers []func() + +// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. +// Set this to true to enable it +var EnablePrefixMatching bool = false + +// enables an information splash screen on Windows if the CLI is started from explorer.exe. +var EnableWindowsMouseTrap bool = true + +var MousetrapHelpText string = `This is a command line tool + +You need to open cmd.exe and run it from there. +` + +//OnInitialize takes a series of func() arguments and appends them to a slice of func(). +func OnInitialize(y ...func()) { + for _, x := range y { + initializers = append(initializers, x) + } +} + +//Gt takes two types and checks whether the first type is greater than the second. 
In case of types Arrays, Chans, +//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as +//ints and then compared. +func Gt(a interface{}, b interface{}) bool { + var left, right int64 + av := reflect.ValueOf(a) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + left = int64(av.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + left = av.Int() + case reflect.String: + left, _ = strconv.ParseInt(av.String(), 10, 64) + } + + bv := reflect.ValueOf(b) + + switch bv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + right = int64(bv.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + right = bv.Int() + case reflect.String: + right, _ = strconv.ParseInt(bv.String(), 10, 64) + } + + return left > right +} + +//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. +func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +//rpad adds padding to the right of a string +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +// tmpl executes the given template text on data, writing the result to w. +func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(template.FuncMap{ + "trim": strings.TrimSpace, + "rpad": rpad, + "gt": Gt, + "eq": Eq, + }) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 00000000..3831c337 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1031 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "runtime" + "strings" + "time" + + "github.com/inconshreveable/mousetrap" + flag "github.com/spf13/pflag" +) + +// Command is just that, a command for your application. +// eg. 'go run' ... 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Name is the command name, usually the executable's name. + name string + // The one-line usage message. 
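+ // E.g. "serve [flags]" (a hypothetical value, for illustration): the
+ // first word, "serve", becomes the command's name.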
+ Use string
+ // An array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+ // The short description shown in the 'help' output.
+ Short string
+ // The long message shown in the 'help <this-command>' output.
+ Long string
+ // Examples of how to use the command.
+ Example string
+ // List of all valid non-flag arguments, used for bash completions *TODO* actually validate these
+ ValidArgs []string
+ // Custom functions used by the bash autocompletion generator
+ BashCompletionFunction string
+ // If non-empty, this command is deprecated and this string is printed when the command is used.
+ Deprecated string
+ // Full set of flags
+ flags *flag.FlagSet
+ // Set of flags children of this command will inherit
+ pflags *flag.FlagSet
+ // Flags that are declared specifically by this command (not inherited).
+ lflags *flag.FlagSet
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // Commands is the list of commands supported by this program.
+ commands []*Command
+ // Parent Command for this command
+ parent *Command
+ // Maximum lengths of the child commands' strings, used for padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+
+ flagErrorBuf *bytes.Buffer
+ cmdErrorBuf *bytes.Buffer
+
+ args []string // actual args parsed from flags
+ output *io.Writer // nil means stderr; use Out() method instead
+ usageFunc func(*Command) error // Usage can be defined by application
+ usageTemplate string // Can be defined by Application
+ helpTemplate string // Can be defined by Application
+ helpFunc func(*Command, []string) // Help can be defined by application
+ helpCommand *Command // The help command
+ helpFlagVal bool
+ // The global normalization function that we can use on every pflag set and children commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+}
+
+// SetArgs sets the arguments to parse (os.Args[1:] by default);
+// overriding them is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.output != nil {
+ return *c.output
+ }
+
+ if c.HasParent() {
+ return c.parent.Out()
+ } else {
+ return def
+ }
+}
+
+func (c *Command) Out() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+func (c *Command) getOutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
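+// A minimal sketch of capturing output, assuming a *Command value cmd
+// (illustrative only, not part of the original source):
+//
+//	var buf bytes.Buffer
+//	cmd.SetOutput(&buf) // usage and error text is now written to buf
+//	cmd.Usage()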
+func (c *Command) SetOutput(output io.Writer) { + c.output = &output +} + +// Usage can be defined by application +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// Can be defined by Application +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// Can be defined by Application +func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// Can be defined by Application +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. +func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.LocalFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + + if c.HasParent() { + return c.parent.UsageFunc() + } else { + return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + return err + } + } +} +func (c *Command) HelpFunc() func(*Command, []string) { + if c.helpFunc != nil { + return c.helpFunc + } + + if c.HasParent() { + return c.parent.HelpFunc() + } else { + return func(c *Command, args []string) { + if len(args) == 0 { + // Help called without any topic, calling on root + c.Root().Help() + return + } + + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q.", args) + + c.Root().Usage() + } else { + err := cmd.Help() + if err != nil { + c.Println(err) + } + } + } + } +} + +var minUsagePadding int = 25 + +func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } else { + return c.parent.commandsMaxUseLen + } +} + +var minCommandPathPadding int = 11 + +// +func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } else { + return c.parent.commandsMaxCommandPathLen + } +} + +var minNamePadding int = 11 + +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } else { + return c.parent.commandsMaxNameLen + } +} + +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } else { + return `{{ $cmd := . 
}} +Usage: {{if .Runnable}} + {{.UseLine}}{{if .HasFlags}} [flags]{{end}}{{end}}{{if .HasSubCommands}} + {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} +{{end}}{{if .HasExample}} + +Examples: +{{ .Example }} +{{end}}{{ if .HasRunnableSubCommands}} + +Available Commands: {{range .Commands}}{{if and (.Runnable) (not .Deprecated)}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}} +{{end}} +{{ if .HasLocalFlags}}Flags: +{{.LocalFlags.FlagUsages}}{{end}} +{{ if .HasInheritedFlags}}Global Flags: +{{.InheritedFlags.FlagUsages}}{{end}}{{if or (.HasHelpSubCommands) (.HasRunnableSiblings)}} +Additional help topics: +{{if .HasHelpSubCommands}}{{range .Commands}}{{if and (not .Runnable) (not .Deprecated)}} {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasRunnableSiblings }}{{range .Parent.Commands}}{{if and (not .Runnable) (not .Deprecated)}}{{if not (eq .Name $cmd.Name) }} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{end}} +{{end}}{{ if .HasSubCommands }} +Use "{{.Root.Name}} help [command]" for more information about a command. +{{end}}` + } +} + +func (c *Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } else { + return `{{with or .Long .Short }}{{. | trim}}{{end}} +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}} +` + } +} + +// Really only used when casting a command to a commander +func (c *Command) resetChildrensParents() { + for _, x := range c.commands { + x.parent = c + } +} + +// Test if the named flag is a boolean flag. +func isBooleanFlag(name string, f *flag.FlagSet) bool { + flag := f.Lookup(name) + if flag == nil { + return false + } + return flag.Value.Type() == "bool" +} + +// Test if the named flag is a boolean flag. +func isBooleanShortFlag(name string, f *flag.FlagSet) bool { + result := false + f.VisitAll(func(f *flag.Flag) { + if f.Shorthand == name && f.Value.Type() == "bool" { + result = true + } + }) + return result +} + +func stripFlags(args []string, c *Command) []string { + if len(args) < 1 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + + inQuote := false + inFlag := false + for _, y := range args { + if !inQuote { + switch { + case strings.HasPrefix(y, "\""): + inQuote = true + case strings.Contains(y, "=\""): + inQuote = true + case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !isBooleanFlag(y[2:], c.Flags()) + case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): + inFlag = true + case inFlag: + inFlag = false + case y == "": + // strip empty commands, as the go tests expect this to be ok.... + case !strings.HasPrefix(y, "-"): + commands = append(commands, y) + inFlag = false + } + } + + if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { + inQuote = false + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). +func argsMinusFirstX(args []string, x string) []string { + for i, y := range args { + if x == y { + ret := []string{} + ret = append(ret, args[:i]...) + ret = append(ret, args[i+1:]...) 
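+ // e.g. with args = ["admin", "policy", "admin"] and x = "admin",
+ // ret is ["policy", "admin"]: only the first occurrence is dropped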
+ return ret + } + } + return args +} + +// find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. +func (c *Command) Find(args []string) (*Command, []string, error) { + if c == nil { + return nil, nil, fmt.Errorf("Called find() on a nil Command") + } + + // If there are no arguments, return the root command. If the root has no + // subcommands, args reflects arguments that should actually be passed to + // the root command, so also return the root command. + if len(args) == 0 || !c.Root().HasSubCommands() { + return c.Root(), args, nil + } + + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + if len(innerArgs) > 0 && c.HasSubCommands() { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) > 0 { + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if cmd.Name() == argsWOflags[0] || cmd.HasAlias(argsWOflags[0]) { // exact name or alias match + return innerfind(cmd, argsMinusFirstX(innerArgs, argsWOflags[0])) + } else if EnablePrefixMatching { + if strings.HasPrefix(cmd.Name(), argsWOflags[0]) { // prefix match + matches = append(matches, cmd) + } + for _, x := range cmd.Aliases { + if strings.HasPrefix(x, argsWOflags[0]) { + matches = append(matches, cmd) + } + } + } + } + + // only accept a single prefix match - multiple matches would be ambiguous + if len(matches) == 1 { + return innerfind(matches[0], argsMinusFirstX(innerArgs, argsWOflags[0])) + } + } + } + + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + + // If we matched on the root, but we asked for a subcommand, return an error + if commandFound.Name() == c.Name() && len(stripFlags(args, c)) > 0 && commandFound.Name() != args[0] { + return nil, a, fmt.Errorf("unknown command %q", a[0]) + } + + return commandFound, a, nil +} + +func (c *Command) Root() *Command { + var findRoot func(*Command) *Command + + findRoot = func(x *Command) *Command { + if x.HasParent() { + return findRoot(x.parent) + } else { + return x + } + } + + return findRoot(c) +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + err = c.ParseFlags(a) + if err == flag.ErrHelp { + c.Help() + return nil + } + if err != nil { + // We're writing subcommand usage to root command's error buffer to have it displayed to the user + r := c.Root() + if r.cmdErrorBuf == nil { + r.cmdErrorBuf = new(bytes.Buffer) + } + // for writing the usage to the buffer we need to switch the output temporarily + // since Out() returns root output, you also need to revert that on root + out := r.Out() + r.SetOutput(r.cmdErrorBuf) + c.Usage() + r.SetOutput(out) + return err + } + // If help is called, regardless of other flags, we print that. + // Print help also if c.Run is nil. 
+ if c.helpFlagVal || !c.Runnable() {
+ c.Help()
+ return nil
+ }
+
+ c.preRun()
+ argWoFlags := c.Flags().Args()
+
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ break
+ }
+ }
+ if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ c.Run(c, argWoFlags)
+
+ if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ break
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+func (c *Command) errorMsgFromParse() string {
+ s := c.flagErrorBuf.String()
+
+ x := strings.Split(s, "\n")
+
+ if len(x) > 0 {
+ return x[0]
+ } else {
+ return ""
+ }
+}
+
+// Execute uses the args (os.Args[1:] by default) and runs through the
+// command tree, finding appropriate matches for commands and then
+// corresponding flags.
+func (c *Command) Execute() (err error) {
+
+ // Regardless of what command Execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().Execute()
+ }
+
+ if EnableWindowsMouseTrap && runtime.GOOS == "windows" {
+ if mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ time.Sleep(5 * time.Second)
+ os.Exit(1)
+ }
+ }
+
+ // initialize help as late as possible to allow for user overriding
+ c.initHelp()
+
+ var args []string
+
+ if len(c.args) == 0 {
+ args = os.Args[1:]
+ } else {
+ args = c.args
+ }
+
+ cmd, flags, err := c.Find(args)
+ if err == nil {
+ err = cmd.execute(flags)
+ }
+
+ if err != nil {
+ if err == flag.ErrHelp {
+ c.Help()
+ } else {
+ c.Println("Error:", err.Error())
+ c.Printf("Run '%v help' for usage.\n", c.Root().Name())
+ }
+ }
+
+ return
+}
+
+func (c *Command) initHelp() {
+ if c.helpCommand == nil {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+ Simply type ` + c.Name() + ` help [path to command] for full details.`,
+ Run: c.HelpFunc(),
+ PersistentPreRun: func(cmd *Command, args []string) {},
+ PersistentPostRun: func(cmd *Command, args []string) {},
+ }
+ }
+ c.AddCommand(c.helpCommand)
+}
+
+// Used for testing
+func (c *Command) ResetCommands() {
+ c.commands = nil
+ c.helpCommand = nil
+ c.cmdErrorBuf = new(bytes.Buffer)
+ c.cmdErrorBuf.Reset()
+}
+
+// Commands returns a slice of child commands.
+func (c *Command) Commands() []*Command {
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If a global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ }
+}
+
+// RemoveCommand removes one or more commands from a parent command.
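+// A hypothetical usage sketch (command names are illustrative only):
+//
+//	root.AddCommand(serveCmd, configCmd)
+//	root.RemoveCommand(configCmd) // root now only lists serveCmd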
+func (c *Command) RemoveCommand(cmds ...*Command) { + commands := []*Command{} +main: + for _, command := range c.commands { + for _, cmd := range cmds { + if command == cmd { + command.parent = nil + continue main + } + } + commands = append(commands, command) + } + c.commands = commands + // recompute all lengths + c.commandsMaxUseLen = 0 + c.commandsMaxCommandPathLen = 0 + c.commandsMaxNameLen = 0 + for _, command := range c.commands { + usageLen := len(command.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(command.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(command.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + } +} + +// Convenience method to Print to the defined output +func (c *Command) Print(i ...interface{}) { + fmt.Fprint(c.Out(), i...) +} + +// Convenience method to Println to the defined output +func (c *Command) Println(i ...interface{}) { + str := fmt.Sprintln(i...) + c.Print(str) +} + +// Convenience method to Printf to the defined output +func (c *Command) Printf(format string, i ...interface{}) { + str := fmt.Sprintf(format, i...) + c.Print(str) +} + +// Output the usage for the command +// Used when a user provides invalid input +// Can be defined by user by overriding UsageFunc +func (c *Command) Usage() error { + c.mergePersistentFlags() + err := c.UsageFunc()(c) + return err +} + +// Output the help for the command +// Used when a user calls help [command] +// by the default HelpFunc in the commander +func (c *Command) Help() error { + c.mergePersistentFlags() + err := tmpl(c.getOutOrStdout(), c.HelpTemplate(), c) + return err +} + +func (c *Command) UsageString() string { + tmpOutput := c.output + bb := new(bytes.Buffer) + c.SetOutput(bb) + c.Usage() + c.output = tmpOutput + return bb.String() +} + +// CommandPath returns the full path to this command. 
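+// E.g. for a command "migrate" added under "db" under a root "app"
+// (hypothetical names), CommandPath() returns "app db migrate".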
+func (c *Command) CommandPath() string { + str := c.Name() + x := c + for x.HasParent() { + str = x.parent.Name() + " " + str + x = x.parent + } + return str +} + +//The full usage for a given command (including parents) +func (c *Command) UseLine() string { + str := "" + if c.HasParent() { + str = c.parent.CommandPath() + " " + } + return str + c.Use +} + +// For use in determining which flags have been assigned to which commands +// and which persist +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() { + if x.persistentFlag(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + debugflags(c) +} + +// Name returns the command's name: the first word in the use line. +func (c *Command) Name() string { + if c.name != "" { + return c.name + } + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// Determine if a given string is an alias of the command. 
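+// E.g. a command with Aliases: []string{"ls", "dir"} (hypothetical values)
+// reports HasAlias("ls") == true but HasAlias("list") == false; only exact
+// matches count.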
+func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if a == s { + return true + } + } + return false +} + +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Determine if the command is itself runnable +func (c *Command) Runnable() bool { + return c.Run != nil +} + +// Determine if the command has children commands +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +func (c *Command) HasRunnableSiblings() bool { + if !c.HasParent() { + return false + } + for _, sub := range c.parent.commands { + if sub.Runnable() { + return true + } + } + return false +} + +func (c *Command) HasHelpSubCommands() bool { + for _, sub := range c.commands { + if !sub.Runnable() { + return true + } + } + return false +} + +// Determine if the command has runnable children commands +func (c *Command) HasRunnableSubCommands() bool { + for _, sub := range c.commands { + if sub.Runnable() { + return true + } + } + return false +} + +// Determine if the command is a child command +func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) +func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + c.PersistentFlags().BoolVarP(&c.helpFlagVal, "help", "h", false, "help for "+c.Name()) + } + return c.flags +} + +// Get the local FlagSet specifically set in the current command +func (c *Command) LocalFlags() *flag.FlagSet { + c.mergePersistentFlags() + + local := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags.VisitAll(func(f *flag.Flag) { + local.AddFlag(f) + }) + return local +} + +// All Flags which were inherited from parents commands +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + local := c.LocalFlags() + + var rmerge func(x *Command) + + rmerge = func(x *Command) { + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + inherited.AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + if c.HasParent() { + rmerge(c.parent) + } + + return inherited +} + +// All Flags which were not inherited from parent commands +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// Get the Persistent FlagSet specifically set in the current command +func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// For use in testing +func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), 
flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) +} + +// Does the command contain any flags (local plus persistent from the entire structure) +func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// Does the command contain persistent flags +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// Does the command has flags specifically declared locally +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// Climbs up the command tree looking for matching flag +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// recursively find matching persistent flag +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil && c.HasParent() { + flag = c.parent.persistentFlag(name) + } + return +} + +// Parses persistent flag tree & local flags +func (c *Command) ParseFlags(args []string) (err error) { + c.mergePersistentFlags() + err = c.Flags().Parse(args) + return +} + +func (c *Command) Parent() *Command { + return c.parent +} + +func (c *Command) mergePersistentFlags() { + var rmerge func(x *Command) + + // Save the set of local flags + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + addtolocal := func(f *flag.Flag) { + c.lflags.AddFlag(f) + } + c.Flags().VisitAll(addtolocal) + c.PersistentFlags().VisitAll(addtolocal) + } + rmerge = func(x *Command) { + if !x.HasParent() { + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if x.PersistentFlags().Lookup(f.Name) == nil { + x.PersistentFlags().AddFlag(f) + } + }) + } + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if c.Flags().Lookup(f.Name) == nil { + c.Flags().AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + rmerge(c) +} diff --git a/vendor/github.com/spf13/cobra/md_docs.go b/vendor/github.com/spf13/cobra/md_docs.go new file mode 100644 index 00000000..6092c85a --- /dev/null +++ b/vendor/github.com/spf13/cobra/md_docs.go @@ -0,0 +1,138 @@ +//Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cobra + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + "time" +) + +func printOptions(out *bytes.Buffer, cmd *Command, name string) { + flags := cmd.NonInheritedFlags() + flags.SetOutput(out) + if flags.HasFlags() { + fmt.Fprintf(out, "### Options\n\n```\n") + flags.PrintDefaults() + fmt.Fprintf(out, "```\n\n") + } + + parentFlags := cmd.InheritedFlags() + parentFlags.SetOutput(out) + if parentFlags.HasFlags() { + fmt.Fprintf(out, "### Options inherited from parent commands\n\n```\n") + parentFlags.PrintDefaults() + fmt.Fprintf(out, "```\n\n") + } +} + +type byName []*Command + +func (s byName) Len() int { return len(s) } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } + +func GenMarkdown(cmd *Command, out *bytes.Buffer) { + GenMarkdownCustom(cmd, out, func(s string) string { return s }) +} + +func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) { + name := cmd.CommandPath() + + short := cmd.Short + long := cmd.Long + if len(long) == 0 { + long = short + } + + fmt.Fprintf(out, "## %s\n\n", name) + fmt.Fprintf(out, "%s\n\n", short) + fmt.Fprintf(out, "### Synopsis\n\n") + fmt.Fprintf(out, "\n%s\n\n", long) + + if cmd.Runnable() { + fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.UseLine()) + } + + if len(cmd.Example) > 0 { + fmt.Fprintf(out, "### Examples\n\n") + fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.Example) + } + + printOptions(out, cmd, name) + + if len(cmd.Commands()) > 0 || cmd.HasParent() { + fmt.Fprintf(out, "### SEE ALSO\n") + if cmd.HasParent() { + parent := cmd.Parent() + pname := parent.CommandPath() + link := pname + ".md" + link = strings.Replace(link, " ", "_", -1) + fmt.Fprintf(out, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if len(child.Deprecated) > 0 { + continue + } + cname := name + " " + child.Name() + link := cname + ".md" + link = strings.Replace(link, " ", "_", -1) + fmt.Fprintf(out, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short) + } + fmt.Fprintf(out, "\n") + } + + fmt.Fprintf(out, "###### Auto generated by spf13/cobra at %s\n", time.Now().UTC()) +} + +func GenMarkdownTree(cmd *Command, dir string) { + identity := func(s string) string { return s } + emptyStr := func(s string) string { return "" } + GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) +} + +func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) { + for _, c := range cmd.Commands() { + GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler) + } + out := new(bytes.Buffer) + + GenMarkdownCustom(cmd, out, linkHandler) + + filename := cmd.CommandPath() + filename = dir + strings.Replace(filename, " ", "_", -1) + ".md" + outFile, err := os.Create(filename) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + defer outFile.Close() + _, err = outFile.WriteString(filePrepender(filename)) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + _, err = outFile.Write(out.Bytes()) + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE new file mode 100644 index 00000000..63ed1cfe --- /dev/null +++ b/vendor/github.com/spf13/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md new file mode 100644 index 00000000..f7d63500 --- /dev/null +++ b/vendor/github.com/spf13/pflag/README.md @@ -0,0 +1,191 @@ +[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) + +## Description + +pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the [GNU extensions to the POSIX recommendations +for command-line options][1]. For a more precise description, see the +"Command-line flag syntax" section below. + +[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +pflag is available under the same style of BSD license as the Go language, +which can be found in the LICENSE file. + +## Installation + +pflag is available using the standard `go get` command. + +Install by running: + + go get github.com/ogier/pflag + +Run tests by running: + + go test github.com/ogier/pflag + +## Usage + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + +``` go +import flag "github.com/ogier/pflag" +``` + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + +``` go +var ip *int = flag.Int("flagname", 1234, "help message for flagname") +``` + +If you like, you can bind the flag to a variable using the Var() functions. 
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+After parsing, the arguments after the flags are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Command line flag syntax
+
+```
+--flag // boolean flags only
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be a boolean flag.
+
+```
+// boolean flags
+-f
+-abc
+
+// non-boolean flags
+-n 1234
+-Ifile
+
+// mixed
+-abcs "hello"
+-abcn1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+```go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags.
aka --old-flag-name == --new-flag-name + +```go +func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + switch name { + case "old-flag-name": + name = "new-flag-name" + break + } + return pflag.NormalizedName(name) +} + +myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) +``` + +## More info + +You can see the full reference documentation of the pflag package +[at godoc.org][3], or through go's standard documentation system by +running `godoc -http=:6060` and browsing to +[http://localhost:6060/pkg/github.com/ogier/pflag][2] after +installation. + +[2]: http://localhost:6060/pkg/github.com/ogier/pflag +[3]: http://godoc.org/github.com/ogier/pflag diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go new file mode 100644 index 00000000..70e2e0a6 --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool.go @@ -0,0 +1,83 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Type() string { + return "bool" +} + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.VarP(newBoolValue(value, p), name, "", usage) +} + +// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + f.VarP(newBoolValue(value, p), name, shorthand, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + CommandLine.VarP(newBoolValue(value, p), name, "", usage) +} + +// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + CommandLine.VarP(newBoolValue(value, p), name, shorthand, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, "", value, usage) + return p +} + +// Like Bool, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, shorthand, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return CommandLine.BoolP(name, "", value, usage) +} + +// Like Bool, but accepts a shorthand letter that can be used after a single dash. 
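+// A short illustrative sketch (the flag name is hypothetical):
+//
+//	verbose := BoolP("verbose", "v", false, "enable verbose output")
+//	Parse()
+//	// both --verbose and -v set *verbose to true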
+func BoolP(name, shorthand string, value bool, usage string) *bool { + return CommandLine.BoolP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go new file mode 100644 index 00000000..66ed7ac9 --- /dev/null +++ b/vendor/github.com/spf13/pflag/duration.go @@ -0,0 +1,71 @@ +package pflag + +import "time" + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Type() string { + return "duration" +} + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, "", usage) +} + +// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, "", usage) +} + +// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, "", value, usage) + return p +} + +// Like Duration, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, shorthand, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, "", value, usage) +} + +// Like Duration, but accepts a shorthand letter that can be used after a single dash. 
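+// A short illustrative sketch (the flag name is hypothetical):
+//
+//	timeout := DurationP("timeout", "t", 5*time.Second, "request timeout")
+//	// accepts any time.ParseDuration input, e.g. --timeout=250ms or -t 1m30s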
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 00000000..0070b93e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,695 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ pflag is a drop-in replacement for Go's flag package, implementing
+ POSIX/GNU-style --flags.
+
+ pflag is compatible with the GNU extensions to the POSIX recommendations
+ for command-line options. See
+ http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+ Usage:
+
+ pflag is a drop-in replacement for Go's native flag package. If you import
+ pflag under the name "flag" then all code should continue to function
+ with no changes.
+
+ import flag "github.com/ogier/pflag"
+
+ There is one exception to this: if you directly instantiate the Flag struct
+ there is one more field "Shorthand" that you will need to set.
+ Most code never instantiates this struct directly, and instead uses
+ functions such as String(), BoolVar(), and Var(), and is therefore
+ unaffected.
+
+ Define flags using flag.String(), Bool(), Int(), etc.
+
+ This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+ If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+ Or you can create custom flags that satisfy the Value interface (with
+ pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+ For such flags, the default value is just the initial value of the variable.
+
+ After all flags are defined, call
+ flag.Parse()
+ to parse the command line into the defined flags.
+
+ Flags may then be used directly. If you're using the flags themselves,
+ they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+ After parsing, the arguments after the flags are available as the
+ slice flag.Args() or individually as flag.Arg(i).
+ The arguments are indexed from 0 through flag.NArg()-1.
+
+ The pflag package also defines some new functions that are not in flag,
+ that give one-letter shorthands for flags. You can use these by appending
+ 'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagVal, "varname", "v", "help message")
+ Shorthand letters can be used with single dashes on the command line.
+ Boolean shorthand flags can be combined with other shorthand flags.
+
+ Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+ Unlike the flag package, a single dash before an option means something
+ different than a double dash. Single dashes signify a series of shorthand
+ letters for flags. All but the last shorthand letter must be a boolean flag.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+ Flag parsing stops after the terminator "--". Unlike the flag package,
+ flags can be interspersed with arguments anywhere on the command line
+ before this terminator.
+
+ Integer flags accept 1234, 0664, 0x1234 and may be negative.
+ Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+ TRUE, FALSE, True, False.
+ Duration flags accept any input valid for time.ParseDuration.
+
+ The default set of command-line flags is controlled by
+ top-level functions. The FlagSet type allows one to define
+ independent sets of flags, such as to implement subcommands
+ in a command-line interface. The methods of FlagSet are
+ analogous to the top-level functions for the command-line
+ flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ ContinueOnError ErrorHandling = iota
+ ExitOnError
+ PanicOnError
+)
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ formal map[NormalizedName]*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ exitOnError bool // does the program exit if there's an error?
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // true if the user set the value (false if left to the default)
+ Deprecated string // if this flag is deprecated, this string is the message recommending what to use instead
+ Annotations map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
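+// Note the order is plain byte-wise ordering of the normalized names, so
+// e.g. "Zebra" sorts before "alpha".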
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for k := range flags { + list[i] = string(k) + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[NormalizedName(name)] + } + return result +} + +func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { + f.normalizeNameFunc = n + for k, v := range f.formal { + delete(f.formal, k) + nname := f.normalizeFlagName(string(k)) + f.formal[nname] = v + v.Name = string(nname) + } +} + +func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { + if f.normalizeNameFunc != nil { + return f.normalizeNameFunc + } + return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } +} + +func (f *FlagSet) normalizeFlagName(name string) NormalizedName { + n := f.GetNormalizeFunc() + return n(f, name) +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +func (f *FlagSet) HasFlags() bool { + return len(f.formal) > 0 +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.lookup(f.normalizeFlagName(name)) +} + +// lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) lookup(name NormalizedName) *Flag { + return f.formal[name] +} + +// Mark a flag deprecated in your program +func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + flag.Deprecated = usageMessage + return nil +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.Lookup(name) +} + +// Set sets the value of the named flag. 
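+// For example (illustrative): fs.Set("port", "8080") parses "8080" via the
+// flag's Value.Set and marks the flag as Changed.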
+func (f *FlagSet) Set(name, value string) error { + normalName := f.normalizeFlagName(name) + flag, ok := f.formal[normalName] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[normalName] = flag + flag.Changed = true + if len(flag.Deprecated) > 0 { + fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (f *FlagSet) PrintDefaults() { + f.VisitAll(func(flag *Flag) { + if len(flag.Deprecated) > 0 { + return + } + format := "--%s=%s: %s\n" + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + format = "--%s=%q: %s\n" + } + if len(flag.Shorthand) > 0 { + format = " -%s, " + format + } else { + format = " %s " + format + } + fmt.Fprintf(f.out(), format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) + }) +} + +func (f *FlagSet) FlagUsages() string { + x := new(bytes.Buffer) + + f.VisitAll(func(flag *Flag) { + if len(flag.Deprecated) > 0 { + return + } + format := "--%s=%s: %s\n" + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + format = "--%s=%q: %s\n" + } + if len(flag.Shorthand) > 0 { + format = " -%s, " + format + } else { + format = " %s " + format + } + fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) + }) + + return x.String() +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. 
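+// For example, after parsing "--verbose build ./src" (where --verbose is a
+// defined boolean flag), Args would return []string{"build", "./src"}:
+// non-flag arguments are collected even between flags, because interspersed
+// mode is on by default.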
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+	f.VarP(value, name, "", usage)
+}
+
+// Like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:      name,
+		Shorthand: shorthand,
+		Usage:     usage,
+		Value:     value,
+		DefValue:  value.String(),
+	}
+	f.AddFlag(flag)
+}
+
+// AddFlag adds a flag to the FlagSet. It panics if a flag with the same name
+// or shorthand is already defined.
+func (f *FlagSet) AddFlag(flag *Flag) {
+	// Call the normalizeFlagName function only once
+	normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+	_, alreadythere := f.formal[normalizedFlagName]
+	if alreadythere {
+		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+		fmt.Fprintln(f.out(), msg)
+		panic(msg) // Happens only if flags are declared with identical names
+	}
+	if f.formal == nil {
+		f.formal = make(map[NormalizedName]*Flag)
+	}
+
+	flag.Name = string(normalizedFlagName)
+	f.formal[normalizedFlagName] = flag
+
+	if len(flag.Shorthand) == 0 {
+		return
+	}
+	if len(flag.Shorthand) > 1 {
+		fmt.Fprintf(f.out(), "%s shorthand is more than one ASCII character: %s\n", f.name, flag.Shorthand)
+		panic("shorthand is more than one character")
+	}
+	if f.shorthands == nil {
+		f.shorthands = make(map[byte]*Flag)
+	}
+	c := flag.Shorthand[0]
+	old, alreadythere := f.shorthands[c]
+	if alreadythere {
+		fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
+		panic("shorthand redefinition")
+	}
+	f.shorthands[c] = flag
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+	CommandLine.VarP(value, name, "", usage)
+}
+
+// Like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+	CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+	err := fmt.Errorf(format, a...)
+	fmt.Fprintln(f.out(), err)
+	f.usage()
+	return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
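+// The help output of a FlagSet can be customized by assigning its Usage
+// field (an illustrative sketch):
+//
+//	fs := NewFlagSet("tool", ContinueOnError)
+//	fs.Usage = func() {
+//		fmt.Fprintln(os.Stderr, "usage: tool [flags] [args]")
+//		fs.PrintDefaults()
+//	}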
+func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { + if err := flag.Value.Set(value); err != nil { + return f.failf("invalid argument %q for %s: %v", value, origArg, err) + } + // mark as visited for Visit() + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[f.normalizeFlagName(flag.Name)] = flag + flag.Changed = true + if len(flag.Deprecated) > 0 { + fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + return nil +} + +func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { + a = args + name := s[2:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + err = f.failf("bad flag syntax: %s", s) + return + } + split := strings.SplitN(name, "=", 2) + name = split[0] + flag, alreadythere := f.formal[f.normalizeFlagName(name)] + if !alreadythere { + if name == "help" { // special case for nice help message. + f.usage() + return a, ErrHelp + } + err = f.failf("unknown flag: --%s", name) + return + } + var value string + if len(split) == 2 { + // '--flag=arg' + value = split[1] + } else if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() { + // '--flag' (where flag is a bool) + value = "true" + } else { + // '--flag' (where flag was not a bool) + err = f.failf("flag needs an argument: %s", s) + return + } + err = f.setFlag(flag, value, s) + return +} + +func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) { + outArgs = args + outShorts = shorthands[1:] + c := shorthands[0] + + flag, alreadythere := f.shorthands[c] + if !alreadythere { + if c == 'h' { // special case for nice help message. + f.usage() + err = ErrHelp + return + } + //TODO continue on error + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return + } + var value string + if len(shorthands) > 2 && shorthands[1] == '=' { + value = shorthands[2:] + outShorts = "" + } else if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() { + value = "true" + } else if len(shorthands) > 1 { + value = shorthands[1:] + outShorts = "" + } else if len(args) > 0 { + value = args[0] + outArgs = args[1:] + } else { + err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + return + } + err = f.setFlag(flag, value, shorthands) + return +} + +func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { + a = args + shorthands := s[1:] + + for len(shorthands) > 0 { + shorthands, a, err = f.parseSingleShortArg(shorthands, args) + if err != nil { + return + } + } + + return +} + +func (f *FlagSet) parseArgs(args []string) (err error) { + for len(args) > 0 { + s := args[0] + args = args[1:] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + if !f.interspersed { + f.args = append(f.args, s) + f.args = append(f.args, args...) + return nil + } + f.args = append(f.args, s) + continue + } + + if s[1] == '-' { + if len(s) == 2 { // "--" terminates the flags + f.args = append(f.args, args...) + break + } + args, err = f.parseLongArg(s, args) + } else { + args, err = f.parseShortArg(s, args) + } + if err != nil { + return + } + } + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. 
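+// A typical sequence looks like this (an illustrative sketch; Bool is one of
+// the flag constructors defined elsewhere in the package):
+//
+//	fs := NewFlagSet("tool", ContinueOnError)
+//	verbose := fs.Bool("verbose", false, "enable verbose output")
+//	if err := fs.Parse(os.Args[1:]); err != nil {
+//		os.Exit(2)
+//	}
+//	fmt.Println("verbose:", *verbose)
+//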
+// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = make([]string, 0, len(arguments)) + err := f.parseArgs(arguments) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// Whether to support interspersed option/non-option arguments. +func SetInterspersed(interspersed bool) { + CommandLine.SetInterspersed(interspersed) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// The default set of command-line flags, parsed from os.Args. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + interspersed: true, + } + return f +} + +// Whether to support interspersed option/non-option arguments. +func (f *FlagSet) SetInterspersed(interspersed bool) { + f.interspersed = interspersed +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling +} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go new file mode 100644 index 00000000..b7ad67d9 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- float32 Value +type float32Value float32 + +func newFloat32Value(val float32, p *float32) *float32Value { + *p = val + return (*float32Value)(p) +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = float32Value(v) + return err +} + +func (f *float32Value) Type() string { + return "float32" +} + +func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. 
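+// For example (an illustrative sketch):
+//
+//	var ratio float32
+//	Float32Var(&ratio, "ratio", 0.5, "blend ratio between 0 and 1")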
+func Float32Var(p *float32, name string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, "", value, usage) + return p +} + +// Like Float32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, shorthand, value, usage) + return p +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func Float32(name string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, "", value, usage) +} + +// Like Float32, but accepts a shorthand letter that can be used after a single dash. +func Float32P(name, shorthand string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go new file mode 100644 index 00000000..03155123 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Type() string { + return "float64" +} + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. 
+// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, "", value, usage) + return p +} + +// Like Float64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, shorthand, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, "", value, usage) +} + +// Like Float64, but accepts a shorthand letter that can be used after a single dash. +func Float64P(name, shorthand string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go new file mode 100644 index 00000000..dca9da6e --- /dev/null +++ b/vendor/github.com/spf13/pflag/int.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Type() string { + return "int" +} + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.VarP(newIntValue(value, p), name, "", usage) +} + +// Like IntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { + f.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, "", usage) +} + +// Like IntVar, but accepts a shorthand letter that can be used after a single dash. +func IntVarP(p *int, name, shorthand string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, "", value, usage) + return p +} + +// Like Int, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, shorthand, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. 
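+// For example (an illustrative sketch):
+//
+//	workers := Int("workers", 4, "number of concurrent workers")
+//	Parse()
+//	fmt.Println("workers:", *workers)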
+func Int(name string, value int, usage string) *int { + return CommandLine.IntP(name, "", value, usage) +} + +// Like Int, but accepts a shorthand letter that can be used after a single dash. +func IntP(name, shorthand string, value int, usage string) *int { + return CommandLine.IntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go new file mode 100644 index 00000000..18eaacd6 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int32 Value +type int32Value int32 + +func newInt32Value(val int32, p *int32) *int32Value { + *p = val + return (*int32Value)(p) +} + +func (i *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + *i = int32Value(v) + return err +} + +func (i *int32Value) Type() string { + return "int32" +} + +func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) } + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, "", usage) +} + +// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func Int32Var(p *int32, name string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, "", usage) +} + +// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, "", value, usage) + return p +} + +// Like Int32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, shorthand, value, usage) + return p +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func Int32(name string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, "", value, usage) +} + +// Like Int32, but accepts a shorthand letter that can be used after a single dash. 
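+// With a shorthand registered, "--count=3", "-c3" and "-c 3" all set the
+// same flag (an illustrative sketch):
+//
+//	count := Int32P("count", "c", 1, "repeat count")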
+func Int32P(name, shorthand string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go new file mode 100644 index 00000000..0114aaaa --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Type() string { + return "int64" +} + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, "", usage) +} + +// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, "", usage) +} + +// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, "", value, usage) + return p +} + +// Like Int64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, shorthand, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, "", value, usage) +} + +// Like Int64, but accepts a shorthand letter that can be used after a single dash. 
+func Int64P(name, shorthand string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go new file mode 100644 index 00000000..aab1022f --- /dev/null +++ b/vendor/github.com/spf13/pflag/int8.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int8 Value +type int8Value int8 + +func newInt8Value(val int8, p *int8) *int8Value { + *p = val + return (*int8Value)(p) +} + +func (i *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + *i = int8Value(v) + return err +} + +func (i *int8Value) Type() string { + return "int8" +} + +func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) } + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, "", usage) +} + +// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func Int8Var(p *int8, name string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, "", usage) +} + +// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, "", value, usage) + return p +} + +// Like Int8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, shorthand, value, usage) + return p +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func Int8(name string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, "", value, usage) +} + +// Like Int8, but accepts a shorthand letter that can be used after a single dash. 
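+// Values outside the int8 range fail to parse: "--level=200" would be
+// rejected as an invalid argument. An illustrative sketch:
+//
+//	level := Int8P("level", "l", 0, "level between -128 and 127")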
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+	return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 00000000..efa75fbc
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,76 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+	*p = val
+	return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+	ip := net.ParseIP(s)
+	if ip == nil {
+		return fmt.Errorf("failed to parse IP: %q", s)
+	}
+	*i = ipValue(ip)
+	return nil
+}
+
+func (i *ipValue) Type() string {
+	return "ip"
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+	f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// Like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+	f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+	CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// Like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+	CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+	p := new(net.IP)
+	f.IPVarP(p, name, "", value, usage)
+	return p
+}
+
+// Like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+	p := new(net.IP)
+	f.IPVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+	return CommandLine.IPP(name, "", value, usage)
+}
+
+// Like IP, but accepts a shorthand letter that can be used after a single dash.
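+// For example (an illustrative sketch; net.ParseIP accepts both IPv4 and
+// IPv6 literals):
+//
+//	bind := IPP("bind", "b", net.ParseIP("127.0.0.1"), "address to listen on")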
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+	return CommandLine.IPP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 00000000..09b9533e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+)
+
+// -- net.IPMask value
type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+	*p = val
+	return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+	ip := ParseIPv4Mask(s)
+	if ip == nil {
+		return fmt.Errorf("failed to parse IP mask: %q", s)
+	}
+	*i = ipMaskValue(ip)
+	return nil
+}
+
+func (i *ipMaskValue) Type() string {
+	return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+	mask := net.ParseIP(s)
+	if mask == nil {
+		return nil
+	}
+	return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, "", value, usage)
+	return p
+}
+
+// Like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// Like IPMask, but accepts a shorthand letter that can be used after a single dash.
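+// For example (an illustrative sketch):
+//
+//	mask := IPMaskP("netmask", "m", ParseIPv4Mask("255.255.255.0"), "IPv4 netmask in dotted form")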
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go new file mode 100644 index 00000000..362fbf8a --- /dev/null +++ b/vendor/github.com/spf13/pflag/string.go @@ -0,0 +1,69 @@ +package pflag + +import "fmt" + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} +func (s *stringValue) Type() string { + return "string" +} + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.VarP(newStringValue(value, p), name, "", usage) +} + +// Like StringVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { + f.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, "", usage) +} + +// Like StringVar, but accepts a shorthand letter that can be used after a single dash. +func StringVarP(p *string, name, shorthand string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, "", value, usage) + return p +} + +// Like String, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, shorthand, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.StringP(name, "", value, usage) +} + +// Like String, but accepts a shorthand letter that can be used after a single dash. 
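+// For example (an illustrative sketch; note that string defaults are printed
+// quoted by PrintDefaults):
+//
+//	name := StringP("name", "n", "world", "name to greet")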
+func StringP(name, shorthand string, value string, usage string) *string { + return CommandLine.StringP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go new file mode 100644 index 00000000..c063fe7c --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Type() string { + return "uint" +} + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, "", usage) +} + +// Like UintVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, "", usage) +} + +// Like UintVar, but accepts a shorthand letter that can be used after a single dash. +func UintVarP(p *uint, name, shorthand string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, "", value, usage) + return p +} + +// Like Uint, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, shorthand, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return CommandLine.UintP(name, "", value, usage) +} + +// Like Uint, but accepts a shorthand letter that can be used after a single dash. 
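+// Negative values fail to parse for unsigned flags, so "--jobs=-1" would be
+// rejected as an invalid argument. An illustrative sketch:
+//
+//	jobs := UintP("jobs", "j", 1, "number of parallel jobs")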
+func UintP(name, shorthand string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 00000000..ab1c1f9e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,72 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+	*p = val
+	return (*uint16Value)(p)
+}
+func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) }
+func (i *uint16Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 16)
+	*i = uint16Value(v)
+	return err
+}
+
+func (i *uint16Value) Type() string {
+	return "uint16"
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Like Uint16, but accepts a shorthand letter that can be used after a single dash.
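+// A TCP port fits naturally in a uint16; values above 65535 fail to parse.
+// An illustrative sketch:
+//
+//	port := Uint16P("port", "p", 8080, "TCP port to listen on")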
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 00000000..db635ae8
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,72 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+	*p = val
+	return (*uint32Value)(p)
+}
+func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) }
+func (i *uint32Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 32)
+	*i = uint32Value(v)
+	return err
+}
+
+func (i *uint32Value) Type() string {
+	return "uint32"
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go new file mode 100644 index 00000000..99c7e805 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint64.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Type() string { + return "uint64" +} + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, "", usage) +} + +// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, "", usage) +} + +// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, "", value, usage) + return p +} + +// Like Uint64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, shorthand, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, "", value, usage) +} + +// Like Uint64, but accepts a shorthand letter that can be used after a single dash. 
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go new file mode 100644 index 00000000..6fef508d --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint8.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint8 Value +type uint8Value uint8 + +func newUint8Value(val uint8, p *uint8) *uint8Value { + *p = val + return (*uint8Value)(p) +} + +func (i *uint8Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 8) + *i = uint8Value(v) + return err +} + +func (i *uint8Value) Type() string { + return "uint8" +} + +func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) } + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, "", usage) +} + +// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func Uint8Var(p *uint8, name string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, "", usage) +} + +// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, "", value, usage) + return p +} + +// Like Uint8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, shorthand, value, usage) + return p +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func Uint8(name string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, "", value, usage) +} + +// Like Uint8, but accepts a shorthand letter that can be used after a single dash. 
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/version/print.go b/version/print.go
new file mode 100644
index 00000000..a82bce39
--- /dev/null
+++ b/version/print.go
@@ -0,0 +1,26 @@
+package version
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+// FprintVersion outputs the version string to the writer, in the following
+// format, followed by a newline:
+//
+//	<cmd> <project> <version>
+//
+// For example, a binary "registry" built from github.com/docker/distribution
+// with version "v2.0" would print the following:
+//
+//	registry github.com/docker/distribution v2.0
+//
+func FprintVersion(w io.Writer) {
+	fmt.Fprintln(w, os.Args[0], Package, Version)
+}
+
+// PrintVersion outputs the version information, from FprintVersion, to stdout.
+func PrintVersion() {
+	FprintVersion(os.Stdout)
+}
diff --git a/version/version.go b/version/version.go
new file mode 100644
index 00000000..807bb4a2
--- /dev/null
+++ b/version/version.go
@@ -0,0 +1,11 @@
+package version
+
+// Package is the overall, canonical project import path under which the
+// package was built.
+var Package = "github.com/docker/distribution"
+
+// Version indicates which version of the binary is running. This is set to
+// the latest release tag by hand, always suffixed by "+unknown". During
+// build, it will be replaced by the actual version. The value here will be
+// used if the registry is run after a go get based install.
+var Version = "v2.6.0+unknown"
diff --git a/version/version.sh b/version/version.sh
new file mode 100755
index 00000000..53e29ce9
--- /dev/null
+++ b/version/version.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# This shell script outputs the current, desired content of version.go, using
+# git describe. For best effect, pipe this to the target file. Generally, this
+# only needs to be updated for releases. The actual version value will be
+# replaced during build time if the makefile is used.
+
+set -e
+
+cat <