From c12498aced30249f595644ebfc3f087c06aa5382 Mon Sep 17 00:00:00 2001
From: Dmitry Smirnov
Date: Tue, 11 Sep 2018 05:03:46 +0100
Subject: [PATCH 1/1] Import docker.io_18.06.1+dfsg1.orig.tar.xz

[dgit import orig docker.io_18.06.1+dfsg1.orig.tar.xz]
---
 CHANGELOG.md | 203 +
 CONTRIBUTING.md | 126 +
 Makefile | 56 +
 README.md | 94 +
 VERSION | 1 +
 cli/.dockerignore | 2 +
 cli/.mailmap | 477 +
 cli/AUTHORS | 648 +
 cli/CONTRIBUTING.md | 365 +
 cli/Jenkinsfile | 12 +
 cli/LICENSE | 191 +
 cli/MAINTAINERS | 136 +
 cli/Makefile | 90 +
 cli/NOTICE | 19 +
 cli/README.md | 69 +
 cli/TESTING.md | 87 +
 cli/VERSION | 1 +
 cli/appveyor.yml | 23 +
 cli/circle.yml | 116 +
 cli/cli/cobra.go | 152 +
 cli/cli/command/bundlefile/bundlefile.go | 70 +
 cli/cli/command/bundlefile/bundlefile_test.go | 78 +
 cli/cli/command/checkpoint/client_test.go | 36 +
 cli/cli/command/checkpoint/cmd.go | 28 +
 cli/cli/command/checkpoint/create.go | 57 +
 cli/cli/command/checkpoint/create_test.go | 72 +
 cli/cli/command/checkpoint/list.go | 54 +
 cli/cli/command/checkpoint/list_test.go | 67 +
 cli/cli/command/checkpoint/remove.go | 44 +
 cli/cli/command/checkpoint/remove_test.go | 65 +
 .../checkpoint-list-with-options.golden | 2 +
 cli/cli/command/cli.go | 320 +
 cli/cli/command/cli_test.go | 228 +
 cli/cli/command/commands/commands.go | 133 +
 cli/cli/command/config/client_test.go | 45 +
 cli/cli/command/config/cmd.go | 29 +
 cli/cli/command/config/create.go | 86 +
 cli/cli/command/config/create_test.go | 143 +
 cli/cli/command/config/inspect.go | 66 +
 cli/cli/command/config/inspect_test.go | 172 +
 cli/cli/command/config/ls.go | 77 +
 cli/cli/command/config/ls_test.go | 158 +
 cli/cli/command/config/remove.go | 53 +
 cli/cli/command/config/remove_test.go | 79 +
 .../testdata/config-create-with-name.golden | 1 +
 .../config-inspect-pretty.simple.golden | 8 +
 ...g-inspect-with-format.json-template.golden | 1 +
 ...inspect-with-format.simple-template.golden | 1 +
 ...format.multiple-configs-with-labels.golden | 26 +
 ...nspect-without-format.single-config.golden | 12 +
 .../config/testdata/config-list-sort.golden | 4 +
 .../config-list-with-config-format.golden | 2 +
 .../testdata/config-list-with-filter.golden | 3 +
 .../testdata/config-list-with-format.golden | 2 +
 .../config-list-with-quiet-option.golden | 2 +
 cli/cli/command/container/attach.go | 181 +
 cli/cli/command/container/attach_test.go | 129 +
 cli/cli/command/container/client_test.go | 126 +
 cli/cli/command/container/cmd.go | 45 +
 cli/cli/command/container/commit.go | 75 +
 cli/cli/command/container/cp.go | 304 +
 cli/cli/command/container/cp_test.go | 192 +
 cli/cli/command/container/create.go | 229 +
 cli/cli/command/container/create_test.go | 172 +
 cli/cli/command/container/diff.go | 47 +
 cli/cli/command/container/exec.go | 214 +
 cli/cli/command/container/exec_test.go | 227 +
 cli/cli/command/container/export.go | 58 +
 cli/cli/command/container/hijack.go | 208 +
 cli/cli/command/container/inspect.go | 47 +
 cli/cli/command/container/kill.go | 56 +
 cli/cli/command/container/list.go | 140 +
 cli/cli/command/container/list_test.go | 164 +
 cli/cli/command/container/logs.go | 80 +
 cli/cli/command/container/logs_test.go | 62 +
 cli/cli/command/container/opts.go | 836 +
 cli/cli/command/container/opts_test.go | 646 +
 cli/cli/command/container/pause.go | 49 +
 cli/cli/command/container/port.go | 78 +
 cli/cli/command/container/prune.go | 78 +
 cli/cli/command/container/ps_test.go | 119 +
 cli/cli/command/container/rename.go | 51 +
 cli/cli/command/container/restart.go | 62 +
 cli/cli/command/container/rm.go | 73 +
cli/cli/command/container/run.go | 341 + cli/cli/command/container/run_test.go | 75 + cli/cli/command/container/start.go | 202 + cli/cli/command/container/stats.go | 245 + cli/cli/command/container/stats_helpers.go | 239 + .../command/container/stats_helpers_test.go | 47 + cli/cli/command/container/stats_unit_test.go | 25 + cli/cli/command/container/stop.go | 67 + .../container-list-format-name-name.golden | 2 + .../container-list-format-with-arg.golden | 2 + .../container-list-with-config-format.golden | 2 + .../container-list-with-format.golden | 2 + ...tainer-list-without-format-no-trunc.golden | 3 + .../container-list-without-format.golden | 6 + cli/cli/command/container/testdata/utf16.env | Bin 0 -> 54 bytes .../command/container/testdata/utf16be.env | Bin 0 -> 54 bytes cli/cli/command/container/testdata/utf8.env | 3 + cli/cli/command/container/testdata/valid.env | 1 + .../command/container/testdata/valid.label | 1 + cli/cli/command/container/top.go | 57 + cli/cli/command/container/tty.go | 103 + cli/cli/command/container/unpause.go | 50 + cli/cli/command/container/update.go | 133 + cli/cli/command/container/utils.go | 162 + cli/cli/command/container/utils_test.go | 70 + cli/cli/command/container/wait.go | 53 + cli/cli/command/events_utils.go | 47 + cli/cli/command/formatter/checkpoint.go | 52 + cli/cli/command/formatter/checkpoint_test.go | 52 + cli/cli/command/formatter/config.go | 171 + cli/cli/command/formatter/config_test.go | 64 + cli/cli/command/formatter/container.go | 344 + cli/cli/command/formatter/container_test.go | 658 + cli/cli/command/formatter/custom.go | 35 + cli/cli/command/formatter/custom_test.go | 28 + cli/cli/command/formatter/diff.go | 72 + cli/cli/command/formatter/diff_test.go | 60 + cli/cli/command/formatter/disk_usage.go | 425 + cli/cli/command/formatter/disk_usage_test.go | 110 + cli/cli/command/formatter/displayutils.go | 61 + .../command/formatter/displayutils_test.go | 31 + cli/cli/command/formatter/formatter.go | 119 + cli/cli/command/formatter/history.go | 109 + cli/cli/command/formatter/history_test.go | 226 + cli/cli/command/formatter/image.go | 272 + cli/cli/command/formatter/image_test.go | 356 + cli/cli/command/formatter/network.go | 129 + cli/cli/command/formatter/network_test.go | 213 + cli/cli/command/formatter/node.go | 336 + cli/cli/command/formatter/node_test.go | 348 + cli/cli/command/formatter/plugin.go | 94 + cli/cli/command/formatter/plugin_test.go | 183 + cli/cli/command/formatter/reflect.go | 66 + cli/cli/command/formatter/reflect_test.go | 66 + cli/cli/command/formatter/search.go | 103 + cli/cli/command/formatter/search_test.go | 280 + cli/cli/command/formatter/secret.go | 178 + cli/cli/command/formatter/secret_test.go | 64 + cli/cli/command/formatter/service.go | 646 + cli/cli/command/formatter/service_test.go | 359 + cli/cli/command/formatter/stack.go | 77 + cli/cli/command/formatter/stack_test.go | 73 + cli/cli/command/formatter/stats.go | 224 + cli/cli/command/formatter/stats_test.go | 301 + cli/cli/command/formatter/task.go | 150 + cli/cli/command/formatter/task_test.go | 106 + ...ainer-context-write-special-headers.golden | 3 + .../disk-usage-context-write-custom.golden | 5 + .../testdata/disk-usage-raw-format.golden | 24 + .../search-context-write-stars-table.golden | 2 + .../search-context-write-table.golden | 3 + .../testdata/service-context-write-raw.golden | 14 + .../task-context-write-table-custom.golden | 3 + cli/cli/command/formatter/trust.go | 150 + cli/cli/command/formatter/trust_test.go | 239 + 
cli/cli/command/formatter/volume.go | 131 + cli/cli/command/formatter/volume_test.go | 183 + cli/cli/command/idresolver/client_test.go | 29 + cli/cli/command/idresolver/idresolver.go | 70 + cli/cli/command/idresolver/idresolver_test.go | 146 + cli/cli/command/image/build.go | 627 + cli/cli/command/image/build/context.go | 425 + cli/cli/command/image/build/context_test.go | 299 + cli/cli/command/image/build/context_unix.go | 11 + .../command/image/build/context_windows.go | 17 + cli/cli/command/image/build/dockerignore.go | 39 + cli/cli/command/image/build_buildkit.go | 346 + cli/cli/command/image/build_session.go | 158 + cli/cli/command/image/build_test.go | 216 + cli/cli/command/image/client_test.go | 124 + cli/cli/command/image/cmd.go | 33 + cli/cli/command/image/history.go | 64 + cli/cli/command/image/history_test.go | 105 + cli/cli/command/image/import.go | 87 + cli/cli/command/image/import_test.go | 97 + cli/cli/command/image/inspect.go | 44 + cli/cli/command/image/inspect_test.go | 88 + cli/cli/command/image/list.go | 96 + cli/cli/command/image/list_test.go | 98 + cli/cli/command/image/load.go | 76 + cli/cli/command/image/load_test.go | 101 + cli/cli/command/image/prune.go | 94 + cli/cli/command/image/prune_test.go | 94 + cli/cli/command/image/pull.go | 83 + cli/cli/command/image/pull_test.go | 121 + cli/cli/command/image/push.go | 70 + cli/cli/command/image/push_test.go | 71 + cli/cli/command/image/remove.go | 86 + cli/cli/command/image/remove_test.go | 134 + cli/cli/command/image/save.go | 73 + cli/cli/command/image/save_test.go | 103 + cli/cli/command/image/tag.go | 41 + cli/cli/command/image/tag_test.go | 41 + .../history-command-success.non-human.golden | 2 + ...tory-command-success.quiet-no-trunc.golden | 1 + .../history-command-success.quiet.golden | 1 + .../history-command-success.simple.golden | 2 + .../testdata/import-command-success.input.txt | 1 + .../inspect-command-success.format.golden | 1 + ...inspect-command-success.simple-many.golden | 56 + .../inspect-command-success.simple.golden | 29 + .../list-command-success.filters.golden | 1 + .../list-command-success.format.golden | 0 .../list-command-success.match-name.golden | 1 + .../list-command-success.quiet-format.golden | 0 .../list-command-success.simple.golden | 1 + .../load-command-success.input-file.golden | 1 + .../testdata/load-command-success.input.txt | 1 + .../testdata/load-command-success.json.golden | 1 + .../load-command-success.simple.golden | 1 + .../testdata/prune-command-success.all.golden | 2 + ...prune-command-success.force-deleted.golden | 4 + ...rune-command-success.force-untagged.golden | 4 + .../pull-command-success.simple-no-tag.golden | 1 + .../pull-command-success.simple.golden | 0 ...-success.Image Deleted and Untagged.golden | 2 + ...emove-command-success.Image Deleted.golden | 1 + ...move-command-success.Image Untagged.golden | 1 + ...s.Image not found with force option.golden | 0 cli/cli/command/image/trust.go | 352 + cli/cli/command/image/trust_test.go | 73 + cli/cli/command/in.go | 56 + cli/cli/command/inspect/inspector.go | 199 + cli/cli/command/inspect/inspector_test.go | 259 + cli/cli/command/manifest/annotate.go | 97 + cli/cli/command/manifest/annotate_test.go | 77 + cli/cli/command/manifest/client_test.go | 48 + cli/cli/command/manifest/cmd.go | 45 + cli/cli/command/manifest/create_list.go | 82 + cli/cli/command/manifest/create_test.go | 116 + cli/cli/command/manifest/inspect.go | 148 + cli/cli/command/manifest/inspect_test.go | 146 + cli/cli/command/manifest/push.go | 281 + 
cli/cli/command/manifest/push_test.go | 69 + .../manifest/testdata/inspect-annotate.golden | 32 + .../testdata/inspect-manifest-list.golden | 24 + .../manifest/testdata/inspect-manifest.golden | 16 + cli/cli/command/manifest/util.go | 80 + cli/cli/command/network/client_test.go | 45 + cli/cli/command/network/cmd.go | 28 + cli/cli/command/network/connect.go | 63 + cli/cli/command/network/connect_test.go | 70 + cli/cli/command/network/create.go | 248 + cli/cli/command/network/create_test.go | 174 + cli/cli/command/network/disconnect.go | 41 + cli/cli/command/network/disconnect_test.go | 41 + cli/cli/command/network/inspect.go | 48 + cli/cli/command/network/list.go | 75 + cli/cli/command/network/list_test.go | 63 + cli/cli/command/network/prune.go | 76 + cli/cli/command/network/remove.go | 53 + .../network/testdata/network-list.golden | 2 + cli/cli/command/node/client_test.go | 69 + cli/cli/command/node/cmd.go | 60 + cli/cli/command/node/demote.go | 36 + cli/cli/command/node/demote_test.go | 84 + cli/cli/command/node/inspect.go | 72 + cli/cli/command/node/inspect_test.go | 118 + cli/cli/command/node/list.go | 85 + cli/cli/command/node/list_test.go | 141 + cli/cli/command/node/opts.go | 23 + cli/cli/command/node/promote.go | 36 + cli/cli/command/node/promote_test.go | 84 + cli/cli/command/node/ps.go | 104 + cli/cli/command/node/ps_test.go | 128 + cli/cli/command/node/remove.go | 56 + cli/cli/command/node/remove_test.go | 44 + .../node-inspect-pretty.manager-leader.golden | 24 + .../node-inspect-pretty.manager.golden | 24 + .../node-inspect-pretty.simple.golden | 22 + .../testdata/node-list-format-flag.golden | 2 + .../node-list-format-from-config.golden | 3 + .../node/testdata/node-list-sort.golden | 4 + .../node/testdata/node-ps.simple.golden | 2 + .../node/testdata/node-ps.with-errors.golden | 4 + cli/cli/command/node/update.go | 120 + cli/cli/command/node/update_test.go | 169 + cli/cli/command/orchestrator.go | 77 + cli/cli/command/orchestrator_test.go | 118 + cli/cli/command/out.go | 50 + cli/cli/command/plugin/client_test.go | 45 + cli/cli/command/plugin/cmd.go | 32 + cli/cli/command/plugin/create.go | 128 + cli/cli/command/plugin/create_test.go | 123 + cli/cli/command/plugin/disable.go | 36 + cli/cli/command/plugin/disable_test.go | 58 + cli/cli/command/plugin/enable.go | 48 + cli/cli/command/plugin/enable_test.go | 70 + cli/cli/command/plugin/inspect.go | 43 + cli/cli/command/plugin/install.go | 174 + cli/cli/command/plugin/list.go | 64 + cli/cli/command/plugin/push.go | 76 + cli/cli/command/plugin/remove.go | 54 + cli/cli/command/plugin/remove_test.go | 71 + cli/cli/command/plugin/set.go | 22 + cli/cli/command/plugin/upgrade.go | 90 + cli/cli/command/registry.go | 199 + cli/cli/command/registry/login.go | 169 + cli/cli/command/registry/login_test.go | 157 + cli/cli/command/registry/logout.go | 76 + cli/cli/command/registry/search.go | 104 + cli/cli/command/registry_test.go | 147 + cli/cli/command/secret/client_test.go | 45 + cli/cli/command/secret/cmd.go | 29 + cli/cli/command/secret/create.go | 109 + cli/cli/command/secret/create_test.go | 169 + cli/cli/command/secret/inspect.go | 65 + cli/cli/command/secret/inspect_test.go | 173 + cli/cli/command/secret/ls.go | 76 + cli/cli/command/secret/ls_test.go | 160 + cli/cli/command/secret/remove.go | 53 + cli/cli/command/secret/remove_test.go | 79 + .../testdata/secret-create-with-name.golden | 1 + .../secret-inspect-pretty.simple.golden | 7 + ...t-inspect-with-format.json-template.golden | 1 + ...inspect-with-format.simple-template.golden | 
1 + ...format.multiple-secrets-with-labels.golden | 26 + ...nspect-without-format.single-secret.golden | 12 + .../secret/testdata/secret-list-sort.golden | 4 + .../secret-list-with-config-format.golden | 2 + .../testdata/secret-list-with-filter.golden | 3 + .../testdata/secret-list-with-format.golden | 2 + .../secret-list-with-quiet-option.golden | 2 + cli/cli/command/service/client_test.go | 77 + cli/cli/command/service/cmd.go | 34 + cli/cli/command/service/create.go | 137 + .../command/service/generic_resource_opts.go | 105 + .../service/generic_resource_opts_test.go | 23 + cli/cli/command/service/helpers.go | 33 + cli/cli/command/service/inspect.go | 93 + cli/cli/command/service/inspect_test.go | 160 + cli/cli/command/service/list.go | 139 + cli/cli/command/service/list_test.go | 28 + cli/cli/command/service/logs.go | 349 + cli/cli/command/service/opts.go | 909 + cli/cli/command/service/opts_test.go | 226 + cli/cli/command/service/parse.go | 118 + cli/cli/command/service/progress/progress.go | 504 + .../command/service/progress/progress_test.go | 375 + cli/cli/command/service/ps.go | 155 + cli/cli/command/service/ps_test.go | 135 + cli/cli/command/service/remove.go | 48 + cli/cli/command/service/rollback.go | 64 + cli/cli/command/service/rollback_test.go | 104 + cli/cli/command/service/scale.go | 122 + .../service/testdata/service-list-sort.golden | 3 + cli/cli/command/service/trust.go | 87 + cli/cli/command/service/update.go | 1205 + cli/cli/command/service/update_test.go | 778 + cli/cli/command/stack/client_test.go | 239 + cli/cli/command/stack/cmd.go | 128 + cli/cli/command/stack/common.go | 31 + cli/cli/command/stack/deploy.go | 90 + cli/cli/command/stack/deploy_test.go | 17 + cli/cli/command/stack/kubernetes/cli.go | 126 + cli/cli/command/stack/kubernetes/client.go | 103 + .../command/stack/kubernetes/conversion.go | 240 + .../stack/kubernetes/conversion_test.go | 192 + cli/cli/command/stack/kubernetes/convert.go | 332 + cli/cli/command/stack/kubernetes/deploy.go | 158 + cli/cli/command/stack/kubernetes/list.go | 136 + cli/cli/command/stack/kubernetes/ps.go | 112 + cli/cli/command/stack/kubernetes/remove.go | 27 + cli/cli/command/stack/kubernetes/services.go | 158 + .../command/stack/kubernetes/services_test.go | 138 + cli/cli/command/stack/kubernetes/stack.go | 107 + .../command/stack/kubernetes/stackclient.go | 197 + .../stack/kubernetes/stackclient_test.go | 64 + .../stack/kubernetes/testdata/warnings.golden | 31 + cli/cli/command/stack/kubernetes/warnings.go | 145 + .../command/stack/kubernetes/warnings_test.go | 78 + cli/cli/command/stack/kubernetes/watcher.go | 255 + .../command/stack/kubernetes/watcher_test.go | 218 + cli/cli/command/stack/list.go | 79 + cli/cli/command/stack/list_test.go | 152 + cli/cli/command/stack/loader/loader.go | 152 + cli/cli/command/stack/loader/loader_test.go | 47 + cli/cli/command/stack/options/opts.go | 43 + cli/cli/command/stack/ps.go | 48 + cli/cli/command/stack/ps_test.go | 171 + cli/cli/command/stack/remove.go | 43 + cli/cli/command/stack/remove_test.go | 166 + cli/cli/command/stack/services.go | 46 + cli/cli/command/stack/services_test.go | 170 + cli/cli/command/stack/swarm/client_test.go | 239 + cli/cli/command/stack/swarm/common.go | 50 + cli/cli/command/stack/swarm/deploy.go | 80 + .../command/stack/swarm/deploy_bundlefile.go | 124 + .../stack/swarm/deploy_bundlefile_test.go | 50 + .../command/stack/swarm/deploy_composefile.go | 281 + .../stack/swarm/deploy_composefile_test.go | 67 + cli/cli/command/stack/swarm/deploy_test.go | 110 + 
cli/cli/command/stack/swarm/list.go | 45 + cli/cli/command/stack/swarm/ps.go | 35 + cli/cli/command/stack/swarm/remove.go | 140 + cli/cli/command/stack/swarm/services.go | 66 + .../testdata/bundlefile_with_two_services.dab | 29 + .../testdata/stack-list-sort-natural.golden | 4 + .../stack/testdata/stack-list-sort.golden | 3 + .../testdata/stack-list-with-format.golden | 1 + .../testdata/stack-list-without-format.golden | 2 + .../stack-ps-with-config-format.golden | 1 + .../testdata/stack-ps-with-format.golden | 1 + .../stack-ps-with-no-resolve-option.golden | 1 + .../stack-ps-with-no-trunc-option.golden | 1 + .../stack-ps-with-quiet-option.golden | 1 + .../testdata/stack-ps-without-format.golden | 2 + .../stack-services-with-config-format.golden | 1 + .../stack-services-with-format.golden | 1 + .../stack-services-with-quiet-option.golden | 1 + .../stack-services-without-format.golden | 2 + cli/cli/command/stream.go | 34 + cli/cli/command/swarm/ca.go | 141 + cli/cli/command/swarm/ca_test.go | 300 + cli/cli/command/swarm/client_test.go | 85 + cli/cli/command/swarm/cmd.go | 33 + cli/cli/command/swarm/init.go | 98 + cli/cli/command/swarm/init_test.go | 125 + cli/cli/command/swarm/join.go | 87 + cli/cli/command/swarm/join_test.go | 100 + cli/cli/command/swarm/join_token.go | 119 + cli/cli/command/swarm/join_token_test.go | 211 + cli/cli/command/swarm/leave.go | 43 + cli/cli/command/swarm/leave_test.go | 50 + cli/cli/command/swarm/opts.go | 273 + cli/cli/command/swarm/opts_test.go | 111 + .../command/swarm/progress/root_rotation.go | 120 + .../swarm/testdata/init-init-autolock.golden | 11 + .../command/swarm/testdata/init-init.golden | 4 + .../testdata/jointoken-manager-quiet.golden | 1 + .../testdata/jointoken-manager-rotate.golden | 6 + .../swarm/testdata/jointoken-manager.golden | 4 + .../testdata/jointoken-worker-quiet.golden | 1 + .../swarm/testdata/jointoken-worker.golden | 4 + .../unlockkeys-unlock-key-quiet.golden | 1 + .../unlockkeys-unlock-key-rotate-quiet.golden | 1 + .../unlockkeys-unlock-key-rotate.golden | 9 + .../testdata/unlockkeys-unlock-key.golden | 7 + .../testdata/update-all-flags-quiet.golden | 1 + .../update-autolock-unlock-key.golden | 8 + .../swarm/testdata/update-noargs.golden | 14 + cli/cli/command/swarm/unlock.go | 74 + cli/cli/command/swarm/unlock_key.go | 89 + cli/cli/command/swarm/unlock_key_test.go | 171 + cli/cli/command/swarm/unlock_test.go | 98 + cli/cli/command/swarm/update.go | 71 + cli/cli/command/swarm/update_test.go | 185 + cli/cli/command/system/client_test.go | 23 + cli/cli/command/system/cmd.go | 25 + cli/cli/command/system/df.go | 70 + cli/cli/command/system/events.go | 142 + cli/cli/command/system/info.go | 360 + cli/cli/command/system/info_test.go | 238 + cli/cli/command/system/inspect.go | 218 + cli/cli/command/system/prune.go | 139 + cli/cli/command/system/prune_test.go | 22 + .../testdata/docker-client-version.golden | 44 + .../testdata/docker-info-no-swarm.golden | 51 + .../testdata/docker-info-warnings.golden | 11 + .../testdata/docker-info-with-swarm.golden | 73 + cli/cli/command/system/version.go | 270 + cli/cli/command/system/version_test.go | 113 + cli/cli/command/task/client_test.go | 29 + cli/cli/command/task/print.go | 93 + cli/cli/command/task/print_test.go | 128 + .../task-print-with-global-service.golden | 1 + .../task-print-with-indentation.golden | 3 + .../task-print-with-no-trunc-option.golden | 1 + .../task-print-with-quiet-option.golden | 1 + .../task-print-with-replicated-service.golden | 1 + .../task-print-with-resolution.golden 
| 1 + cli/cli/command/trust.go | 15 + cli/cli/command/trust/cmd.go | 25 + cli/cli/command/trust/common.go | 167 + cli/cli/command/trust/helpers.go | 47 + cli/cli/command/trust/helpers_test.go | 24 + cli/cli/command/trust/inspect.go | 115 + cli/cli/command/trust/inspect_pretty.go | 90 + cli/cli/command/trust/inspect_pretty_test.go | 442 + cli/cli/command/trust/inspect_test.go | 131 + cli/cli/command/trust/key.go | 22 + cli/cli/command/trust/key_generate.go | 134 + cli/cli/command/trust/key_generate_test.go | 134 + cli/cli/command/trust/key_load.go | 115 + cli/cli/command/trust/key_load_test.go | 253 + cli/cli/command/trust/revoke.go | 125 + cli/cli/command/trust/revoke_test.go | 148 + cli/cli/command/trust/sign.go | 247 + cli/cli/command/trust/sign_test.go | 309 + cli/cli/command/trust/signer.go | 22 + cli/cli/command/trust/signer_add.go | 141 + cli/cli/command/trust/signer_add_test.go | 147 + cli/cli/command/trust/signer_remove.go | 142 + cli/cli/command/trust/signer_remove_test.go | 128 + .../testdata/trust-inspect-empty-repo.golden | 25 + .../trust-inspect-full-repo-no-signers.golden | 33 + ...rust-inspect-full-repo-with-signers.golden | 65 + ...inspect-multiple-repos-with-signers.golden | 128 + .../trust-inspect-one-tag-no-signers.golden | 33 + ...inspect-pretty-full-repo-no-signers.golden | 10 + ...spect-pretty-full-repo-with-signers.golden | 18 + ...t-inspect-pretty-one-tag-no-signers.golden | 10 + ...ct-pretty-unsigned-tag-with-signers.golden | 14 + .../trust-inspect-uninitialized.golden | 1 + ...t-inspect-unsigned-tag-with-signers.golden | 42 + cli/cli/command/utils.go | 127 + cli/cli/command/volume/client_test.go | 54 + cli/cli/command/volume/cmd.go | 26 + cli/cli/command/volume/create.go | 69 + cli/cli/command/volume/create_test.go | 126 + cli/cli/command/volume/inspect.go | 46 + cli/cli/command/volume/inspect_test.go | 141 + cli/cli/command/volume/list.go | 73 + cli/cli/command/volume/list_test.go | 111 + cli/cli/command/volume/prune.go | 78 + cli/cli/command/volume/prune_test.go | 119 + cli/cli/command/volume/remove.go | 69 + cli/cli/command/volume/remove_test.go | 44 + ...e-inspect-with-format.json-template.golden | 1 + ...inspect-with-format.simple-template.golden | 1 + ...-format.multiple-volume-with-labels.golden | 22 + ...nspect-without-format.single-volume.golden | 10 + .../volume-list-with-config-format.golden | 3 + .../testdata/volume-list-with-format.golden | 3 + .../volume-list-without-format.golden | 4 + .../volume/testdata/volume-prune-no.golden | 2 + .../volume/testdata/volume-prune-yes.golden | 7 + .../volume-prune.deletedVolumes.golden | 6 + .../volume/testdata/volume-prune.empty.golden | 1 + cli/cli/compose/convert/compose.go | 159 + cli/cli/compose/convert/compose_test.go | 171 + cli/cli/compose/convert/service.go | 615 + cli/cli/compose/convert/service_test.go | 566 + cli/cli/compose/convert/volume.go | 140 + cli/cli/compose/convert/volume_test.go | 345 + .../compose/interpolation/interpolation.go | 163 + .../interpolation/interpolation_test.go | 147 + cli/cli/compose/loader/example1.env | 8 + cli/cli/compose/loader/example2.env | 4 + cli/cli/compose/loader/full-example.yml | 404 + cli/cli/compose/loader/full-struct_test.go | 889 + cli/cli/compose/loader/interpolate.go | 69 + cli/cli/compose/loader/loader.go | 907 + cli/cli/compose/loader/loader_test.go | 1465 + cli/cli/compose/loader/merge.go | 233 + cli/cli/compose/loader/merge_test.go | 1016 + cli/cli/compose/loader/types_test.go | 26 + cli/cli/compose/loader/volume.go | 122 + 
cli/cli/compose/loader/volume_test.go | 223 + cli/cli/compose/schema/bindata.go | 520 + .../schema/data/config_schema_v3.0.json | 384 + .../schema/data/config_schema_v3.1.json | 429 + .../schema/data/config_schema_v3.2.json | 476 + .../schema/data/config_schema_v3.3.json | 536 + .../schema/data/config_schema_v3.4.json | 544 + .../schema/data/config_schema_v3.5.json | 573 + .../schema/data/config_schema_v3.6.json | 582 + .../schema/data/config_schema_v3.7.json | 602 + cli/cli/compose/schema/schema.go | 168 + cli/cli/compose/schema/schema_test.go | 238 + cli/cli/compose/template/template.go | 168 + cli/cli/compose/template/template_test.go | 174 + cli/cli/compose/types/types.go | 428 + cli/cli/config/config.go | 120 + cli/cli/config/config_test.go | 550 + cli/cli/config/configfile/file.go | 335 + cli/cli/config/configfile/file_test.go | 415 + cli/cli/config/credentials/credentials.go | 17 + cli/cli/config/credentials/default_store.go | 21 + .../credentials/default_store_darwin.go | 5 + .../config/credentials/default_store_linux.go | 13 + .../credentials/default_store_unsupported.go | 7 + .../credentials/default_store_windows.go | 5 + cli/cli/config/credentials/file_store.go | 64 + cli/cli/config/credentials/file_store_test.go | 136 + cli/cli/config/credentials/native_store.go | 143 + .../config/credentials/native_store_test.go | 277 + cli/cli/debug/debug.go | 26 + cli/cli/debug/debug_test.go | 43 + cli/cli/error.go | 33 + cli/cli/flags/client.go | 12 + cli/cli/flags/common.go | 118 + cli/cli/flags/common_test.go | 43 + cli/cli/manifest/store/store.go | 180 + cli/cli/manifest/store/store_test.go | 136 + cli/cli/manifest/types/types.go | 114 + cli/cli/registry/client/client.go | 183 + cli/cli/registry/client/endpoint.go | 133 + cli/cli/registry/client/fetcher.go | 302 + cli/cli/required.go | 107 + cli/cli/required_test.go | 138 + cli/cli/trust/trust.go | 388 + cli/cli/trust/trust_test.go | 61 + cli/cli/version.go | 10 + cli/cli/winresources/res_windows.go | 18 + cli/cmd/docker/docker.go | 378 + cli/cmd/docker/docker_test.go | 33 + cli/cmd/docker/docker_windows.go | 3 + cli/codecov.yml | 22 + cli/contrib/completion/bash/docker | 5159 ++ cli/contrib/completion/fish/docker.fish | 564 + cli/contrib/completion/powershell/readme.txt | 2 + cli/contrib/completion/zsh/REVIEWERS | 2 + cli/contrib/completion/zsh/_docker | 3032 + cli/docker.Makefile | 123 + cli/dockerfiles/Dockerfile.binary-native | 8 + cli/dockerfiles/Dockerfile.cross | 3 + cli/dockerfiles/Dockerfile.dev | 24 + cli/dockerfiles/Dockerfile.e2e | 34 + cli/dockerfiles/Dockerfile.lint | 24 + cli/dockerfiles/Dockerfile.shellcheck | 9 + cli/docs/README.md | 30 + cli/docs/deprecated.md | 356 + cli/docs/extend/EBS_volume.md | 165 + cli/docs/extend/config.md | 237 + .../extend/images/authz_additional_info.png | Bin 0 -> 45916 bytes cli/docs/extend/images/authz_allow.png | Bin 0 -> 33505 bytes cli/docs/extend/images/authz_chunked.png | Bin 0 -> 33168 bytes .../extend/images/authz_connection_hijack.png | Bin 0 -> 38780 bytes cli/docs/extend/images/authz_deny.png | Bin 0 -> 27099 bytes cli/docs/extend/index.md | 263 + cli/docs/extend/legacy_plugins.md | 104 + cli/docs/extend/plugin_api.md | 195 + cli/docs/extend/plugins_authorization.md | 259 + cli/docs/extend/plugins_graphdriver.md | 403 + cli/docs/extend/plugins_logging.md | 219 + cli/docs/extend/plugins_metrics.md | 84 + cli/docs/extend/plugins_network.md | 78 + cli/docs/extend/plugins_services.md | 185 + cli/docs/extend/plugins_volume.md | 359 + cli/docs/reference/builder.md | 1983 + 
cli/docs/reference/commandline/attach.md | 160 + cli/docs/reference/commandline/build.md | 611 + cli/docs/reference/commandline/cli.md | 328 + cli/docs/reference/commandline/commit.md | 117 + cli/docs/reference/commandline/container.md | 61 + .../reference/commandline/container_prune.md | 126 + cli/docs/reference/commandline/cp.md | 118 + cli/docs/reference/commandline/create.md | 260 + cli/docs/reference/commandline/deploy.md | 111 + cli/docs/reference/commandline/diff.md | 67 + cli/docs/reference/commandline/dockerd.md | 1514 + cli/docs/reference/commandline/events.md | 413 + cli/docs/reference/commandline/exec.md | 125 + cli/docs/reference/commandline/export.md | 48 + cli/docs/reference/commandline/history.md | 87 + cli/docs/reference/commandline/image.md | 47 + cli/docs/reference/commandline/image_prune.md | 213 + cli/docs/reference/commandline/images.md | 343 + cli/docs/reference/commandline/import.md | 89 + cli/docs/reference/commandline/index.md | 184 + cli/docs/reference/commandline/info.md | 245 + cli/docs/reference/commandline/inspect.md | 122 + cli/docs/reference/commandline/kill.md | 71 + cli/docs/reference/commandline/load.md | 63 + cli/docs/reference/commandline/login.md | 184 + cli/docs/reference/commandline/logout.md | 32 + cli/docs/reference/commandline/logs.md | 85 + cli/docs/reference/commandline/manifest.md | 274 + cli/docs/reference/commandline/network.md | 51 + .../reference/commandline/network_connect.md | 117 + .../reference/commandline/network_create.md | 240 + .../commandline/network_disconnect.md | 48 + .../reference/commandline/network_inspect.md | 307 + cli/docs/reference/commandline/network_ls.md | 250 + .../reference/commandline/network_prune.md | 104 + cli/docs/reference/commandline/network_rm.md | 68 + cli/docs/reference/commandline/node.md | 42 + cli/docs/reference/commandline/node_demote.md | 47 + .../reference/commandline/node_inspect.md | 167 + cli/docs/reference/commandline/node_ls.md | 172 + .../reference/commandline/node_promote.md | 45 + cli/docs/reference/commandline/node_ps.md | 148 + cli/docs/reference/commandline/node_rm.md | 80 + cli/docs/reference/commandline/node_update.md | 77 + cli/docs/reference/commandline/pause.md | 48 + cli/docs/reference/commandline/plugin.md | 44 + .../reference/commandline/plugin_create.md | 66 + .../reference/commandline/plugin_disable.md | 69 + .../reference/commandline/plugin_enable.md | 68 + .../reference/commandline/plugin_inspect.md | 166 + .../reference/commandline/plugin_install.md | 75 + cli/docs/reference/commandline/plugin_ls.md | 118 + cli/docs/reference/commandline/plugin_push.md | 57 + cli/docs/reference/commandline/plugin_rm.md | 63 + cli/docs/reference/commandline/plugin_set.md | 172 + .../reference/commandline/plugin_upgrade.md | 100 + cli/docs/reference/commandline/port.md | 47 + cli/docs/reference/commandline/ps.md | 434 + cli/docs/reference/commandline/pull.md | 254 + cli/docs/reference/commandline/push.md | 82 + cli/docs/reference/commandline/rename.md | 35 + cli/docs/reference/commandline/restart.md | 32 + cli/docs/reference/commandline/rm.md | 100 + cli/docs/reference/commandline/rmi.md | 105 + cli/docs/reference/commandline/run.md | 816 + cli/docs/reference/commandline/save.md | 62 + cli/docs/reference/commandline/search.md | 202 + cli/docs/reference/commandline/secret.md | 45 + .../reference/commandline/secret_create.md | 99 + .../reference/commandline/secret_inspect.md | 95 + cli/docs/reference/commandline/secret_ls.md | 157 + cli/docs/reference/commandline/secret_rm.md | 54 + 
cli/docs/reference/commandline/service.md | 42 + .../reference/commandline/service_create.md | 964 + .../reference/commandline/service_inspect.md | 171 + .../reference/commandline/service_logs.md | 86 + cli/docs/reference/commandline/service_ls.md | 165 + cli/docs/reference/commandline/service_ps.md | 195 + cli/docs/reference/commandline/service_rm.md | 61 + .../reference/commandline/service_rollback.md | 94 + .../reference/commandline/service_scale.md | 106 + .../reference/commandline/service_update.md | 311 + cli/docs/reference/commandline/stack.md | 41 + .../reference/commandline/stack_deploy.md | 148 + cli/docs/reference/commandline/stack_ls.md | 81 + cli/docs/reference/commandline/stack_ps.md | 233 + cli/docs/reference/commandline/stack_rm.md | 81 + .../reference/commandline/stack_services.md | 122 + cli/docs/reference/commandline/start.md | 34 + cli/docs/reference/commandline/stats.md | 175 + cli/docs/reference/commandline/stop.md | 37 + cli/docs/reference/commandline/swarm.md | 41 + cli/docs/reference/commandline/swarm_ca.md | 122 + cli/docs/reference/commandline/swarm_init.md | 168 + cli/docs/reference/commandline/swarm_join.md | 133 + .../reference/commandline/swarm_join_token.md | 115 + cli/docs/reference/commandline/swarm_leave.md | 72 + .../reference/commandline/swarm_unlock.md | 49 + .../reference/commandline/swarm_unlock_key.md | 92 + .../reference/commandline/swarm_update.md | 52 + cli/docs/reference/commandline/system.md | 37 + cli/docs/reference/commandline/system_df.md | 140 + .../reference/commandline/system_events.md | 345 + .../reference/commandline/system_prune.md | 155 + cli/docs/reference/commandline/tag.md | 84 + cli/docs/reference/commandline/top.md | 25 + .../reference/commandline/trust_inspect.md | 470 + .../commandline/trust_key_generate.md | 67 + .../reference/commandline/trust_key_load.md | 57 + .../reference/commandline/trust_revoke.md | 130 + cli/docs/reference/commandline/trust_sign.md | 184 + .../reference/commandline/trust_signer_add.md | 211 + .../commandline/trust_signer_remove.md | 172 + cli/docs/reference/commandline/unpause.md | 45 + cli/docs/reference/commandline/update.md | 127 + cli/docs/reference/commandline/version.md | 75 + cli/docs/reference/commandline/volume.md | 48 + .../reference/commandline/volume_create.md | 125 + .../reference/commandline/volume_inspect.md | 61 + cli/docs/reference/commandline/volume_ls.md | 199 + .../reference/commandline/volume_prune.md | 73 + cli/docs/reference/commandline/volume_rm.md | 48 + cli/docs/reference/commandline/wait.md | 58 + cli/docs/reference/glossary.md | 373 + cli/docs/reference/index.md | 20 + cli/docs/reference/run.md | 1615 + cli/docs/yaml/Dockerfile | 4 + cli/docs/yaml/generate.go | 86 + cli/docs/yaml/yaml.go | 270 + cli/e2e/compose-env.yaml | 18 + cli/e2e/container/attach_test.go | 31 + cli/e2e/container/create_test.go | 35 + cli/e2e/container/kill_test.go | 50 + cli/e2e/container/main_test.go | 17 + cli/e2e/container/run_test.go | 61 + ...run-attached-from-remote-and-remove.golden | 4 + cli/e2e/image/build_test.go | 110 + cli/e2e/image/main_test.go | 17 + cli/e2e/image/pull_test.go | 71 + cli/e2e/image/push_test.go | 392 + cli/e2e/image/testdata/notary/delgkey1.crt | 21 + cli/e2e/image/testdata/notary/delgkey1.key | 27 + cli/e2e/image/testdata/notary/delgkey2.crt | 21 + cli/e2e/image/testdata/notary/delgkey2.key | 27 + cli/e2e/image/testdata/notary/delgkey3.crt | 21 + cli/e2e/image/testdata/notary/delgkey3.key | 27 + cli/e2e/image/testdata/notary/delgkey4.crt | 21 + 
cli/e2e/image/testdata/notary/delgkey4.key | 27 + cli/e2e/image/testdata/notary/gen.sh | 18 + cli/e2e/image/testdata/notary/localhost.cert | 19 + cli/e2e/image/testdata/notary/localhost.key | 27 + .../pull-with-content-trust-err.golden | 1 + .../testdata/pull-with-content-trust.golden | 4 + .../push-with-content-trust-err.golden | 0 cli/e2e/internal/fixtures/fixtures.go | 126 + cli/e2e/plugin/basic/basic.go | 34 + cli/e2e/plugin/main_test.go | 17 + cli/e2e/plugin/trust_test.go | 114 + cli/e2e/stack/deploy_test.go | 44 + cli/e2e/stack/help_test.go | 24 + cli/e2e/stack/main_test.go | 17 + cli/e2e/stack/remove_test.go | 85 + cli/e2e/stack/testdata/data | 1 + cli/e2e/stack/testdata/full-stack.yml | 9 + .../stack-deploy-help-kubernetes.golden | 14 + .../testdata/stack-deploy-help-swarm.golden | 19 + .../stack-deploy-with-names-kubernetes.golden | 7 + .../stack-deploy-with-names-swarm.golden | 7 + .../testdata/stack-deploy-with-names.golden | 7 + .../stack-remove-kubernetes-success.golden | 1 + .../stack-remove-swarm-success.golden | 3 + .../testdata/stack-with-named-resources.yml | 30 + cli/e2e/system/inspect_test.go | 18 + cli/e2e/system/main_test.go | 17 + cli/e2e/testdata/Dockerfile.notary-server | 4 + cli/e2e/testdata/notary/notary-config.json | 19 + cli/e2e/testdata/notary/notary-server.cert | 64 + cli/e2e/testdata/notary/notary-server.key | 28 + cli/e2e/testdata/notary/root-ca.cert | 32 + cli/e2e/trust/main_test.go | 17 + cli/e2e/trust/revoke_test.go | 71 + cli/e2e/trust/sign_test.go | 62 + cli/experimental/README.md | 53 + cli/experimental/checkpoint-restore.md | 88 + cli/experimental/docker-stacks-and-bundles.md | 202 + cli/experimental/images/ipvlan-l3.gliffy | 1 + cli/experimental/images/ipvlan-l3.png | Bin 0 -> 18260 bytes cli/experimental/images/ipvlan-l3.svg | 1 + .../images/ipvlan_l2_simple.gliffy | 1 + cli/experimental/images/ipvlan_l2_simple.png | Bin 0 -> 20145 bytes cli/experimental/images/ipvlan_l2_simple.svg | 1 + .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 + .../images/macvlan-bridge-ipvlan-l2.png | Bin 0 -> 14527 bytes .../images/macvlan-bridge-ipvlan-l2.svg | 1 + .../images/multi_tenant_8021q_vlans.gliffy | 1 + .../images/multi_tenant_8021q_vlans.png | Bin 0 -> 17879 bytes .../images/multi_tenant_8021q_vlans.svg | 1 + .../images/vlans-deeper-look.gliffy | 1 + cli/experimental/images/vlans-deeper-look.png | Bin 0 -> 38837 bytes cli/experimental/images/vlans-deeper-look.svg | 1 + cli/experimental/vlan-networks.md | 475 + cli/gometalinter.json | 42 + cli/internal/test/builders/config.go | 68 + cli/internal/test/builders/container.go | 79 + cli/internal/test/builders/doc.go | 3 + cli/internal/test/builders/network.go | 45 + cli/internal/test/builders/node.go | 134 + cli/internal/test/builders/secret.go | 70 + cli/internal/test/builders/service.go | 74 + cli/internal/test/builders/swarm.go | 39 + cli/internal/test/builders/task.go | 160 + cli/internal/test/builders/volume.go | 43 + cli/internal/test/cli.go | 169 + cli/internal/test/doc.go | 5 + cli/internal/test/environment/testenv.go | 78 + cli/internal/test/network/client.go | 57 + cli/internal/test/notary/client.go | 543 + cli/internal/test/output/output.go | 65 + cli/internal/test/store.go | 79 + cli/kubernetes/README.md | 4 + cli/kubernetes/check.go | 55 + cli/kubernetes/check_test.go | 51 + cli/kubernetes/client/clientset/clientset.go | 96 + .../client/clientset/scheme/register.go | 41 + .../typed/compose/v1beta1/compose_client.go | 74 + .../clientset/typed/compose/v1beta1/stack.go | 157 + 
.../typed/compose/v1beta2/compose_client.go | 74 + .../clientset/typed/compose/v1beta2/stack.go | 155 + .../client/informers/compose/interface.go | 25 + .../informers/compose/v1beta2/interface.go | 25 + .../client/informers/compose/v1beta2/stack.go | 51 + cli/kubernetes/client/informers/factory.go | 101 + cli/kubernetes/client/informers/generic.go | 44 + .../internalinterfaces/factory_interfaces.go | 18 + .../compose/v1beta2/expansion_generated.go | 9 + .../client/listers/compose/v1beta2/stack.go | 78 + cli/kubernetes/compose/clone/maps.go | 25 + cli/kubernetes/compose/clone/slices.go | 11 + cli/kubernetes/compose/doc.go | 5 + .../impersonation/impersonationconfig.go | 26 + cli/kubernetes/compose/v1beta1/doc.go | 11 + cli/kubernetes/compose/v1beta1/owner.go | 31 + cli/kubernetes/compose/v1beta1/parsing.go | 4 + cli/kubernetes/compose/v1beta1/register.go | 39 + cli/kubernetes/compose/v1beta1/stack.go | 87 + cli/kubernetes/compose/v1beta1/stack_test.go | 1 + .../v1beta2/composefile_stack_types.go | 26 + cli/kubernetes/compose/v1beta2/doc.go | 6 + cli/kubernetes/compose/v1beta2/owner.go | 30 + cli/kubernetes/compose/v1beta2/register.go | 42 + cli/kubernetes/compose/v1beta2/scale.go | 29 + cli/kubernetes/compose/v1beta2/stack.go | 256 + cli/kubernetes/config.go | 26 + cli/kubernetes/doc.go | 4 + cli/kubernetes/labels/labels.go | 45 + cli/kubernetes/labels/labels_test.go | 23 + cli/man/Dockerfile.5.md | 474 + cli/man/README.md | 15 + cli/man/docker-build.1.md | 359 + cli/man/docker-config-json.5.md | 72 + cli/man/docker-run.1.md | 1126 + cli/man/docker.1.md | 70 + cli/man/dockerd.8.md | 839 + cli/man/generate.go | 108 + cli/man/import.go | 7 + cli/man/md2man-all.sh | 22 + cli/man/src/attach.md | 2 + cli/man/src/commit.md | 1 + cli/man/src/container/attach.md | 66 + cli/man/src/container/commit.md | 30 + cli/man/src/container/cp.md | 145 + cli/man/src/container/create-example.md | 35 + cli/man/src/container/create.md | 88 + cli/man/src/container/diff.md | 39 + cli/man/src/container/exec.md | 25 + cli/man/src/container/export.md | 20 + cli/man/src/container/kill.md | 2 + cli/man/src/container/logs.md | 40 + cli/man/src/container/ls.md | 117 + cli/man/src/container/pause.md | 12 + cli/man/src/container/port.md | 26 + cli/man/src/container/rename.md | 1 + cli/man/src/container/restart.md | 1 + cli/man/src/container/rm.md | 37 + cli/man/src/container/run.md | 1 + cli/man/src/container/start.md | 1 + cli/man/src/container/stats.md | 43 + cli/man/src/container/stop.md | 1 + cli/man/src/container/top.md | 11 + cli/man/src/container/unpause.md | 6 + cli/man/src/container/update.md | 102 + cli/man/src/container/wait.md | 8 + cli/man/src/cp.md | 1 + cli/man/src/create.md | 1 + cli/man/src/diff.md | 1 + cli/man/src/events.md | 1 + cli/man/src/exec.md | 1 + cli/man/src/export.md | 1 + cli/man/src/history.md | 1 + cli/man/src/image/build.md | 1 + cli/man/src/image/history.md | 54 + cli/man/src/image/import.md | 42 + cli/man/src/image/load.md | 25 + cli/man/src/image/ls.md | 118 + cli/man/src/image/pull.md | 189 + cli/man/src/image/push.md | 34 + cli/man/src/image/rm.md | 11 + cli/man/src/image/save.md | 19 + cli/man/src/image/tag.md | 54 + cli/man/src/images.md | 1 + cli/man/src/import.md | 1 + cli/man/src/info.md | 1 + cli/man/src/inspect.md | 286 + cli/man/src/kill.md | 1 + cli/man/src/load.md | 1 + cli/man/src/login.md | 22 + cli/man/src/logout.md | 13 + cli/man/src/logs.md | 1 + cli/man/src/network/connect.md | 39 + cli/man/src/network/create.md | 177 + cli/man/src/network/disconnect.md | 5 + 
cli/man/src/network/inspect.md | 183 + cli/man/src/network/ls.md | 182 + cli/man/src/network/rm.md | 20 + cli/man/src/pause.md | 1 + cli/man/src/plugin/ls.md | 43 + cli/man/src/port.md | 1 + cli/man/src/ps.md | 1 + cli/man/src/pull.md | 1 + cli/man/src/push.md | 1 + cli/man/src/rename.md | 1 + cli/man/src/restart.md | 1 + cli/man/src/rm.md | 1 + cli/man/src/rmi.md | 1 + cli/man/src/save.md | 1 + cli/man/src/search.md | 36 + cli/man/src/start.md | 1 + cli/man/src/stats.md | 1 + cli/man/src/stop.md | 1 + cli/man/src/system/events.md | 134 + cli/man/src/system/info.md | 163 + cli/man/src/tag.md | 1 + cli/man/src/top.md | 1 + cli/man/src/unpause.md | 1 + cli/man/src/update.md | 1 + cli/man/src/version.md | 37 + cli/man/src/volume.md | 14 + cli/man/src/volume/create.md | 35 + cli/man/src/volume/inspect.md | 4 + cli/man/src/volume/ls.md | 11 + cli/man/src/wait.md | 1 + cli/opts/config.go | 98 + cli/opts/duration.go | 64 + cli/opts/duration_test.go | 30 + cli/opts/env.go | 46 + cli/opts/env_test.go | 42 + cli/opts/envfile.go | 22 + cli/opts/envfile_test.go | 141 + cli/opts/file.go | 71 + cli/opts/hosts.go | 165 + cli/opts/hosts_test.go | 181 + cli/opts/hosts_unix.go | 8 + cli/opts/hosts_windows.go | 6 + cli/opts/ip.go | 47 + cli/opts/ip_test.go | 54 + cli/opts/mount.go | 174 + cli/opts/mount_test.go | 185 + cli/opts/network.go | 106 + cli/opts/network_test.go | 100 + cli/opts/opts.go | 524 + cli/opts/opts_test.go | 308 + cli/opts/opts_unix.go | 6 + cli/opts/opts_windows.go | 56 + cli/opts/parse.go | 99 + cli/opts/port.go | 167 + cli/opts/port_test.go | 296 + cli/opts/quotedstring.go | 37 + cli/opts/quotedstring_test.go | 30 + cli/opts/runtime.go | 79 + cli/opts/secret.go | 98 + cli/opts/secret_test.go | 80 + cli/opts/throttledevice.go | 108 + cli/opts/ulimit.go | 57 + cli/opts/ulimit_test.go | 42 + cli/opts/weightdevice.go | 84 + cli/poule.yml | 41 + cli/scripts/build/.variables | 26 + cli/scripts/build/binary | 14 + cli/scripts/build/cross | 33 + cli/scripts/build/dynbinary | 14 + cli/scripts/build/osx | 21 + cli/scripts/build/windows | 23 + cli/scripts/docs/generate-authors.sh | 15 + cli/scripts/docs/generate-man.sh | 14 + cli/scripts/docs/generate-yaml.sh | 8 + cli/scripts/gen/windows-resources | 44 + cli/scripts/make.ps1 | 172 + cli/scripts/test/e2e/entry | 11 + cli/scripts/test/e2e/load-image | 53 + cli/scripts/test/e2e/run | 104 + cli/scripts/test/e2e/wait-on-daemon | 9 + cli/scripts/test/e2e/wrapper | 17 + cli/scripts/test/unit | 4 + cli/scripts/test/unit-with-coverage | 19 + cli/scripts/validate/check-git-diff | 17 + cli/scripts/validate/shellcheck | 5 + cli/scripts/warn-outside-container | 18 + cli/scripts/winresources/common.rc | 38 + cli/scripts/winresources/docker.exe.manifest | 18 + cli/scripts/winresources/docker.ico | Bin 0 -> 370070 bytes cli/scripts/winresources/docker.png | Bin 0 -> 658195 bytes cli/scripts/winresources/docker.rc | 3 + cli/service/logs/parse_logs.go | 39 + cli/service/logs/parse_logs_test.go | 34 + cli/templates/templates.go | 84 + cli/templates/templates_test.go | 89 + cli/vendor.conf | 96 + cli/vendor/github.com/Nvveen/Gotty/LICENSE | 26 + cli/vendor/github.com/Nvveen/Gotty/README | 5 + .../github.com/Nvveen/Gotty/attributes.go | 514 + cli/vendor/github.com/Nvveen/Gotty/gotty.go | 244 + cli/vendor/github.com/Nvveen/Gotty/parser.go | 362 + cli/vendor/github.com/Nvveen/Gotty/types.go | 23 + .../github.com/containerd/continuity/LICENSE | 202 + .../containerd/continuity/README.md | 74 + .../continuity/pathdriver/path_driver.go | 85 + 
.../continuity/syscallx/syscall_unix.go | 10 + .../continuity/syscallx/syscall_windows.go | 96 + .../containerd/continuity/sysx/asm.s | 10 + .../continuity/sysx/chmod_darwin.go | 18 + .../continuity/sysx/chmod_darwin_386.go | 25 + .../continuity/sysx/chmod_darwin_amd64.go | 25 + .../continuity/sysx/chmod_freebsd.go | 17 + .../continuity/sysx/chmod_freebsd_amd64.go | 25 + .../containerd/continuity/sysx/chmod_linux.go | 12 + .../continuity/sysx/chmod_solaris.go | 11 + .../containerd/continuity/sysx/file_posix.go | 112 + .../continuity/sysx/nodata_linux.go | 7 + .../continuity/sysx/nodata_solaris.go | 8 + .../containerd/continuity/sysx/nodata_unix.go | 9 + .../containerd/continuity/sysx/sys.go | 37 + .../containerd/continuity/sysx/xattr.go | 67 + .../continuity/sysx/xattr_darwin.go | 71 + .../continuity/sysx/xattr_darwin_386.go | 111 + .../continuity/sysx/xattr_darwin_amd64.go | 111 + .../continuity/sysx/xattr_freebsd.go | 12 + .../containerd/continuity/sysx/xattr_linux.go | 44 + .../continuity/sysx/xattr_openbsd.go | 7 + .../continuity/sysx/xattr_solaris.go | 12 + .../continuity/sysx/xattr_unsupported.go | 44 + .../containerd/continuity/vendor.conf | 13 + .../gregjones/httpcache/LICENSE.txt | 7 + .../github.com/gregjones/httpcache/README.md | 24 + .../httpcache/diskcache/diskcache.go | 61 + .../gregjones/httpcache/httpcache.go | 557 + .../grpc-ecosystem/grpc-opentracing/LICENSE | 27 + .../grpc-ecosystem/grpc-opentracing/PATENTS | 23 + .../grpc-opentracing/README.rst | 25 + .../grpc-opentracing/go/otgrpc/README.md | 57 + .../grpc-opentracing/go/otgrpc/client.go | 239 + .../grpc-opentracing/go/otgrpc/errors.go | 69 + .../grpc-opentracing/go/otgrpc/options.go | 76 + .../grpc-opentracing/go/otgrpc/package.go | 5 + .../grpc-opentracing/go/otgrpc/server.go | 141 + .../grpc-opentracing/go/otgrpc/shared.go | 42 + .../grpc-opentracing/python/README.md | 4 + .../python/examples/protos/command_line.proto | 15 + .../python/examples/protos/store.proto | 37 + .../github.com/howeyc/gopass/LICENSE.txt | 15 + cli/vendor/github.com/howeyc/gopass/README.md | 27 + cli/vendor/github.com/howeyc/gopass/pass.go | 91 + .../github.com/howeyc/gopass/terminal.go | 25 + cli/vendor/github.com/moby/buildkit/LICENSE | 201 + cli/vendor/github.com/moby/buildkit/README.md | 274 + .../api/services/control/control.pb.go | 4589 ++ .../api/services/control/control.proto | 121 + .../buildkit/api/services/control/generate.go | 3 + .../moby/buildkit/api/types/generate.go | 3 + .../moby/buildkit/api/types/worker.pb.go | 523 + .../moby/buildkit/api/types/worker.proto | 16 + .../github.com/moby/buildkit/client/client.go | 132 + .../moby/buildkit/client/client_unix.go | 19 + .../moby/buildkit/client/client_windows.go | 24 + .../moby/buildkit/client/diskusage.go | 73 + .../moby/buildkit/client/exporters.go | 8 + .../github.com/moby/buildkit/client/graph.go | 45 + .../moby/buildkit/client/llb/exec.go | 409 + .../moby/buildkit/client/llb/marshal.go | 112 + .../moby/buildkit/client/llb/meta.go | 170 + .../moby/buildkit/client/llb/resolver.go | 18 + .../moby/buildkit/client/llb/source.go | 363 + .../moby/buildkit/client/llb/state.go | 396 + .../github.com/moby/buildkit/client/prune.go | 50 + .../github.com/moby/buildkit/client/solve.go | 251 + .../moby/buildkit/client/workers.go | 53 + .../moby/buildkit/identity/randomid.go | 53 + .../moby/buildkit/session/auth/auth.go | 26 + .../moby/buildkit/session/auth/auth.pb.go | 673 + .../moby/buildkit/session/auth/auth.proto | 19 + .../session/auth/authprovider/authprovider.go | 44 + 
.../moby/buildkit/session/auth/generate.go | 3 + .../moby/buildkit/session/context.go | 22 + .../buildkit/session/filesync/diffcopy.go | 114 + .../buildkit/session/filesync/filesync.go | 289 + .../buildkit/session/filesync/filesync.pb.go | 644 + .../buildkit/session/filesync/filesync.proto | 20 + .../buildkit/session/filesync/generate.go | 3 + .../github.com/moby/buildkit/session/grpc.go | 81 + .../moby/buildkit/session/grpchijack/dial.go | 156 + .../buildkit/session/grpchijack/hijack.go | 14 + .../moby/buildkit/session/manager.go | 218 + .../moby/buildkit/session/session.go | 143 + .../moby/buildkit/solver/pb/attr.go | 17 + .../moby/buildkit/solver/pb/const.go | 12 + .../moby/buildkit/solver/pb/generate.go | 3 + .../moby/buildkit/solver/pb/ops.pb.go | 4960 ++ .../moby/buildkit/solver/pb/ops.proto | 165 + .../moby/buildkit/solver/pb/platform.go | 41 + .../buildkit/util/appcontext/appcontext.go | 41 + .../util/appcontext/appcontext_unix.go | 11 + .../util/appcontext/appcontext_windows.go | 7 + .../util/appdefaults/appdefaults_unix.go | 55 + .../util/appdefaults/appdefaults_windows.go | 18 + .../util/progress/progressui/display.go | 427 + .../util/progress/progressui/printer.go | 248 + .../moby/buildkit/util/system/path_unix.go | 14 + .../moby/buildkit/util/system/path_windows.go | 37 + .../buildkit/util/system/seccomp_linux.go | 29 + .../buildkit/util/system/seccomp_nolinux.go | 7 + .../buildkit/util/system/seccomp_noseccomp.go | 7 + .../github.com/moby/buildkit/vendor.conf | 69 + cli/vendor/github.com/morikuni/aec/LICENSE | 21 + cli/vendor/github.com/morikuni/aec/README.md | 178 + cli/vendor/github.com/morikuni/aec/aec.go | 137 + cli/vendor/github.com/morikuni/aec/ansi.go | 59 + cli/vendor/github.com/morikuni/aec/builder.go | 388 + cli/vendor/github.com/morikuni/aec/sgr.go | 202 + .../github.com/tonistiigi/fsutil/LICENSE | 22 + .../tonistiigi/fsutil/chtimes_linux.go | 20 + .../tonistiigi/fsutil/chtimes_nolinux.go | 20 + .../github.com/tonistiigi/fsutil/diff.go | 43 + .../tonistiigi/fsutil/diff_containerd.go | 199 + .../fsutil/diff_containerd_linux.go | 37 + .../tonistiigi/fsutil/diskwriter.go | 340 + .../tonistiigi/fsutil/diskwriter_unix.go | 51 + .../tonistiigi/fsutil/diskwriter_windows.go | 17 + .../tonistiigi/fsutil/followlinks.go | 150 + .../github.com/tonistiigi/fsutil/generate.go | 3 + .../github.com/tonistiigi/fsutil/hardlinks.go | 46 + .../github.com/tonistiigi/fsutil/readme.md | 45 + .../github.com/tonistiigi/fsutil/receive.go | 269 + .../github.com/tonistiigi/fsutil/send.go | 208 + .../github.com/tonistiigi/fsutil/stat.pb.go | 931 + .../github.com/tonistiigi/fsutil/stat.proto | 17 + .../github.com/tonistiigi/fsutil/validator.go | 92 + .../github.com/tonistiigi/fsutil/walker.go | 246 + .../tonistiigi/fsutil/walker_unix.go | 61 + .../tonistiigi/fsutil/walker_windows.go | 14 + .../github.com/tonistiigi/fsutil/wire.pb.go | 567 + .../github.com/tonistiigi/fsutil/wire.proto | 19 + .../github.com/tonistiigi/units/LICENSE | 21 + .../github.com/tonistiigi/units/bytes.go | 125 + .../github.com/tonistiigi/units/readme.md | 29 + cli/vendor/k8s.io/api/LICENSE | 202 + cli/vendor/k8s.io/api/README.md | 1 + .../api/admissionregistration/v1alpha1/doc.go | 25 + .../v1alpha1/generated.pb.go | 2187 + .../v1alpha1/generated.proto | 196 + .../v1alpha1/register.go | 53 + .../admissionregistration/v1alpha1/types.go | 219 + .../v1alpha1/types_swagger_doc_generated.go | 132 + .../v1alpha1/zz_generated.deepcopy.go | 363 + cli/vendor/k8s.io/api/apps/v1beta1/doc.go | 20 + 
.../k8s.io/api/apps/v1beta1/generated.pb.go | 4964 ++ .../k8s.io/api/apps/v1beta1/generated.proto | 457 + .../k8s.io/api/apps/v1beta1/register.go | 58 + cli/vendor/k8s.io/api/apps/v1beta1/types.go | 542 + .../v1beta1/types_swagger_doc_generated.go | 259 + .../api/apps/v1beta1/zz_generated.deepcopy.go | 737 + cli/vendor/k8s.io/api/apps/v1beta2/doc.go | 20 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 6934 +++ .../k8s.io/api/apps/v1beta2/generated.proto | 688 + .../k8s.io/api/apps/v1beta2/register.go | 61 + cli/vendor/k8s.io/api/apps/v1beta2/types.go | 820 + .../v1beta2/types_swagger_doc_generated.go | 368 + .../api/apps/v1beta2/zz_generated.deepcopy.go | 1017 + .../k8s.io/api/authentication/v1/doc.go | 20 + .../api/authentication/v1/generated.pb.go | 1302 + .../api/authentication/v1/generated.proto | 99 + .../k8s.io/api/authentication/v1/register.go | 51 + .../k8s.io/api/authentication/v1/types.go | 107 + .../v1/types_swagger_doc_generated.go | 72 + .../v1/zz_generated.deepcopy.go | 147 + .../k8s.io/api/authentication/v1beta1/doc.go | 20 + .../authentication/v1beta1/generated.pb.go | 1302 + .../authentication/v1beta1/generated.proto | 99 + .../api/authentication/v1beta1/register.go | 51 + .../api/authentication/v1beta1/types.go | 92 + .../v1beta1/types_swagger_doc_generated.go | 72 + .../v1beta1/zz_generated.deepcopy.go | 147 + cli/vendor/k8s.io/api/authorization/v1/doc.go | 21 + .../api/authorization/v1/generated.pb.go | 3498 ++ .../api/authorization/v1/generated.proto | 265 + .../k8s.io/api/authorization/v1/register.go | 55 + .../k8s.io/api/authorization/v1/types.go | 261 + .../v1/types_swagger_doc_generated.go | 172 + .../authorization/v1/zz_generated.deepcopy.go | 445 + .../k8s.io/api/authorization/v1beta1/doc.go | 21 + .../api/authorization/v1beta1/generated.pb.go | 3498 ++ .../api/authorization/v1beta1/generated.proto | 265 + .../api/authorization/v1beta1/register.go | 55 + .../k8s.io/api/authorization/v1beta1/types.go | 261 + .../v1beta1/types_swagger_doc_generated.go | 172 + .../v1beta1/zz_generated.deepcopy.go | 445 + cli/vendor/k8s.io/api/autoscaling/v1/doc.go | 20 + .../k8s.io/api/autoscaling/v1/generated.pb.go | 3786 ++ .../k8s.io/api/autoscaling/v1/generated.proto | 321 + .../k8s.io/api/autoscaling/v1/register.go | 53 + cli/vendor/k8s.io/api/autoscaling/v1/types.go | 337 + .../v1/types_swagger_doc_generated.go | 218 + .../autoscaling/v1/zz_generated.deepcopy.go | 561 + .../k8s.io/api/autoscaling/v2beta1/doc.go | 20 + .../api/autoscaling/v2beta1/generated.pb.go | 3401 ++ .../api/autoscaling/v2beta1/generated.proto | 301 + .../api/autoscaling/v2beta1/register.go | 52 + .../k8s.io/api/autoscaling/v2beta1/types.go | 312 + .../v2beta1/types_swagger_doc_generated.go | 189 + .../v2beta1/zz_generated.deepcopy.go | 491 + cli/vendor/k8s.io/api/batch/v1/doc.go | 20 + .../k8s.io/api/batch/v1/generated.pb.go | 1615 + .../k8s.io/api/batch/v1/generated.proto | 173 + cli/vendor/k8s.io/api/batch/v1/register.go | 52 + cli/vendor/k8s.io/api/batch/v1/types.go | 181 + .../batch/v1/types_swagger_doc_generated.go | 94 + .../api/batch/v1/zz_generated.deepcopy.go | 254 + cli/vendor/k8s.io/api/batch/v1beta1/doc.go | 20 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 1509 + .../k8s.io/api/batch/v1beta1/generated.proto | 135 + .../k8s.io/api/batch/v1beta1/register.go | 53 + cli/vendor/k8s.io/api/batch/v1beta1/types.go | 155 + .../v1beta1/types_swagger_doc_generated.go | 96 + .../batch/v1beta1/zz_generated.deepcopy.go | 258 + cli/vendor/k8s.io/api/batch/v2alpha1/doc.go | 20 + 
.../k8s.io/api/batch/v2alpha1/generated.pb.go | 1510 + .../k8s.io/api/batch/v2alpha1/generated.proto | 133 + .../k8s.io/api/batch/v2alpha1/register.go | 53 + cli/vendor/k8s.io/api/batch/v2alpha1/types.go | 153 + .../v2alpha1/types_swagger_doc_generated.go | 96 + .../batch/v2alpha1/zz_generated.deepcopy.go | 258 + .../k8s.io/api/certificates/v1beta1/doc.go | 21 + .../api/certificates/v1beta1/generated.pb.go | 1693 + .../api/certificates/v1beta1/generated.proto | 122 + .../api/certificates/v1beta1/register.go | 59 + .../k8s.io/api/certificates/v1beta1/types.go | 155 + .../v1beta1/types_swagger_doc_generated.go | 74 + .../v1beta1/zz_generated.deepcopy.go | 207 + .../api/core/v1/annotation_key_constants.go | 94 + cli/vendor/k8s.io/api/core/v1/doc.go | 21 + cli/vendor/k8s.io/api/core/v1/generated.pb.go | 47534 ++++++++++++++++ cli/vendor/k8s.io/api/core/v1/generated.proto | 4337 ++ cli/vendor/k8s.io/api/core/v1/meta.go | 108 + .../k8s.io/api/core/v1/objectreference.go | 33 + cli/vendor/k8s.io/api/core/v1/register.go | 102 + cli/vendor/k8s.io/api/core/v1/resource.go | 63 + cli/vendor/k8s.io/api/core/v1/taint.go | 33 + cli/vendor/k8s.io/api/core/v1/toleration.go | 56 + cli/vendor/k8s.io/api/core/v1/types.go | 4932 ++ .../core/v1/types_swagger_doc_generated.go | 2133 + .../api/core/v1/zz_generated.deepcopy.go | 6351 +++ .../k8s.io/api/extensions/v1beta1/doc.go | 20 + .../api/extensions/v1beta1/generated.pb.go | 12890 +++++ .../api/extensions/v1beta1/generated.proto | 1124 + .../k8s.io/api/extensions/v1beta1/register.go | 70 + .../k8s.io/api/extensions/v1beta1/types.go | 1315 + .../v1beta1/types_swagger_doc_generated.go | 667 + .../v1beta1/zz_generated.deepcopy.go | 1965 + cli/vendor/k8s.io/api/networking/v1/doc.go | 20 + .../k8s.io/api/networking/v1/generated.pb.go | 1869 + .../k8s.io/api/networking/v1/generated.proto | 190 + .../k8s.io/api/networking/v1/register.go | 53 + cli/vendor/k8s.io/api/networking/v1/types.go | 196 + .../v1/types_swagger_doc_generated.go | 113 + .../networking/v1/zz_generated.deepcopy.go | 331 + cli/vendor/k8s.io/api/policy/v1beta1/doc.go | 23 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 1454 + .../k8s.io/api/policy/v1beta1/generated.proto | 114 + .../k8s.io/api/policy/v1beta1/register.go | 54 + cli/vendor/k8s.io/api/policy/v1beta1/types.go | 115 + .../v1beta1/types_swagger_doc_generated.go | 83 + .../policy/v1beta1/zz_generated.deepcopy.go | 227 + cli/vendor/k8s.io/api/rbac/v1/doc.go | 21 + cli/vendor/k8s.io/api/rbac/v1/generated.pb.go | 2555 + cli/vendor/k8s.io/api/rbac/v1/generated.proto | 182 + cli/vendor/k8s.io/api/rbac/v1/register.go | 58 + cli/vendor/k8s.io/api/rbac/v1/types.go | 219 + .../rbac/v1/types_swagger_doc_generated.go | 148 + .../api/rbac/v1/zz_generated.deepcopy.go | 427 + cli/vendor/k8s.io/api/rbac/v1alpha1/doc.go | 21 + .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 2556 + .../k8s.io/api/rbac/v1alpha1/generated.proto | 184 + .../k8s.io/api/rbac/v1alpha1/register.go | 58 + cli/vendor/k8s.io/api/rbac/v1alpha1/types.go | 221 + .../v1alpha1/types_swagger_doc_generated.go | 148 + .../rbac/v1alpha1/zz_generated.deepcopy.go | 427 + cli/vendor/k8s.io/api/rbac/v1beta1/doc.go | 21 + .../k8s.io/api/rbac/v1beta1/generated.pb.go | 2555 + .../k8s.io/api/rbac/v1beta1/generated.proto | 182 + .../k8s.io/api/rbac/v1beta1/register.go | 58 + cli/vendor/k8s.io/api/rbac/v1beta1/types.go | 219 + .../v1beta1/types_swagger_doc_generated.go | 148 + .../api/rbac/v1beta1/zz_generated.deepcopy.go | 427 + .../k8s.io/api/scheduling/v1alpha1/doc.go | 21 + 
.../api/scheduling/v1alpha1/generated.pb.go | 641 + .../api/scheduling/v1alpha1/generated.proto | 65 + .../api/scheduling/v1alpha1/register.go | 52 + .../k8s.io/api/scheduling/v1alpha1/types.go | 63 + .../v1alpha1/types_swagger_doc_generated.go | 52 + .../v1alpha1/zz_generated.deepcopy.go | 109 + .../k8s.io/api/settings/v1alpha1/doc.go | 21 + .../api/settings/v1alpha1/generated.pb.go | 930 + .../api/settings/v1alpha1/generated.proto | 76 + .../k8s.io/api/settings/v1alpha1/register.go | 52 + .../k8s.io/api/settings/v1alpha1/types.go | 70 + .../v1alpha1/types_swagger_doc_generated.go | 61 + .../v1alpha1/zz_generated.deepcopy.go | 160 + cli/vendor/k8s.io/api/storage/v1/doc.go | 20 + .../k8s.io/api/storage/v1/generated.pb.go | 884 + .../k8s.io/api/storage/v1/generated.proto | 78 + cli/vendor/k8s.io/api/storage/v1/register.go | 53 + cli/vendor/k8s.io/api/storage/v1/types.go | 76 + .../storage/v1/types_swagger_doc_generated.go | 54 + .../api/storage/v1/zz_generated.deepcopy.go | 140 + cli/vendor/k8s.io/api/storage/v1beta1/doc.go | 20 + .../api/storage/v1beta1/generated.pb.go | 883 + .../api/storage/v1beta1/generated.proto | 78 + .../k8s.io/api/storage/v1beta1/register.go | 53 + .../k8s.io/api/storage/v1beta1/types.go | 76 + .../v1beta1/types_swagger_doc_generated.go | 54 + .../storage/v1beta1/zz_generated.deepcopy.go | 140 + cli/vendor/k8s.io/apimachinery/LICENSE | 202 + cli/vendor/k8s.io/apimachinery/README.md | 29 + .../apimachinery/pkg/api/equality/semantic.go | 49 + .../k8s.io/apimachinery/pkg/api/errors/doc.go | 18 + .../apimachinery/pkg/api/errors/errors.go | 545 + .../k8s.io/apimachinery/pkg/api/meta/doc.go | 19 + .../apimachinery/pkg/api/meta/errors.go | 105 + .../pkg/api/meta/firsthit_restmapper.go | 97 + .../k8s.io/apimachinery/pkg/api/meta/help.go | 205 + .../apimachinery/pkg/api/meta/interfaces.go | 146 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 636 + .../pkg/api/meta/multirestmapper.go | 210 + .../apimachinery/pkg/api/meta/priority.go | 222 + .../apimachinery/pkg/api/meta/restmapper.go | 548 + .../apimachinery/pkg/api/meta/unstructured.go | 31 + .../apimachinery/pkg/api/resource/amount.go | 299 + .../pkg/api/resource/generated.pb.go | 77 + .../pkg/api/resource/generated.proto | 94 + .../apimachinery/pkg/api/resource/math.go | 314 + .../apimachinery/pkg/api/resource/quantity.go | 792 + .../pkg/api/resource/quantity_proto.go | 284 + .../pkg/api/resource/scale_int.go | 95 + .../apimachinery/pkg/api/resource/suffix.go | 198 + .../apis/meta/internalversion/conversion.go | 77 + .../pkg/apis/meta/internalversion/doc.go | 19 + .../pkg/apis/meta/internalversion/register.go | 108 + .../pkg/apis/meta/internalversion/types.go | 70 + .../zz_generated.conversion.go | 113 + .../internalversion/zz_generated.deepcopy.go | 126 + .../pkg/apis/meta/v1/controller_ref.go | 54 + .../pkg/apis/meta/v1/conversion.go | 282 + .../apimachinery/pkg/apis/meta/v1/doc.go | 22 + .../apimachinery/pkg/apis/meta/v1/duration.go | 47 + .../pkg/apis/meta/v1/generated.pb.go | 7871 +++ .../pkg/apis/meta/v1/generated.proto | 828 + .../pkg/apis/meta/v1/group_version.go | 148 + .../apimachinery/pkg/apis/meta/v1/helpers.go | 234 + .../apimachinery/pkg/apis/meta/v1/labels.go | 75 + .../apimachinery/pkg/apis/meta/v1/meta.go | 216 + .../pkg/apis/meta/v1/micro_time.go | 184 + .../pkg/apis/meta/v1/micro_time_proto.go | 72 + .../apimachinery/pkg/apis/meta/v1/register.go | 95 + .../apimachinery/pkg/apis/meta/v1/time.go | 180 + .../pkg/apis/meta/v1/time_proto.go | 92 + .../apimachinery/pkg/apis/meta/v1/types.go | 931 + 
.../meta/v1/types_swagger_doc_generated.go | 330 + .../apis/meta/v1/unstructured/unstructured.go | 862 + .../v1/unstructured/zz_generated.deepcopy.go | 75 + .../apimachinery/pkg/apis/meta/v1/watch.go | 89 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1096 + .../pkg/apis/meta/v1/zz_generated.defaults.go | 32 + .../pkg/apis/meta/v1alpha1/conversion.go | 27 + .../pkg/apis/meta/v1alpha1/deepcopy.go | 61 + .../pkg/apis/meta/v1alpha1/doc.go | 22 + .../pkg/apis/meta/v1alpha1/generated.pb.go | 633 + .../pkg/apis/meta/v1alpha1/generated.proto | 58 + .../pkg/apis/meta/v1alpha1/register.go | 57 + .../pkg/apis/meta/v1alpha1/types.go | 161 + .../v1alpha1/types_swagger_doc_generated.go | 104 + .../meta/v1alpha1/zz_generated.deepcopy.go | 232 + .../meta/v1alpha1/zz_generated.defaults.go | 32 + .../apimachinery/pkg/conversion/cloner.go | 249 + .../apimachinery/pkg/conversion/converter.go | 898 + .../apimachinery/pkg/conversion/deep_equal.go | 36 + .../k8s.io/apimachinery/pkg/conversion/doc.go | 24 + .../apimachinery/pkg/conversion/helper.go | 39 + .../pkg/conversion/queryparams/convert.go | 188 + .../pkg/conversion/queryparams/doc.go | 19 + .../pkg/conversion/unstructured/converter.go | 738 + .../pkg/conversion/unstructured/doc.go | 19 + .../k8s.io/apimachinery/pkg/fields/doc.go | 19 + .../k8s.io/apimachinery/pkg/fields/fields.go | 62 + .../apimachinery/pkg/fields/requirements.go | 30 + .../apimachinery/pkg/fields/selector.go | 455 + .../k8s.io/apimachinery/pkg/labels/doc.go | 19 + .../k8s.io/apimachinery/pkg/labels/labels.go | 181 + .../apimachinery/pkg/labels/selector.go | 879 + .../pkg/labels/zz_generated.deepcopy.go | 59 + .../k8s.io/apimachinery/pkg/runtime/codec.go | 316 + .../apimachinery/pkg/runtime/codec_check.go | 48 + .../apimachinery/pkg/runtime/conversion.go | 113 + .../k8s.io/apimachinery/pkg/runtime/doc.go | 45 + .../apimachinery/pkg/runtime/embedded.go | 142 + .../k8s.io/apimachinery/pkg/runtime/error.go | 113 + .../apimachinery/pkg/runtime/extension.go | 51 + .../apimachinery/pkg/runtime/generated.pb.go | 773 + .../apimachinery/pkg/runtime/generated.proto | 129 + .../k8s.io/apimachinery/pkg/runtime/helper.go | 212 + .../apimachinery/pkg/runtime/interfaces.go | 256 + .../apimachinery/pkg/runtime/register.go | 61 + .../pkg/runtime/schema/generated.pb.go | 65 + .../pkg/runtime/schema/generated.proto | 28 + .../pkg/runtime/schema/group_version.go | 277 + .../pkg/runtime/schema/interfaces.go | 40 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 569 + .../pkg/runtime/scheme_builder.go | 48 + .../pkg/runtime/serializer/codec_factory.go | 237 + .../pkg/runtime/serializer/json/json.go | 277 + .../pkg/runtime/serializer/json/meta.go | 63 + .../runtime/serializer/negotiated_codec.go | 43 + .../pkg/runtime/serializer/protobuf/doc.go | 18 + .../runtime/serializer/protobuf/protobuf.go | 448 + .../runtime/serializer/protobuf_extension.go | 48 + .../serializer/recognizer/recognizer.go | 127 + .../runtime/serializer/streaming/streaming.go | 137 + .../serializer/versioning/versioning.go | 273 + .../pkg/runtime/swagger_doc_generator.go | 262 + .../k8s.io/apimachinery/pkg/runtime/types.go | 137 + .../apimachinery/pkg/runtime/types_proto.go | 69 + .../pkg/runtime/zz_generated.deepcopy.go | 139 + .../apimachinery/pkg/selection/operator.go | 33 + .../k8s.io/apimachinery/pkg/types/doc.go | 18 + .../apimachinery/pkg/types/namespacedname.go | 60 + .../k8s.io/apimachinery/pkg/types/nodename.go | 43 + .../k8s.io/apimachinery/pkg/types/patch.go | 28 + .../k8s.io/apimachinery/pkg/types/uid.go | 22 + 
.../apimachinery/pkg/util/cache/cache.go | 83 + .../pkg/util/cache/lruexpirecache.go | 102 + .../apimachinery/pkg/util/clock/clock.go | 327 + .../k8s.io/apimachinery/pkg/util/diff/diff.go | 280 + .../apimachinery/pkg/util/errors/doc.go | 18 + .../apimachinery/pkg/util/errors/errors.go | 201 + .../apimachinery/pkg/util/framer/framer.go | 167 + .../pkg/util/intstr/generated.pb.go | 381 + .../pkg/util/intstr/generated.proto | 43 + .../apimachinery/pkg/util/intstr/intstr.go | 177 + .../k8s.io/apimachinery/pkg/util/json/json.go | 119 + .../k8s.io/apimachinery/pkg/util/net/http.go | 429 + .../apimachinery/pkg/util/net/interface.go | 392 + .../apimachinery/pkg/util/net/port_range.go | 113 + .../apimachinery/pkg/util/net/port_split.go | 77 + .../k8s.io/apimachinery/pkg/util/net/util.go | 56 + .../apimachinery/pkg/util/runtime/runtime.go | 161 + .../k8s.io/apimachinery/pkg/util/sets/byte.go | 203 + .../k8s.io/apimachinery/pkg/util/sets/doc.go | 20 + .../apimachinery/pkg/util/sets/empty.go | 23 + .../k8s.io/apimachinery/pkg/util/sets/int.go | 203 + .../apimachinery/pkg/util/sets/int64.go | 203 + .../apimachinery/pkg/util/sets/string.go | 203 + .../pkg/util/validation/field/errors.go | 254 + .../pkg/util/validation/field/path.go | 91 + .../pkg/util/validation/validation.go | 391 + .../k8s.io/apimachinery/pkg/util/wait/doc.go | 19 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 385 + .../apimachinery/pkg/util/yaml/decoder.go | 346 + .../k8s.io/apimachinery/pkg/version/doc.go | 19 + .../k8s.io/apimachinery/pkg/version/types.go | 37 + .../k8s.io/apimachinery/pkg/watch/doc.go | 19 + .../k8s.io/apimachinery/pkg/watch/filter.go | 109 + .../k8s.io/apimachinery/pkg/watch/mux.go | 264 + .../apimachinery/pkg/watch/streamwatcher.go | 119 + .../k8s.io/apimachinery/pkg/watch/until.go | 87 + .../k8s.io/apimachinery/pkg/watch/watch.go | 270 + .../pkg/watch/zz_generated.deepcopy.go | 59 + .../forked/golang/reflect/deep_equal.go | 388 + cli/vendor/k8s.io/client-go/LICENSE | 202 + cli/vendor/k8s.io/client-go/README.md | 157 + .../client-go/discovery/discovery_client.go | 458 + .../k8s.io/client-go/discovery/helper.go | 121 + .../k8s.io/client-go/discovery/restmapper.go | 331 + .../client-go/discovery/unstructured.go | 95 + .../k8s.io/client-go/kubernetes/clientset.go | 532 + cli/vendor/k8s.io/client-go/kubernetes/doc.go | 20 + .../k8s.io/client-go/kubernetes/import.go | 19 + .../k8s.io/client-go/kubernetes/scheme/doc.go | 20 + .../client-go/kubernetes/scheme/register.go | 99 + .../v1alpha1/admissionregistration_client.go | 93 + .../admissionregistration/v1alpha1/doc.go | 20 + .../externaladmissionhookconfiguration.go | 145 + .../v1alpha1/generated_expansion.go | 21 + .../v1alpha1/initializerconfiguration.go | 145 + .../typed/apps/v1beta1/apps_client.go | 103 + .../typed/apps/v1beta1/controllerrevision.go | 155 + .../typed/apps/v1beta1/deployment.go | 172 + .../kubernetes/typed/apps/v1beta1/doc.go | 20 + .../typed/apps/v1beta1/generated_expansion.go | 25 + .../kubernetes/typed/apps/v1beta1/scale.go | 46 + .../typed/apps/v1beta1/statefulset.go | 172 + .../typed/apps/v1beta2/apps_client.go | 113 + .../typed/apps/v1beta2/controllerrevision.go | 155 + .../typed/apps/v1beta2/daemonset.go | 172 + .../typed/apps/v1beta2/deployment.go | 172 + .../kubernetes/typed/apps/v1beta2/doc.go | 20 + .../typed/apps/v1beta2/generated_expansion.go | 29 + .../typed/apps/v1beta2/replicaset.go | 172 + .../kubernetes/typed/apps/v1beta2/scale.go | 46 + .../typed/apps/v1beta2/statefulset.go | 203 + .../v1/authentication_client.go | 88 + 
.../kubernetes/typed/authentication/v1/doc.go | 20 + .../authentication/v1/generated_expansion.go | 17 + .../typed/authentication/v1/tokenreview.go | 44 + .../v1/tokenreview_expansion.go | 35 + .../v1beta1/authentication_client.go | 88 + .../typed/authentication/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 17 + .../authentication/v1beta1/tokenreview.go | 44 + .../v1beta1/tokenreview_expansion.go | 35 + .../authorization/v1/authorization_client.go | 103 + .../kubernetes/typed/authorization/v1/doc.go | 20 + .../authorization/v1/generated_expansion.go | 17 + .../v1/localsubjectaccessreview.go | 46 + .../v1/localsubjectaccessreview_expansion.go | 36 + .../v1/selfsubjectaccessreview.go | 44 + .../v1/selfsubjectaccessreview_expansion.go | 35 + .../v1/selfsubjectrulesreview.go | 44 + .../v1/selfsubjectrulesreview_expansion.go | 35 + .../authorization/v1/subjectaccessreview.go | 44 + .../v1/subjectaccessreview_expansion.go | 36 + .../v1beta1/authorization_client.go | 103 + .../typed/authorization/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 17 + .../v1beta1/localsubjectaccessreview.go | 46 + .../localsubjectaccessreview_expansion.go | 36 + .../v1beta1/selfsubjectaccessreview.go | 44 + .../selfsubjectaccessreview_expansion.go | 35 + .../v1beta1/selfsubjectrulesreview.go | 44 + .../selfsubjectrulesreview_expansion.go | 35 + .../v1beta1/subjectaccessreview.go | 44 + .../v1beta1/subjectaccessreview_expansion.go | 36 + .../autoscaling/v1/autoscaling_client.go | 88 + .../kubernetes/typed/autoscaling/v1/doc.go | 20 + .../autoscaling/v1/generated_expansion.go | 19 + .../autoscaling/v1/horizontalpodautoscaler.go | 172 + .../autoscaling/v2beta1/autoscaling_client.go | 88 + .../typed/autoscaling/v2beta1/doc.go | 20 + .../v2beta1/generated_expansion.go | 19 + .../v2beta1/horizontalpodautoscaler.go | 172 + .../kubernetes/typed/batch/v1/batch_client.go | 88 + .../kubernetes/typed/batch/v1/doc.go | 20 + .../typed/batch/v1/generated_expansion.go | 19 + .../kubernetes/typed/batch/v1/job.go | 172 + .../typed/batch/v1beta1/batch_client.go | 88 + .../kubernetes/typed/batch/v1beta1/cronjob.go | 172 + .../kubernetes/typed/batch/v1beta1/doc.go | 20 + .../batch/v1beta1/generated_expansion.go | 19 + .../typed/batch/v2alpha1/batch_client.go | 88 + .../typed/batch/v2alpha1/cronjob.go | 172 + .../kubernetes/typed/batch/v2alpha1/doc.go | 20 + .../batch/v2alpha1/generated_expansion.go | 19 + .../v1beta1/certificates_client.go | 88 + .../v1beta1/certificatesigningrequest.go | 161 + .../certificatesigningrequest_expansion.go | 37 + .../typed/certificates/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 17 + .../typed/core/v1/componentstatus.go | 145 + .../kubernetes/typed/core/v1/configmap.go | 155 + .../kubernetes/typed/core/v1/core_client.go | 163 + .../client-go/kubernetes/typed/core/v1/doc.go | 20 + .../kubernetes/typed/core/v1/endpoints.go | 155 + .../kubernetes/typed/core/v1/event.go | 155 + .../typed/core/v1/event_expansion.go | 164 + .../typed/core/v1/generated_expansion.go | 39 + .../kubernetes/typed/core/v1/limitrange.go | 155 + .../kubernetes/typed/core/v1/namespace.go | 161 + .../typed/core/v1/namespace_expansion.go | 31 + .../kubernetes/typed/core/v1/node.go | 161 + .../typed/core/v1/node_expansion.go | 43 + .../typed/core/v1/persistentvolume.go | 161 + .../typed/core/v1/persistentvolumeclaim.go | 172 + .../client-go/kubernetes/typed/core/v1/pod.go | 172 + .../kubernetes/typed/core/v1/pod_expansion.go | 45 + .../kubernetes/typed/core/v1/podtemplate.go | 155 + 
.../typed/core/v1/replicationcontroller.go | 204 + .../kubernetes/typed/core/v1/resourcequota.go | 172 + .../kubernetes/typed/core/v1/secret.go | 155 + .../kubernetes/typed/core/v1/service.go | 172 + .../typed/core/v1/service_expansion.go | 41 + .../typed/core/v1/serviceaccount.go | 155 + .../typed/extensions/v1beta1/daemonset.go | 172 + .../typed/extensions/v1beta1/deployment.go | 203 + .../v1beta1/deployment_expansion.go | 29 + .../typed/extensions/v1beta1/doc.go | 20 + .../extensions/v1beta1/extensions_client.go | 118 + .../extensions/v1beta1/generated_expansion.go | 27 + .../typed/extensions/v1beta1/ingress.go | 172 + .../extensions/v1beta1/podsecuritypolicy.go | 145 + .../typed/extensions/v1beta1/replicaset.go | 203 + .../typed/extensions/v1beta1/scale.go | 46 + .../extensions/v1beta1/scale_expansion.go | 65 + .../extensions/v1beta1/thirdpartyresource.go | 145 + .../kubernetes/typed/networking/v1/doc.go | 20 + .../networking/v1/generated_expansion.go | 19 + .../typed/networking/v1/networking_client.go | 88 + .../typed/networking/v1/networkpolicy.go | 155 + .../kubernetes/typed/policy/v1beta1/doc.go | 20 + .../typed/policy/v1beta1/eviction.go | 46 + .../policy/v1beta1/eviction_expansion.go | 38 + .../policy/v1beta1/generated_expansion.go | 19 + .../policy/v1beta1/poddisruptionbudget.go | 172 + .../typed/policy/v1beta1/policy_client.go | 93 + .../kubernetes/typed/rbac/v1/clusterrole.go | 145 + .../typed/rbac/v1/clusterrolebinding.go | 145 + .../client-go/kubernetes/typed/rbac/v1/doc.go | 20 + .../typed/rbac/v1/generated_expansion.go | 25 + .../kubernetes/typed/rbac/v1/rbac_client.go | 103 + .../kubernetes/typed/rbac/v1/role.go | 155 + .../kubernetes/typed/rbac/v1/rolebinding.go | 155 + .../typed/rbac/v1alpha1/clusterrole.go | 145 + .../typed/rbac/v1alpha1/clusterrolebinding.go | 145 + .../kubernetes/typed/rbac/v1alpha1/doc.go | 20 + .../rbac/v1alpha1/generated_expansion.go | 25 + .../typed/rbac/v1alpha1/rbac_client.go | 103 + .../kubernetes/typed/rbac/v1alpha1/role.go | 155 + .../typed/rbac/v1alpha1/rolebinding.go | 155 + .../typed/rbac/v1beta1/clusterrole.go | 145 + .../typed/rbac/v1beta1/clusterrolebinding.go | 145 + .../kubernetes/typed/rbac/v1beta1/doc.go | 20 + .../typed/rbac/v1beta1/generated_expansion.go | 25 + .../typed/rbac/v1beta1/rbac_client.go | 103 + .../kubernetes/typed/rbac/v1beta1/role.go | 155 + .../typed/rbac/v1beta1/rolebinding.go | 155 + .../typed/scheduling/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go | 19 + .../scheduling/v1alpha1/priorityclass.go | 145 + .../scheduling/v1alpha1/scheduling_client.go | 88 + .../kubernetes/typed/settings/v1alpha1/doc.go | 20 + .../settings/v1alpha1/generated_expansion.go | 19 + .../typed/settings/v1alpha1/podpreset.go | 155 + .../settings/v1alpha1/settings_client.go | 88 + .../kubernetes/typed/storage/v1/doc.go | 20 + .../typed/storage/v1/generated_expansion.go | 19 + .../typed/storage/v1/storage_client.go | 88 + .../typed/storage/v1/storageclass.go | 145 + .../kubernetes/typed/storage/v1beta1/doc.go | 20 + .../storage/v1beta1/generated_expansion.go | 19 + .../typed/storage/v1beta1/storage_client.go | 88 + .../typed/storage/v1beta1/storageclass.go | 145 + .../k8s.io/client-go/pkg/version/base.go | 63 + .../k8s.io/client-go/pkg/version/doc.go | 20 + .../k8s.io/client-go/pkg/version/version.go | 42 + cli/vendor/k8s.io/client-go/rest/client.go | 258 + cli/vendor/k8s.io/client-go/rest/config.go | 424 + cli/vendor/k8s.io/client-go/rest/plugin.go | 73 + cli/vendor/k8s.io/client-go/rest/request.go | 1095 + 
cli/vendor/k8s.io/client-go/rest/transport.go | 101 + cli/vendor/k8s.io/client-go/rest/url_utils.go | 90 + .../k8s.io/client-go/rest/urlbackoff.go | 107 + cli/vendor/k8s.io/client-go/rest/versions.go | 88 + .../k8s.io/client-go/rest/watch/decoder.go | 72 + .../k8s.io/client-go/rest/watch/encoder.go | 56 + .../client-go/rest/zz_generated.deepcopy.go | 69 + .../k8s.io/client-go/testing/actions.go | 528 + cli/vendor/k8s.io/client-go/testing/fake.go | 259 + .../k8s.io/client-go/testing/fixture.go | 464 + .../k8s.io/client-go/tools/auth/clientauth.go | 125 + .../client-go/tools/cache/controller.go | 394 + .../client-go/tools/cache/delta_fifo.go | 681 + .../k8s.io/client-go/tools/cache/doc.go | 24 + .../client-go/tools/cache/expiration_cache.go | 208 + .../tools/cache/expiration_cache_fakes.go | 54 + .../tools/cache/fake_custom_store.go | 102 + .../k8s.io/client-go/tools/cache/fifo.go | 358 + .../k8s.io/client-go/tools/cache/heap.go | 323 + .../k8s.io/client-go/tools/cache/index.go | 87 + .../k8s.io/client-go/tools/cache/listers.go | 160 + .../k8s.io/client-go/tools/cache/listwatch.go | 173 + .../client-go/tools/cache/mutation_cache.go | 261 + .../tools/cache/mutation_detector.go | 133 + .../k8s.io/client-go/tools/cache/reflector.go | 442 + .../tools/cache/reflector_metrics.go | 119 + .../client-go/tools/cache/shared_informer.go | 576 + .../k8s.io/client-go/tools/cache/store.go | 244 + .../tools/cache/thread_safe_store.go | 306 + .../client-go/tools/cache/undelta_store.go | 83 + .../client-go/tools/clientcmd/api/doc.go | 18 + .../client-go/tools/clientcmd/api/helpers.go | 183 + .../tools/clientcmd/api/latest/latest.go | 66 + .../client-go/tools/clientcmd/api/register.go | 46 + .../client-go/tools/clientcmd/api/types.go | 186 + .../tools/clientcmd/api/v1/conversion.go | 227 + .../client-go/tools/clientcmd/api/v1/doc.go | 18 + .../tools/clientcmd/api/v1/register.go | 56 + .../client-go/tools/clientcmd/api/v1/types.go | 171 + .../clientcmd/api/v1/zz_generated.deepcopy.go | 358 + .../clientcmd/api/zz_generated.deepcopy.go | 309 + .../client-go/tools/clientcmd/auth_loaders.go | 106 + .../tools/clientcmd/client_config.go | 549 + .../client-go/tools/clientcmd/config.go | 472 + .../k8s.io/client-go/tools/clientcmd/doc.go | 37 + .../k8s.io/client-go/tools/clientcmd/flag.go | 49 + .../client-go/tools/clientcmd/helpers.go | 35 + .../client-go/tools/clientcmd/loader.go | 612 + .../tools/clientcmd/merged_client_builder.go | 169 + .../client-go/tools/clientcmd/overrides.go | 247 + .../client-go/tools/clientcmd/validation.go | 275 + .../k8s.io/client-go/tools/metrics/metrics.go | 61 + .../k8s.io/client-go/tools/pager/pager.go | 118 + .../k8s.io/client-go/tools/reference/ref.go | 122 + .../k8s.io/client-go/transport/cache.go | 92 + .../k8s.io/client-go/transport/config.go | 105 + .../client-go/transport/round_trippers.go | 455 + .../k8s.io/client-go/transport/transport.go | 141 + cli/vendor/k8s.io/client-go/util/cert/cert.go | 215 + cli/vendor/k8s.io/client-go/util/cert/csr.go | 75 + cli/vendor/k8s.io/client-go/util/cert/io.go | 164 + cli/vendor/k8s.io/client-go/util/cert/pem.go | 269 + .../client-go/util/flowcontrol/backoff.go | 149 + .../client-go/util/flowcontrol/throttle.go | 148 + .../k8s.io/client-go/util/homedir/homedir.go | 47 + .../k8s.io/client-go/util/integer/integer.go | 67 + cli/vendor/k8s.io/kube-openapi/LICENSE | 202 + cli/vendor/k8s.io/kube-openapi/README.md | 14 + .../k8s.io/kube-openapi/pkg/common/common.go | 167 + .../k8s.io/kube-openapi/pkg/common/doc.go | 19 + 
cli/vendor/k8s.io/kubernetes/LICENSE | 202 + cli/vendor/k8s.io/kubernetes/README.md | 86 + cli/vendor/k8s.io/kubernetes/build/README.md | 112 + .../k8s.io/kubernetes/build/pause/orphan.c | 36 + .../k8s.io/kubernetes/build/pause/pause.c | 51 + .../k8s.io/kubernetes/pkg/api/v1/pod/util.go | 296 + .../google/protobuf/compiler/plugin.proto | 150 + .../protobuf/google/protobuf/descriptor.proto | 779 + cli/vendor/vbom.ml/util/LICENSE | 17 + cli/vendor/vbom.ml/util/README.md | 5 + cli/vendor/vbom.ml/util/sortorder/README.md | 5 + cli/vendor/vbom.ml/util/sortorder/doc.go | 5 + cli/vendor/vbom.ml/util/sortorder/natsort.go | 76 + components.conf | 9 + engine/.DEREK.yml | 17 + engine/.dockerignore | 7 + engine/.mailmap | 491 + engine/AUTHORS | 1984 + engine/CHANGELOG.md | 3609 ++ engine/CONTRIBUTING.md | 458 + engine/Dockerfile | 240 + engine/Dockerfile.e2e | 74 + engine/Dockerfile.simple | 62 + engine/Dockerfile.windows | 256 + engine/LICENSE | 191 + engine/MAINTAINERS | 486 + engine/Makefile | 208 + engine/NOTICE | 19 + engine/README.md | 57 + engine/ROADMAP.md | 68 + engine/TESTING.md | 89 + engine/VENDORING.md | 46 + engine/api/README.md | 42 + engine/api/common.go | 11 + engine/api/common_unix.go | 6 + engine/api/common_windows.go | 8 + engine/api/server/backend/build/backend.go | 136 + engine/api/server/backend/build/tag.go | 77 + engine/api/server/httputils/decoder.go | 16 + engine/api/server/httputils/errors.go | 131 + engine/api/server/httputils/form.go | 76 + engine/api/server/httputils/form_test.go | 105 + engine/api/server/httputils/httputils.go | 100 + engine/api/server/httputils/httputils_test.go | 18 + .../server/httputils/httputils_write_json.go | 15 + .../api/server/httputils/write_log_stream.go | 84 + engine/api/server/middleware.go | 24 + engine/api/server/middleware/cors.go | 37 + engine/api/server/middleware/debug.go | 94 + engine/api/server/middleware/debug_test.go | 59 + engine/api/server/middleware/experimental.go | 28 + engine/api/server/middleware/middleware.go | 12 + engine/api/server/middleware/version.go | 65 + engine/api/server/middleware/version_test.go | 92 + engine/api/server/router/build/backend.go | 24 + engine/api/server/router/build/build.go | 30 + .../api/server/router/build/build_routes.go | 409 + .../api/server/router/checkpoint/backend.go | 10 + .../server/router/checkpoint/checkpoint.go | 36 + .../router/checkpoint/checkpoint_routes.go | 65 + engine/api/server/router/container/backend.go | 83 + .../api/server/router/container/container.go | 70 + .../router/container/container_routes.go | 661 + engine/api/server/router/container/copy.go | 140 + engine/api/server/router/container/exec.go | 149 + engine/api/server/router/container/inspect.go | 21 + engine/api/server/router/debug/debug.go | 53 + .../api/server/router/debug/debug_routes.go | 12 + .../api/server/router/distribution/backend.go | 15 + .../router/distribution/distribution.go | 31 + .../distribution/distribution_routes.go | 138 + engine/api/server/router/experimental.go | 68 + engine/api/server/router/image/backend.go | 41 + engine/api/server/router/image/image.go | 44 + .../api/server/router/image/image_routes.go | 324 + engine/api/server/router/local.go | 104 + engine/api/server/router/network/backend.go | 32 + engine/api/server/router/network/filter.go | 93 + .../api/server/router/network/filter_test.go | 149 + engine/api/server/router/network/network.go | 43 + .../server/router/network/network_routes.go | 597 + engine/api/server/router/plugin/backend.go | 27 + 
engine/api/server/router/plugin/plugin.go | 39 + .../api/server/router/plugin/plugin_routes.go | 310 + engine/api/server/router/router.go | 19 + engine/api/server/router/session/backend.go | 11 + engine/api/server/router/session/session.go | 29 + .../server/router/session/session_routes.go | 16 + engine/api/server/router/swarm/backend.go | 48 + engine/api/server/router/swarm/cluster.go | 63 + .../api/server/router/swarm/cluster_routes.go | 494 + engine/api/server/router/swarm/helpers.go | 66 + engine/api/server/router/system/backend.go | 28 + engine/api/server/router/system/system.go | 44 + .../api/server/router/system/system_routes.go | 230 + engine/api/server/router/volume/backend.go | 20 + engine/api/server/router/volume/volume.go | 36 + .../api/server/router/volume/volume_routes.go | 96 + engine/api/server/router_swapper.go | 30 + engine/api/server/server.go | 209 + engine/api/server/server_test.go | 45 + engine/api/swagger-gen.yaml | 12 + engine/api/swagger.yaml | 10136 ++++ engine/api/templates/server/operation.gotmpl | 26 + engine/api/types/auth.go | 22 + engine/api/types/backend/backend.go | 128 + engine/api/types/backend/build.go | 45 + engine/api/types/blkiodev/blkio.go | 23 + engine/api/types/client.go | 406 + engine/api/types/configs.go | 57 + engine/api/types/container/config.go | 69 + .../api/types/container/container_changes.go | 21 + .../api/types/container/container_create.go | 21 + engine/api/types/container/container_top.go | 21 + .../api/types/container/container_update.go | 17 + engine/api/types/container/container_wait.go | 29 + engine/api/types/container/host_config.go | 412 + engine/api/types/container/hostconfig_unix.go | 41 + .../api/types/container/hostconfig_windows.go | 40 + engine/api/types/container/waitcondition.go | 22 + engine/api/types/error_response.go | 13 + engine/api/types/events/events.go | 52 + engine/api/types/filters/example_test.go | 24 + engine/api/types/filters/parse.go | 350 + engine/api/types/filters/parse_test.go | 423 + engine/api/types/graph_driver_data.go | 17 + engine/api/types/id_response.go | 13 + engine/api/types/image/image_history.go | 37 + .../api/types/image_delete_response_item.go | 15 + engine/api/types/image_summary.go | 49 + engine/api/types/mount/mount.go | 130 + engine/api/types/network/network.go | 108 + engine/api/types/plugin.go | 203 + engine/api/types/plugin_device.go | 25 + engine/api/types/plugin_env.go | 25 + engine/api/types/plugin_interface_type.go | 21 + engine/api/types/plugin_mount.go | 37 + engine/api/types/plugin_responses.go | 71 + .../api/types/plugins/logdriver/entry.pb.go | 449 + .../api/types/plugins/logdriver/entry.proto | 8 + engine/api/types/plugins/logdriver/gen.go | 3 + engine/api/types/plugins/logdriver/io.go | 87 + engine/api/types/port.go | 23 + engine/api/types/registry/authenticate.go | 21 + engine/api/types/registry/registry.go | 119 + engine/api/types/seccomp.go | 93 + engine/api/types/service_update_response.go | 12 + engine/api/types/stats.go | 181 + engine/api/types/strslice/strslice.go | 30 + engine/api/types/strslice/strslice_test.go | 86 + engine/api/types/swarm/common.go | 40 + engine/api/types/swarm/config.go | 35 + engine/api/types/swarm/container.go | 74 + engine/api/types/swarm/network.go | 121 + engine/api/types/swarm/node.go | 115 + engine/api/types/swarm/runtime.go | 27 + engine/api/types/swarm/runtime/gen.go | 3 + engine/api/types/swarm/runtime/plugin.pb.go | 712 + engine/api/types/swarm/runtime/plugin.proto | 20 + engine/api/types/swarm/secret.go | 36 + 
engine/api/types/swarm/service.go | 124 + engine/api/types/swarm/swarm.go | 217 + engine/api/types/swarm/task.go | 191 + engine/api/types/time/duration_convert.go | 12 + .../api/types/time/duration_convert_test.go | 26 + engine/api/types/time/timestamp.go | 129 + engine/api/types/time/timestamp_test.go | 93 + engine/api/types/types.go | 602 + engine/api/types/versions/README.md | 14 + engine/api/types/versions/compare.go | 62 + engine/api/types/versions/compare_test.go | 26 + engine/api/types/versions/v1p19/types.go | 35 + engine/api/types/versions/v1p20/types.go | 40 + engine/api/types/volume.go | 69 + engine/api/types/volume/volume_create.go | 29 + engine/api/types/volume/volume_list.go | 23 + .../adapters/containerimage/pull.go | 733 + .../builder-next/adapters/snapshot/layer.go | 113 + .../adapters/snapshot/snapshot.go | 467 + engine/builder/builder-next/builder.go | 419 + engine/builder/builder-next/controller.go | 154 + engine/builder/builder-next/executor_unix.go | 17 + .../builder/builder-next/executor_windows.go | 21 + .../builder/builder-next/exporter/export.go | 146 + .../builder/builder-next/exporter/writer.go | 177 + engine/builder/builder-next/reqbodyhandler.go | 67 + engine/builder/builder-next/worker/worker.go | 331 + engine/builder/builder.go | 115 + engine/builder/dockerfile/buildargs.go | 172 + engine/builder/dockerfile/buildargs_test.go | 102 + engine/builder/dockerfile/builder.go | 437 + engine/builder/dockerfile/builder_unix.go | 7 + engine/builder/dockerfile/builder_windows.go | 8 + engine/builder/dockerfile/clientsession.go | 76 + engine/builder/dockerfile/containerbackend.go | 146 + engine/builder/dockerfile/copy.go | 567 + engine/builder/dockerfile/copy_test.go | 148 + engine/builder/dockerfile/copy_unix.go | 48 + engine/builder/dockerfile/copy_windows.go | 43 + engine/builder/dockerfile/dispatchers.go | 585 + engine/builder/dockerfile/dispatchers_test.go | 552 + engine/builder/dockerfile/dispatchers_unix.go | 23 + .../dockerfile/dispatchers_unix_test.go | 34 + .../builder/dockerfile/dispatchers_windows.go | 95 + .../dockerfile/dispatchers_windows_test.go | 46 + engine/builder/dockerfile/evaluator.go | 250 + engine/builder/dockerfile/evaluator_test.go | 144 + engine/builder/dockerfile/imagecontext.go | 122 + engine/builder/dockerfile/imageprobe.go | 63 + engine/builder/dockerfile/internals.go | 496 + engine/builder/dockerfile/internals_linux.go | 88 + .../dockerfile/internals_linux_test.go | 138 + engine/builder/dockerfile/internals_test.go | 173 + .../builder/dockerfile/internals_windows.go | 7 + .../dockerfile/internals_windows_test.go | 53 + engine/builder/dockerfile/metrics.go | 44 + engine/builder/dockerfile/mockbackend_test.go | 148 + engine/builder/dockerfile/utils_test.go | 50 + engine/builder/dockerignore/dockerignore.go | 64 + .../builder/dockerignore/dockerignore_test.go | 69 + engine/builder/fscache/fscache.go | 652 + engine/builder/fscache/fscache_test.go | 132 + engine/builder/fscache/naivedriver.go | 28 + engine/builder/remotecontext/archive.go | 125 + engine/builder/remotecontext/detect.go | 180 + engine/builder/remotecontext/detect_test.go | 123 + engine/builder/remotecontext/filehash.go | 45 + engine/builder/remotecontext/generate.go | 3 + engine/builder/remotecontext/git.go | 35 + engine/builder/remotecontext/git/gitutils.go | 204 + .../remotecontext/git/gitutils_test.go | 278 + engine/builder/remotecontext/lazycontext.go | 102 + engine/builder/remotecontext/mimetype.go | 27 + engine/builder/remotecontext/mimetype_test.go | 16 + 
engine/builder/remotecontext/remote.go | 127 + engine/builder/remotecontext/remote_test.go | 242 + engine/builder/remotecontext/tarsum.go | 157 + engine/builder/remotecontext/tarsum.pb.go | 525 + engine/builder/remotecontext/tarsum.proto | 7 + engine/builder/remotecontext/tarsum_test.go | 151 + engine/builder/remotecontext/utils_test.go | 55 + engine/cli/cobra.go | 131 + engine/cli/config/configdir.go | 25 + engine/cli/debug/debug.go | 26 + engine/cli/debug/debug_test.go | 43 + engine/cli/error.go | 33 + engine/cli/required.go | 27 + engine/client/README.md | 35 + engine/client/build_cancel.go | 21 + engine/client/build_prune.go | 30 + engine/client/checkpoint_create.go | 14 + engine/client/checkpoint_create_test.go | 73 + engine/client/checkpoint_delete.go | 20 + engine/client/checkpoint_delete_test.go | 54 + engine/client/checkpoint_list.go | 28 + engine/client/checkpoint_list_test.go | 68 + engine/client/client.go | 402 + engine/client/client_mock_test.go | 53 + engine/client/client_test.go | 321 + engine/client/client_unix.go | 9 + engine/client/client_windows.go | 7 + engine/client/config_create.go | 25 + engine/client/config_create_test.go | 70 + engine/client/config_inspect.go | 36 + engine/client/config_inspect_test.go | 103 + engine/client/config_list.go | 38 + engine/client/config_list_test.go | 107 + engine/client/config_remove.go | 13 + engine/client/config_remove_test.go | 60 + engine/client/config_update.go | 21 + engine/client/config_update_test.go | 61 + engine/client/container_attach.go | 57 + engine/client/container_commit.go | 55 + engine/client/container_commit_test.go | 96 + engine/client/container_copy.go | 101 + engine/client/container_copy_test.go | 273 + engine/client/container_create.go | 56 + engine/client/container_create_test.go | 118 + engine/client/container_diff.go | 23 + engine/client/container_diff_test.go | 61 + engine/client/container_exec.go | 54 + engine/client/container_exec_test.go | 156 + engine/client/container_export.go | 19 + engine/client/container_export_test.go | 49 + engine/client/container_inspect.go | 53 + engine/client/container_inspect_test.go | 138 + engine/client/container_kill.go | 16 + engine/client/container_kill_test.go | 45 + engine/client/container_list.go | 56 + engine/client/container_list_test.go | 96 + engine/client/container_logs.go | 80 + engine/client/container_logs_test.go | 166 + engine/client/container_pause.go | 10 + engine/client/container_pause_test.go | 40 + engine/client/container_prune.go | 36 + engine/client/container_prune_test.go | 125 + engine/client/container_remove.go | 27 + engine/client/container_remove_test.go | 66 + engine/client/container_rename.go | 15 + engine/client/container_rename_test.go | 45 + engine/client/container_resize.go | 29 + engine/client/container_resize_test.go | 82 + engine/client/container_restart.go | 22 + engine/client/container_restart_test.go | 47 + engine/client/container_start.go | 23 + engine/client/container_start_test.go | 57 + engine/client/container_stats.go | 26 + engine/client/container_stats_test.go | 69 + engine/client/container_stop.go | 26 + engine/client/container_stop_test.go | 47 + engine/client/container_top.go | 28 + engine/client/container_top_test.go | 74 + engine/client/container_unpause.go | 10 + engine/client/container_unpause_test.go | 40 + engine/client/container_update.go | 22 + engine/client/container_update_test.go | 58 + engine/client/container_wait.go | 83 + engine/client/container_wait_test.go | 73 + engine/client/disk_usage.go | 26 + 
engine/client/disk_usage_test.go | 55 + engine/client/distribution_inspect.go | 38 + engine/client/distribution_inspect_test.go | 32 + engine/client/errors.go | 132 + engine/client/events.go | 101 + engine/client/events_test.go | 164 + engine/client/hijack.go | 129 + engine/client/hijack_test.go | 103 + engine/client/image_build.go | 138 + engine/client/image_build_test.go | 232 + engine/client/image_create.go | 37 + engine/client/image_create_test.go | 75 + engine/client/image_history.go | 22 + engine/client/image_history_test.go | 60 + engine/client/image_import.go | 40 + engine/client/image_import_test.go | 81 + engine/client/image_inspect.go | 32 + engine/client/image_inspect_test.go | 84 + engine/client/image_list.go | 45 + engine/client/image_list_test.go | 159 + engine/client/image_load.go | 29 + engine/client/image_load_test.go | 94 + engine/client/image_prune.go | 36 + engine/client/image_prune_test.go | 120 + engine/client/image_pull.go | 64 + engine/client/image_pull_test.go | 198 + engine/client/image_push.go | 55 + engine/client/image_push_test.go | 179 + engine/client/image_remove.go | 31 + engine/client/image_remove_test.go | 105 + engine/client/image_save.go | 21 + engine/client/image_save_test.go | 56 + engine/client/image_search.go | 51 + engine/client/image_search_test.go | 164 + engine/client/image_tag.go | 37 + engine/client/image_tag_test.go | 142 + engine/client/info.go | 26 + engine/client/info_test.go | 76 + engine/client/interface.go | 198 + engine/client/interface_experimental.go | 18 + engine/client/interface_stable.go | 10 + engine/client/login.go | 29 + engine/client/network_connect.go | 19 + engine/client/network_connect_test.go | 110 + engine/client/network_create.go | 25 + engine/client/network_create_test.go | 72 + engine/client/network_disconnect.go | 15 + engine/client/network_disconnect_test.go | 64 + engine/client/network_inspect.go | 49 + engine/client/network_inspect_test.go | 118 + engine/client/network_list.go | 31 + engine/client/network_list_test.go | 108 + engine/client/network_prune.go | 36 + engine/client/network_prune_test.go | 113 + engine/client/network_remove.go | 10 + engine/client/network_remove_test.go | 46 + engine/client/node_inspect.go | 32 + engine/client/node_inspect_test.go | 78 + engine/client/node_list.go | 36 + engine/client/node_list_test.go | 94 + engine/client/node_remove.go | 20 + engine/client/node_remove_test.go | 68 + engine/client/node_update.go | 18 + engine/client/node_update_test.go | 48 + engine/client/ping.go | 32 + engine/client/ping_test.go | 83 + engine/client/plugin_create.go | 26 + engine/client/plugin_disable.go | 19 + engine/client/plugin_disable_test.go | 48 + engine/client/plugin_enable.go | 19 + engine/client/plugin_enable_test.go | 48 + engine/client/plugin_inspect.go | 31 + engine/client/plugin_inspect_test.go | 67 + engine/client/plugin_install.go | 113 + engine/client/plugin_list.go | 32 + engine/client/plugin_list_test.go | 107 + engine/client/plugin_push.go | 16 + engine/client/plugin_push_test.go | 50 + engine/client/plugin_remove.go | 20 + engine/client/plugin_remove_test.go | 48 + engine/client/plugin_set.go | 12 + engine/client/plugin_set_test.go | 46 + engine/client/plugin_upgrade.go | 39 + engine/client/request.go | 259 + engine/client/request_test.go | 89 + engine/client/secret_create.go | 25 + engine/client/secret_create_test.go | 70 + engine/client/secret_inspect.go | 36 + engine/client/secret_inspect_test.go | 92 + engine/client/secret_list.go | 38 + engine/client/secret_list_test.go | 107 +
engine/client/secret_remove.go | 13 + engine/client/secret_remove_test.go | 60 + engine/client/secret_update.go | 21 + engine/client/secret_update_test.go | 61 + engine/client/service_create.go | 166 + engine/client/service_create_test.go | 211 + engine/client/service_inspect.go | 37 + engine/client/service_inspect_test.go | 79 + engine/client/service_list.go | 35 + engine/client/service_list_test.go | 94 + engine/client/service_logs.go | 52 + engine/client/service_logs_test.go | 135 + engine/client/service_remove.go | 10 + engine/client/service_remove_test.go | 57 + engine/client/service_update.go | 92 + engine/client/service_update_test.go | 76 + engine/client/session.go | 18 + engine/client/swarm_get_unlock_key.go | 21 + engine/client/swarm_get_unlock_key_test.go | 59 + engine/client/swarm_init.go | 21 + engine/client/swarm_init_test.go | 53 + engine/client/swarm_inspect.go | 21 + engine/client/swarm_inspect_test.go | 56 + engine/client/swarm_join.go | 14 + engine/client/swarm_join_test.go | 50 + engine/client/swarm_leave.go | 17 + engine/client/swarm_leave_test.go | 65 + engine/client/swarm_unlock.go | 14 + engine/client/swarm_unlock_test.go | 48 + engine/client/swarm_update.go | 22 + engine/client/swarm_update_test.go | 48 + engine/client/task_inspect.go | 32 + engine/client/task_inspect_test.go | 67 + engine/client/task_list.go | 35 + engine/client/task_list_test.go | 94 + engine/client/task_logs.go | 51 + engine/client/testdata/ca.pem | 18 + engine/client/testdata/cert.pem | 18 + engine/client/testdata/key.pem | 27 + engine/client/transport.go | 17 + engine/client/utils.go | 34 + engine/client/version.go | 21 + engine/client/volume_create.go | 21 + engine/client/volume_create_test.go | 75 + engine/client/volume_inspect.go | 38 + engine/client/volume_inspect_test.go | 79 + engine/client/volume_list.go | 32 + engine/client/volume_list_test.go | 98 + engine/client/volume_prune.go | 36 + engine/client/volume_remove.go | 21 + engine/client/volume_remove_test.go | 46 + engine/cmd/dockerd/README.md | 3 + engine/cmd/dockerd/config.go | 99 + engine/cmd/dockerd/config_common_unix.go | 34 + engine/cmd/dockerd/config_unix.go | 50 + engine/cmd/dockerd/config_unix_test.go | 23 + engine/cmd/dockerd/config_windows.go | 26 + engine/cmd/dockerd/daemon.go | 638 + engine/cmd/dockerd/daemon_freebsd.go | 9 + engine/cmd/dockerd/daemon_linux.go | 13 + engine/cmd/dockerd/daemon_test.go | 182 + engine/cmd/dockerd/daemon_unix.go | 117 + engine/cmd/dockerd/daemon_unix_test.go | 99 + engine/cmd/dockerd/daemon_windows.go | 85 + engine/cmd/dockerd/docker.go | 74 + engine/cmd/dockerd/docker_unix.go | 8 + engine/cmd/dockerd/docker_windows.go | 38 + .../dockerd/hack/malformed_host_override.go | 121 + .../hack/malformed_host_override_test.go | 124 + engine/cmd/dockerd/metrics.go | 27 + engine/cmd/dockerd/options.go | 122 + engine/cmd/dockerd/options_test.go | 44 + engine/cmd/dockerd/service_unsupported.go | 10 + engine/cmd/dockerd/service_windows.go | 430 + engine/codecov.yml | 17 + engine/container/archive.go | 86 + engine/container/container.go | 720 + engine/container/container_unit_test.go | 126 + engine/container/container_unix.go | 463 + engine/container/container_windows.go | 213 + engine/container/env.go | 43 + engine/container/env_test.go | 24 + engine/container/health.go | 82 + engine/container/history.go | 30 + engine/container/memory_store.go | 95 + engine/container/memory_store_test.go | 106 + engine/container/monitor.go | 46 + engine/container/mounts_unix.go | 12 + engine/container/mounts_windows.go | 8 +
engine/container/state.go | 409 + engine/container/state_test.go | 192 + engine/container/store.go | 28 + engine/container/stream/attach.go | 175 + engine/container/stream/streams.go | 146 + engine/container/view.go | 494 + engine/container/view_test.go | 186 + engine/contrib/README.md | 4 + engine/contrib/REVIEWERS | 1 + engine/contrib/apparmor/main.go | 56 + engine/contrib/apparmor/template.go | 268 + engine/contrib/check-config.sh | 360 + engine/contrib/desktop-integration/README.md | 11 + .../desktop-integration/chromium/Dockerfile | 36 + .../desktop-integration/gparted/Dockerfile | 31 + engine/contrib/docker-device-tool/README.md | 14 + .../contrib/docker-device-tool/device_tool.go | 167 + .../docker-device-tool/device_tool_windows.go | 4 + .../contrib/docker-machine-install-bundle.sh | 111 + engine/contrib/dockerize-disk.sh | 118 + engine/contrib/download-frozen-image-v1.sh | 108 + engine/contrib/download-frozen-image-v2.sh | 345 + engine/contrib/editorconfig | 13 + engine/contrib/gitdm/aliases | 148 + engine/contrib/gitdm/domain-map | 47 + engine/contrib/gitdm/generate_aliases.sh | 16 + engine/contrib/gitdm/gitdm.config | 17 + engine/contrib/httpserver/Dockerfile | 4 + engine/contrib/httpserver/server.go | 12 + engine/contrib/init/openrc/docker.confd | 23 + engine/contrib/init/openrc/docker.initd | 24 + engine/contrib/init/systemd/REVIEWERS | 3 + engine/contrib/init/systemd/docker.service | 34 + .../contrib/init/systemd/docker.service.rpm | 33 + engine/contrib/init/systemd/docker.socket | 12 + engine/contrib/init/sysvinit-debian/docker | 156 + .../init/sysvinit-debian/docker.default | 20 + engine/contrib/init/sysvinit-redhat/docker | 153 + .../init/sysvinit-redhat/docker.sysconfig | 7 + engine/contrib/init/upstart/REVIEWERS | 2 + engine/contrib/init/upstart/docker.conf | 72 + engine/contrib/mac-install-bundle.sh | 45 + engine/contrib/mkimage-alpine.sh | 90 + engine/contrib/mkimage-arch-pacman.conf | 92 + engine/contrib/mkimage-arch.sh | 126 + engine/contrib/mkimage-archarm-pacman.conf | 98 + engine/contrib/mkimage-crux.sh | 75 + engine/contrib/mkimage-pld.sh | 73 + engine/contrib/mkimage-yum.sh | 136 + engine/contrib/mkimage.sh | 120 + engine/contrib/mkimage/.febootstrap-minimize | 28 + engine/contrib/mkimage/busybox-static | 34 + engine/contrib/mkimage/debootstrap | 251 + engine/contrib/mkimage/mageia-urpmi | 61 + engine/contrib/mkimage/rinse | 25 + engine/contrib/nnp-test/Dockerfile | 9 + engine/contrib/nnp-test/nnp-test.c | 10 + engine/contrib/nuke-graph-directory.sh | 64 + engine/contrib/report-issue.sh | 105 + engine/contrib/syntax/nano/Dockerfile.nanorc | 26 + engine/contrib/syntax/nano/README.md | 32 + .../Preferences/Dockerfile.tmPreferences | 24 + .../Syntaxes/Dockerfile.tmLanguage | 160 + .../textmate/Docker.tmbundle/info.plist | 16 + engine/contrib/syntax/textmate/README.md | 17 + engine/contrib/syntax/textmate/REVIEWERS | 1 + engine/contrib/syntax/vim/LICENSE | 22 + engine/contrib/syntax/vim/README.md | 26 + engine/contrib/syntax/vim/doc/dockerfile.txt | 18 + .../syntax/vim/ftdetect/dockerfile.vim | 1 + .../contrib/syntax/vim/syntax/dockerfile.vim | 31 + engine/contrib/syscall-test/Dockerfile | 15 + engine/contrib/syscall-test/acct.c | 16 + engine/contrib/syscall-test/exit32.s | 7 + engine/contrib/syscall-test/ns.c | 63 + engine/contrib/syscall-test/raw.c | 14 + engine/contrib/syscall-test/setgid.c | 11 + engine/contrib/syscall-test/setuid.c | 11 + engine/contrib/syscall-test/socket.c | 30 + engine/contrib/syscall-test/userns.c | 63 + 
engine/contrib/udev/80-docker.rules | 3 + engine/contrib/vagrant-docker/README.md | 50 + engine/daemon/apparmor_default.go | 36 + engine/daemon/apparmor_default_unsupported.go | 7 + engine/daemon/archive.go | 449 + engine/daemon/archive_tarcopyoptions.go | 15 + engine/daemon/archive_tarcopyoptions_unix.go | 25 + .../daemon/archive_tarcopyoptions_windows.go | 10 + engine/daemon/archive_unix.go | 31 + engine/daemon/archive_windows.go | 39 + engine/daemon/attach.go | 187 + engine/daemon/auth.go | 13 + engine/daemon/bindmount_unix.go | 5 + engine/daemon/caps/utils.go | 139 + engine/daemon/changes.go | 34 + engine/daemon/checkpoint.go | 143 + engine/daemon/cluster.go | 26 + engine/daemon/cluster/cluster.go | 450 + engine/daemon/cluster/configs.go | 118 + .../cluster/controllers/plugin/controller.go | 261 + .../controllers/plugin/controller_test.go | 390 + engine/daemon/cluster/convert/config.go | 78 + engine/daemon/cluster/convert/container.go | 398 + engine/daemon/cluster/convert/network.go | 240 + engine/daemon/cluster/convert/network_test.go | 34 + engine/daemon/cluster/convert/node.go | 94 + engine/daemon/cluster/convert/secret.go | 80 + engine/daemon/cluster/convert/service.go | 639 + engine/daemon/cluster/convert/service_test.go | 308 + engine/daemon/cluster/convert/swarm.go | 147 + engine/daemon/cluster/convert/task.go | 69 + engine/daemon/cluster/errors.go | 61 + engine/daemon/cluster/executor/backend.go | 76 + .../cluster/executor/container/adapter.go | 475 + .../cluster/executor/container/attachment.go | 74 + .../cluster/executor/container/container.go | 680 + .../executor/container/container_test.go | 37 + .../cluster/executor/container/controller.go | 692 + .../cluster/executor/container/errors.go | 17 + .../cluster/executor/container/executor.go | 293 + .../cluster/executor/container/health_test.go | 100 + .../cluster/executor/container/validate.go | 40 + .../executor/container/validate_test.go | 142 + .../executor/container/validate_unix_test.go | 8 + .../container/validate_windows_test.go | 8 + engine/daemon/cluster/filters.go | 123 + engine/daemon/cluster/filters_test.go | 102 + engine/daemon/cluster/helpers.go | 246 + engine/daemon/cluster/listen_addr.go | 301 + engine/daemon/cluster/listen_addr_linux.go | 89 + engine/daemon/cluster/listen_addr_others.go | 9 + engine/daemon/cluster/networks.go | 316 + engine/daemon/cluster/noderunner.go | 388 + engine/daemon/cluster/nodes.go | 105 + engine/daemon/cluster/provider/network.go | 37 + engine/daemon/cluster/secrets.go | 118 + engine/daemon/cluster/services.go | 602 + engine/daemon/cluster/swarm.go | 569 + engine/daemon/cluster/tasks.go | 87 + engine/daemon/cluster/utils.go | 63 + engine/daemon/commit.go | 186 + engine/daemon/config/config.go | 567 + engine/daemon/config/config_common_unix.go | 71 + .../daemon/config/config_common_unix_test.go | 84 + engine/daemon/config/config_test.go | 518 + engine/daemon/config/config_unix.go | 87 + engine/daemon/config/config_unix_test.go | 134 + engine/daemon/config/config_windows.go | 57 + engine/daemon/config/config_windows_test.go | 60 + engine/daemon/config/opts.go | 22 + engine/daemon/configs.go | 21 + engine/daemon/configs_linux.go | 5 + engine/daemon/configs_unsupported.go | 7 + engine/daemon/configs_windows.go | 5 + engine/daemon/container.go | 358 + engine/daemon/container_linux.go | 30 + engine/daemon/container_operations.go | 1150 + engine/daemon/container_operations_unix.go | 403 + engine/daemon/container_operations_windows.go | 201 + engine/daemon/container_unix_test.go | 44 + 
engine/daemon/container_windows.go | 9 + engine/daemon/create.go | 304 + engine/daemon/create_test.go | 21 + engine/daemon/create_unix.go | 94 + engine/daemon/create_windows.go | 93 + engine/daemon/daemon.go | 1320 + engine/daemon/daemon_linux.go | 133 + engine/daemon/daemon_linux_test.go | 322 + engine/daemon/daemon_test.go | 319 + engine/daemon/daemon_unix.go | 1523 + engine/daemon/daemon_unix_test.go | 268 + engine/daemon/daemon_unsupported.go | 5 + engine/daemon/daemon_windows.go | 655 + engine/daemon/daemon_windows_test.go | 72 + engine/daemon/debugtrap_unix.go | 27 + engine/daemon/debugtrap_unsupported.go | 7 + engine/daemon/debugtrap_windows.go | 46 + engine/daemon/delete.go | 152 + engine/daemon/delete_test.go | 95 + engine/daemon/dependency.go | 17 + engine/daemon/discovery/discovery.go | 202 + engine/daemon/discovery/discovery_test.go | 96 + engine/daemon/disk_usage.go | 50 + engine/daemon/errors.go | 155 + engine/daemon/events.go | 308 + engine/daemon/events/events.go | 165 + engine/daemon/events/events_test.go | 282 + engine/daemon/events/filter.go | 138 + engine/daemon/events/metrics.go | 15 + engine/daemon/events/testutils/testutils.go | 76 + engine/daemon/events_test.go | 90 + engine/daemon/exec.go | 324 + engine/daemon/exec/exec.go | 146 + engine/daemon/exec_linux.go | 59 + engine/daemon/exec_linux_test.go | 53 + engine/daemon/exec_windows.go | 16 + engine/daemon/export.go | 86 + engine/daemon/graphdriver/aufs/aufs.go | 678 + engine/daemon/graphdriver/aufs/aufs_test.go | 805 + engine/daemon/graphdriver/aufs/dirs.go | 64 + engine/daemon/graphdriver/aufs/mount.go | 17 + engine/daemon/graphdriver/aufs/mount_linux.go | 7 + .../graphdriver/aufs/mount_unsupported.go | 12 + engine/daemon/graphdriver/btrfs/btrfs.go | 663 + engine/daemon/graphdriver/btrfs/btrfs_test.go | 65 + .../graphdriver/btrfs/dummy_unsupported.go | 3 + engine/daemon/graphdriver/btrfs/version.go | 26 + .../daemon/graphdriver/btrfs/version_none.go | 14 + .../daemon/graphdriver/btrfs/version_test.go | 13 + engine/daemon/graphdriver/copy/copy.go | 277 + engine/daemon/graphdriver/copy/copy_test.go | 159 + engine/daemon/graphdriver/counter.go | 62 + engine/daemon/graphdriver/devmapper/README.md | 98 + .../graphdriver/devmapper/device_setup.go | 231 + .../daemon/graphdriver/devmapper/deviceset.go | 2824 + .../graphdriver/devmapper/devmapper_doc.go | 106 + .../graphdriver/devmapper/devmapper_test.go | 205 + engine/daemon/graphdriver/devmapper/driver.go | 258 + engine/daemon/graphdriver/devmapper/mount.go | 66 + engine/daemon/graphdriver/driver.go | 307 + engine/daemon/graphdriver/driver_freebsd.go | 21 + engine/daemon/graphdriver/driver_linux.go | 124 + engine/daemon/graphdriver/driver_test.go | 36 + .../daemon/graphdriver/driver_unsupported.go | 13 + engine/daemon/graphdriver/driver_windows.go | 12 + engine/daemon/graphdriver/errors.go | 36 + engine/daemon/graphdriver/fsdiff.go | 175 + .../graphdriver/graphtest/graphbench_unix.go | 257 + .../graphdriver/graphtest/graphtest_unix.go | 352 + .../graphtest/graphtest_windows.go | 1 + .../daemon/graphdriver/graphtest/testutil.go | 337 + .../graphdriver/graphtest/testutil_unix.go | 69 + engine/daemon/graphdriver/lcow/lcow.go | 1052 + engine/daemon/graphdriver/lcow/lcow_svm.go | 378 + engine/daemon/graphdriver/lcow/remotefs.go | 139 + .../daemon/graphdriver/lcow/remotefs_file.go | 211 + .../graphdriver/lcow/remotefs_filedriver.go | 123 + .../graphdriver/lcow/remotefs_pathdriver.go | 212 + engine/daemon/graphdriver/overlay/overlay.go | 524 + 
.../graphdriver/overlay/overlay_test.go | 93 + .../overlay/overlay_unsupported.go | 3 + engine/daemon/graphdriver/overlay2/check.go | 134 + engine/daemon/graphdriver/overlay2/mount.go | 89 + engine/daemon/graphdriver/overlay2/overlay.go | 758 + .../graphdriver/overlay2/overlay_test.go | 109 + .../overlay2/overlay_unsupported.go | 3 + .../daemon/graphdriver/overlay2/randomid.go | 81 + .../graphdriver/overlayutils/overlayutils.go | 25 + engine/daemon/graphdriver/plugin.go | 55 + engine/daemon/graphdriver/proxy.go | 264 + engine/daemon/graphdriver/quota/errors.go | 19 + .../daemon/graphdriver/quota/projectquota.go | 384 + .../graphdriver/quota/projectquota_test.go | 152 + .../graphdriver/register/register_aufs.go | 8 + .../graphdriver/register/register_btrfs.go | 8 + .../register/register_devicemapper.go | 8 + .../graphdriver/register/register_overlay.go | 8 + .../graphdriver/register/register_overlay2.go | 8 + .../graphdriver/register/register_vfs.go | 6 + .../graphdriver/register/register_windows.go | 7 + .../graphdriver/register/register_zfs.go | 8 + engine/daemon/graphdriver/vfs/copy_linux.go | 7 + .../graphdriver/vfs/copy_unsupported.go | 9 + engine/daemon/graphdriver/vfs/driver.go | 167 + engine/daemon/graphdriver/vfs/quota_linux.go | 26 + .../graphdriver/vfs/quota_unsupported.go | 20 + engine/daemon/graphdriver/vfs/vfs_test.go | 41 + engine/daemon/graphdriver/windows/windows.go | 942 + engine/daemon/graphdriver/zfs/MAINTAINERS | 2 + engine/daemon/graphdriver/zfs/zfs.go | 431 + engine/daemon/graphdriver/zfs/zfs_freebsd.go | 38 + engine/daemon/graphdriver/zfs/zfs_linux.go | 28 + engine/daemon/graphdriver/zfs/zfs_test.go | 35 + .../daemon/graphdriver/zfs/zfs_unsupported.go | 11 + engine/daemon/health.go | 381 + engine/daemon/health_test.go | 154 + engine/daemon/images/cache.go | 27 + engine/daemon/images/image.go | 64 + engine/daemon/images/image_builder.go | 225 + engine/daemon/images/image_commit.go | 127 + engine/daemon/images/image_delete.go | 414 + engine/daemon/images/image_events.go | 39 + engine/daemon/images/image_exporter.go | 25 + engine/daemon/images/image_history.go | 87 + engine/daemon/images/image_import.go | 138 + engine/daemon/images/image_inspect.go | 104 + engine/daemon/images/image_prune.go | 211 + engine/daemon/images/image_pull.go | 126 + engine/daemon/images/image_push.go | 66 + engine/daemon/images/image_search.go | 95 + engine/daemon/images/image_search_test.go | 357 + engine/daemon/images/image_tag.go | 41 + engine/daemon/images/image_unix.go | 45 + engine/daemon/images/image_windows.go | 41 + engine/daemon/images/images.go | 348 + engine/daemon/images/locals.go | 32 + engine/daemon/images/service.go | 251 + engine/daemon/info.go | 206 + engine/daemon/info_unix.go | 93 + engine/daemon/info_unix_test.go | 53 + engine/daemon/info_windows.go | 10 + engine/daemon/initlayer/setup_unix.go | 73 + engine/daemon/initlayer/setup_windows.go | 16 + engine/daemon/inspect.go | 273 + engine/daemon/inspect_linux.go | 73 + engine/daemon/inspect_test.go | 33 + engine/daemon/inspect_windows.go | 26 + engine/daemon/keys.go | 59 + engine/daemon/keys_unsupported.go | 8 + engine/daemon/kill.go | 180 + engine/daemon/links.go | 91 + engine/daemon/links/links.go | 141 + engine/daemon/links/links_test.go | 213 + engine/daemon/list.go | 607 + engine/daemon/list_test.go | 26 + engine/daemon/list_unix.go | 11 + engine/daemon/list_windows.go | 20 + engine/daemon/listeners/group_unix.go | 34 + engine/daemon/listeners/listeners_linux.go | 102 + engine/daemon/listeners/listeners_windows.go | 54 + 
engine/daemon/logdrivers_linux.go | 15 + engine/daemon/logdrivers_windows.go | 14 + engine/daemon/logger/adapter.go | 139 + engine/daemon/logger/adapter_test.go | 216 + .../daemon/logger/awslogs/cloudwatchlogs.go | 744 + .../logger/awslogs/cloudwatchlogs_test.go | 1391 + .../logger/awslogs/cwlogsiface_mock_test.go | 119 + engine/daemon/logger/copier.go | 186 + engine/daemon/logger/copier_test.go | 484 + .../daemon/logger/etwlogs/etwlogs_windows.go | 168 + engine/daemon/logger/factory.go | 162 + engine/daemon/logger/fluentd/fluentd.go | 263 + engine/daemon/logger/gcplogs/gcplogging.go | 244 + .../daemon/logger/gcplogs/gcplogging_linux.go | 29 + .../logger/gcplogs/gcplogging_others.go | 7 + engine/daemon/logger/gelf/gelf.go | 268 + engine/daemon/logger/gelf/gelf_test.go | 260 + engine/daemon/logger/journald/journald.go | 127 + .../daemon/logger/journald/journald_test.go | 23 + .../logger/journald/journald_unsupported.go | 6 + engine/daemon/logger/journald/read.go | 441 + engine/daemon/logger/journald/read_native.go | 6 + .../logger/journald/read_native_compat.go | 6 + .../logger/journald/read_unsupported.go | 7 + .../daemon/logger/jsonfilelog/jsonfilelog.go | 185 + .../logger/jsonfilelog/jsonfilelog_test.go | 302 + .../logger/jsonfilelog/jsonlog/jsonlog.go | 25 + .../jsonfilelog/jsonlog/jsonlogbytes.go | 125 + .../jsonfilelog/jsonlog/jsonlogbytes_test.go | 51 + .../jsonfilelog/jsonlog/time_marshalling.go | 20 + .../jsonlog/time_marshalling_test.go | 34 + engine/daemon/logger/jsonfilelog/read.go | 89 + engine/daemon/logger/jsonfilelog/read_test.go | 64 + engine/daemon/logger/logentries/logentries.go | 115 + engine/daemon/logger/logger.go | 145 + engine/daemon/logger/logger_test.go | 21 + engine/daemon/logger/loggerutils/log_tag.go | 31 + .../daemon/logger/loggerutils/log_tag_test.go | 47 + engine/daemon/logger/loggerutils/logfile.go | 680 + .../loggerutils/multireader/multireader.go | 212 + .../multireader/multireader_test.go | 225 + engine/daemon/logger/loginfo.go | 129 + engine/daemon/logger/metrics.go | 21 + engine/daemon/logger/plugin.go | 116 + engine/daemon/logger/plugin_unix.go | 23 + engine/daemon/logger/plugin_unsupported.go | 12 + engine/daemon/logger/proxy.go | 107 + engine/daemon/logger/ring.go | 223 + engine/daemon/logger/ring_test.go | 299 + engine/daemon/logger/splunk/splunk.go | 649 + engine/daemon/logger/splunk/splunk_test.go | 1389 + .../logger/splunk/splunkhecmock_test.go | 182 + engine/daemon/logger/syslog/syslog.go | 266 + engine/daemon/logger/syslog/syslog_test.go | 62 + engine/daemon/logger/templates/templates.go | 50 + .../daemon/logger/templates/templates_test.go | 19 + engine/daemon/logs.go | 209 + engine/daemon/logs_test.go | 15 + engine/daemon/metrics.go | 192 + engine/daemon/metrics_unix.go | 60 + engine/daemon/metrics_unsupported.go | 12 + engine/daemon/monitor.go | 212 + engine/daemon/mounts.go | 55 + engine/daemon/names.go | 113 + engine/daemon/names/names.go | 9 + engine/daemon/network.go | 884 + engine/daemon/network/settings.go | 69 + engine/daemon/oci.go | 78 + engine/daemon/oci_linux.go | 881 + engine/daemon/oci_linux_test.go | 102 + engine/daemon/oci_windows.go | 419 + engine/daemon/pause.go | 55 + engine/daemon/prune.go | 250 + engine/daemon/reload.go | 324 + engine/daemon/reload_test.go | 573 + engine/daemon/reload_unix.go | 56 + engine/daemon/reload_windows.go | 9 + engine/daemon/rename.go | 123 + engine/daemon/resize.go | 50 + engine/daemon/resize_test.go | 103 + engine/daemon/restart.go | 70 + engine/daemon/seccomp_disabled.go | 19 + 
engine/daemon/seccomp_linux.go | 55 + engine/daemon/seccomp_unsupported.go | 5 + engine/daemon/secrets.go | 23 + engine/daemon/secrets_linux.go | 5 + engine/daemon/secrets_unsupported.go | 7 + engine/daemon/secrets_windows.go | 5 + engine/daemon/selinux_linux.go | 15 + engine/daemon/selinux_unsupported.go | 13 + engine/daemon/start.go | 254 + engine/daemon/start_unix.go | 57 + engine/daemon/start_windows.go | 38 + engine/daemon/stats.go | 155 + engine/daemon/stats/collector.go | 159 + engine/daemon/stats/collector_unix.go | 83 + engine/daemon/stats/collector_windows.go | 17 + engine/daemon/stats_collector.go | 26 + engine/daemon/stats_unix.go | 57 + engine/daemon/stats_windows.go | 11 + engine/daemon/stop.go | 89 + engine/daemon/testdata/keyfile | 7 + engine/daemon/top_unix.go | 189 + engine/daemon/top_unix_test.go | 79 + engine/daemon/top_windows.go | 63 + engine/daemon/trustkey.go | 57 + engine/daemon/trustkey_test.go | 71 + engine/daemon/unpause.go | 44 + engine/daemon/update.go | 95 + engine/daemon/update_linux.go | 54 + engine/daemon/update_windows.go | 11 + engine/daemon/util_test.go | 65 + engine/daemon/volumes.go | 421 + engine/daemon/volumes_linux.go | 36 + engine/daemon/volumes_linux_test.go | 56 + engine/daemon/volumes_unit_test.go | 42 + engine/daemon/volumes_unix.go | 156 + engine/daemon/volumes_unix_test.go | 256 + engine/daemon/volumes_windows.go | 51 + engine/daemon/wait.go | 23 + engine/daemon/workdir.go | 20 + engine/distribution/config.go | 266 + engine/distribution/errors.go | 206 + engine/distribution/errors_test.go | 85 + .../fixtures/validate_manifest/bad_manifest | 38 + .../validate_manifest/extra_data_manifest | 46 + .../fixtures/validate_manifest/good_manifest | 38 + engine/distribution/metadata/metadata.go | 75 + engine/distribution/metadata/v1_id_service.go | 51 + .../metadata/v1_id_service_test.go | 88 + .../metadata/v2_metadata_service.go | 241 + .../metadata/v2_metadata_service_test.go | 115 + engine/distribution/oci.go | 45 + engine/distribution/pull.go | 201 + engine/distribution/pull_v1.go | 368 + engine/distribution/pull_v2.go | 976 + engine/distribution/pull_v2_test.go | 184 + engine/distribution/pull_v2_unix.go | 60 + engine/distribution/pull_v2_windows.go | 138 + engine/distribution/push.go | 186 + engine/distribution/push_v1.go | 457 + engine/distribution/push_v2.go | 709 + engine/distribution/push_v2_test.go | 740 + engine/distribution/registry.go | 158 + engine/distribution/registry_unit_test.go | 127 + engine/distribution/utils/progress.go | 44 + engine/distribution/xfer/download.go | 474 + engine/distribution/xfer/download_test.go | 362 + engine/distribution/xfer/transfer.go | 401 + engine/distribution/xfer/transfer_test.go | 410 + engine/distribution/xfer/upload.go | 174 + engine/distribution/xfer/upload_test.go | 134 + engine/dockerversion/useragent.go | 76 + engine/dockerversion/version_lib.go | 18 + engine/docs/api/v1.18.md | 2179 + engine/docs/api/v1.19.md | 2259 + engine/docs/api/v1.20.md | 2414 + engine/docs/api/v1.21.md | 3003 + engine/docs/api/v1.22.md | 3343 ++ engine/docs/api/v1.23.md | 3459 ++ engine/docs/api/v1.24.md | 5377 ++ engine/docs/api/version-history.md | 424 + engine/docs/contributing/README.md | 8 + .../docs/contributing/images/branch-sig.png | Bin 0 -> 56537 bytes .../contributing/images/contributor-edit.png | Bin 0 -> 17933 bytes engine/docs/contributing/images/copy_url.png | Bin 0 -> 69486 bytes .../docs/contributing/images/fork_docker.png | Bin 0 -> 52190 bytes engine/docs/contributing/images/git_bash.png | Bin 0 -> 
26097 bytes .../docs/contributing/images/list_example.png | Bin 0 -> 51194 bytes engine/docs/contributing/set-up-dev-env.md | 372 + engine/docs/contributing/set-up-git.md | 280 + engine/docs/contributing/software-req-win.md | 177 + engine/docs/contributing/software-required.md | 94 + engine/docs/contributing/test.md | 244 + engine/docs/contributing/who-written-for.md | 49 + engine/docs/static_files/contributors.png | Bin 0 -> 23100 bytes .../docs/static_files/moby-project-logo.png | Bin 0 -> 20458 bytes engine/errdefs/defs.go | 74 + engine/errdefs/doc.go | 8 + engine/errdefs/helpers.go | 240 + engine/errdefs/helpers_test.go | 194 + engine/errdefs/is.go | 114 + engine/hack/README.md | 55 + engine/hack/ci/arm | 10 + engine/hack/ci/experimental | 9 + engine/hack/ci/janky | 17 + engine/hack/ci/powerpc | 6 + engine/hack/ci/z | 6 + engine/hack/dind | 33 + .../dockerfile/install/containerd.installer | 36 + .../dockerfile/install/dockercli.installer | 31 + .../dockerfile/install/gometalinter.installer | 12 + engine/hack/dockerfile/install/install.sh | 30 + .../hack/dockerfile/install/proxy.installer | 38 + engine/hack/dockerfile/install/runc.installer | 22 + engine/hack/dockerfile/install/tini.installer | 14 + .../hack/dockerfile/install/tomlv.installer | 12 + engine/hack/dockerfile/install/vndr.installer | 11 + engine/hack/generate-authors.sh | 15 + engine/hack/generate-swagger-api.sh | 27 + .../hack/integration-cli-on-swarm/README.md | 69 + .../integration-cli-on-swarm/agent/Dockerfile | 6 + .../agent/master/call.go | 132 + .../agent/master/master.go | 65 + .../agent/master/set.go | 28 + .../agent/master/set_test.go | 63 + .../agent/types/types.go | 18 + .../agent/vendor.conf | 2 + .../github.com/bfirsh/funker-go/LICENSE | 191 + .../github.com/bfirsh/funker-go/call.go | 50 + .../github.com/bfirsh/funker-go/handle.go | 54 + .../agent/worker/executor.go | 118 + .../agent/worker/worker.go | 69 + .../integration-cli-on-swarm/host/compose.go | 122 + .../host/dockercmd.go | 64 + .../host/enumerate.go | 55 + .../host/enumerate_test.go | 84 + .../integration-cli-on-swarm/host/host.go | 198 + .../integration-cli-on-swarm/host/volume.go | 88 + engine/hack/make.ps1 | 457 + engine/hack/make.sh | 224 + engine/hack/make/.binary | 73 + engine/hack/make/.binary-setup | 9 + engine/hack/make/.detect-daemon-osarch | 43 + engine/hack/make/.ensure-emptyfs | 23 + engine/hack/make/.go-autogen | 90 + engine/hack/make/.go-autogen.ps1 | 95 + engine/hack/make/.integration-daemon-setup | 7 + engine/hack/make/.integration-daemon-start | 126 + engine/hack/make/.integration-daemon-stop | 28 + engine/hack/make/.integration-test-helpers | 122 + engine/hack/make/.resources-windows/common.rc | 38 + .../.resources-windows/docker.exe.manifest | 18 + .../hack/make/.resources-windows/docker.ico | Bin 0 -> 370070 bytes .../hack/make/.resources-windows/docker.png | Bin 0 -> 658195 bytes engine/hack/make/.resources-windows/docker.rc | 3 + .../hack/make/.resources-windows/dockerd.rc | 4 + .../make/.resources-windows/event_messages.mc | 39 + .../hack/make/.resources-windows/resources.go | 18 + engine/hack/make/README.md | 16 + engine/hack/make/binary | 10 + engine/hack/make/binary-daemon | 27 + .../hack/make/build-integration-test-binary | 7 + engine/hack/make/cross | 29 + engine/hack/make/dynbinary | 10 + engine/hack/make/dynbinary-daemon | 10 + engine/hack/make/install-binary | 29 + engine/hack/make/run | 44 + engine/hack/make/test-docker-py | 20 + engine/hack/make/test-integration | 21 + engine/hack/make/test-integration-cli | 6 + 
engine/hack/make/test-integration-shell | 9 + engine/hack/test/e2e-run.sh | 72 + engine/hack/test/unit | 38 + engine/hack/validate/.swagger-yamllint | 4 + engine/hack/validate/.validate | 30 + engine/hack/validate/all | 8 + .../hack/validate/changelog-date-descending | 12 + engine/hack/validate/changelog-well-formed | 25 + engine/hack/validate/dco | 55 + engine/hack/validate/default | 17 + engine/hack/validate/default-seccomp | 28 + .../hack/validate/deprecate-integration-cli | 25 + engine/hack/validate/gometalinter | 13 + engine/hack/validate/gometalinter.json | 27 + engine/hack/validate/pkg-imports | 33 + engine/hack/validate/swagger | 13 + engine/hack/validate/swagger-gen | 29 + engine/hack/validate/test-imports | 38 + engine/hack/validate/toml | 31 + engine/hack/validate/vendor | 55 + engine/hack/vendor.sh | 15 + engine/image/cache/cache.go | 253 + engine/image/cache/compare.go | 63 + engine/image/cache/compare_test.go | 126 + engine/image/fs.go | 175 + engine/image/fs_test.go | 270 + engine/image/image.go | 232 + engine/image/image_test.go | 125 + engine/image/rootfs.go | 52 + engine/image/spec/README.md | 46 + engine/image/spec/v1.1.md | 623 + engine/image/spec/v1.2.md | 677 + engine/image/spec/v1.md | 562 + engine/image/store.go | 346 + engine/image/store_test.go | 197 + engine/image/tarexport/load.go | 432 + engine/image/tarexport/save.go | 431 + engine/image/tarexport/tarexport.go | 47 + engine/image/v1/imagev1.go | 150 + engine/image/v1/imagev1_test.go | 55 + engine/integration-cli/benchmark_test.go | 95 + engine/integration-cli/check_test.go | 409 + engine/integration-cli/checker/checker.go | 46 + engine/integration-cli/cli/build/build.go | 82 + engine/integration-cli/cli/cli.go | 226 + engine/integration-cli/daemon/daemon.go | 143 + engine/integration-cli/daemon/daemon_swarm.go | 197 + .../integration-cli/daemon_swarm_hack_test.go | 23 + .../integration-cli/docker_api_attach_test.go | 260 + .../integration-cli/docker_api_build_test.go | 558 + .../docker_api_build_windows_test.go | 39 + .../docker_api_containers_test.go | 2207 + .../docker_api_containers_windows_test.go | 76 + .../integration-cli/docker_api_create_test.go | 136 + .../docker_api_exec_resize_test.go | 113 + .../integration-cli/docker_api_exec_test.go | 313 + .../integration-cli/docker_api_images_test.go | 187 + .../docker_api_inspect_test.go | 181 + .../docker_api_ipcmode_test.go | 213 + .../integration-cli/docker_api_logs_test.go | 216 + .../docker_api_network_test.go | 376 + .../integration-cli/docker_api_stats_test.go | 314 + .../docker_api_swarm_node_test.go | 127 + .../docker_api_swarm_service_test.go | 612 + .../integration-cli/docker_api_swarm_test.go | 1034 + engine/integration-cli/docker_api_test.go | 110 + .../integration-cli/docker_cli_attach_test.go | 179 + .../docker_cli_attach_unix_test.go | 229 + .../integration-cli/docker_cli_build_test.go | 6209 ++ .../docker_cli_build_unix_test.go | 228 + .../docker_cli_by_digest_test.go | 693 + .../integration-cli/docker_cli_commit_test.go | 168 + .../docker_cli_config_create_test.go | 131 + .../docker_cli_cp_from_container_test.go | 399 + engine/integration-cli/docker_cli_cp_test.go | 664 + .../docker_cli_cp_to_container_test.go | 495 + .../docker_cli_cp_to_container_unix_test.go | 81 + .../docker_cli_cp_utils_test.go | 305 + .../integration-cli/docker_cli_create_test.go | 374 + .../docker_cli_daemon_plugins_test.go | 328 + .../integration-cli/docker_cli_daemon_test.go | 3131 + .../integration-cli/docker_cli_events_test.go | 769 + 
.../docker_cli_events_unix_test.go | 511 + .../integration-cli/docker_cli_exec_test.go | 607 + .../docker_cli_exec_unix_test.go | 97 + .../docker_cli_export_import_test.go | 34 + ...er_cli_external_volume_driver_unix_test.go | 631 + .../integration-cli/docker_cli_health_test.go | 167 + .../docker_cli_history_test.go | 119 + .../integration-cli/docker_cli_images_test.go | 366 + .../integration-cli/docker_cli_import_test.go | 142 + .../integration-cli/docker_cli_info_test.go | 238 + .../docker_cli_info_unix_test.go | 15 + .../docker_cli_inspect_test.go | 460 + .../integration-cli/docker_cli_links_test.go | 239 + .../integration-cli/docker_cli_login_test.go | 30 + .../integration-cli/docker_cli_logout_test.go | 106 + .../docker_cli_logs_bench_test.go | 32 + .../integration-cli/docker_cli_logs_test.go | 336 + .../docker_cli_netmode_test.go | 96 + .../docker_cli_network_unix_test.go | 1835 + .../docker_cli_plugins_logdriver_test.go | 48 + .../docker_cli_plugins_test.go | 493 + .../integration-cli/docker_cli_port_test.go | 351 + .../integration-cli/docker_cli_proxy_test.go | 51 + .../docker_cli_prune_unix_test.go | 309 + engine/integration-cli/docker_cli_ps_test.go | 874 + .../docker_cli_pull_local_test.go | 470 + .../integration-cli/docker_cli_pull_test.go | 274 + .../integration-cli/docker_cli_push_test.go | 382 + .../docker_cli_registry_user_agent_test.go | 103 + .../docker_cli_restart_test.go | 309 + engine/integration-cli/docker_cli_rmi_test.go | 338 + engine/integration-cli/docker_cli_run_test.go | 4540 ++ .../docker_cli_run_unix_test.go | 1585 + .../docker_cli_save_load_test.go | 405 + .../docker_cli_save_load_unix_test.go | 107 + .../integration-cli/docker_cli_search_test.go | 131 + .../docker_cli_secret_create_test.go | 92 + .../docker_cli_service_create_test.go | 447 + .../docker_cli_service_health_test.go | 136 + .../docker_cli_service_logs_test.go | 388 + .../docker_cli_service_scale_test.go | 57 + .../docker_cli_service_update_test.go | 137 + engine/integration-cli/docker_cli_sni_test.go | 44 + .../integration-cli/docker_cli_start_test.go | 199 + .../integration-cli/docker_cli_stats_test.go | 180 + .../integration-cli/docker_cli_swarm_test.go | 2062 + .../docker_cli_swarm_unix_test.go | 104 + engine/integration-cli/docker_cli_top_test.go | 73 + .../docker_cli_update_unix_test.go | 339 + .../integration-cli/docker_cli_userns_test.go | 98 + .../docker_cli_v2_only_test.go | 58 + .../integration-cli/docker_cli_volume_test.go | 639 + .../integration-cli/docker_cli_wait_test.go | 98 + .../docker_deprecated_api_v124_test.go | 250 + .../docker_deprecated_api_v124_unix_test.go | 31 + .../docker_hub_pull_suite_test.go | 90 + engine/integration-cli/docker_utils_test.go | 466 + .../environment/environment.go | 49 + engine/integration-cli/events_utils_test.go | 206 + .../auth/docker-credential-shell-test | 55 + .../fixtures/credentialspecs/valid.json | 25 + engine/integration-cli/fixtures/https/ca.pem | 1 + .../fixtures/https/client-cert.pem | 1 + .../fixtures/https/client-key.pem | 1 + .../fixtures/https/client-rogue-cert.pem | 73 + .../fixtures/https/client-rogue-key.pem | 16 + .../fixtures/https/server-cert.pem | 1 + .../fixtures/https/server-key.pem | 1 + .../fixtures/https/server-rogue-cert.pem | 76 + .../fixtures/https/server-rogue-key.pem | 16 + .../fixtures/registry/cert.pem | 21 + .../fixtures_linux_daemon_test.go | 139 + .../requirement/requirement.go | 34 + engine/integration-cli/requirements_test.go | 219 + .../integration-cli/requirements_unix_test.go | 117 + 
engine/integration-cli/test_vars_exec_test.go | 8 + .../integration-cli/test_vars_noexec_test.go | 8 + .../test_vars_noseccomp_test.go | 8 + .../integration-cli/test_vars_seccomp_test.go | 8 + engine/integration-cli/test_vars_test.go | 11 + engine/integration-cli/test_vars_unix_test.go | 14 + .../integration-cli/test_vars_windows_test.go | 15 + .../integration-cli/testdata/emptyLayer.tar | Bin 0 -> 30720 bytes engine/integration-cli/utils_test.go | 183 + .../integration/build/build_session_test.go | 129 + engine/integration/build/build_squash_test.go | 103 + engine/integration/build/build_test.go | 460 + engine/integration/build/main_test.go | 33 + engine/integration/config/config_test.go | 356 + engine/integration/config/main_test.go | 33 + engine/integration/container/copy_test.go | 65 + engine/integration/container/create_test.go | 303 + .../container/daemon_linux_test.go | 78 + engine/integration/container/diff_test.go | 42 + engine/integration/container/exec_test.go | 50 + engine/integration/container/export_test.go | 78 + engine/integration/container/health_test.go | 47 + engine/integration/container/inspect_test.go | 48 + engine/integration/container/kill_test.go | 183 + .../integration/container/links_linux_test.go | 57 + engine/integration/container/logs_test.go | 35 + engine/integration/container/main_test.go | 33 + .../container/mounts_linux_test.go | 208 + engine/integration/container/nat_test.go | 120 + engine/integration/container/pause_test.go | 98 + engine/integration/container/ps_test.go | 49 + engine/integration/container/remove_test.go | 112 + engine/integration/container/rename_test.go | 213 + engine/integration/container/resize_test.go | 66 + engine/integration/container/restart_test.go | 114 + engine/integration/container/stats_test.go | 43 + engine/integration/container/stop_test.go | 127 + .../container/update_linux_test.go | 107 + engine/integration/container/update_test.go | 64 + engine/integration/doc.go | 3 + engine/integration/image/commit_test.go | 48 + engine/integration/image/import_test.go | 42 + engine/integration/image/main_test.go | 33 + engine/integration/image/remove_test.go | 59 + engine/integration/image/tag_test.go | 140 + .../internal/container/container.go | 54 + engine/integration/internal/container/exec.go | 86 + engine/integration/internal/container/ops.go | 136 + .../integration/internal/container/states.go | 41 + .../integration/internal/network/network.go | 35 + engine/integration/internal/network/ops.go | 94 + .../internal/requirement/requirement.go | 53 + engine/integration/internal/swarm/service.go | 200 + engine/integration/network/delete_test.go | 91 + engine/integration/network/helpers.go | 85 + engine/integration/network/inspect_test.go | 177 + .../integration/network/ipvlan/ipvlan_test.go | 432 + .../integration/network/ipvlan/main_test.go | 33 + .../network/macvlan/macvlan_test.go | 281 + .../integration/network/macvlan/main_test.go | 33 + engine/integration/network/main_test.go | 33 + engine/integration/network/service_test.go | 315 + .../plugin/authz/authz_plugin_test.go | 521 + .../plugin/authz/authz_plugin_v2_test.go | 175 + engine/integration/plugin/authz/main_test.go | 180 + .../plugin/graphdriver/external_test.go | 462 + .../plugin/graphdriver/main_test.go | 36 + .../plugin/logging/cmd/close_on_start/main.go | 48 + .../logging/cmd/close_on_start/main_test.go | 1 + .../plugin/logging/cmd/cmd_test.go | 1 + .../plugin/logging/cmd/dummy/main.go | 19 + .../plugin/logging/cmd/dummy/main_test.go | 1 + 
.../plugin/logging/helpers_test.go | 67 + .../plugin/logging/logging_test.go | 79 + .../integration/plugin/logging/main_test.go | 29 + .../plugin/logging/validation_test.go | 35 + engine/integration/plugin/pkg_test.go | 1 + .../plugin/volumes/cmd/cmd_test.go | 1 + .../plugin/volumes/cmd/dummy/main.go | 19 + .../plugin/volumes/cmd/dummy/main_test.go | 1 + .../plugin/volumes/helpers_test.go | 73 + .../integration/plugin/volumes/main_test.go | 32 + .../integration/plugin/volumes/mounts_test.go | 58 + engine/integration/secret/main_test.go | 33 + engine/integration/secret/secret_test.go | 366 + engine/integration/service/create_test.go | 374 + engine/integration/service/inspect_test.go | 153 + engine/integration/service/main_test.go | 33 + engine/integration/service/network_test.go | 75 + engine/integration/service/plugin_test.go | 121 + engine/integration/session/main_test.go | 33 + engine/integration/session/session_test.go | 48 + .../system/cgroupdriver_systemd_test.go | 56 + engine/integration/system/event_test.go | 122 + engine/integration/system/info_linux_test.go | 48 + engine/integration/system/info_test.go | 42 + engine/integration/system/login_test.go | 28 + engine/integration/system/main_test.go | 33 + engine/integration/system/version_test.go | 23 + engine/integration/testdata/https/ca.pem | 23 + .../testdata/https/client-cert.pem | 73 + .../integration/testdata/https/client-key.pem | 16 + .../testdata/https/server-cert.pem | 76 + .../integration/testdata/https/server-key.pem | 16 + engine/integration/volume/main_test.go | 33 + engine/integration/volume/volume_test.go | 116 + engine/internal/test/daemon/config.go | 82 + engine/internal/test/daemon/container.go | 40 + engine/internal/test/daemon/daemon.go | 681 + engine/internal/test/daemon/daemon_unix.go | 39 + engine/internal/test/daemon/daemon_windows.go | 25 + engine/internal/test/daemon/node.go | 82 + engine/internal/test/daemon/ops.go | 44 + engine/internal/test/daemon/plugin.go | 77 + engine/internal/test/daemon/secret.go | 84 + engine/internal/test/daemon/service.go | 131 + engine/internal/test/daemon/swarm.go | 194 + engine/internal/test/environment/clean.go | 217 + .../internal/test/environment/environment.go | 158 + engine/internal/test/environment/protect.go | 254 + engine/internal/test/fakecontext/context.go | 131 + engine/internal/test/fakegit/fakegit.go | 136 + engine/internal/test/fakestorage/fixtures.go | 92 + engine/internal/test/fakestorage/storage.go | 200 + engine/internal/test/fixtures/load/frozen.go | 196 + .../test/fixtures/plugin/basic/basic.go | 34 + .../internal/test/fixtures/plugin/plugin.go | 216 + engine/internal/test/helper.go | 6 + engine/internal/test/registry/ops.go | 26 + engine/internal/test/registry/registry.go | 255 + .../internal/test/registry/registry_mock.go | 71 + engine/internal/test/request/npipe.go | 12 + engine/internal/test/request/npipe_windows.go | 12 + engine/internal/test/request/ops.go | 78 + engine/internal/test/request/request.go | 218 + engine/internal/testutil/helpers.go | 17 + engine/internal/testutil/stringutils.go | 14 + engine/internal/testutil/stringutils_test.go | 34 + engine/layer/empty.go | 61 + engine/layer/empty_test.go | 52 + engine/layer/filestore.go | 355 + engine/layer/filestore_test.go | 104 + engine/layer/filestore_unix.go | 15 + engine/layer/filestore_windows.go | 35 + engine/layer/layer.go | 237 + engine/layer/layer_store.go | 754 + engine/layer/layer_store_windows.go | 11 + engine/layer/layer_test.go | 768 + engine/layer/layer_unix.go | 9 + 
engine/layer/layer_unix_test.go | 73 + engine/layer/layer_windows.go | 46 + engine/layer/migration.go | 252 + engine/layer/migration_test.go | 429 + engine/layer/mount_test.go | 239 + engine/layer/mounted_layer.go | 100 + engine/layer/ro_layer.go | 182 + engine/layer/ro_layer_windows.go | 9 + engine/libcontainerd/client_daemon.go | 894 + engine/libcontainerd/client_daemon_linux.go | 108 + engine/libcontainerd/client_daemon_windows.go | 55 + engine/libcontainerd/client_local_windows.go | 1328 + engine/libcontainerd/errors.go | 13 + engine/libcontainerd/process_windows.go | 44 + engine/libcontainerd/queue.go | 35 + engine/libcontainerd/queue_test.go | 31 + engine/libcontainerd/remote_daemon.go | 344 + engine/libcontainerd/remote_daemon_linux.go | 65 + engine/libcontainerd/remote_daemon_options.go | 141 + .../remote_daemon_options_linux.go | 18 + engine/libcontainerd/remote_daemon_windows.go | 50 + engine/libcontainerd/remote_local.go | 59 + engine/libcontainerd/types.go | 108 + engine/libcontainerd/types_linux.go | 30 + engine/libcontainerd/types_windows.go | 42 + engine/libcontainerd/utils_linux.go | 12 + engine/libcontainerd/utils_windows.go | 46 + engine/libcontainerd/utils_windows_test.go | 13 + engine/migrate/v1/migratev1.go | 501 + engine/migrate/v1/migratev1_test.go | 437 + engine/oci/defaults.go | 212 + engine/oci/devices_linux.go | 86 + engine/oci/devices_unsupported.go | 20 + engine/oci/namespaces.go | 13 + engine/opts/address_pools.go | 84 + engine/opts/address_pools_test.go | 20 + engine/opts/env.go | 48 + engine/opts/env_test.go | 124 + engine/opts/hosts.go | 165 + engine/opts/hosts_test.go | 181 + engine/opts/hosts_unix.go | 8 + engine/opts/hosts_windows.go | 4 + engine/opts/ip.go | 47 + engine/opts/ip_test.go | 54 + engine/opts/opts.go | 337 + engine/opts/opts_test.go | 264 + engine/opts/opts_unix.go | 6 + engine/opts/opts_windows.go | 56 + engine/opts/quotedstring.go | 37 + engine/opts/quotedstring_test.go | 30 + engine/opts/runtime.go | 79 + engine/opts/ulimit.go | 81 + engine/opts/ulimit_test.go | 42 + engine/pkg/README.md | 11 + engine/pkg/aaparser/aaparser.go | 89 + engine/pkg/aaparser/aaparser_test.go | 73 + engine/pkg/archive/README.md | 1 + engine/pkg/archive/archive.go | 1291 + engine/pkg/archive/archive_linux.go | 92 + engine/pkg/archive/archive_linux_test.go | 162 + engine/pkg/archive/archive_other.go | 7 + engine/pkg/archive/archive_test.go | 1364 + engine/pkg/archive/archive_unix.go | 114 + engine/pkg/archive/archive_unix_test.go | 318 + engine/pkg/archive/archive_windows.go | 77 + engine/pkg/archive/archive_windows_test.go | 93 + engine/pkg/archive/changes.go | 441 + engine/pkg/archive/changes_linux.go | 286 + engine/pkg/archive/changes_other.go | 97 + engine/pkg/archive/changes_posix_test.go | 127 + engine/pkg/archive/changes_test.go | 504 + engine/pkg/archive/changes_unix.go | 37 + engine/pkg/archive/changes_windows.go | 30 + engine/pkg/archive/copy.go | 472 + engine/pkg/archive/copy_unix.go | 11 + engine/pkg/archive/copy_unix_test.go | 958 + engine/pkg/archive/copy_windows.go | 9 + engine/pkg/archive/diff.go | 258 + engine/pkg/archive/diff_test.go | 386 + engine/pkg/archive/example_changes.go | 97 + engine/pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes engine/pkg/archive/time_linux.go | 16 + engine/pkg/archive/time_unsupported.go | 16 + engine/pkg/archive/utils_test.go | 166 + engine/pkg/archive/whiteouts.go | 23 + engine/pkg/archive/wrap.go | 59 + engine/pkg/archive/wrap_test.go | 92 + engine/pkg/authorization/api.go | 88 + 
engine/pkg/authorization/api_test.go | 76 + engine/pkg/authorization/authz.go | 189 + engine/pkg/authorization/authz_unix_test.go | 342 + engine/pkg/authorization/middleware.go | 110 + engine/pkg/authorization/middleware_test.go | 53 + .../pkg/authorization/middleware_unix_test.go | 66 + engine/pkg/authorization/plugin.go | 118 + engine/pkg/authorization/response.go | 210 + engine/pkg/broadcaster/unbuffered.go | 49 + engine/pkg/broadcaster/unbuffered_test.go | 161 + engine/pkg/chrootarchive/archive.go | 73 + engine/pkg/chrootarchive/archive_test.go | 413 + engine/pkg/chrootarchive/archive_unix.go | 88 + engine/pkg/chrootarchive/archive_windows.go | 22 + engine/pkg/chrootarchive/chroot_linux.go | 113 + engine/pkg/chrootarchive/chroot_unix.go | 12 + engine/pkg/chrootarchive/diff.go | 23 + engine/pkg/chrootarchive/diff_unix.go | 130 + engine/pkg/chrootarchive/diff_windows.go | 45 + engine/pkg/chrootarchive/init_unix.go | 28 + engine/pkg/chrootarchive/init_windows.go | 4 + engine/pkg/containerfs/archiver.go | 203 + engine/pkg/containerfs/containerfs.go | 87 + engine/pkg/containerfs/containerfs_unix.go | 10 + engine/pkg/containerfs/containerfs_windows.go | 15 + engine/pkg/devicemapper/devmapper.go | 826 + engine/pkg/devicemapper/devmapper_log.go | 124 + engine/pkg/devicemapper/devmapper_wrapper.go | 252 + .../devicemapper/devmapper_wrapper_dynamic.go | 6 + ...vmapper_wrapper_dynamic_deferred_remove.go | 35 + ...r_wrapper_dynamic_dlsym_deferred_remove.go | 128 + .../devmapper_wrapper_no_deferred_remove.go | 17 + engine/pkg/devicemapper/ioctl.go | 28 + engine/pkg/devicemapper/log.go | 11 + engine/pkg/directory/directory.go | 26 + engine/pkg/directory/directory_test.go | 193 + engine/pkg/directory/directory_unix.go | 54 + engine/pkg/directory/directory_windows.go | 42 + engine/pkg/discovery/README.md | 41 + engine/pkg/discovery/backends.go | 107 + engine/pkg/discovery/discovery.go | 35 + engine/pkg/discovery/discovery_test.go | 137 + engine/pkg/discovery/entry.go | 94 + engine/pkg/discovery/file/file.go | 107 + engine/pkg/discovery/file/file_test.go | 114 + engine/pkg/discovery/generator.go | 35 + engine/pkg/discovery/generator_test.go | 53 + engine/pkg/discovery/kv/kv.go | 192 + engine/pkg/discovery/kv/kv_test.go | 322 + engine/pkg/discovery/memory/memory.go | 93 + engine/pkg/discovery/memory/memory_test.go | 48 + engine/pkg/discovery/nodes/nodes.go | 54 + engine/pkg/discovery/nodes/nodes_test.go | 51 + engine/pkg/dmesg/dmesg_linux.go | 18 + engine/pkg/dmesg/dmesg_linux_test.go | 9 + engine/pkg/filenotify/filenotify.go | 40 + engine/pkg/filenotify/fsnotify.go | 18 + engine/pkg/filenotify/poller.go | 204 + engine/pkg/filenotify/poller_test.go | 119 + engine/pkg/fileutils/fileutils.go | 298 + engine/pkg/fileutils/fileutils_darwin.go | 27 + engine/pkg/fileutils/fileutils_test.go | 591 + engine/pkg/fileutils/fileutils_unix.go | 22 + engine/pkg/fileutils/fileutils_windows.go | 7 + engine/pkg/fsutils/fsutils_linux.go | 86 + engine/pkg/fsutils/fsutils_linux_test.go | 92 + engine/pkg/homedir/homedir_linux.go | 21 + engine/pkg/homedir/homedir_others.go | 13 + engine/pkg/homedir/homedir_test.go | 24 + engine/pkg/homedir/homedir_unix.go | 34 + engine/pkg/homedir/homedir_windows.go | 24 + engine/pkg/idtools/idtools.go | 266 + engine/pkg/idtools/idtools_unix.go | 230 + engine/pkg/idtools/idtools_unix_test.go | 397 + engine/pkg/idtools/idtools_windows.go | 23 + engine/pkg/idtools/usergroupadd_linux.go | 164 + .../pkg/idtools/usergroupadd_unsupported.go | 12 + engine/pkg/idtools/utils_unix.go | 32 + 
engine/pkg/ioutils/buffer.go | 51 + engine/pkg/ioutils/buffer_test.go | 153 + engine/pkg/ioutils/bytespipe.go | 186 + engine/pkg/ioutils/bytespipe_test.go | 159 + engine/pkg/ioutils/fswriters.go | 162 + engine/pkg/ioutils/fswriters_test.go | 132 + engine/pkg/ioutils/readers.go | 157 + engine/pkg/ioutils/readers_test.go | 95 + engine/pkg/ioutils/temp_unix.go | 10 + engine/pkg/ioutils/temp_windows.go | 16 + engine/pkg/ioutils/writeflusher.go | 92 + engine/pkg/ioutils/writers.go | 66 + engine/pkg/ioutils/writers_test.go | 65 + engine/pkg/jsonmessage/jsonmessage.go | 335 + engine/pkg/jsonmessage/jsonmessage_test.go | 298 + engine/pkg/locker/README.md | 65 + engine/pkg/locker/locker.go | 112 + engine/pkg/locker/locker_test.go | 161 + engine/pkg/longpath/longpath.go | 26 + engine/pkg/longpath/longpath_test.go | 22 + engine/pkg/loopback/attach_loopback.go | 137 + engine/pkg/loopback/ioctl.go | 48 + engine/pkg/loopback/loop_wrapper.go | 52 + engine/pkg/loopback/loopback.go | 64 + engine/pkg/mount/flags.go | 149 + engine/pkg/mount/flags_freebsd.go | 49 + engine/pkg/mount/flags_linux.go | 87 + engine/pkg/mount/flags_unsupported.go | 31 + engine/pkg/mount/mount.go | 141 + engine/pkg/mount/mount_unix_test.go | 170 + engine/pkg/mount/mounter_freebsd.go | 60 + engine/pkg/mount/mounter_linux.go | 57 + engine/pkg/mount/mounter_linux_test.go | 228 + engine/pkg/mount/mounter_unsupported.go | 11 + engine/pkg/mount/mountinfo.go | 40 + engine/pkg/mount/mountinfo_freebsd.go | 55 + engine/pkg/mount/mountinfo_linux.go | 132 + engine/pkg/mount/mountinfo_linux_test.go | 508 + engine/pkg/mount/mountinfo_unsupported.go | 12 + engine/pkg/mount/mountinfo_windows.go | 6 + engine/pkg/mount/sharedsubtree_linux.go | 67 + engine/pkg/mount/sharedsubtree_linux_test.go | 348 + .../cmd/names-generator/main.go | 14 + engine/pkg/namesgenerator/names-generator.go | 645 + .../namesgenerator/names-generator_test.go | 27 + engine/pkg/parsers/kernel/kernel.go | 74 + engine/pkg/parsers/kernel/kernel_darwin.go | 56 + engine/pkg/parsers/kernel/kernel_unix.go | 35 + engine/pkg/parsers/kernel/kernel_unix_test.go | 96 + engine/pkg/parsers/kernel/kernel_windows.go | 51 + engine/pkg/parsers/kernel/uname_linux.go | 17 + engine/pkg/parsers/kernel/uname_solaris.go | 14 + .../pkg/parsers/kernel/uname_unsupported.go | 18 + .../operatingsystem/operatingsystem_linux.go | 77 + .../operatingsystem/operatingsystem_unix.go | 25 + .../operatingsystem_unix_test.go | 247 + .../operatingsystem_windows.go | 51 + engine/pkg/parsers/parsers.go | 69 + engine/pkg/parsers/parsers_test.go | 70 + engine/pkg/pidfile/pidfile.go | 53 + engine/pkg/pidfile/pidfile_darwin.go | 14 + engine/pkg/pidfile/pidfile_test.go | 38 + engine/pkg/pidfile/pidfile_unix.go | 16 + engine/pkg/pidfile/pidfile_windows.go | 25 + engine/pkg/platform/architecture_linux.go | 18 + engine/pkg/platform/architecture_unix.go | 20 + engine/pkg/platform/architecture_windows.go | 60 + engine/pkg/platform/platform.go | 23 + engine/pkg/plugingetter/getter.go | 52 + engine/pkg/plugins/client.go | 242 + engine/pkg/plugins/client_test.go | 277 + engine/pkg/plugins/discovery.go | 154 + engine/pkg/plugins/discovery_test.go | 152 + engine/pkg/plugins/discovery_unix.go | 5 + engine/pkg/plugins/discovery_unix_test.go | 159 + engine/pkg/plugins/discovery_windows.go | 8 + engine/pkg/plugins/errors.go | 33 + engine/pkg/plugins/plugin_test.go | 154 + engine/pkg/plugins/pluginrpc-gen/README.md | 58 + .../pkg/plugins/pluginrpc-gen/fixtures/foo.go | 83 + .../fixtures/otherfixture/spaceship.go | 4 + 
engine/pkg/plugins/pluginrpc-gen/main.go | 91 + engine/pkg/plugins/pluginrpc-gen/parser.go | 263 + .../pkg/plugins/pluginrpc-gen/parser_test.go | 222 + engine/pkg/plugins/pluginrpc-gen/template.go | 118 + engine/pkg/plugins/plugins.go | 337 + engine/pkg/plugins/plugins_unix.go | 9 + engine/pkg/plugins/plugins_windows.go | 7 + engine/pkg/plugins/transport/http.go | 36 + engine/pkg/plugins/transport/http_test.go | 21 + engine/pkg/plugins/transport/transport.go | 36 + engine/pkg/pools/pools.go | 137 + engine/pkg/pools/pools_test.go | 163 + engine/pkg/progress/progress.go | 89 + engine/pkg/progress/progressreader.go | 66 + engine/pkg/progress/progressreader_test.go | 75 + engine/pkg/pubsub/publisher.go | 121 + engine/pkg/pubsub/publisher_test.go | 142 + engine/pkg/reexec/README.md | 5 + engine/pkg/reexec/command_linux.go | 28 + engine/pkg/reexec/command_unix.go | 23 + engine/pkg/reexec/command_unsupported.go | 16 + engine/pkg/reexec/command_windows.go | 21 + engine/pkg/reexec/reexec.go | 47 + engine/pkg/reexec/reexec_test.go | 52 + engine/pkg/signal/README.md | 1 + engine/pkg/signal/signal.go | 54 + engine/pkg/signal/signal_darwin.go | 41 + engine/pkg/signal/signal_freebsd.go | 43 + engine/pkg/signal/signal_linux.go | 81 + engine/pkg/signal/signal_linux_test.go | 59 + engine/pkg/signal/signal_test.go | 34 + engine/pkg/signal/signal_unix.go | 21 + engine/pkg/signal/signal_unsupported.go | 10 + engine/pkg/signal/signal_windows.go | 26 + engine/pkg/signal/testfiles/main.go | 43 + engine/pkg/signal/trap.go | 104 + engine/pkg/signal/trap_linux_test.go | 82 + engine/pkg/stdcopy/stdcopy.go | 190 + engine/pkg/stdcopy/stdcopy_test.go | 289 + engine/pkg/streamformatter/streamformatter.go | 159 + .../streamformatter/streamformatter_test.go | 112 + engine/pkg/streamformatter/streamwriter.go | 47 + .../pkg/streamformatter/streamwriter_test.go | 35 + engine/pkg/stringid/README.md | 1 + engine/pkg/stringid/stringid.go | 99 + engine/pkg/stringid/stringid_test.go | 72 + engine/pkg/symlink/LICENSE.APACHE | 191 + engine/pkg/symlink/LICENSE.BSD | 27 + engine/pkg/symlink/README.md | 6 + engine/pkg/symlink/fs.go | 144 + engine/pkg/symlink/fs_unix.go | 15 + engine/pkg/symlink/fs_unix_test.go | 407 + engine/pkg/symlink/fs_windows.go | 169 + engine/pkg/sysinfo/README.md | 1 + engine/pkg/sysinfo/numcpu.go | 12 + engine/pkg/sysinfo/numcpu_linux.go | 42 + engine/pkg/sysinfo/numcpu_windows.go | 35 + engine/pkg/sysinfo/sysinfo.go | 144 + engine/pkg/sysinfo/sysinfo_linux.go | 254 + engine/pkg/sysinfo/sysinfo_linux_test.go | 104 + engine/pkg/sysinfo/sysinfo_test.go | 26 + engine/pkg/sysinfo/sysinfo_unix.go | 9 + engine/pkg/sysinfo/sysinfo_windows.go | 7 + engine/pkg/system/chtimes.go | 31 + engine/pkg/system/chtimes_test.go | 94 + engine/pkg/system/chtimes_unix.go | 14 + engine/pkg/system/chtimes_unix_test.go | 91 + engine/pkg/system/chtimes_windows.go | 26 + engine/pkg/system/chtimes_windows_test.go | 86 + engine/pkg/system/errors.go | 13 + engine/pkg/system/exitcode.go | 19 + engine/pkg/system/filesys.go | 67 + engine/pkg/system/filesys_windows.go | 296 + engine/pkg/system/init.go | 22 + engine/pkg/system/init_unix.go | 7 + engine/pkg/system/init_windows.go | 12 + engine/pkg/system/lcow.go | 32 + engine/pkg/system/lcow_unix.go | 8 + engine/pkg/system/lcow_windows.go | 6 + engine/pkg/system/lstat_unix.go | 19 + engine/pkg/system/lstat_unix_test.go | 30 + engine/pkg/system/lstat_windows.go | 14 + engine/pkg/system/meminfo.go | 17 + engine/pkg/system/meminfo_linux.go | 65 + engine/pkg/system/meminfo_unix_test.go | 40 + 
engine/pkg/system/meminfo_unsupported.go | 8 + engine/pkg/system/meminfo_windows.go | 45 + engine/pkg/system/mknod.go | 22 + engine/pkg/system/mknod_windows.go | 11 + engine/pkg/system/path.go | 60 + engine/pkg/system/path_windows_test.go | 83 + engine/pkg/system/process_unix.go | 24 + engine/pkg/system/process_windows.go | 18 + engine/pkg/system/rm.go | 80 + engine/pkg/system/rm_test.go | 84 + engine/pkg/system/stat_darwin.go | 13 + engine/pkg/system/stat_freebsd.go | 13 + engine/pkg/system/stat_linux.go | 19 + engine/pkg/system/stat_openbsd.go | 13 + engine/pkg/system/stat_solaris.go | 13 + engine/pkg/system/stat_unix.go | 65 + engine/pkg/system/stat_unix_test.go | 40 + engine/pkg/system/stat_windows.go | 49 + engine/pkg/system/syscall_unix.go | 17 + engine/pkg/system/syscall_windows.go | 127 + engine/pkg/system/syscall_windows_test.go | 9 + engine/pkg/system/umask.go | 13 + engine/pkg/system/umask_windows.go | 7 + engine/pkg/system/utimes_freebsd.go | 24 + engine/pkg/system/utimes_linux.go | 25 + engine/pkg/system/utimes_unix_test.go | 68 + engine/pkg/system/utimes_unsupported.go | 10 + engine/pkg/system/xattrs_linux.go | 29 + engine/pkg/system/xattrs_unsupported.go | 13 + engine/pkg/tailfile/tailfile.go | 66 + engine/pkg/tailfile/tailfile_test.go | 148 + engine/pkg/tarsum/builder_context.go | 21 + engine/pkg/tarsum/builder_context_test.go | 67 + engine/pkg/tarsum/fileinfosums.go | 133 + engine/pkg/tarsum/fileinfosums_test.go | 62 + engine/pkg/tarsum/tarsum.go | 301 + engine/pkg/tarsum/tarsum_spec.md | 230 + engine/pkg/tarsum/tarsum_test.go | 657 + .../json | 1 + .../layer.tar | Bin 0 -> 9216 bytes .../json | 1 + .../layer.tar | Bin 0 -> 1536 bytes .../tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes engine/pkg/tarsum/testdata/xattr/json | 1 + engine/pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes engine/pkg/tarsum/versioning.go | 158 + engine/pkg/tarsum/versioning_test.go | 98 + engine/pkg/tarsum/writercloser.go | 22 + engine/pkg/term/ascii.go | 66 + engine/pkg/term/ascii_test.go | 25 + engine/pkg/term/proxy.go | 78 + engine/pkg/term/proxy_test.go | 115 + engine/pkg/term/tc.go | 20 + engine/pkg/term/term.go | 124 + engine/pkg/term/term_linux_test.go | 117 + engine/pkg/term/term_windows.go | 228 + engine/pkg/term/termios_bsd.go | 42 + engine/pkg/term/termios_linux.go | 39 + engine/pkg/term/windows/ansi_reader.go | 263 + engine/pkg/term/windows/ansi_writer.go | 64 + engine/pkg/term/windows/console.go | 35 + engine/pkg/term/windows/windows.go | 33 + engine/pkg/term/windows/windows_test.go | 3 + engine/pkg/term/winsize.go | 20 + engine/pkg/truncindex/truncindex.go | 139 + engine/pkg/truncindex/truncindex_test.go | 453 + engine/pkg/urlutil/urlutil.go | 52 + engine/pkg/urlutil/urlutil_test.go | 56 + engine/pkg/useragent/README.md | 1 + engine/pkg/useragent/useragent.go | 55 + engine/pkg/useragent/useragent_test.go | 31 + engine/plugin/backend_linux.go | 876 + engine/plugin/backend_linux_test.go | 81 + engine/plugin/backend_unsupported.go | 72 + engine/plugin/blobstore.go | 190 + engine/plugin/defs.go | 50 + engine/plugin/errors.go | 66 + engine/plugin/events.go | 111 + .../plugin/executor/containerd/containerd.go | 175 + .../executor/containerd/containerd_test.go | 148 + engine/plugin/manager.go | 384 + engine/plugin/manager_linux.go | 335 + 
engine/plugin/manager_linux_test.go | 279 + engine/plugin/manager_test.go | 55 + engine/plugin/manager_windows.go | 28 + engine/plugin/store.go | 291 + engine/plugin/store_test.go | 64 + engine/plugin/v2/plugin.go | 311 + engine/plugin/v2/plugin_linux.go | 141 + engine/plugin/v2/plugin_unsupported.go | 14 + engine/plugin/v2/settable.go | 102 + engine/plugin/v2/settable_test.go | 91 + engine/poule.yml | 129 + engine/profiles/apparmor/apparmor.go | 114 + engine/profiles/apparmor/template.go | 44 + engine/profiles/seccomp/default.json | 751 + engine/profiles/seccomp/fixtures/example.json | 27 + engine/profiles/seccomp/generate.go | 32 + engine/profiles/seccomp/seccomp.go | 160 + engine/profiles/seccomp/seccomp_default.go | 640 + engine/profiles/seccomp/seccomp_test.go | 32 + .../profiles/seccomp/seccomp_unsupported.go | 12 + engine/project/ARM.md | 45 + engine/project/BRANCHES-AND-TAGS.md | 35 + engine/project/CONTRIBUTING.md | 1 + engine/project/GOVERNANCE.md | 120 + engine/project/IRC-ADMINISTRATION.md | 37 + engine/project/ISSUE-TRIAGE.md | 132 + engine/project/PACKAGE-REPO-MAINTENANCE.md | 74 + engine/project/PACKAGERS.md | 307 + engine/project/PATCH-RELEASES.md | 68 + engine/project/PRINCIPLES.md | 19 + engine/project/README.md | 24 + engine/project/RELEASE-PROCESS.md | 78 + engine/project/REVIEWING.md | 246 + engine/project/TOOLS.md | 63 + engine/reference/errors.go | 25 + engine/reference/store.go | 343 + engine/reference/store_test.go | 350 + engine/registry/auth.go | 296 + engine/registry/auth_test.go | 120 + engine/registry/config.go | 442 + engine/registry/config_test.go | 381 + engine/registry/config_unix.go | 16 + engine/registry/config_windows.go | 18 + engine/registry/endpoint_test.go | 78 + engine/registry/endpoint_v1.go | 198 + engine/registry/errors.go | 31 + engine/registry/registry.go | 191 + engine/registry/registry_mock_test.go | 476 + engine/registry/registry_test.go | 934 + .../resumable/resumablerequestreader.go | 96 + .../resumable/resumablerequestreader_test.go | 257 + engine/registry/service.go | 328 + engine/registry/service_v1.go | 40 + engine/registry/service_v1_test.go | 32 + engine/registry/service_v2.go | 82 + engine/registry/session.go | 779 + engine/registry/types.go | 70 + engine/reports/2017-05-01.md | 35 + engine/reports/2017-05-08.md | 34 + engine/reports/2017-05-15.md | 52 + engine/reports/2017-06-05.md | 36 + engine/reports/2017-06-12.md | 78 + engine/reports/2017-06-26.md | 120 + engine/reports/builder/2017-05-01.md | 47 + engine/reports/builder/2017-05-08.md | 57 + engine/reports/builder/2017-05-15.md | 64 + engine/reports/builder/2017-05-22.md | 47 + engine/reports/builder/2017-05-29.md | 52 + engine/reports/builder/2017-06-05.md | 58 + engine/reports/builder/2017-06-12.md | 58 + engine/reports/builder/2017-06-26.md | 78 + engine/reports/builder/2017-07-10.md | 65 + engine/reports/builder/2017-07-17.md | 79 + engine/restartmanager/restartmanager.go | 133 + engine/restartmanager/restartmanager_test.go | 36 + engine/runconfig/config.go | 81 + engine/runconfig/config_test.go | 190 + engine/runconfig/config_unix.go | 59 + engine/runconfig/config_windows.go | 19 + engine/runconfig/errors.go | 42 + .../fixtures/unix/container_config_1_14.json | 30 + .../fixtures/unix/container_config_1_17.json | 50 + .../fixtures/unix/container_config_1_19.json | 58 + .../unix/container_hostconfig_1_14.json | 18 + .../unix/container_hostconfig_1_19.json | 30 + .../windows/container_config_1_19.json | 58 + engine/runconfig/hostconfig.go | 79 + 
engine/runconfig/hostconfig_test.go | 273 + engine/runconfig/hostconfig_unix.go | 110 + engine/runconfig/hostconfig_windows.go | 96 + engine/runconfig/hostconfig_windows_test.go | 17 + engine/runconfig/opts/parse.go | 20 + engine/vendor.conf | 164 + .../github.com/Graylog2/go-gelf/LICENSE | 21 + .../github.com/Graylog2/go-gelf/README.md | 117 + .../Graylog2/go-gelf/gelf/message.go | 147 + .../Graylog2/go-gelf/gelf/reader.go | 140 + .../Graylog2/go-gelf/gelf/tcpreader.go | 156 + .../Graylog2/go-gelf/gelf/tcpwriter.go | 105 + .../Graylog2/go-gelf/gelf/udpwriter.go | 231 + .../github.com/Graylog2/go-gelf/gelf/utils.go | 41 + .../Graylog2/go-gelf/gelf/writer.go | 34 + engine/vendor/github.com/Nvveen/Gotty/LICENSE | 26 + engine/vendor/github.com/Nvveen/Gotty/README | 5 + .../github.com/Nvveen/Gotty/attributes.go | 514 + .../vendor/github.com/Nvveen/Gotty/gotty.go | 244 + .../vendor/github.com/Nvveen/Gotty/parser.go | 362 + .../vendor/github.com/Nvveen/Gotty/types.go | 23 + .../github.com/containerd/continuity/LICENSE | 202 + .../containerd/continuity/README.md | 74 + .../containerd/continuity/devices/devices.go | 5 + .../continuity/devices/devices_unix.go | 58 + .../continuity/devices/devices_windows.go | 11 + .../containerd/continuity/driver/driver.go | 158 + .../continuity/driver/driver_unix.go | 127 + .../continuity/driver/driver_windows.go | 28 + .../containerd/continuity/driver/utils.go | 74 + .../containerd/continuity/fs/copy.go | 121 + .../containerd/continuity/fs/copy_linux.go | 101 + .../containerd/continuity/fs/copy_unix.go | 80 + .../containerd/continuity/fs/copy_windows.go | 33 + .../containerd/continuity/fs/diff.go | 310 + .../containerd/continuity/fs/diff_unix.go | 58 + .../containerd/continuity/fs/diff_windows.go | 32 + .../containerd/continuity/fs/dtype_linux.go | 87 + .../github.com/containerd/continuity/fs/du.go | 22 + .../containerd/continuity/fs/du_unix.go | 88 + .../containerd/continuity/fs/du_windows.go | 60 + .../containerd/continuity/fs/hardlink.go | 27 + .../containerd/continuity/fs/hardlink_unix.go | 18 + .../continuity/fs/hardlink_windows.go | 7 + .../containerd/continuity/fs/path.go | 276 + .../containerd/continuity/fs/stat_bsd.go | 28 + .../containerd/continuity/fs/stat_linux.go | 27 + .../containerd/continuity/fs/time.go | 13 + .../continuity/pathdriver/path_driver.go | 85 + .../continuity/syscallx/syscall_unix.go | 10 + .../continuity/syscallx/syscall_windows.go | 96 + .../containerd/continuity/sysx/asm.s | 10 + .../continuity/sysx/chmod_darwin.go | 18 + .../continuity/sysx/chmod_darwin_386.go | 25 + .../continuity/sysx/chmod_darwin_amd64.go | 25 + .../continuity/sysx/chmod_freebsd.go | 17 + .../continuity/sysx/chmod_freebsd_amd64.go | 25 + .../containerd/continuity/sysx/chmod_linux.go | 12 + .../continuity/sysx/chmod_solaris.go | 11 + .../containerd/continuity/sysx/file_posix.go | 112 + .../continuity/sysx/nodata_linux.go | 7 + .../continuity/sysx/nodata_solaris.go | 8 + .../containerd/continuity/sysx/nodata_unix.go | 9 + .../containerd/continuity/sysx/sys.go | 37 + .../containerd/continuity/sysx/xattr.go | 67 + .../continuity/sysx/xattr_darwin.go | 71 + .../continuity/sysx/xattr_darwin_386.go | 111 + .../continuity/sysx/xattr_darwin_amd64.go | 111 + .../continuity/sysx/xattr_freebsd.go | 12 + .../containerd/continuity/sysx/xattr_linux.go | 44 + .../continuity/sysx/xattr_openbsd.go | 7 + .../continuity/sysx/xattr_solaris.go | 12 + .../continuity/sysx/xattr_unsupported.go | 44 + .../containerd/continuity/vendor.conf | 13 + .../github.com/containerd/go-runc/LICENSE 
| 201 + .../github.com/containerd/go-runc/README.md | 14 + .../containerd/go-runc/command_linux.go | 39 + .../containerd/go-runc/command_other.go | 32 + .../github.com/containerd/go-runc/console.go | 157 + .../containerd/go-runc/container.go | 30 + .../github.com/containerd/go-runc/events.go | 100 + .../github.com/containerd/go-runc/io.go | 229 + .../github.com/containerd/go-runc/monitor.go | 76 + .../github.com/containerd/go-runc/runc.go | 703 + .../github.com/containerd/go-runc/utils.go | 107 + .../github.com/containerd/ttrpc/LICENSE | 201 + .../github.com/containerd/ttrpc/README.md | 52 + .../github.com/containerd/ttrpc/channel.go | 154 + .../github.com/containerd/ttrpc/client.go | 280 + .../github.com/containerd/ttrpc/codec.go | 42 + .../github.com/containerd/ttrpc/config.go | 39 + .../github.com/containerd/ttrpc/handshake.go | 50 + .../github.com/containerd/ttrpc/server.go | 456 + .../github.com/containerd/ttrpc/services.go | 150 + .../github.com/containerd/ttrpc/types.go | 42 + .../containerd/ttrpc/unixcreds_linux.go | 108 + .../github.com/fernet/fernet-go/License | 20 + .../vendor/github.com/fernet/fernet-go/Readme | 22 + .../github.com/fernet/fernet-go/fernet.go | 168 + .../vendor/github.com/fernet/fernet-go/key.go | 91 + engine/vendor/github.com/golang/gddo/LICENSE | 27 + .../github.com/golang/gddo/README.markdown | 44 + .../github.com/golang/gddo/httputil/buster.go | 95 + .../golang/gddo/httputil/header/header.go | 298 + .../golang/gddo/httputil/httputil.go | 25 + .../golang/gddo/httputil/negotiate.go | 79 + .../golang/gddo/httputil/respbuf.go | 58 + .../github.com/golang/gddo/httputil/static.go | 265 + .../golang/gddo/httputil/transport.go | 87 + .../grpc-ecosystem/grpc-opentracing/LICENSE | 27 + .../grpc-ecosystem/grpc-opentracing/PATENTS | 23 + .../grpc-opentracing/README.rst | 25 + .../grpc-opentracing/go/otgrpc/README.md | 57 + .../grpc-opentracing/go/otgrpc/client.go | 239 + .../grpc-opentracing/go/otgrpc/errors.go | 69 + .../grpc-opentracing/go/otgrpc/options.go | 76 + .../grpc-opentracing/go/otgrpc/package.go | 5 + .../grpc-opentracing/go/otgrpc/server.go | 141 + .../grpc-opentracing/go/otgrpc/shared.go | 42 + .../grpc-opentracing/python/README.md | 4 + .../python/examples/protos/command_line.proto | 15 + .../python/examples/protos/store.proto | 37 + .../hashicorp/go-immutable-radix/LICENSE | 363 + .../hashicorp/go-immutable-radix/README.md | 41 + .../hashicorp/go-immutable-radix/edges.go | 21 + .../hashicorp/go-immutable-radix/iradix.go | 657 + .../hashicorp/go-immutable-radix/iter.go | 91 + .../hashicorp/go-immutable-radix/node.go | 352 + .../hashicorp/go-immutable-radix/raw_iter.go | 78 + .../github.com/ishidawataru/sctp/LICENSE | 201 + .../github.com/ishidawataru/sctp/README.md | 18 + .../github.com/ishidawataru/sctp/sctp.go | 656 + .../ishidawataru/sctp/sctp_linux.go | 227 + .../ishidawataru/sctp/sctp_unsupported.go | 47 + .../vendor/github.com/moby/buildkit/LICENSE | 201 + .../vendor/github.com/moby/buildkit/README.md | 274 + .../api/services/control/control.pb.go | 4589 ++ .../api/services/control/control.proto | 121 + .../buildkit/api/services/control/generate.go | 3 + .../moby/buildkit/api/types/generate.go | 3 + .../moby/buildkit/api/types/worker.pb.go | 523 + .../moby/buildkit/api/types/worker.proto | 16 + .../buildkit/cache/contenthash/checksum.go | 634 + .../buildkit/cache/contenthash/checksum.pb.go | 755 + .../buildkit/cache/contenthash/checksum.proto | 30 + .../buildkit/cache/contenthash/filehash.go | 98 + .../cache/contenthash/filehash_unix.go | 47 + 
.../cache/contenthash/filehash_windows.go | 23 + .../buildkit/cache/contenthash/generate.go | 3 + .../moby/buildkit/cache/contenthash/tarsum.go | 60 + .../github.com/moby/buildkit/cache/fsutil.go | 71 + .../github.com/moby/buildkit/cache/gc.go | 27 + .../github.com/moby/buildkit/cache/manager.go | 573 + .../moby/buildkit/cache/metadata.go | 206 + .../moby/buildkit/cache/metadata/metadata.go | 382 + .../github.com/moby/buildkit/cache/refs.go | 387 + .../moby/buildkit/cache/remotecache/export.go | 128 + .../moby/buildkit/cache/remotecache/import.go | 98 + .../cache/remotecache/registry/registry.go | 73 + .../cache/remotecache/v1/cachestorage.go | 247 + .../buildkit/cache/remotecache/v1/chains.go | 127 + .../moby/buildkit/cache/remotecache/v1/doc.go | 50 + .../buildkit/cache/remotecache/v1/parse.go | 102 + .../buildkit/cache/remotecache/v1/spec.go | 35 + .../buildkit/cache/remotecache/v1/utils.go | 306 + .../github.com/moby/buildkit/client/client.go | 132 + .../moby/buildkit/client/client_unix.go | 19 + .../moby/buildkit/client/client_windows.go | 24 + .../moby/buildkit/client/diskusage.go | 73 + .../moby/buildkit/client/exporters.go | 8 + .../github.com/moby/buildkit/client/graph.go | 45 + .../moby/buildkit/client/llb/exec.go | 409 + .../client/llb/imagemetaresolver/resolver.go | 92 + .../moby/buildkit/client/llb/marshal.go | 112 + .../moby/buildkit/client/llb/meta.go | 170 + .../moby/buildkit/client/llb/resolver.go | 18 + .../moby/buildkit/client/llb/source.go | 363 + .../moby/buildkit/client/llb/state.go | 396 + .../github.com/moby/buildkit/client/prune.go | 50 + .../github.com/moby/buildkit/client/solve.go | 251 + .../moby/buildkit/client/workers.go | 53 + .../moby/buildkit/control/control.go | 304 + .../moby/buildkit/executor/executor.go | 30 + .../moby/buildkit/executor/oci/hosts.go | 38 + .../moby/buildkit/executor/oci/mounts.go | 68 + .../moby/buildkit/executor/oci/resolvconf.go | 81 + .../moby/buildkit/executor/oci/spec_unix.go | 163 + .../moby/buildkit/executor/oci/user.go | 107 + .../executor/runcexecutor/executor.go | 247 + .../moby/buildkit/exporter/exporter.go | 16 + .../frontend/dockerfile/builder/build.go | 310 + .../frontend/dockerfile/command/command.go | 46 + .../dockerfile/dockerfile2llb/convert.go | 1030 + .../dockerfile2llb/convert_norunmount.go | 16 + .../dockerfile2llb/convert_runmount.go | 85 + .../dockerfile2llb/defaultshell_unix.go | 7 + .../dockerfile2llb/defaultshell_windows.go | 7 + .../dockerfile/dockerfile2llb/directives.go | 38 + .../dockerfile/dockerfile2llb/image.go | 74 + .../frontend/dockerfile/instructions/bflag.go | 200 + .../dockerfile/instructions/commands.go | 446 + .../instructions/commands_runmount.go | 181 + .../dockerfile/instructions/errors_unix.go | 9 + .../dockerfile/instructions/errors_windows.go | 27 + .../frontend/dockerfile/instructions/parse.go | 649 + .../dockerfile/instructions/support.go | 19 + .../dockerfile/parser/line_parsers.go | 368 + .../frontend/dockerfile/parser/parser.go | 327 + .../dockerfile/parser/split_command.go | 118 + .../dockerfile/shell/equal_env_unix.go | 10 + .../dockerfile/shell/equal_env_windows.go | 10 + .../buildkit/frontend/dockerfile/shell/lex.go | 373 + .../moby/buildkit/frontend/frontend.go | 29 + .../frontend/gateway/client/client.go | 52 + .../frontend/gateway/client/result.go | 54 + .../frontend/gateway/forwarder/forward.go | 149 + .../frontend/gateway/forwarder/frontend.go | 38 + .../moby/buildkit/frontend/gateway/gateway.go | 513 + .../moby/buildkit/frontend/gateway/pb/caps.go | 64 + 
.../frontend/gateway/pb/gateway.pb.go | 3360 ++ .../frontend/gateway/pb/gateway.proto | 99 + .../buildkit/frontend/gateway/pb/generate.go | 3 + .../moby/buildkit/frontend/result.go | 23 + .../moby/buildkit/identity/randomid.go | 53 + .../moby/buildkit/session/auth/auth.go | 26 + .../moby/buildkit/session/auth/auth.pb.go | 673 + .../moby/buildkit/session/auth/auth.proto | 19 + .../moby/buildkit/session/auth/generate.go | 3 + .../moby/buildkit/session/context.go | 22 + .../buildkit/session/filesync/diffcopy.go | 114 + .../buildkit/session/filesync/filesync.go | 289 + .../buildkit/session/filesync/filesync.pb.go | 644 + .../buildkit/session/filesync/filesync.proto | 20 + .../buildkit/session/filesync/generate.go | 3 + .../github.com/moby/buildkit/session/grpc.go | 81 + .../moby/buildkit/session/grpchijack/dial.go | 156 + .../buildkit/session/grpchijack/hijack.go | 14 + .../moby/buildkit/session/manager.go | 218 + .../moby/buildkit/session/session.go | 143 + .../snapshot/blobmapping/snapshotter.go | 129 + .../moby/buildkit/snapshot/localmounter.go | 72 + .../buildkit/snapshot/localmounter_unix.go | 29 + .../buildkit/snapshot/localmounter_windows.go | 26 + .../moby/buildkit/snapshot/snapshotter.go | 137 + .../solver/boltdbcachestorage/storage.go | 449 + .../moby/buildkit/solver/cachekey.go | 66 + .../moby/buildkit/solver/cachemanager.go | 270 + .../moby/buildkit/solver/cachestorage.go | 51 + .../moby/buildkit/solver/combinedcache.go | 124 + .../github.com/moby/buildkit/solver/edge.go | 866 + .../moby/buildkit/solver/exporter.go | 208 + .../github.com/moby/buildkit/solver/index.go | 243 + .../buildkit/solver/internal/pipe/pipe.go | 197 + .../github.com/moby/buildkit/solver/jobs.go | 774 + .../moby/buildkit/solver/llbsolver/bridge.go | 191 + .../buildkit/solver/llbsolver/ops/build.go | 132 + .../buildkit/solver/llbsolver/ops/exec.go | 535 + .../buildkit/solver/llbsolver/ops/source.go | 78 + .../moby/buildkit/solver/llbsolver/result.go | 152 + .../moby/buildkit/solver/llbsolver/solver.go | 177 + .../moby/buildkit/solver/llbsolver/vertex.go | 185 + .../buildkit/solver/memorycachestorage.go | 307 + .../moby/buildkit/solver/pb/attr.go | 17 + .../moby/buildkit/solver/pb/const.go | 12 + .../moby/buildkit/solver/pb/generate.go | 3 + .../moby/buildkit/solver/pb/ops.pb.go | 4960 ++ .../moby/buildkit/solver/pb/ops.proto | 165 + .../moby/buildkit/solver/pb/platform.go | 41 + .../moby/buildkit/solver/progress.go | 109 + .../github.com/moby/buildkit/solver/result.go | 105 + .../moby/buildkit/solver/scheduler.go | 396 + .../github.com/moby/buildkit/solver/types.go | 168 + .../moby/buildkit/source/git/gitsource.go | 406 + .../buildkit/source/git/gitsource_unix.go | 27 + .../buildkit/source/git/gitsource_windows.go | 23 + .../moby/buildkit/source/gitidentifier.go | 70 + .../moby/buildkit/source/http/httpsource.go | 429 + .../moby/buildkit/source/identifier.go | 205 + .../moby/buildkit/source/local/local.go | 249 + .../moby/buildkit/source/manager.go | 48 + .../moby/buildkit/util/apicaps/caps.go | 161 + .../moby/buildkit/util/apicaps/pb/caps.pb.go | 535 + .../moby/buildkit/util/apicaps/pb/caps.proto | 19 + .../moby/buildkit/util/apicaps/pb/generate.go | 3 + .../util/appdefaults/appdefaults_unix.go | 55 + .../util/appdefaults/appdefaults_windows.go | 18 + .../moby/buildkit/util/cond/cond.go | 40 + .../moby/buildkit/util/contentutil/buffer.go | 156 + .../moby/buildkit/util/contentutil/copy.go | 43 + .../moby/buildkit/util/contentutil/fetcher.go | 70 + .../util/contentutil/multiprovider.go | 44 + 
.../moby/buildkit/util/contentutil/pusher.go | 58 + .../util/flightcontrol/flightcontrol.go | 324 + .../moby/buildkit/util/imageutil/config.go | 159 + .../moby/buildkit/util/progress/logs/logs.go | 53 + .../buildkit/util/progress/multireader.go | 77 + .../buildkit/util/progress/multiwriter.go | 105 + .../moby/buildkit/util/progress/progress.go | 252 + .../util/rootless/specconv/specconv_linux.go | 113 + .../moby/buildkit/util/system/path_unix.go | 14 + .../moby/buildkit/util/system/path_windows.go | 37 + .../buildkit/util/system/seccomp_linux.go | 29 + .../buildkit/util/system/seccomp_nolinux.go | 7 + .../buildkit/util/system/seccomp_noseccomp.go | 7 + .../moby/buildkit/util/tracing/tracing.go | 109 + .../github.com/moby/buildkit/vendor.conf | 69 + .../github.com/moby/buildkit/worker/filter.go | 33 + .../github.com/moby/buildkit/worker/result.go | 40 + .../github.com/moby/buildkit/worker/worker.go | 41 + .../moby/buildkit/worker/workercontroller.go | 77 + .../github.com/tonistiigi/fsutil/LICENSE | 22 + .../tonistiigi/fsutil/chtimes_linux.go | 20 + .../tonistiigi/fsutil/chtimes_nolinux.go | 20 + .../github.com/tonistiigi/fsutil/diff.go | 43 + .../tonistiigi/fsutil/diff_containerd.go | 199 + .../fsutil/diff_containerd_linux.go | 37 + .../tonistiigi/fsutil/diskwriter.go | 340 + .../tonistiigi/fsutil/diskwriter_unix.go | 51 + .../tonistiigi/fsutil/diskwriter_windows.go | 17 + .../tonistiigi/fsutil/followlinks.go | 150 + .../github.com/tonistiigi/fsutil/generate.go | 3 + .../github.com/tonistiigi/fsutil/hardlinks.go | 46 + .../github.com/tonistiigi/fsutil/readme.md | 45 + .../github.com/tonistiigi/fsutil/receive.go | 269 + .../github.com/tonistiigi/fsutil/send.go | 208 + .../github.com/tonistiigi/fsutil/stat.pb.go | 931 + .../github.com/tonistiigi/fsutil/stat.proto | 17 + .../github.com/tonistiigi/fsutil/validator.go | 92 + .../github.com/tonistiigi/fsutil/walker.go | 246 + .../tonistiigi/fsutil/walker_unix.go | 61 + .../tonistiigi/fsutil/walker_windows.go | 14 + .../github.com/tonistiigi/fsutil/wire.pb.go | 567 + .../github.com/tonistiigi/fsutil/wire.proto | 19 + engine/volume/drivers/adapter.go | 176 + engine/volume/drivers/extpoint.go | 235 + engine/volume/drivers/extpoint_test.go | 24 + engine/volume/drivers/proxy.go | 255 + engine/volume/drivers/proxy_test.go | 132 + engine/volume/local/local.go | 378 + engine/volume/local/local_test.go | 335 + engine/volume/local/local_unix.go | 99 + engine/volume/local/local_windows.go | 46 + engine/volume/mounts/lcow_parser.go | 34 + engine/volume/mounts/linux_parser.go | 417 + engine/volume/mounts/mounts.go | 181 + engine/volume/mounts/parser.go | 47 + engine/volume/mounts/parser_test.go | 480 + engine/volume/mounts/validate.go | 28 + engine/volume/mounts/validate_test.go | 73 + engine/volume/mounts/validate_unix_test.go | 8 + engine/volume/mounts/validate_windows_test.go | 6 + engine/volume/mounts/volume_copy.go | 23 + engine/volume/mounts/volume_unix.go | 18 + engine/volume/mounts/volume_windows.go | 8 + engine/volume/mounts/windows_parser.go | 456 + engine/volume/service/by.go | 89 + engine/volume/service/convert.go | 132 + engine/volume/service/db.go | 95 + engine/volume/service/db_test.go | 52 + engine/volume/service/default_driver.go | 21 + engine/volume/service/default_driver_stubs.go | 10 + engine/volume/service/errors.go | 111 + engine/volume/service/opts/opts.go | 89 + engine/volume/service/restore.go | 85 + engine/volume/service/restore_test.go | 58 + engine/volume/service/service.go | 243 + engine/volume/service/service_linux_test.go | 66 + 
engine/volume/service/service_test.go | 253 + engine/volume/service/store.go | 858 + engine/volume/service/store_test.go | 421 + engine/volume/service/store_unix.go | 9 + engine/volume/service/store_windows.go | 12 + engine/volume/testutils/testutils.go | 230 + engine/volume/volume.go | 69 + 4032 files changed, 731565 insertions(+) create mode 100644 CHANGELOG.md create mode 100644 CONTRIBUTING.md create mode 100644 Makefile create mode 100644 README.md create mode 100644 VERSION create mode 100644 cli/.dockerignore create mode 100644 cli/.mailmap create mode 100644 cli/AUTHORS create mode 100644 cli/CONTRIBUTING.md create mode 100644 cli/Jenkinsfile create mode 100644 cli/LICENSE create mode 100644 cli/MAINTAINERS create mode 100644 cli/Makefile create mode 100644 cli/NOTICE create mode 100644 cli/README.md create mode 100644 cli/TESTING.md create mode 100644 cli/VERSION create mode 100644 cli/appveyor.yml create mode 100644 cli/circle.yml create mode 100644 cli/cli/cobra.go create mode 100644 cli/cli/command/bundlefile/bundlefile.go create mode 100644 cli/cli/command/bundlefile/bundlefile_test.go create mode 100644 cli/cli/command/checkpoint/client_test.go create mode 100644 cli/cli/command/checkpoint/cmd.go create mode 100644 cli/cli/command/checkpoint/create.go create mode 100644 cli/cli/command/checkpoint/create_test.go create mode 100644 cli/cli/command/checkpoint/list.go create mode 100644 cli/cli/command/checkpoint/list_test.go create mode 100644 cli/cli/command/checkpoint/remove.go create mode 100644 cli/cli/command/checkpoint/remove_test.go create mode 100644 cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden create mode 100644 cli/cli/command/cli.go create mode 100644 cli/cli/command/cli_test.go create mode 100644 cli/cli/command/commands/commands.go create mode 100644 cli/cli/command/config/client_test.go create mode 100644 cli/cli/command/config/cmd.go create mode 100644 cli/cli/command/config/create.go create mode 100644 cli/cli/command/config/create_test.go create mode 100644 cli/cli/command/config/inspect.go create mode 100644 cli/cli/command/config/inspect_test.go create mode 100644 cli/cli/command/config/ls.go create mode 100644 cli/cli/command/config/ls_test.go create mode 100644 cli/cli/command/config/remove.go create mode 100644 cli/cli/command/config/remove_test.go create mode 100644 cli/cli/command/config/testdata/config-create-with-name.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-pretty.simple.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-with-format.json-template.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-with-format.simple-template.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-without-format.multiple-configs-with-labels.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-without-format.single-config.golden create mode 100644 cli/cli/command/config/testdata/config-list-sort.golden create mode 100644 cli/cli/command/config/testdata/config-list-with-config-format.golden create mode 100644 cli/cli/command/config/testdata/config-list-with-filter.golden create mode 100644 cli/cli/command/config/testdata/config-list-with-format.golden create mode 100644 cli/cli/command/config/testdata/config-list-with-quiet-option.golden create mode 100644 cli/cli/command/container/attach.go create mode 100644 cli/cli/command/container/attach_test.go create mode 100644 cli/cli/command/container/client_test.go create mode 100644 
cli/cli/command/container/cmd.go create mode 100644 cli/cli/command/container/commit.go create mode 100644 cli/cli/command/container/cp.go create mode 100644 cli/cli/command/container/cp_test.go create mode 100644 cli/cli/command/container/create.go create mode 100644 cli/cli/command/container/create_test.go create mode 100644 cli/cli/command/container/diff.go create mode 100644 cli/cli/command/container/exec.go create mode 100644 cli/cli/command/container/exec_test.go create mode 100644 cli/cli/command/container/export.go create mode 100644 cli/cli/command/container/hijack.go create mode 100644 cli/cli/command/container/inspect.go create mode 100644 cli/cli/command/container/kill.go create mode 100644 cli/cli/command/container/list.go create mode 100644 cli/cli/command/container/list_test.go create mode 100644 cli/cli/command/container/logs.go create mode 100644 cli/cli/command/container/logs_test.go create mode 100644 cli/cli/command/container/opts.go create mode 100644 cli/cli/command/container/opts_test.go create mode 100644 cli/cli/command/container/pause.go create mode 100644 cli/cli/command/container/port.go create mode 100644 cli/cli/command/container/prune.go create mode 100644 cli/cli/command/container/ps_test.go create mode 100644 cli/cli/command/container/rename.go create mode 100644 cli/cli/command/container/restart.go create mode 100644 cli/cli/command/container/rm.go create mode 100644 cli/cli/command/container/run.go create mode 100644 cli/cli/command/container/run_test.go create mode 100644 cli/cli/command/container/start.go create mode 100644 cli/cli/command/container/stats.go create mode 100644 cli/cli/command/container/stats_helpers.go create mode 100644 cli/cli/command/container/stats_helpers_test.go create mode 100644 cli/cli/command/container/stats_unit_test.go create mode 100644 cli/cli/command/container/stop.go create mode 100644 cli/cli/command/container/testdata/container-list-format-name-name.golden create mode 100644 cli/cli/command/container/testdata/container-list-format-with-arg.golden create mode 100644 cli/cli/command/container/testdata/container-list-with-config-format.golden create mode 100644 cli/cli/command/container/testdata/container-list-with-format.golden create mode 100644 cli/cli/command/container/testdata/container-list-without-format-no-trunc.golden create mode 100644 cli/cli/command/container/testdata/container-list-without-format.golden create mode 100755 cli/cli/command/container/testdata/utf16.env create mode 100755 cli/cli/command/container/testdata/utf16be.env create mode 100755 cli/cli/command/container/testdata/utf8.env create mode 100644 cli/cli/command/container/testdata/valid.env create mode 100644 cli/cli/command/container/testdata/valid.label create mode 100644 cli/cli/command/container/top.go create mode 100644 cli/cli/command/container/tty.go create mode 100644 cli/cli/command/container/unpause.go create mode 100644 cli/cli/command/container/update.go create mode 100644 cli/cli/command/container/utils.go create mode 100644 cli/cli/command/container/utils_test.go create mode 100644 cli/cli/command/container/wait.go create mode 100644 cli/cli/command/events_utils.go create mode 100644 cli/cli/command/formatter/checkpoint.go create mode 100644 cli/cli/command/formatter/checkpoint_test.go create mode 100644 cli/cli/command/formatter/config.go create mode 100644 cli/cli/command/formatter/config_test.go create mode 100644 cli/cli/command/formatter/container.go create mode 100644 cli/cli/command/formatter/container_test.go create mode 
100644 cli/cli/command/formatter/custom.go create mode 100644 cli/cli/command/formatter/custom_test.go create mode 100644 cli/cli/command/formatter/diff.go create mode 100644 cli/cli/command/formatter/diff_test.go create mode 100644 cli/cli/command/formatter/disk_usage.go create mode 100644 cli/cli/command/formatter/disk_usage_test.go create mode 100644 cli/cli/command/formatter/displayutils.go create mode 100644 cli/cli/command/formatter/displayutils_test.go create mode 100644 cli/cli/command/formatter/formatter.go create mode 100644 cli/cli/command/formatter/history.go create mode 100644 cli/cli/command/formatter/history_test.go create mode 100644 cli/cli/command/formatter/image.go create mode 100644 cli/cli/command/formatter/image_test.go create mode 100644 cli/cli/command/formatter/network.go create mode 100644 cli/cli/command/formatter/network_test.go create mode 100644 cli/cli/command/formatter/node.go create mode 100644 cli/cli/command/formatter/node_test.go create mode 100644 cli/cli/command/formatter/plugin.go create mode 100644 cli/cli/command/formatter/plugin_test.go create mode 100644 cli/cli/command/formatter/reflect.go create mode 100644 cli/cli/command/formatter/reflect_test.go create mode 100644 cli/cli/command/formatter/search.go create mode 100644 cli/cli/command/formatter/search_test.go create mode 100644 cli/cli/command/formatter/secret.go create mode 100644 cli/cli/command/formatter/secret_test.go create mode 100644 cli/cli/command/formatter/service.go create mode 100644 cli/cli/command/formatter/service_test.go create mode 100644 cli/cli/command/formatter/stack.go create mode 100644 cli/cli/command/formatter/stack_test.go create mode 100644 cli/cli/command/formatter/stats.go create mode 100644 cli/cli/command/formatter/stats_test.go create mode 100644 cli/cli/command/formatter/task.go create mode 100644 cli/cli/command/formatter/task_test.go create mode 100644 cli/cli/command/formatter/testdata/container-context-write-special-headers.golden create mode 100644 cli/cli/command/formatter/testdata/disk-usage-context-write-custom.golden create mode 100644 cli/cli/command/formatter/testdata/disk-usage-raw-format.golden create mode 100644 cli/cli/command/formatter/testdata/search-context-write-stars-table.golden create mode 100644 cli/cli/command/formatter/testdata/search-context-write-table.golden create mode 100644 cli/cli/command/formatter/testdata/service-context-write-raw.golden create mode 100644 cli/cli/command/formatter/testdata/task-context-write-table-custom.golden create mode 100644 cli/cli/command/formatter/trust.go create mode 100644 cli/cli/command/formatter/trust_test.go create mode 100644 cli/cli/command/formatter/volume.go create mode 100644 cli/cli/command/formatter/volume_test.go create mode 100644 cli/cli/command/idresolver/client_test.go create mode 100644 cli/cli/command/idresolver/idresolver.go create mode 100644 cli/cli/command/idresolver/idresolver_test.go create mode 100644 cli/cli/command/image/build.go create mode 100644 cli/cli/command/image/build/context.go create mode 100644 cli/cli/command/image/build/context_test.go create mode 100644 cli/cli/command/image/build/context_unix.go create mode 100644 cli/cli/command/image/build/context_windows.go create mode 100644 cli/cli/command/image/build/dockerignore.go create mode 100644 cli/cli/command/image/build_buildkit.go create mode 100644 cli/cli/command/image/build_session.go create mode 100644 cli/cli/command/image/build_test.go create mode 100644 cli/cli/command/image/client_test.go create mode 
100644 cli/cli/command/image/cmd.go create mode 100644 cli/cli/command/image/history.go create mode 100644 cli/cli/command/image/history_test.go create mode 100644 cli/cli/command/image/import.go create mode 100644 cli/cli/command/image/import_test.go create mode 100644 cli/cli/command/image/inspect.go create mode 100644 cli/cli/command/image/inspect_test.go create mode 100644 cli/cli/command/image/list.go create mode 100644 cli/cli/command/image/list_test.go create mode 100644 cli/cli/command/image/load.go create mode 100644 cli/cli/command/image/load_test.go create mode 100644 cli/cli/command/image/prune.go create mode 100644 cli/cli/command/image/prune_test.go create mode 100644 cli/cli/command/image/pull.go create mode 100644 cli/cli/command/image/pull_test.go create mode 100644 cli/cli/command/image/push.go create mode 100644 cli/cli/command/image/push_test.go create mode 100644 cli/cli/command/image/remove.go create mode 100644 cli/cli/command/image/remove_test.go create mode 100644 cli/cli/command/image/save.go create mode 100644 cli/cli/command/image/save_test.go create mode 100644 cli/cli/command/image/tag.go create mode 100644 cli/cli/command/image/tag_test.go create mode 100644 cli/cli/command/image/testdata/history-command-success.non-human.golden create mode 100644 cli/cli/command/image/testdata/history-command-success.quiet-no-trunc.golden create mode 100644 cli/cli/command/image/testdata/history-command-success.quiet.golden create mode 100644 cli/cli/command/image/testdata/history-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/import-command-success.input.txt create mode 100644 cli/cli/command/image/testdata/inspect-command-success.format.golden create mode 100644 cli/cli/command/image/testdata/inspect-command-success.simple-many.golden create mode 100644 cli/cli/command/image/testdata/inspect-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.filters.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.format.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.match-name.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.quiet-format.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/load-command-success.input-file.golden create mode 100644 cli/cli/command/image/testdata/load-command-success.input.txt create mode 100644 cli/cli/command/image/testdata/load-command-success.json.golden create mode 100644 cli/cli/command/image/testdata/load-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/prune-command-success.all.golden create mode 100644 cli/cli/command/image/testdata/prune-command-success.force-deleted.golden create mode 100644 cli/cli/command/image/testdata/prune-command-success.force-untagged.golden create mode 100644 cli/cli/command/image/testdata/pull-command-success.simple-no-tag.golden create mode 100644 cli/cli/command/image/testdata/pull-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/remove-command-success.Image Deleted and Untagged.golden create mode 100644 cli/cli/command/image/testdata/remove-command-success.Image Deleted.golden create mode 100644 cli/cli/command/image/testdata/remove-command-success.Image Untagged.golden create mode 100644 cli/cli/command/image/testdata/remove-command-success.Image not found with force option.golden create mode 100644 
cli/cli/command/image/trust.go create mode 100644 cli/cli/command/image/trust_test.go create mode 100644 cli/cli/command/in.go create mode 100644 cli/cli/command/inspect/inspector.go create mode 100644 cli/cli/command/inspect/inspector_test.go create mode 100644 cli/cli/command/manifest/annotate.go create mode 100644 cli/cli/command/manifest/annotate_test.go create mode 100644 cli/cli/command/manifest/client_test.go create mode 100644 cli/cli/command/manifest/cmd.go create mode 100644 cli/cli/command/manifest/create_list.go create mode 100644 cli/cli/command/manifest/create_test.go create mode 100644 cli/cli/command/manifest/inspect.go create mode 100644 cli/cli/command/manifest/inspect_test.go create mode 100644 cli/cli/command/manifest/push.go create mode 100644 cli/cli/command/manifest/push_test.go create mode 100644 cli/cli/command/manifest/testdata/inspect-annotate.golden create mode 100644 cli/cli/command/manifest/testdata/inspect-manifest-list.golden create mode 100644 cli/cli/command/manifest/testdata/inspect-manifest.golden create mode 100644 cli/cli/command/manifest/util.go create mode 100644 cli/cli/command/network/client_test.go create mode 100644 cli/cli/command/network/cmd.go create mode 100644 cli/cli/command/network/connect.go create mode 100644 cli/cli/command/network/connect_test.go create mode 100644 cli/cli/command/network/create.go create mode 100644 cli/cli/command/network/create_test.go create mode 100644 cli/cli/command/network/disconnect.go create mode 100644 cli/cli/command/network/disconnect_test.go create mode 100644 cli/cli/command/network/inspect.go create mode 100644 cli/cli/command/network/list.go create mode 100644 cli/cli/command/network/list_test.go create mode 100644 cli/cli/command/network/prune.go create mode 100644 cli/cli/command/network/remove.go create mode 100644 cli/cli/command/network/testdata/network-list.golden create mode 100644 cli/cli/command/node/client_test.go create mode 100644 cli/cli/command/node/cmd.go create mode 100644 cli/cli/command/node/demote.go create mode 100644 cli/cli/command/node/demote_test.go create mode 100644 cli/cli/command/node/inspect.go create mode 100644 cli/cli/command/node/inspect_test.go create mode 100644 cli/cli/command/node/list.go create mode 100644 cli/cli/command/node/list_test.go create mode 100644 cli/cli/command/node/opts.go create mode 100644 cli/cli/command/node/promote.go create mode 100644 cli/cli/command/node/promote_test.go create mode 100644 cli/cli/command/node/ps.go create mode 100644 cli/cli/command/node/ps_test.go create mode 100644 cli/cli/command/node/remove.go create mode 100644 cli/cli/command/node/remove_test.go create mode 100644 cli/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden create mode 100644 cli/cli/command/node/testdata/node-inspect-pretty.manager.golden create mode 100644 cli/cli/command/node/testdata/node-inspect-pretty.simple.golden create mode 100644 cli/cli/command/node/testdata/node-list-format-flag.golden create mode 100644 cli/cli/command/node/testdata/node-list-format-from-config.golden create mode 100644 cli/cli/command/node/testdata/node-list-sort.golden create mode 100644 cli/cli/command/node/testdata/node-ps.simple.golden create mode 100644 cli/cli/command/node/testdata/node-ps.with-errors.golden create mode 100644 cli/cli/command/node/update.go create mode 100644 cli/cli/command/node/update_test.go create mode 100644 cli/cli/command/orchestrator.go create mode 100644 cli/cli/command/orchestrator_test.go create mode 100644 cli/cli/command/out.go 
create mode 100644 cli/cli/command/plugin/client_test.go create mode 100644 cli/cli/command/plugin/cmd.go create mode 100644 cli/cli/command/plugin/create.go create mode 100644 cli/cli/command/plugin/create_test.go create mode 100644 cli/cli/command/plugin/disable.go create mode 100644 cli/cli/command/plugin/disable_test.go create mode 100644 cli/cli/command/plugin/enable.go create mode 100644 cli/cli/command/plugin/enable_test.go create mode 100644 cli/cli/command/plugin/inspect.go create mode 100644 cli/cli/command/plugin/install.go create mode 100644 cli/cli/command/plugin/list.go create mode 100644 cli/cli/command/plugin/push.go create mode 100644 cli/cli/command/plugin/remove.go create mode 100644 cli/cli/command/plugin/remove_test.go create mode 100644 cli/cli/command/plugin/set.go create mode 100644 cli/cli/command/plugin/upgrade.go create mode 100644 cli/cli/command/registry.go create mode 100644 cli/cli/command/registry/login.go create mode 100644 cli/cli/command/registry/login_test.go create mode 100644 cli/cli/command/registry/logout.go create mode 100644 cli/cli/command/registry/search.go create mode 100644 cli/cli/command/registry_test.go create mode 100644 cli/cli/command/secret/client_test.go create mode 100644 cli/cli/command/secret/cmd.go create mode 100644 cli/cli/command/secret/create.go create mode 100644 cli/cli/command/secret/create_test.go create mode 100644 cli/cli/command/secret/inspect.go create mode 100644 cli/cli/command/secret/inspect_test.go create mode 100644 cli/cli/command/secret/ls.go create mode 100644 cli/cli/command/secret/ls_test.go create mode 100644 cli/cli/command/secret/remove.go create mode 100644 cli/cli/command/secret/remove_test.go create mode 100644 cli/cli/command/secret/testdata/secret-create-with-name.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-pretty.simple.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-sort.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-with-config-format.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-with-filter.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-with-format.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-with-quiet-option.golden create mode 100644 cli/cli/command/service/client_test.go create mode 100644 cli/cli/command/service/cmd.go create mode 100644 cli/cli/command/service/create.go create mode 100644 cli/cli/command/service/generic_resource_opts.go create mode 100644 cli/cli/command/service/generic_resource_opts_test.go create mode 100644 cli/cli/command/service/helpers.go create mode 100644 cli/cli/command/service/inspect.go create mode 100644 cli/cli/command/service/inspect_test.go create mode 100644 cli/cli/command/service/list.go create mode 100644 cli/cli/command/service/list_test.go create mode 100644 cli/cli/command/service/logs.go create mode 100644 cli/cli/command/service/opts.go create mode 100644 cli/cli/command/service/opts_test.go create mode 100644 cli/cli/command/service/parse.go create mode 100644 
cli/cli/command/service/progress/progress.go create mode 100644 cli/cli/command/service/progress/progress_test.go create mode 100644 cli/cli/command/service/ps.go create mode 100644 cli/cli/command/service/ps_test.go create mode 100644 cli/cli/command/service/remove.go create mode 100644 cli/cli/command/service/rollback.go create mode 100644 cli/cli/command/service/rollback_test.go create mode 100644 cli/cli/command/service/scale.go create mode 100644 cli/cli/command/service/testdata/service-list-sort.golden create mode 100644 cli/cli/command/service/trust.go create mode 100644 cli/cli/command/service/update.go create mode 100644 cli/cli/command/service/update_test.go create mode 100644 cli/cli/command/stack/client_test.go create mode 100644 cli/cli/command/stack/cmd.go create mode 100644 cli/cli/command/stack/common.go create mode 100644 cli/cli/command/stack/deploy.go create mode 100644 cli/cli/command/stack/deploy_test.go create mode 100644 cli/cli/command/stack/kubernetes/cli.go create mode 100644 cli/cli/command/stack/kubernetes/client.go create mode 100644 cli/cli/command/stack/kubernetes/conversion.go create mode 100644 cli/cli/command/stack/kubernetes/conversion_test.go create mode 100644 cli/cli/command/stack/kubernetes/convert.go create mode 100644 cli/cli/command/stack/kubernetes/deploy.go create mode 100644 cli/cli/command/stack/kubernetes/list.go create mode 100644 cli/cli/command/stack/kubernetes/ps.go create mode 100644 cli/cli/command/stack/kubernetes/remove.go create mode 100644 cli/cli/command/stack/kubernetes/services.go create mode 100644 cli/cli/command/stack/kubernetes/services_test.go create mode 100644 cli/cli/command/stack/kubernetes/stack.go create mode 100644 cli/cli/command/stack/kubernetes/stackclient.go create mode 100644 cli/cli/command/stack/kubernetes/stackclient_test.go create mode 100644 cli/cli/command/stack/kubernetes/testdata/warnings.golden create mode 100644 cli/cli/command/stack/kubernetes/warnings.go create mode 100644 cli/cli/command/stack/kubernetes/warnings_test.go create mode 100644 cli/cli/command/stack/kubernetes/watcher.go create mode 100644 cli/cli/command/stack/kubernetes/watcher_test.go create mode 100644 cli/cli/command/stack/list.go create mode 100644 cli/cli/command/stack/list_test.go create mode 100644 cli/cli/command/stack/loader/loader.go create mode 100644 cli/cli/command/stack/loader/loader_test.go create mode 100644 cli/cli/command/stack/options/opts.go create mode 100644 cli/cli/command/stack/ps.go create mode 100644 cli/cli/command/stack/ps_test.go create mode 100644 cli/cli/command/stack/remove.go create mode 100644 cli/cli/command/stack/remove_test.go create mode 100644 cli/cli/command/stack/services.go create mode 100644 cli/cli/command/stack/services_test.go create mode 100644 cli/cli/command/stack/swarm/client_test.go create mode 100644 cli/cli/command/stack/swarm/common.go create mode 100644 cli/cli/command/stack/swarm/deploy.go create mode 100644 cli/cli/command/stack/swarm/deploy_bundlefile.go create mode 100644 cli/cli/command/stack/swarm/deploy_bundlefile_test.go create mode 100644 cli/cli/command/stack/swarm/deploy_composefile.go create mode 100644 cli/cli/command/stack/swarm/deploy_composefile_test.go create mode 100644 cli/cli/command/stack/swarm/deploy_test.go create mode 100644 cli/cli/command/stack/swarm/list.go create mode 100644 cli/cli/command/stack/swarm/ps.go create mode 100644 cli/cli/command/stack/swarm/remove.go create mode 100644 cli/cli/command/stack/swarm/services.go create mode 100644 
cli/cli/command/stack/swarm/testdata/bundlefile_with_two_services.dab create mode 100644 cli/cli/command/stack/testdata/stack-list-sort-natural.golden create mode 100644 cli/cli/command/stack/testdata/stack-list-sort.golden create mode 100644 cli/cli/command/stack/testdata/stack-list-with-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-list-without-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-config-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-no-resolve-option.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-no-trunc-option.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-quiet-option.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-without-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-services-with-config-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-services-with-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-services-with-quiet-option.golden create mode 100644 cli/cli/command/stack/testdata/stack-services-without-format.golden create mode 100644 cli/cli/command/stream.go create mode 100644 cli/cli/command/swarm/ca.go create mode 100644 cli/cli/command/swarm/ca_test.go create mode 100644 cli/cli/command/swarm/client_test.go create mode 100644 cli/cli/command/swarm/cmd.go create mode 100644 cli/cli/command/swarm/init.go create mode 100644 cli/cli/command/swarm/init_test.go create mode 100644 cli/cli/command/swarm/join.go create mode 100644 cli/cli/command/swarm/join_test.go create mode 100644 cli/cli/command/swarm/join_token.go create mode 100644 cli/cli/command/swarm/join_token_test.go create mode 100644 cli/cli/command/swarm/leave.go create mode 100644 cli/cli/command/swarm/leave_test.go create mode 100644 cli/cli/command/swarm/opts.go create mode 100644 cli/cli/command/swarm/opts_test.go create mode 100644 cli/cli/command/swarm/progress/root_rotation.go create mode 100644 cli/cli/command/swarm/testdata/init-init-autolock.golden create mode 100644 cli/cli/command/swarm/testdata/init-init.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-manager-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-manager-rotate.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-manager.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-worker-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-worker.golden create mode 100644 cli/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden create mode 100644 cli/cli/command/swarm/testdata/unlockkeys-unlock-key.golden create mode 100644 cli/cli/command/swarm/testdata/update-all-flags-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/update-autolock-unlock-key.golden create mode 100644 cli/cli/command/swarm/testdata/update-noargs.golden create mode 100644 cli/cli/command/swarm/unlock.go create mode 100644 cli/cli/command/swarm/unlock_key.go create mode 100644 cli/cli/command/swarm/unlock_key_test.go create mode 100644 cli/cli/command/swarm/unlock_test.go create mode 100644 cli/cli/command/swarm/update.go create mode 100644 cli/cli/command/swarm/update_test.go create mode 100644 
cli/cli/command/system/client_test.go create mode 100644 cli/cli/command/system/cmd.go create mode 100644 cli/cli/command/system/df.go create mode 100644 cli/cli/command/system/events.go create mode 100644 cli/cli/command/system/info.go create mode 100644 cli/cli/command/system/info_test.go create mode 100644 cli/cli/command/system/inspect.go create mode 100644 cli/cli/command/system/prune.go create mode 100644 cli/cli/command/system/prune_test.go create mode 100644 cli/cli/command/system/testdata/docker-client-version.golden create mode 100644 cli/cli/command/system/testdata/docker-info-no-swarm.golden create mode 100644 cli/cli/command/system/testdata/docker-info-warnings.golden create mode 100644 cli/cli/command/system/testdata/docker-info-with-swarm.golden create mode 100644 cli/cli/command/system/version.go create mode 100644 cli/cli/command/system/version_test.go create mode 100644 cli/cli/command/task/client_test.go create mode 100644 cli/cli/command/task/print.go create mode 100644 cli/cli/command/task/print_test.go create mode 100644 cli/cli/command/task/testdata/task-print-with-global-service.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-indentation.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-no-trunc-option.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-quiet-option.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-replicated-service.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-resolution.golden create mode 100644 cli/cli/command/trust.go create mode 100644 cli/cli/command/trust/cmd.go create mode 100644 cli/cli/command/trust/common.go create mode 100644 cli/cli/command/trust/helpers.go create mode 100644 cli/cli/command/trust/helpers_test.go create mode 100644 cli/cli/command/trust/inspect.go create mode 100644 cli/cli/command/trust/inspect_pretty.go create mode 100644 cli/cli/command/trust/inspect_pretty_test.go create mode 100644 cli/cli/command/trust/inspect_test.go create mode 100644 cli/cli/command/trust/key.go create mode 100644 cli/cli/command/trust/key_generate.go create mode 100644 cli/cli/command/trust/key_generate_test.go create mode 100644 cli/cli/command/trust/key_load.go create mode 100644 cli/cli/command/trust/key_load_test.go create mode 100644 cli/cli/command/trust/revoke.go create mode 100644 cli/cli/command/trust/revoke_test.go create mode 100644 cli/cli/command/trust/sign.go create mode 100644 cli/cli/command/trust/sign_test.go create mode 100644 cli/cli/command/trust/signer.go create mode 100644 cli/cli/command/trust/signer_add.go create mode 100644 cli/cli/command/trust/signer_add_test.go create mode 100644 cli/cli/command/trust/signer_remove.go create mode 100644 cli/cli/command/trust/signer_remove_test.go create mode 100644 cli/cli/command/trust/testdata/trust-inspect-empty-repo.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-full-repo-no-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-full-repo-with-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-multiple-repos-with-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-one-tag-no-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-no-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-with-signers.golden create mode 100644 
cli/cli/command/trust/testdata/trust-inspect-pretty-one-tag-no-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-pretty-unsigned-tag-with-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-uninitialized.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-unsigned-tag-with-signers.golden create mode 100644 cli/cli/command/utils.go create mode 100644 cli/cli/command/volume/client_test.go create mode 100644 cli/cli/command/volume/cmd.go create mode 100644 cli/cli/command/volume/create.go create mode 100644 cli/cli/command/volume/create_test.go create mode 100644 cli/cli/command/volume/inspect.go create mode 100644 cli/cli/command/volume/inspect_test.go create mode 100644 cli/cli/command/volume/list.go create mode 100644 cli/cli/command/volume/list_test.go create mode 100644 cli/cli/command/volume/prune.go create mode 100644 cli/cli/command/volume/prune_test.go create mode 100644 cli/cli/command/volume/remove.go create mode 100644 cli/cli/command/volume/remove_test.go create mode 100644 cli/cli/command/volume/testdata/volume-inspect-with-format.json-template.golden create mode 100644 cli/cli/command/volume/testdata/volume-inspect-with-format.simple-template.golden create mode 100644 cli/cli/command/volume/testdata/volume-inspect-without-format.multiple-volume-with-labels.golden create mode 100644 cli/cli/command/volume/testdata/volume-inspect-without-format.single-volume.golden create mode 100644 cli/cli/command/volume/testdata/volume-list-with-config-format.golden create mode 100644 cli/cli/command/volume/testdata/volume-list-with-format.golden create mode 100644 cli/cli/command/volume/testdata/volume-list-without-format.golden create mode 100644 cli/cli/command/volume/testdata/volume-prune-no.golden create mode 100644 cli/cli/command/volume/testdata/volume-prune-yes.golden create mode 100644 cli/cli/command/volume/testdata/volume-prune.deletedVolumes.golden create mode 100644 cli/cli/command/volume/testdata/volume-prune.empty.golden create mode 100644 cli/cli/compose/convert/compose.go create mode 100644 cli/cli/compose/convert/compose_test.go create mode 100644 cli/cli/compose/convert/service.go create mode 100644 cli/cli/compose/convert/service_test.go create mode 100644 cli/cli/compose/convert/volume.go create mode 100644 cli/cli/compose/convert/volume_test.go create mode 100644 cli/cli/compose/interpolation/interpolation.go create mode 100644 cli/cli/compose/interpolation/interpolation_test.go create mode 100644 cli/cli/compose/loader/example1.env create mode 100644 cli/cli/compose/loader/example2.env create mode 100644 cli/cli/compose/loader/full-example.yml create mode 100644 cli/cli/compose/loader/full-struct_test.go create mode 100644 cli/cli/compose/loader/interpolate.go create mode 100644 cli/cli/compose/loader/loader.go create mode 100644 cli/cli/compose/loader/loader_test.go create mode 100644 cli/cli/compose/loader/merge.go create mode 100644 cli/cli/compose/loader/merge_test.go create mode 100644 cli/cli/compose/loader/types_test.go create mode 100644 cli/cli/compose/loader/volume.go create mode 100644 cli/cli/compose/loader/volume_test.go create mode 100644 cli/cli/compose/schema/bindata.go create mode 100644 cli/cli/compose/schema/data/config_schema_v3.0.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.1.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.2.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.3.json create mode 100644 
cli/cli/compose/schema/data/config_schema_v3.4.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.5.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.6.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.7.json create mode 100644 cli/cli/compose/schema/schema.go create mode 100644 cli/cli/compose/schema/schema_test.go create mode 100644 cli/cli/compose/template/template.go create mode 100644 cli/cli/compose/template/template_test.go create mode 100644 cli/cli/compose/types/types.go create mode 100644 cli/cli/config/config.go create mode 100644 cli/cli/config/config_test.go create mode 100644 cli/cli/config/configfile/file.go create mode 100644 cli/cli/config/configfile/file_test.go create mode 100644 cli/cli/config/credentials/credentials.go create mode 100644 cli/cli/config/credentials/default_store.go create mode 100644 cli/cli/config/credentials/default_store_darwin.go create mode 100644 cli/cli/config/credentials/default_store_linux.go create mode 100644 cli/cli/config/credentials/default_store_unsupported.go create mode 100644 cli/cli/config/credentials/default_store_windows.go create mode 100644 cli/cli/config/credentials/file_store.go create mode 100644 cli/cli/config/credentials/file_store_test.go create mode 100644 cli/cli/config/credentials/native_store.go create mode 100644 cli/cli/config/credentials/native_store_test.go create mode 100644 cli/cli/debug/debug.go create mode 100644 cli/cli/debug/debug_test.go create mode 100644 cli/cli/error.go create mode 100644 cli/cli/flags/client.go create mode 100644 cli/cli/flags/common.go create mode 100644 cli/cli/flags/common_test.go create mode 100644 cli/cli/manifest/store/store.go create mode 100644 cli/cli/manifest/store/store_test.go create mode 100644 cli/cli/manifest/types/types.go create mode 100644 cli/cli/registry/client/client.go create mode 100644 cli/cli/registry/client/endpoint.go create mode 100644 cli/cli/registry/client/fetcher.go create mode 100644 cli/cli/required.go create mode 100644 cli/cli/required_test.go create mode 100644 cli/cli/trust/trust.go create mode 100644 cli/cli/trust/trust_test.go create mode 100644 cli/cli/version.go create mode 100644 cli/cli/winresources/res_windows.go create mode 100644 cli/cmd/docker/docker.go create mode 100644 cli/cmd/docker/docker_test.go create mode 100644 cli/cmd/docker/docker_windows.go create mode 100644 cli/codecov.yml create mode 100644 cli/contrib/completion/bash/docker create mode 100644 cli/contrib/completion/fish/docker.fish create mode 100644 cli/contrib/completion/powershell/readme.txt create mode 100644 cli/contrib/completion/zsh/REVIEWERS create mode 100644 cli/contrib/completion/zsh/_docker create mode 100644 cli/docker.Makefile create mode 100644 cli/dockerfiles/Dockerfile.binary-native create mode 100644 cli/dockerfiles/Dockerfile.cross create mode 100644 cli/dockerfiles/Dockerfile.dev create mode 100644 cli/dockerfiles/Dockerfile.e2e create mode 100644 cli/dockerfiles/Dockerfile.lint create mode 100644 cli/dockerfiles/Dockerfile.shellcheck create mode 100644 cli/docs/README.md create mode 100644 cli/docs/deprecated.md create mode 100644 cli/docs/extend/EBS_volume.md create mode 100644 cli/docs/extend/config.md create mode 100644 cli/docs/extend/images/authz_additional_info.png create mode 100644 cli/docs/extend/images/authz_allow.png create mode 100644 cli/docs/extend/images/authz_chunked.png create mode 100644 cli/docs/extend/images/authz_connection_hijack.png create mode 100644 
cli/docs/extend/images/authz_deny.png create mode 100644 cli/docs/extend/index.md create mode 100644 cli/docs/extend/legacy_plugins.md create mode 100644 cli/docs/extend/plugin_api.md create mode 100644 cli/docs/extend/plugins_authorization.md create mode 100644 cli/docs/extend/plugins_graphdriver.md create mode 100644 cli/docs/extend/plugins_logging.md create mode 100644 cli/docs/extend/plugins_metrics.md create mode 100644 cli/docs/extend/plugins_network.md create mode 100644 cli/docs/extend/plugins_services.md create mode 100644 cli/docs/extend/plugins_volume.md create mode 100644 cli/docs/reference/builder.md create mode 100644 cli/docs/reference/commandline/attach.md create mode 100644 cli/docs/reference/commandline/build.md create mode 100644 cli/docs/reference/commandline/cli.md create mode 100644 cli/docs/reference/commandline/commit.md create mode 100644 cli/docs/reference/commandline/container.md create mode 100644 cli/docs/reference/commandline/container_prune.md create mode 100644 cli/docs/reference/commandline/cp.md create mode 100644 cli/docs/reference/commandline/create.md create mode 100644 cli/docs/reference/commandline/deploy.md create mode 100644 cli/docs/reference/commandline/diff.md create mode 100644 cli/docs/reference/commandline/dockerd.md create mode 100644 cli/docs/reference/commandline/events.md create mode 100644 cli/docs/reference/commandline/exec.md create mode 100644 cli/docs/reference/commandline/export.md create mode 100644 cli/docs/reference/commandline/history.md create mode 100644 cli/docs/reference/commandline/image.md create mode 100644 cli/docs/reference/commandline/image_prune.md create mode 100644 cli/docs/reference/commandline/images.md create mode 100644 cli/docs/reference/commandline/import.md create mode 100644 cli/docs/reference/commandline/index.md create mode 100644 cli/docs/reference/commandline/info.md create mode 100644 cli/docs/reference/commandline/inspect.md create mode 100644 cli/docs/reference/commandline/kill.md create mode 100644 cli/docs/reference/commandline/load.md create mode 100644 cli/docs/reference/commandline/login.md create mode 100644 cli/docs/reference/commandline/logout.md create mode 100644 cli/docs/reference/commandline/logs.md create mode 100644 cli/docs/reference/commandline/manifest.md create mode 100644 cli/docs/reference/commandline/network.md create mode 100644 cli/docs/reference/commandline/network_connect.md create mode 100644 cli/docs/reference/commandline/network_create.md create mode 100644 cli/docs/reference/commandline/network_disconnect.md create mode 100644 cli/docs/reference/commandline/network_inspect.md create mode 100644 cli/docs/reference/commandline/network_ls.md create mode 100644 cli/docs/reference/commandline/network_prune.md create mode 100644 cli/docs/reference/commandline/network_rm.md create mode 100644 cli/docs/reference/commandline/node.md create mode 100644 cli/docs/reference/commandline/node_demote.md create mode 100644 cli/docs/reference/commandline/node_inspect.md create mode 100644 cli/docs/reference/commandline/node_ls.md create mode 100644 cli/docs/reference/commandline/node_promote.md create mode 100644 cli/docs/reference/commandline/node_ps.md create mode 100644 cli/docs/reference/commandline/node_rm.md create mode 100644 cli/docs/reference/commandline/node_update.md create mode 100644 cli/docs/reference/commandline/pause.md create mode 100644 cli/docs/reference/commandline/plugin.md create mode 100644 cli/docs/reference/commandline/plugin_create.md create mode 100644 
cli/docs/reference/commandline/plugin_disable.md create mode 100644 cli/docs/reference/commandline/plugin_enable.md create mode 100644 cli/docs/reference/commandline/plugin_inspect.md create mode 100644 cli/docs/reference/commandline/plugin_install.md create mode 100644 cli/docs/reference/commandline/plugin_ls.md create mode 100644 cli/docs/reference/commandline/plugin_push.md create mode 100644 cli/docs/reference/commandline/plugin_rm.md create mode 100644 cli/docs/reference/commandline/plugin_set.md create mode 100644 cli/docs/reference/commandline/plugin_upgrade.md create mode 100644 cli/docs/reference/commandline/port.md create mode 100644 cli/docs/reference/commandline/ps.md create mode 100644 cli/docs/reference/commandline/pull.md create mode 100644 cli/docs/reference/commandline/push.md create mode 100644 cli/docs/reference/commandline/rename.md create mode 100644 cli/docs/reference/commandline/restart.md create mode 100644 cli/docs/reference/commandline/rm.md create mode 100644 cli/docs/reference/commandline/rmi.md create mode 100644 cli/docs/reference/commandline/run.md create mode 100644 cli/docs/reference/commandline/save.md create mode 100644 cli/docs/reference/commandline/search.md create mode 100644 cli/docs/reference/commandline/secret.md create mode 100644 cli/docs/reference/commandline/secret_create.md create mode 100644 cli/docs/reference/commandline/secret_inspect.md create mode 100644 cli/docs/reference/commandline/secret_ls.md create mode 100644 cli/docs/reference/commandline/secret_rm.md create mode 100644 cli/docs/reference/commandline/service.md create mode 100644 cli/docs/reference/commandline/service_create.md create mode 100644 cli/docs/reference/commandline/service_inspect.md create mode 100644 cli/docs/reference/commandline/service_logs.md create mode 100644 cli/docs/reference/commandline/service_ls.md create mode 100644 cli/docs/reference/commandline/service_ps.md create mode 100644 cli/docs/reference/commandline/service_rm.md create mode 100644 cli/docs/reference/commandline/service_rollback.md create mode 100644 cli/docs/reference/commandline/service_scale.md create mode 100644 cli/docs/reference/commandline/service_update.md create mode 100644 cli/docs/reference/commandline/stack.md create mode 100644 cli/docs/reference/commandline/stack_deploy.md create mode 100644 cli/docs/reference/commandline/stack_ls.md create mode 100644 cli/docs/reference/commandline/stack_ps.md create mode 100644 cli/docs/reference/commandline/stack_rm.md create mode 100644 cli/docs/reference/commandline/stack_services.md create mode 100644 cli/docs/reference/commandline/start.md create mode 100644 cli/docs/reference/commandline/stats.md create mode 100644 cli/docs/reference/commandline/stop.md create mode 100644 cli/docs/reference/commandline/swarm.md create mode 100644 cli/docs/reference/commandline/swarm_ca.md create mode 100644 cli/docs/reference/commandline/swarm_init.md create mode 100644 cli/docs/reference/commandline/swarm_join.md create mode 100644 cli/docs/reference/commandline/swarm_join_token.md create mode 100644 cli/docs/reference/commandline/swarm_leave.md create mode 100644 cli/docs/reference/commandline/swarm_unlock.md create mode 100644 cli/docs/reference/commandline/swarm_unlock_key.md create mode 100644 cli/docs/reference/commandline/swarm_update.md create mode 100644 cli/docs/reference/commandline/system.md create mode 100644 cli/docs/reference/commandline/system_df.md create mode 100644 cli/docs/reference/commandline/system_events.md create mode 100644 
cli/docs/reference/commandline/system_prune.md create mode 100644 cli/docs/reference/commandline/tag.md create mode 100644 cli/docs/reference/commandline/top.md create mode 100644 cli/docs/reference/commandline/trust_inspect.md create mode 100644 cli/docs/reference/commandline/trust_key_generate.md create mode 100644 cli/docs/reference/commandline/trust_key_load.md create mode 100644 cli/docs/reference/commandline/trust_revoke.md create mode 100644 cli/docs/reference/commandline/trust_sign.md create mode 100644 cli/docs/reference/commandline/trust_signer_add.md create mode 100644 cli/docs/reference/commandline/trust_signer_remove.md create mode 100644 cli/docs/reference/commandline/unpause.md create mode 100644 cli/docs/reference/commandline/update.md create mode 100644 cli/docs/reference/commandline/version.md create mode 100644 cli/docs/reference/commandline/volume.md create mode 100644 cli/docs/reference/commandline/volume_create.md create mode 100644 cli/docs/reference/commandline/volume_inspect.md create mode 100644 cli/docs/reference/commandline/volume_ls.md create mode 100644 cli/docs/reference/commandline/volume_prune.md create mode 100644 cli/docs/reference/commandline/volume_rm.md create mode 100644 cli/docs/reference/commandline/wait.md create mode 100644 cli/docs/reference/glossary.md create mode 100644 cli/docs/reference/index.md create mode 100644 cli/docs/reference/run.md create mode 100644 cli/docs/yaml/Dockerfile create mode 100644 cli/docs/yaml/generate.go create mode 100644 cli/docs/yaml/yaml.go create mode 100644 cli/e2e/compose-env.yaml create mode 100644 cli/e2e/container/attach_test.go create mode 100644 cli/e2e/container/create_test.go create mode 100644 cli/e2e/container/kill_test.go create mode 100644 cli/e2e/container/main_test.go create mode 100644 cli/e2e/container/run_test.go create mode 100644 cli/e2e/container/testdata/run-attached-from-remote-and-remove.golden create mode 100644 cli/e2e/image/build_test.go create mode 100644 cli/e2e/image/main_test.go create mode 100644 cli/e2e/image/pull_test.go create mode 100644 cli/e2e/image/push_test.go create mode 100644 cli/e2e/image/testdata/notary/delgkey1.crt create mode 100644 cli/e2e/image/testdata/notary/delgkey1.key create mode 100644 cli/e2e/image/testdata/notary/delgkey2.crt create mode 100644 cli/e2e/image/testdata/notary/delgkey2.key create mode 100644 cli/e2e/image/testdata/notary/delgkey3.crt create mode 100644 cli/e2e/image/testdata/notary/delgkey3.key create mode 100644 cli/e2e/image/testdata/notary/delgkey4.crt create mode 100644 cli/e2e/image/testdata/notary/delgkey4.key create mode 100755 cli/e2e/image/testdata/notary/gen.sh create mode 100644 cli/e2e/image/testdata/notary/localhost.cert create mode 100644 cli/e2e/image/testdata/notary/localhost.key create mode 100644 cli/e2e/image/testdata/pull-with-content-trust-err.golden create mode 100644 cli/e2e/image/testdata/pull-with-content-trust.golden create mode 100644 cli/e2e/image/testdata/push-with-content-trust-err.golden create mode 100644 cli/e2e/internal/fixtures/fixtures.go create mode 100644 cli/e2e/plugin/basic/basic.go create mode 100644 cli/e2e/plugin/main_test.go create mode 100644 cli/e2e/plugin/trust_test.go create mode 100644 cli/e2e/stack/deploy_test.go create mode 100644 cli/e2e/stack/help_test.go create mode 100644 cli/e2e/stack/main_test.go create mode 100644 cli/e2e/stack/remove_test.go create mode 100644 cli/e2e/stack/testdata/data create mode 100644 cli/e2e/stack/testdata/full-stack.yml create mode 100644 
cli/e2e/stack/testdata/stack-deploy-help-kubernetes.golden create mode 100644 cli/e2e/stack/testdata/stack-deploy-help-swarm.golden create mode 100644 cli/e2e/stack/testdata/stack-deploy-with-names-kubernetes.golden create mode 100644 cli/e2e/stack/testdata/stack-deploy-with-names-swarm.golden create mode 100644 cli/e2e/stack/testdata/stack-deploy-with-names.golden create mode 100644 cli/e2e/stack/testdata/stack-remove-kubernetes-success.golden create mode 100644 cli/e2e/stack/testdata/stack-remove-swarm-success.golden create mode 100644 cli/e2e/stack/testdata/stack-with-named-resources.yml create mode 100644 cli/e2e/system/inspect_test.go create mode 100644 cli/e2e/system/main_test.go create mode 100644 cli/e2e/testdata/Dockerfile.notary-server create mode 100644 cli/e2e/testdata/notary/notary-config.json create mode 100644 cli/e2e/testdata/notary/notary-server.cert create mode 100644 cli/e2e/testdata/notary/notary-server.key create mode 100644 cli/e2e/testdata/notary/root-ca.cert create mode 100644 cli/e2e/trust/main_test.go create mode 100644 cli/e2e/trust/revoke_test.go create mode 100644 cli/e2e/trust/sign_test.go create mode 100644 cli/experimental/README.md create mode 100644 cli/experimental/checkpoint-restore.md create mode 100644 cli/experimental/docker-stacks-and-bundles.md create mode 100644 cli/experimental/images/ipvlan-l3.gliffy create mode 100644 cli/experimental/images/ipvlan-l3.png create mode 100644 cli/experimental/images/ipvlan-l3.svg create mode 100644 cli/experimental/images/ipvlan_l2_simple.gliffy create mode 100644 cli/experimental/images/ipvlan_l2_simple.png create mode 100644 cli/experimental/images/ipvlan_l2_simple.svg create mode 100644 cli/experimental/images/macvlan-bridge-ipvlan-l2.gliffy create mode 100644 cli/experimental/images/macvlan-bridge-ipvlan-l2.png create mode 100644 cli/experimental/images/macvlan-bridge-ipvlan-l2.svg create mode 100644 cli/experimental/images/multi_tenant_8021q_vlans.gliffy create mode 100644 cli/experimental/images/multi_tenant_8021q_vlans.png create mode 100644 cli/experimental/images/multi_tenant_8021q_vlans.svg create mode 100644 cli/experimental/images/vlans-deeper-look.gliffy create mode 100644 cli/experimental/images/vlans-deeper-look.png create mode 100644 cli/experimental/images/vlans-deeper-look.svg create mode 100644 cli/experimental/vlan-networks.md create mode 100644 cli/gometalinter.json create mode 100644 cli/internal/test/builders/config.go create mode 100644 cli/internal/test/builders/container.go create mode 100644 cli/internal/test/builders/doc.go create mode 100644 cli/internal/test/builders/network.go create mode 100644 cli/internal/test/builders/node.go create mode 100644 cli/internal/test/builders/secret.go create mode 100644 cli/internal/test/builders/service.go create mode 100644 cli/internal/test/builders/swarm.go create mode 100644 cli/internal/test/builders/task.go create mode 100644 cli/internal/test/builders/volume.go create mode 100644 cli/internal/test/cli.go create mode 100644 cli/internal/test/doc.go create mode 100644 cli/internal/test/environment/testenv.go create mode 100644 cli/internal/test/network/client.go create mode 100644 cli/internal/test/notary/client.go create mode 100644 cli/internal/test/output/output.go create mode 100644 cli/internal/test/store.go create mode 100644 cli/kubernetes/README.md create mode 100644 cli/kubernetes/check.go create mode 100644 cli/kubernetes/check_test.go create mode 100644 cli/kubernetes/client/clientset/clientset.go create mode 100644 
cli/kubernetes/client/clientset/scheme/register.go create mode 100644 cli/kubernetes/client/clientset/typed/compose/v1beta1/compose_client.go create mode 100644 cli/kubernetes/client/clientset/typed/compose/v1beta1/stack.go create mode 100644 cli/kubernetes/client/clientset/typed/compose/v1beta2/compose_client.go create mode 100644 cli/kubernetes/client/clientset/typed/compose/v1beta2/stack.go create mode 100644 cli/kubernetes/client/informers/compose/interface.go create mode 100644 cli/kubernetes/client/informers/compose/v1beta2/interface.go create mode 100644 cli/kubernetes/client/informers/compose/v1beta2/stack.go create mode 100644 cli/kubernetes/client/informers/factory.go create mode 100644 cli/kubernetes/client/informers/generic.go create mode 100644 cli/kubernetes/client/informers/internalinterfaces/factory_interfaces.go create mode 100644 cli/kubernetes/client/listers/compose/v1beta2/expansion_generated.go create mode 100644 cli/kubernetes/client/listers/compose/v1beta2/stack.go create mode 100644 cli/kubernetes/compose/clone/maps.go create mode 100644 cli/kubernetes/compose/clone/slices.go create mode 100644 cli/kubernetes/compose/doc.go create mode 100644 cli/kubernetes/compose/impersonation/impersonationconfig.go create mode 100644 cli/kubernetes/compose/v1beta1/doc.go create mode 100644 cli/kubernetes/compose/v1beta1/owner.go create mode 100644 cli/kubernetes/compose/v1beta1/parsing.go create mode 100644 cli/kubernetes/compose/v1beta1/register.go create mode 100644 cli/kubernetes/compose/v1beta1/stack.go create mode 100644 cli/kubernetes/compose/v1beta1/stack_test.go create mode 100644 cli/kubernetes/compose/v1beta2/composefile_stack_types.go create mode 100644 cli/kubernetes/compose/v1beta2/doc.go create mode 100644 cli/kubernetes/compose/v1beta2/owner.go create mode 100644 cli/kubernetes/compose/v1beta2/register.go create mode 100644 cli/kubernetes/compose/v1beta2/scale.go create mode 100644 cli/kubernetes/compose/v1beta2/stack.go create mode 100644 cli/kubernetes/config.go create mode 100644 cli/kubernetes/doc.go create mode 100644 cli/kubernetes/labels/labels.go create mode 100644 cli/kubernetes/labels/labels_test.go create mode 100644 cli/man/Dockerfile.5.md create mode 100644 cli/man/README.md create mode 100644 cli/man/docker-build.1.md create mode 100644 cli/man/docker-config-json.5.md create mode 100644 cli/man/docker-run.1.md create mode 100644 cli/man/docker.1.md create mode 100644 cli/man/dockerd.8.md create mode 100644 cli/man/generate.go create mode 100644 cli/man/import.go create mode 100755 cli/man/md2man-all.sh create mode 100644 cli/man/src/attach.md create mode 100644 cli/man/src/commit.md create mode 100644 cli/man/src/container/attach.md create mode 100644 cli/man/src/container/commit.md create mode 100644 cli/man/src/container/cp.md create mode 100644 cli/man/src/container/create-example.md create mode 100644 cli/man/src/container/create.md create mode 100644 cli/man/src/container/diff.md create mode 100644 cli/man/src/container/exec.md create mode 100644 cli/man/src/container/export.md create mode 100644 cli/man/src/container/kill.md create mode 100644 cli/man/src/container/logs.md create mode 100644 cli/man/src/container/ls.md create mode 100644 cli/man/src/container/pause.md create mode 100644 cli/man/src/container/port.md create mode 100644 cli/man/src/container/rename.md create mode 100644 cli/man/src/container/restart.md create mode 100644 cli/man/src/container/rm.md create mode 100644 cli/man/src/container/run.md create mode 100644 
cli/man/src/container/start.md create mode 100644 cli/man/src/container/stats.md create mode 100644 cli/man/src/container/stop.md create mode 100644 cli/man/src/container/top.md create mode 100644 cli/man/src/container/unpause.md create mode 100644 cli/man/src/container/update.md create mode 100644 cli/man/src/container/wait.md create mode 100644 cli/man/src/cp.md create mode 100644 cli/man/src/create.md create mode 100644 cli/man/src/diff.md create mode 100644 cli/man/src/events.md create mode 100644 cli/man/src/exec.md create mode 100644 cli/man/src/export.md create mode 100644 cli/man/src/history.md create mode 100644 cli/man/src/image/build.md create mode 100644 cli/man/src/image/history.md create mode 100644 cli/man/src/image/import.md create mode 100644 cli/man/src/image/load.md create mode 100644 cli/man/src/image/ls.md create mode 100644 cli/man/src/image/pull.md create mode 100644 cli/man/src/image/push.md create mode 100644 cli/man/src/image/rm.md create mode 100644 cli/man/src/image/save.md create mode 100644 cli/man/src/image/tag.md create mode 100644 cli/man/src/images.md create mode 100644 cli/man/src/import.md create mode 100644 cli/man/src/info.md create mode 100644 cli/man/src/inspect.md create mode 100644 cli/man/src/kill.md create mode 100644 cli/man/src/load.md create mode 100644 cli/man/src/login.md create mode 100644 cli/man/src/logout.md create mode 100644 cli/man/src/logs.md create mode 100644 cli/man/src/network/connect.md create mode 100644 cli/man/src/network/create.md create mode 100644 cli/man/src/network/disconnect.md create mode 100644 cli/man/src/network/inspect.md create mode 100644 cli/man/src/network/ls.md create mode 100644 cli/man/src/network/rm.md create mode 100644 cli/man/src/pause.md create mode 100644 cli/man/src/plugin/ls.md create mode 100644 cli/man/src/port.md create mode 100644 cli/man/src/ps.md create mode 100644 cli/man/src/pull.md create mode 100644 cli/man/src/push.md create mode 100644 cli/man/src/rename.md create mode 100644 cli/man/src/restart.md create mode 100644 cli/man/src/rm.md create mode 100644 cli/man/src/rmi.md create mode 100644 cli/man/src/save.md create mode 100644 cli/man/src/search.md create mode 100644 cli/man/src/start.md create mode 100644 cli/man/src/stats.md create mode 100644 cli/man/src/stop.md create mode 100644 cli/man/src/system/events.md create mode 100644 cli/man/src/system/info.md create mode 100644 cli/man/src/tag.md create mode 100644 cli/man/src/top.md create mode 100644 cli/man/src/unpause.md create mode 100644 cli/man/src/update.md create mode 100644 cli/man/src/version.md create mode 100644 cli/man/src/volume.md create mode 100644 cli/man/src/volume/create.md create mode 100644 cli/man/src/volume/inspect.md create mode 100644 cli/man/src/volume/ls.md create mode 100644 cli/man/src/wait.md create mode 100644 cli/opts/config.go create mode 100644 cli/opts/duration.go create mode 100644 cli/opts/duration_test.go create mode 100644 cli/opts/env.go create mode 100644 cli/opts/env_test.go create mode 100644 cli/opts/envfile.go create mode 100644 cli/opts/envfile_test.go create mode 100644 cli/opts/file.go create mode 100644 cli/opts/hosts.go create mode 100644 cli/opts/hosts_test.go create mode 100644 cli/opts/hosts_unix.go create mode 100644 cli/opts/hosts_windows.go create mode 100644 cli/opts/ip.go create mode 100644 cli/opts/ip_test.go create mode 100644 cli/opts/mount.go create mode 100644 cli/opts/mount_test.go create mode 100644 cli/opts/network.go create mode 100644 cli/opts/network_test.go create mode 
100644 cli/opts/opts.go create mode 100644 cli/opts/opts_test.go create mode 100644 cli/opts/opts_unix.go create mode 100644 cli/opts/opts_windows.go create mode 100644 cli/opts/parse.go create mode 100644 cli/opts/port.go create mode 100644 cli/opts/port_test.go create mode 100644 cli/opts/quotedstring.go create mode 100644 cli/opts/quotedstring_test.go create mode 100644 cli/opts/runtime.go create mode 100644 cli/opts/secret.go create mode 100644 cli/opts/secret_test.go create mode 100644 cli/opts/throttledevice.go create mode 100644 cli/opts/ulimit.go create mode 100644 cli/opts/ulimit_test.go create mode 100644 cli/opts/weightdevice.go create mode 100644 cli/poule.yml create mode 100755 cli/scripts/build/.variables create mode 100755 cli/scripts/build/binary create mode 100755 cli/scripts/build/cross create mode 100755 cli/scripts/build/dynbinary create mode 100755 cli/scripts/build/osx create mode 100755 cli/scripts/build/windows create mode 100755 cli/scripts/docs/generate-authors.sh create mode 100755 cli/scripts/docs/generate-man.sh create mode 100755 cli/scripts/docs/generate-yaml.sh create mode 100755 cli/scripts/gen/windows-resources create mode 100644 cli/scripts/make.ps1 create mode 100755 cli/scripts/test/e2e/entry create mode 100755 cli/scripts/test/e2e/load-image create mode 100755 cli/scripts/test/e2e/run create mode 100755 cli/scripts/test/e2e/wait-on-daemon create mode 100755 cli/scripts/test/e2e/wrapper create mode 100755 cli/scripts/test/unit create mode 100755 cli/scripts/test/unit-with-coverage create mode 100755 cli/scripts/validate/check-git-diff create mode 100755 cli/scripts/validate/shellcheck create mode 100755 cli/scripts/warn-outside-container create mode 100644 cli/scripts/winresources/common.rc create mode 100644 cli/scripts/winresources/docker.exe.manifest create mode 100644 cli/scripts/winresources/docker.ico create mode 100644 cli/scripts/winresources/docker.png create mode 100644 cli/scripts/winresources/docker.rc create mode 100644 cli/service/logs/parse_logs.go create mode 100644 cli/service/logs/parse_logs_test.go create mode 100644 cli/templates/templates.go create mode 100644 cli/templates/templates_test.go create mode 100755 cli/vendor.conf create mode 100644 cli/vendor/github.com/Nvveen/Gotty/LICENSE create mode 100644 cli/vendor/github.com/Nvveen/Gotty/README create mode 100644 cli/vendor/github.com/Nvveen/Gotty/attributes.go create mode 100644 cli/vendor/github.com/Nvveen/Gotty/gotty.go create mode 100644 cli/vendor/github.com/Nvveen/Gotty/parser.go create mode 100644 cli/vendor/github.com/Nvveen/Gotty/types.go create mode 100644 cli/vendor/github.com/containerd/continuity/LICENSE create mode 100644 cli/vendor/github.com/containerd/continuity/README.md create mode 100644 cli/vendor/github.com/containerd/continuity/pathdriver/path_driver.go create mode 100644 cli/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go create mode 100644 cli/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/asm.s create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go create mode 100644 
cli/vendor/github.com/containerd/continuity/sysx/chmod_linux.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/file_posix.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/nodata_linux.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/nodata_unix.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/sys.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr_linux.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go create mode 100644 cli/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go create mode 100644 cli/vendor/github.com/containerd/continuity/vendor.conf create mode 100644 cli/vendor/github.com/gregjones/httpcache/LICENSE.txt create mode 100644 cli/vendor/github.com/gregjones/httpcache/README.md create mode 100644 cli/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go create mode 100644 cli/vendor/github.com/gregjones/httpcache/httpcache.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/README.rst create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/README.md create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/protos/command_line.proto create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/protos/store.proto create mode 100644 cli/vendor/github.com/howeyc/gopass/LICENSE.txt create mode 100644 cli/vendor/github.com/howeyc/gopass/README.md create mode 100644 cli/vendor/github.com/howeyc/gopass/pass.go create mode 100644 cli/vendor/github.com/howeyc/gopass/terminal.go create mode 100644 cli/vendor/github.com/moby/buildkit/LICENSE create mode 100644 cli/vendor/github.com/moby/buildkit/README.md create mode 100644 cli/vendor/github.com/moby/buildkit/api/services/control/control.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/api/services/control/control.proto create mode 100644 
cli/vendor/github.com/moby/buildkit/api/services/control/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/api/types/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/api/types/worker.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/api/types/worker.proto create mode 100644 cli/vendor/github.com/moby/buildkit/client/client.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/client_unix.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/client_windows.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/diskusage.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/exporters.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/graph.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/exec.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/marshal.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/meta.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/resolver.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/source.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/state.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/prune.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/solve.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/workers.go create mode 100644 cli/vendor/github.com/moby/buildkit/identity/randomid.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/auth.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/auth.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/auth.proto create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/context.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/filesync.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/filesync.proto create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/grpc.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/grpchijack/dial.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/manager.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/session.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/attr.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/const.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/ops.proto create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/platform.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go create mode 100644 
cli/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/progress/progressui/display.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/path_unix.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/path_windows.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go create mode 100644 cli/vendor/github.com/moby/buildkit/vendor.conf create mode 100644 cli/vendor/github.com/morikuni/aec/LICENSE create mode 100644 cli/vendor/github.com/morikuni/aec/README.md create mode 100644 cli/vendor/github.com/morikuni/aec/aec.go create mode 100644 cli/vendor/github.com/morikuni/aec/ansi.go create mode 100644 cli/vendor/github.com/morikuni/aec/builder.go create mode 100644 cli/vendor/github.com/morikuni/aec/sgr.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/LICENSE create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diff.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diff_containerd.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diskwriter.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/followlinks.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/generate.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/hardlinks.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/readme.md create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/receive.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/send.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/stat.pb.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/stat.proto create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/validator.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/walker.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/walker_unix.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/walker_windows.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/wire.pb.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/wire.proto create mode 100644 cli/vendor/github.com/tonistiigi/units/LICENSE create mode 100644 cli/vendor/github.com/tonistiigi/units/bytes.go create mode 100644 cli/vendor/github.com/tonistiigi/units/readme.md create mode 100644 cli/vendor/k8s.io/api/LICENSE create mode 100644 cli/vendor/k8s.io/api/README.md create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go create mode 100644 
cli/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/doc.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/generated.proto create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/register.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/types.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/authentication/v1/register.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/types.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/authorization/v1/register.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/types.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/doc.go create mode 100644 
cli/vendor/k8s.io/api/autoscaling/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/register.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/types.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/register.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/types.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/batch/v1/register.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/types.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/core/v1/annotation_key_constants.go create mode 100644 cli/vendor/k8s.io/api/core/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/core/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/core/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/core/v1/meta.go create mode 100644 cli/vendor/k8s.io/api/core/v1/objectreference.go create mode 100644 cli/vendor/k8s.io/api/core/v1/register.go create mode 100644 cli/vendor/k8s.io/api/core/v1/resource.go create mode 100644 
cli/vendor/k8s.io/api/core/v1/taint.go create mode 100644 cli/vendor/k8s.io/api/core/v1/toleration.go create mode 100644 cli/vendor/k8s.io/api/core/v1/types.go create mode 100644 cli/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/networking/v1/register.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/types.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/rbac/v1/register.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/types.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto create mode 100644 
cli/vendor/k8s.io/api/scheduling/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/storage/v1/register.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/types.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/LICENSE create mode 100644 cli/vendor/k8s.io/apimachinery/README.md create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/help.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/math.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go create mode 100644 
cli/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/converter.go create mode 100644 
cli/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/helper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/fields/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/fields/fields.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/fields/requirements.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/fields/selector.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/labels/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/labels/labels.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/labels/selector.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/codec.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/error.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/extension.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/helper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/register.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go create mode 100644 
cli/vendor/k8s.io/apimachinery/pkg/runtime/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/selection/operator.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/nodename.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/patch.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/uid.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/json/json.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/http.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/interface.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/util.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/int.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/string.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/version/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/version/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/filter.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/mux.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/until.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/watch.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go create mode 100644 
cli/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go create mode 100644 cli/vendor/k8s.io/client-go/LICENSE create mode 100644 cli/vendor/k8s.io/client-go/README.md create mode 100644 cli/vendor/k8s.io/client-go/discovery/discovery_client.go create mode 100644 cli/vendor/k8s.io/client-go/discovery/helper.go create mode 100644 cli/vendor/k8s.io/client-go/discovery/restmapper.go create mode 100644 cli/vendor/k8s.io/client-go/discovery/unstructured.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/clientset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/import.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/scheme/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/scheme/register.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go create mode 
100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/version/base.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/version/doc.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/version/version.go create mode 100644 cli/vendor/k8s.io/client-go/rest/client.go create mode 100644 cli/vendor/k8s.io/client-go/rest/config.go create mode 100644 cli/vendor/k8s.io/client-go/rest/plugin.go create mode 100644 cli/vendor/k8s.io/client-go/rest/request.go create mode 100644 cli/vendor/k8s.io/client-go/rest/transport.go create mode 100644 cli/vendor/k8s.io/client-go/rest/url_utils.go create mode 100644 cli/vendor/k8s.io/client-go/rest/urlbackoff.go create mode 100644 cli/vendor/k8s.io/client-go/rest/versions.go create mode 100644 cli/vendor/k8s.io/client-go/rest/watch/decoder.go create mode 100644 cli/vendor/k8s.io/client-go/rest/watch/encoder.go create mode 100644 cli/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/client-go/testing/actions.go create mode 100644 cli/vendor/k8s.io/client-go/testing/fake.go create mode 100644 
cli/vendor/k8s.io/client-go/testing/fixture.go create mode 100644 cli/vendor/k8s.io/client-go/tools/auth/clientauth.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/controller.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/delta_fifo.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/doc.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/expiration_cache.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/fifo.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/heap.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/index.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/listers.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/listwatch.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/mutation_cache.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/mutation_detector.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/reflector.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/shared_informer.go create mode 100755 cli/vendor/k8s.io/client-go/tools/cache/store.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/undelta_store.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/register.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/types.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/client_config.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/config.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/doc.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/flag.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/helpers.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/loader.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/overrides.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/validation.go create mode 100644 cli/vendor/k8s.io/client-go/tools/metrics/metrics.go create mode 100644 cli/vendor/k8s.io/client-go/tools/pager/pager.go create mode 100644 cli/vendor/k8s.io/client-go/tools/reference/ref.go create mode 100644 cli/vendor/k8s.io/client-go/transport/cache.go create mode 100644 cli/vendor/k8s.io/client-go/transport/config.go create mode 100644 
cli/vendor/k8s.io/client-go/transport/round_trippers.go create mode 100644 cli/vendor/k8s.io/client-go/transport/transport.go create mode 100644 cli/vendor/k8s.io/client-go/util/cert/cert.go create mode 100644 cli/vendor/k8s.io/client-go/util/cert/csr.go create mode 100644 cli/vendor/k8s.io/client-go/util/cert/io.go create mode 100644 cli/vendor/k8s.io/client-go/util/cert/pem.go create mode 100644 cli/vendor/k8s.io/client-go/util/flowcontrol/backoff.go create mode 100644 cli/vendor/k8s.io/client-go/util/flowcontrol/throttle.go create mode 100644 cli/vendor/k8s.io/client-go/util/homedir/homedir.go create mode 100644 cli/vendor/k8s.io/client-go/util/integer/integer.go create mode 100644 cli/vendor/k8s.io/kube-openapi/LICENSE create mode 100644 cli/vendor/k8s.io/kube-openapi/README.md create mode 100644 cli/vendor/k8s.io/kube-openapi/pkg/common/common.go create mode 100644 cli/vendor/k8s.io/kube-openapi/pkg/common/doc.go create mode 100644 cli/vendor/k8s.io/kubernetes/LICENSE create mode 100644 cli/vendor/k8s.io/kubernetes/README.md create mode 100644 cli/vendor/k8s.io/kubernetes/build/README.md create mode 100644 cli/vendor/k8s.io/kubernetes/build/pause/orphan.c create mode 100644 cli/vendor/k8s.io/kubernetes/build/pause/pause.c create mode 100644 cli/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go create mode 100644 cli/vendor/k8s.io/kubernetes/third_party/protobuf/google/protobuf/compiler/plugin.proto create mode 100644 cli/vendor/k8s.io/kubernetes/third_party/protobuf/google/protobuf/descriptor.proto create mode 100644 cli/vendor/vbom.ml/util/LICENSE create mode 100644 cli/vendor/vbom.ml/util/README.md create mode 100644 cli/vendor/vbom.ml/util/sortorder/README.md create mode 100644 cli/vendor/vbom.ml/util/sortorder/doc.go create mode 100644 cli/vendor/vbom.ml/util/sortorder/natsort.go create mode 100644 components.conf create mode 100644 engine/.DEREK.yml create mode 100644 engine/.dockerignore create mode 100644 engine/.mailmap create mode 100644 engine/AUTHORS create mode 100644 engine/CHANGELOG.md create mode 100644 engine/CONTRIBUTING.md create mode 100644 engine/Dockerfile create mode 100644 engine/Dockerfile.e2e create mode 100644 engine/Dockerfile.simple create mode 100644 engine/Dockerfile.windows create mode 100644 engine/LICENSE create mode 100644 engine/MAINTAINERS create mode 100644 engine/Makefile create mode 100644 engine/NOTICE create mode 100644 engine/README.md create mode 100644 engine/ROADMAP.md create mode 100644 engine/TESTING.md create mode 100644 engine/VENDORING.md create mode 100644 engine/api/README.md create mode 100644 engine/api/common.go create mode 100644 engine/api/common_unix.go create mode 100644 engine/api/common_windows.go create mode 100644 engine/api/server/backend/build/backend.go create mode 100644 engine/api/server/backend/build/tag.go create mode 100644 engine/api/server/httputils/decoder.go create mode 100644 engine/api/server/httputils/errors.go create mode 100644 engine/api/server/httputils/form.go create mode 100644 engine/api/server/httputils/form_test.go create mode 100644 engine/api/server/httputils/httputils.go create mode 100644 engine/api/server/httputils/httputils_test.go create mode 100644 engine/api/server/httputils/httputils_write_json.go create mode 100644 engine/api/server/httputils/write_log_stream.go create mode 100644 engine/api/server/middleware.go create mode 100644 engine/api/server/middleware/cors.go create mode 100644 engine/api/server/middleware/debug.go create mode 100644 engine/api/server/middleware/debug_test.go create 
mode 100644 engine/api/server/middleware/experimental.go create mode 100644 engine/api/server/middleware/middleware.go create mode 100644 engine/api/server/middleware/version.go create mode 100644 engine/api/server/middleware/version_test.go create mode 100644 engine/api/server/router/build/backend.go create mode 100644 engine/api/server/router/build/build.go create mode 100644 engine/api/server/router/build/build_routes.go create mode 100644 engine/api/server/router/checkpoint/backend.go create mode 100644 engine/api/server/router/checkpoint/checkpoint.go create mode 100644 engine/api/server/router/checkpoint/checkpoint_routes.go create mode 100644 engine/api/server/router/container/backend.go create mode 100644 engine/api/server/router/container/container.go create mode 100644 engine/api/server/router/container/container_routes.go create mode 100644 engine/api/server/router/container/copy.go create mode 100644 engine/api/server/router/container/exec.go create mode 100644 engine/api/server/router/container/inspect.go create mode 100644 engine/api/server/router/debug/debug.go create mode 100644 engine/api/server/router/debug/debug_routes.go create mode 100644 engine/api/server/router/distribution/backend.go create mode 100644 engine/api/server/router/distribution/distribution.go create mode 100644 engine/api/server/router/distribution/distribution_routes.go create mode 100644 engine/api/server/router/experimental.go create mode 100644 engine/api/server/router/image/backend.go create mode 100644 engine/api/server/router/image/image.go create mode 100644 engine/api/server/router/image/image_routes.go create mode 100644 engine/api/server/router/local.go create mode 100644 engine/api/server/router/network/backend.go create mode 100644 engine/api/server/router/network/filter.go create mode 100644 engine/api/server/router/network/filter_test.go create mode 100644 engine/api/server/router/network/network.go create mode 100644 engine/api/server/router/network/network_routes.go create mode 100644 engine/api/server/router/plugin/backend.go create mode 100644 engine/api/server/router/plugin/plugin.go create mode 100644 engine/api/server/router/plugin/plugin_routes.go create mode 100644 engine/api/server/router/router.go create mode 100644 engine/api/server/router/session/backend.go create mode 100644 engine/api/server/router/session/session.go create mode 100644 engine/api/server/router/session/session_routes.go create mode 100644 engine/api/server/router/swarm/backend.go create mode 100644 engine/api/server/router/swarm/cluster.go create mode 100644 engine/api/server/router/swarm/cluster_routes.go create mode 100644 engine/api/server/router/swarm/helpers.go create mode 100644 engine/api/server/router/system/backend.go create mode 100644 engine/api/server/router/system/system.go create mode 100644 engine/api/server/router/system/system_routes.go create mode 100644 engine/api/server/router/volume/backend.go create mode 100644 engine/api/server/router/volume/volume.go create mode 100644 engine/api/server/router/volume/volume_routes.go create mode 100644 engine/api/server/router_swapper.go create mode 100644 engine/api/server/server.go create mode 100644 engine/api/server/server_test.go create mode 100644 engine/api/swagger-gen.yaml create mode 100644 engine/api/swagger.yaml create mode 100644 engine/api/templates/server/operation.gotmpl create mode 100644 engine/api/types/auth.go create mode 100644 engine/api/types/backend/backend.go create mode 100644 engine/api/types/backend/build.go create mode 
100644 engine/api/types/blkiodev/blkio.go create mode 100644 engine/api/types/client.go create mode 100644 engine/api/types/configs.go create mode 100644 engine/api/types/container/config.go create mode 100644 engine/api/types/container/container_changes.go create mode 100644 engine/api/types/container/container_create.go create mode 100644 engine/api/types/container/container_top.go create mode 100644 engine/api/types/container/container_update.go create mode 100644 engine/api/types/container/container_wait.go create mode 100644 engine/api/types/container/host_config.go create mode 100644 engine/api/types/container/hostconfig_unix.go create mode 100644 engine/api/types/container/hostconfig_windows.go create mode 100644 engine/api/types/container/waitcondition.go create mode 100644 engine/api/types/error_response.go create mode 100644 engine/api/types/events/events.go create mode 100644 engine/api/types/filters/example_test.go create mode 100644 engine/api/types/filters/parse.go create mode 100644 engine/api/types/filters/parse_test.go create mode 100644 engine/api/types/graph_driver_data.go create mode 100644 engine/api/types/id_response.go create mode 100644 engine/api/types/image/image_history.go create mode 100644 engine/api/types/image_delete_response_item.go create mode 100644 engine/api/types/image_summary.go create mode 100644 engine/api/types/mount/mount.go create mode 100644 engine/api/types/network/network.go create mode 100644 engine/api/types/plugin.go create mode 100644 engine/api/types/plugin_device.go create mode 100644 engine/api/types/plugin_env.go create mode 100644 engine/api/types/plugin_interface_type.go create mode 100644 engine/api/types/plugin_mount.go create mode 100644 engine/api/types/plugin_responses.go create mode 100644 engine/api/types/plugins/logdriver/entry.pb.go create mode 100644 engine/api/types/plugins/logdriver/entry.proto create mode 100644 engine/api/types/plugins/logdriver/gen.go create mode 100644 engine/api/types/plugins/logdriver/io.go create mode 100644 engine/api/types/port.go create mode 100644 engine/api/types/registry/authenticate.go create mode 100644 engine/api/types/registry/registry.go create mode 100644 engine/api/types/seccomp.go create mode 100644 engine/api/types/service_update_response.go create mode 100644 engine/api/types/stats.go create mode 100644 engine/api/types/strslice/strslice.go create mode 100644 engine/api/types/strslice/strslice_test.go create mode 100644 engine/api/types/swarm/common.go create mode 100644 engine/api/types/swarm/config.go create mode 100644 engine/api/types/swarm/container.go create mode 100644 engine/api/types/swarm/network.go create mode 100644 engine/api/types/swarm/node.go create mode 100644 engine/api/types/swarm/runtime.go create mode 100644 engine/api/types/swarm/runtime/gen.go create mode 100644 engine/api/types/swarm/runtime/plugin.pb.go create mode 100644 engine/api/types/swarm/runtime/plugin.proto create mode 100644 engine/api/types/swarm/secret.go create mode 100644 engine/api/types/swarm/service.go create mode 100644 engine/api/types/swarm/swarm.go create mode 100644 engine/api/types/swarm/task.go create mode 100644 engine/api/types/time/duration_convert.go create mode 100644 engine/api/types/time/duration_convert_test.go create mode 100644 engine/api/types/time/timestamp.go create mode 100644 engine/api/types/time/timestamp_test.go create mode 100644 engine/api/types/types.go create mode 100644 engine/api/types/versions/README.md create mode 100644 engine/api/types/versions/compare.go 
create mode 100644 engine/api/types/versions/compare_test.go create mode 100644 engine/api/types/versions/v1p19/types.go create mode 100644 engine/api/types/versions/v1p20/types.go create mode 100644 engine/api/types/volume.go create mode 100644 engine/api/types/volume/volume_create.go create mode 100644 engine/api/types/volume/volume_list.go create mode 100644 engine/builder/builder-next/adapters/containerimage/pull.go create mode 100644 engine/builder/builder-next/adapters/snapshot/layer.go create mode 100644 engine/builder/builder-next/adapters/snapshot/snapshot.go create mode 100644 engine/builder/builder-next/builder.go create mode 100644 engine/builder/builder-next/controller.go create mode 100644 engine/builder/builder-next/executor_unix.go create mode 100644 engine/builder/builder-next/executor_windows.go create mode 100644 engine/builder/builder-next/exporter/export.go create mode 100644 engine/builder/builder-next/exporter/writer.go create mode 100644 engine/builder/builder-next/reqbodyhandler.go create mode 100644 engine/builder/builder-next/worker/worker.go create mode 100644 engine/builder/builder.go create mode 100644 engine/builder/dockerfile/buildargs.go create mode 100644 engine/builder/dockerfile/buildargs_test.go create mode 100644 engine/builder/dockerfile/builder.go create mode 100644 engine/builder/dockerfile/builder_unix.go create mode 100644 engine/builder/dockerfile/builder_windows.go create mode 100644 engine/builder/dockerfile/clientsession.go create mode 100644 engine/builder/dockerfile/containerbackend.go create mode 100644 engine/builder/dockerfile/copy.go create mode 100644 engine/builder/dockerfile/copy_test.go create mode 100644 engine/builder/dockerfile/copy_unix.go create mode 100644 engine/builder/dockerfile/copy_windows.go create mode 100644 engine/builder/dockerfile/dispatchers.go create mode 100644 engine/builder/dockerfile/dispatchers_test.go create mode 100644 engine/builder/dockerfile/dispatchers_unix.go create mode 100644 engine/builder/dockerfile/dispatchers_unix_test.go create mode 100644 engine/builder/dockerfile/dispatchers_windows.go create mode 100644 engine/builder/dockerfile/dispatchers_windows_test.go create mode 100644 engine/builder/dockerfile/evaluator.go create mode 100644 engine/builder/dockerfile/evaluator_test.go create mode 100644 engine/builder/dockerfile/imagecontext.go create mode 100644 engine/builder/dockerfile/imageprobe.go create mode 100644 engine/builder/dockerfile/internals.go create mode 100644 engine/builder/dockerfile/internals_linux.go create mode 100644 engine/builder/dockerfile/internals_linux_test.go create mode 100644 engine/builder/dockerfile/internals_test.go create mode 100644 engine/builder/dockerfile/internals_windows.go create mode 100644 engine/builder/dockerfile/internals_windows_test.go create mode 100644 engine/builder/dockerfile/metrics.go create mode 100644 engine/builder/dockerfile/mockbackend_test.go create mode 100644 engine/builder/dockerfile/utils_test.go create mode 100644 engine/builder/dockerignore/dockerignore.go create mode 100644 engine/builder/dockerignore/dockerignore_test.go create mode 100644 engine/builder/fscache/fscache.go create mode 100644 engine/builder/fscache/fscache_test.go create mode 100644 engine/builder/fscache/naivedriver.go create mode 100644 engine/builder/remotecontext/archive.go create mode 100644 engine/builder/remotecontext/detect.go create mode 100644 engine/builder/remotecontext/detect_test.go create mode 100644 engine/builder/remotecontext/filehash.go create mode 
100644 engine/builder/remotecontext/generate.go create mode 100644 engine/builder/remotecontext/git.go create mode 100644 engine/builder/remotecontext/git/gitutils.go create mode 100644 engine/builder/remotecontext/git/gitutils_test.go create mode 100644 engine/builder/remotecontext/lazycontext.go create mode 100644 engine/builder/remotecontext/mimetype.go create mode 100644 engine/builder/remotecontext/mimetype_test.go create mode 100644 engine/builder/remotecontext/remote.go create mode 100644 engine/builder/remotecontext/remote_test.go create mode 100644 engine/builder/remotecontext/tarsum.go create mode 100644 engine/builder/remotecontext/tarsum.pb.go create mode 100644 engine/builder/remotecontext/tarsum.proto create mode 100644 engine/builder/remotecontext/tarsum_test.go create mode 100644 engine/builder/remotecontext/utils_test.go create mode 100644 engine/cli/cobra.go create mode 100644 engine/cli/config/configdir.go create mode 100644 engine/cli/debug/debug.go create mode 100644 engine/cli/debug/debug_test.go create mode 100644 engine/cli/error.go create mode 100644 engine/cli/required.go create mode 100644 engine/client/README.md create mode 100644 engine/client/build_cancel.go create mode 100644 engine/client/build_prune.go create mode 100644 engine/client/checkpoint_create.go create mode 100644 engine/client/checkpoint_create_test.go create mode 100644 engine/client/checkpoint_delete.go create mode 100644 engine/client/checkpoint_delete_test.go create mode 100644 engine/client/checkpoint_list.go create mode 100644 engine/client/checkpoint_list_test.go create mode 100644 engine/client/client.go create mode 100644 engine/client/client_mock_test.go create mode 100644 engine/client/client_test.go create mode 100644 engine/client/client_unix.go create mode 100644 engine/client/client_windows.go create mode 100644 engine/client/config_create.go create mode 100644 engine/client/config_create_test.go create mode 100644 engine/client/config_inspect.go create mode 100644 engine/client/config_inspect_test.go create mode 100644 engine/client/config_list.go create mode 100644 engine/client/config_list_test.go create mode 100644 engine/client/config_remove.go create mode 100644 engine/client/config_remove_test.go create mode 100644 engine/client/config_update.go create mode 100644 engine/client/config_update_test.go create mode 100644 engine/client/container_attach.go create mode 100644 engine/client/container_commit.go create mode 100644 engine/client/container_commit_test.go create mode 100644 engine/client/container_copy.go create mode 100644 engine/client/container_copy_test.go create mode 100644 engine/client/container_create.go create mode 100644 engine/client/container_create_test.go create mode 100644 engine/client/container_diff.go create mode 100644 engine/client/container_diff_test.go create mode 100644 engine/client/container_exec.go create mode 100644 engine/client/container_exec_test.go create mode 100644 engine/client/container_export.go create mode 100644 engine/client/container_export_test.go create mode 100644 engine/client/container_inspect.go create mode 100644 engine/client/container_inspect_test.go create mode 100644 engine/client/container_kill.go create mode 100644 engine/client/container_kill_test.go create mode 100644 engine/client/container_list.go create mode 100644 engine/client/container_list_test.go create mode 100644 engine/client/container_logs.go create mode 100644 engine/client/container_logs_test.go create mode 100644 engine/client/container_pause.go 
create mode 100644 engine/client/container_pause_test.go create mode 100644 engine/client/container_prune.go create mode 100644 engine/client/container_prune_test.go create mode 100644 engine/client/container_remove.go create mode 100644 engine/client/container_remove_test.go create mode 100644 engine/client/container_rename.go create mode 100644 engine/client/container_rename_test.go create mode 100644 engine/client/container_resize.go create mode 100644 engine/client/container_resize_test.go create mode 100644 engine/client/container_restart.go create mode 100644 engine/client/container_restart_test.go create mode 100644 engine/client/container_start.go create mode 100644 engine/client/container_start_test.go create mode 100644 engine/client/container_stats.go create mode 100644 engine/client/container_stats_test.go create mode 100644 engine/client/container_stop.go create mode 100644 engine/client/container_stop_test.go create mode 100644 engine/client/container_top.go create mode 100644 engine/client/container_top_test.go create mode 100644 engine/client/container_unpause.go create mode 100644 engine/client/container_unpause_test.go create mode 100644 engine/client/container_update.go create mode 100644 engine/client/container_update_test.go create mode 100644 engine/client/container_wait.go create mode 100644 engine/client/container_wait_test.go create mode 100644 engine/client/disk_usage.go create mode 100644 engine/client/disk_usage_test.go create mode 100644 engine/client/distribution_inspect.go create mode 100644 engine/client/distribution_inspect_test.go create mode 100644 engine/client/errors.go create mode 100644 engine/client/events.go create mode 100644 engine/client/events_test.go create mode 100644 engine/client/hijack.go create mode 100644 engine/client/hijack_test.go create mode 100644 engine/client/image_build.go create mode 100644 engine/client/image_build_test.go create mode 100644 engine/client/image_create.go create mode 100644 engine/client/image_create_test.go create mode 100644 engine/client/image_history.go create mode 100644 engine/client/image_history_test.go create mode 100644 engine/client/image_import.go create mode 100644 engine/client/image_import_test.go create mode 100644 engine/client/image_inspect.go create mode 100644 engine/client/image_inspect_test.go create mode 100644 engine/client/image_list.go create mode 100644 engine/client/image_list_test.go create mode 100644 engine/client/image_load.go create mode 100644 engine/client/image_load_test.go create mode 100644 engine/client/image_prune.go create mode 100644 engine/client/image_prune_test.go create mode 100644 engine/client/image_pull.go create mode 100644 engine/client/image_pull_test.go create mode 100644 engine/client/image_push.go create mode 100644 engine/client/image_push_test.go create mode 100644 engine/client/image_remove.go create mode 100644 engine/client/image_remove_test.go create mode 100644 engine/client/image_save.go create mode 100644 engine/client/image_save_test.go create mode 100644 engine/client/image_search.go create mode 100644 engine/client/image_search_test.go create mode 100644 engine/client/image_tag.go create mode 100644 engine/client/image_tag_test.go create mode 100644 engine/client/info.go create mode 100644 engine/client/info_test.go create mode 100644 engine/client/interface.go create mode 100644 engine/client/interface_experimental.go create mode 100644 engine/client/interface_stable.go create mode 100644 engine/client/login.go create mode 100644 
engine/client/network_connect.go create mode 100644 engine/client/network_connect_test.go create mode 100644 engine/client/network_create.go create mode 100644 engine/client/network_create_test.go create mode 100644 engine/client/network_disconnect.go create mode 100644 engine/client/network_disconnect_test.go create mode 100644 engine/client/network_inspect.go create mode 100644 engine/client/network_inspect_test.go create mode 100644 engine/client/network_list.go create mode 100644 engine/client/network_list_test.go create mode 100644 engine/client/network_prune.go create mode 100644 engine/client/network_prune_test.go create mode 100644 engine/client/network_remove.go create mode 100644 engine/client/network_remove_test.go create mode 100644 engine/client/node_inspect.go create mode 100644 engine/client/node_inspect_test.go create mode 100644 engine/client/node_list.go create mode 100644 engine/client/node_list_test.go create mode 100644 engine/client/node_remove.go create mode 100644 engine/client/node_remove_test.go create mode 100644 engine/client/node_update.go create mode 100644 engine/client/node_update_test.go create mode 100644 engine/client/ping.go create mode 100644 engine/client/ping_test.go create mode 100644 engine/client/plugin_create.go create mode 100644 engine/client/plugin_disable.go create mode 100644 engine/client/plugin_disable_test.go create mode 100644 engine/client/plugin_enable.go create mode 100644 engine/client/plugin_enable_test.go create mode 100644 engine/client/plugin_inspect.go create mode 100644 engine/client/plugin_inspect_test.go create mode 100644 engine/client/plugin_install.go create mode 100644 engine/client/plugin_list.go create mode 100644 engine/client/plugin_list_test.go create mode 100644 engine/client/plugin_push.go create mode 100644 engine/client/plugin_push_test.go create mode 100644 engine/client/plugin_remove.go create mode 100644 engine/client/plugin_remove_test.go create mode 100644 engine/client/plugin_set.go create mode 100644 engine/client/plugin_set_test.go create mode 100644 engine/client/plugin_upgrade.go create mode 100644 engine/client/request.go create mode 100644 engine/client/request_test.go create mode 100644 engine/client/secret_create.go create mode 100644 engine/client/secret_create_test.go create mode 100644 engine/client/secret_inspect.go create mode 100644 engine/client/secret_inspect_test.go create mode 100644 engine/client/secret_list.go create mode 100644 engine/client/secret_list_test.go create mode 100644 engine/client/secret_remove.go create mode 100644 engine/client/secret_remove_test.go create mode 100644 engine/client/secret_update.go create mode 100644 engine/client/secret_update_test.go create mode 100644 engine/client/service_create.go create mode 100644 engine/client/service_create_test.go create mode 100644 engine/client/service_inspect.go create mode 100644 engine/client/service_inspect_test.go create mode 100644 engine/client/service_list.go create mode 100644 engine/client/service_list_test.go create mode 100644 engine/client/service_logs.go create mode 100644 engine/client/service_logs_test.go create mode 100644 engine/client/service_remove.go create mode 100644 engine/client/service_remove_test.go create mode 100644 engine/client/service_update.go create mode 100644 engine/client/service_update_test.go create mode 100644 engine/client/session.go create mode 100644 engine/client/swarm_get_unlock_key.go create mode 100644 engine/client/swarm_get_unlock_key_test.go create mode 100644 
engine/client/swarm_init.go create mode 100644 engine/client/swarm_init_test.go create mode 100644 engine/client/swarm_inspect.go create mode 100644 engine/client/swarm_inspect_test.go create mode 100644 engine/client/swarm_join.go create mode 100644 engine/client/swarm_join_test.go create mode 100644 engine/client/swarm_leave.go create mode 100644 engine/client/swarm_leave_test.go create mode 100644 engine/client/swarm_unlock.go create mode 100644 engine/client/swarm_unlock_test.go create mode 100644 engine/client/swarm_update.go create mode 100644 engine/client/swarm_update_test.go create mode 100644 engine/client/task_inspect.go create mode 100644 engine/client/task_inspect_test.go create mode 100644 engine/client/task_list.go create mode 100644 engine/client/task_list_test.go create mode 100644 engine/client/task_logs.go create mode 100644 engine/client/testdata/ca.pem create mode 100644 engine/client/testdata/cert.pem create mode 100644 engine/client/testdata/key.pem create mode 100644 engine/client/transport.go create mode 100644 engine/client/utils.go create mode 100644 engine/client/version.go create mode 100644 engine/client/volume_create.go create mode 100644 engine/client/volume_create_test.go create mode 100644 engine/client/volume_inspect.go create mode 100644 engine/client/volume_inspect_test.go create mode 100644 engine/client/volume_list.go create mode 100644 engine/client/volume_list_test.go create mode 100644 engine/client/volume_prune.go create mode 100644 engine/client/volume_remove.go create mode 100644 engine/client/volume_remove_test.go create mode 100644 engine/cmd/dockerd/README.md create mode 100644 engine/cmd/dockerd/config.go create mode 100644 engine/cmd/dockerd/config_common_unix.go create mode 100644 engine/cmd/dockerd/config_unix.go create mode 100644 engine/cmd/dockerd/config_unix_test.go create mode 100644 engine/cmd/dockerd/config_windows.go create mode 100644 engine/cmd/dockerd/daemon.go create mode 100644 engine/cmd/dockerd/daemon_freebsd.go create mode 100644 engine/cmd/dockerd/daemon_linux.go create mode 100644 engine/cmd/dockerd/daemon_test.go create mode 100644 engine/cmd/dockerd/daemon_unix.go create mode 100644 engine/cmd/dockerd/daemon_unix_test.go create mode 100644 engine/cmd/dockerd/daemon_windows.go create mode 100644 engine/cmd/dockerd/docker.go create mode 100644 engine/cmd/dockerd/docker_unix.go create mode 100644 engine/cmd/dockerd/docker_windows.go create mode 100644 engine/cmd/dockerd/hack/malformed_host_override.go create mode 100644 engine/cmd/dockerd/hack/malformed_host_override_test.go create mode 100644 engine/cmd/dockerd/metrics.go create mode 100644 engine/cmd/dockerd/options.go create mode 100644 engine/cmd/dockerd/options_test.go create mode 100644 engine/cmd/dockerd/service_unsupported.go create mode 100644 engine/cmd/dockerd/service_windows.go create mode 100644 engine/codecov.yml create mode 100644 engine/container/archive.go create mode 100644 engine/container/container.go create mode 100644 engine/container/container_unit_test.go create mode 100644 engine/container/container_unix.go create mode 100644 engine/container/container_windows.go create mode 100644 engine/container/env.go create mode 100644 engine/container/env_test.go create mode 100644 engine/container/health.go create mode 100644 engine/container/history.go create mode 100644 engine/container/memory_store.go create mode 100644 engine/container/memory_store_test.go create mode 100644 engine/container/monitor.go create mode 100644 engine/container/mounts_unix.go 
create mode 100644 engine/container/mounts_windows.go create mode 100644 engine/container/state.go create mode 100644 engine/container/state_test.go create mode 100644 engine/container/store.go create mode 100644 engine/container/stream/attach.go create mode 100644 engine/container/stream/streams.go create mode 100644 engine/container/view.go create mode 100644 engine/container/view_test.go create mode 100644 engine/contrib/README.md create mode 100644 engine/contrib/REVIEWERS create mode 100644 engine/contrib/apparmor/main.go create mode 100644 engine/contrib/apparmor/template.go create mode 100755 engine/contrib/check-config.sh create mode 100644 engine/contrib/desktop-integration/README.md create mode 100644 engine/contrib/desktop-integration/chromium/Dockerfile create mode 100644 engine/contrib/desktop-integration/gparted/Dockerfile create mode 100644 engine/contrib/docker-device-tool/README.md create mode 100644 engine/contrib/docker-device-tool/device_tool.go create mode 100644 engine/contrib/docker-device-tool/device_tool_windows.go create mode 100755 engine/contrib/docker-machine-install-bundle.sh create mode 100755 engine/contrib/dockerize-disk.sh create mode 100755 engine/contrib/download-frozen-image-v1.sh create mode 100755 engine/contrib/download-frozen-image-v2.sh create mode 100644 engine/contrib/editorconfig create mode 100644 engine/contrib/gitdm/aliases create mode 100644 engine/contrib/gitdm/domain-map create mode 100755 engine/contrib/gitdm/generate_aliases.sh create mode 100644 engine/contrib/gitdm/gitdm.config create mode 100644 engine/contrib/httpserver/Dockerfile create mode 100644 engine/contrib/httpserver/server.go create mode 100644 engine/contrib/init/openrc/docker.confd create mode 100644 engine/contrib/init/openrc/docker.initd create mode 100644 engine/contrib/init/systemd/REVIEWERS create mode 100644 engine/contrib/init/systemd/docker.service create mode 100644 engine/contrib/init/systemd/docker.service.rpm create mode 100644 engine/contrib/init/systemd/docker.socket create mode 100755 engine/contrib/init/sysvinit-debian/docker create mode 100644 engine/contrib/init/sysvinit-debian/docker.default create mode 100755 engine/contrib/init/sysvinit-redhat/docker create mode 100644 engine/contrib/init/sysvinit-redhat/docker.sysconfig create mode 100644 engine/contrib/init/upstart/REVIEWERS create mode 100644 engine/contrib/init/upstart/docker.conf create mode 100755 engine/contrib/mac-install-bundle.sh create mode 100755 engine/contrib/mkimage-alpine.sh create mode 100644 engine/contrib/mkimage-arch-pacman.conf create mode 100755 engine/contrib/mkimage-arch.sh create mode 100644 engine/contrib/mkimage-archarm-pacman.conf create mode 100755 engine/contrib/mkimage-crux.sh create mode 100755 engine/contrib/mkimage-pld.sh create mode 100755 engine/contrib/mkimage-yum.sh create mode 100755 engine/contrib/mkimage.sh create mode 100755 engine/contrib/mkimage/.febootstrap-minimize create mode 100755 engine/contrib/mkimage/busybox-static create mode 100755 engine/contrib/mkimage/debootstrap create mode 100755 engine/contrib/mkimage/mageia-urpmi create mode 100755 engine/contrib/mkimage/rinse create mode 100644 engine/contrib/nnp-test/Dockerfile create mode 100644 engine/contrib/nnp-test/nnp-test.c create mode 100755 engine/contrib/nuke-graph-directory.sh create mode 100755 engine/contrib/report-issue.sh create mode 100644 engine/contrib/syntax/nano/Dockerfile.nanorc create mode 100644 engine/contrib/syntax/nano/README.md create mode 100644 
engine/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences create mode 100644 engine/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage create mode 100644 engine/contrib/syntax/textmate/Docker.tmbundle/info.plist create mode 100644 engine/contrib/syntax/textmate/README.md create mode 100644 engine/contrib/syntax/textmate/REVIEWERS create mode 100644 engine/contrib/syntax/vim/LICENSE create mode 100644 engine/contrib/syntax/vim/README.md create mode 100644 engine/contrib/syntax/vim/doc/dockerfile.txt create mode 100644 engine/contrib/syntax/vim/ftdetect/dockerfile.vim create mode 100644 engine/contrib/syntax/vim/syntax/dockerfile.vim create mode 100644 engine/contrib/syscall-test/Dockerfile create mode 100644 engine/contrib/syscall-test/acct.c create mode 100644 engine/contrib/syscall-test/exit32.s create mode 100644 engine/contrib/syscall-test/ns.c create mode 100644 engine/contrib/syscall-test/raw.c create mode 100644 engine/contrib/syscall-test/setgid.c create mode 100644 engine/contrib/syscall-test/setuid.c create mode 100644 engine/contrib/syscall-test/socket.c create mode 100644 engine/contrib/syscall-test/userns.c create mode 100644 engine/contrib/udev/80-docker.rules create mode 100644 engine/contrib/vagrant-docker/README.md create mode 100644 engine/daemon/apparmor_default.go create mode 100644 engine/daemon/apparmor_default_unsupported.go create mode 100644 engine/daemon/archive.go create mode 100644 engine/daemon/archive_tarcopyoptions.go create mode 100644 engine/daemon/archive_tarcopyoptions_unix.go create mode 100644 engine/daemon/archive_tarcopyoptions_windows.go create mode 100644 engine/daemon/archive_unix.go create mode 100644 engine/daemon/archive_windows.go create mode 100644 engine/daemon/attach.go create mode 100644 engine/daemon/auth.go create mode 100644 engine/daemon/bindmount_unix.go create mode 100644 engine/daemon/caps/utils.go create mode 100644 engine/daemon/changes.go create mode 100644 engine/daemon/checkpoint.go create mode 100644 engine/daemon/cluster.go create mode 100644 engine/daemon/cluster/cluster.go create mode 100644 engine/daemon/cluster/configs.go create mode 100644 engine/daemon/cluster/controllers/plugin/controller.go create mode 100644 engine/daemon/cluster/controllers/plugin/controller_test.go create mode 100644 engine/daemon/cluster/convert/config.go create mode 100644 engine/daemon/cluster/convert/container.go create mode 100644 engine/daemon/cluster/convert/network.go create mode 100644 engine/daemon/cluster/convert/network_test.go create mode 100644 engine/daemon/cluster/convert/node.go create mode 100644 engine/daemon/cluster/convert/secret.go create mode 100644 engine/daemon/cluster/convert/service.go create mode 100644 engine/daemon/cluster/convert/service_test.go create mode 100644 engine/daemon/cluster/convert/swarm.go create mode 100644 engine/daemon/cluster/convert/task.go create mode 100644 engine/daemon/cluster/errors.go create mode 100644 engine/daemon/cluster/executor/backend.go create mode 100644 engine/daemon/cluster/executor/container/adapter.go create mode 100644 engine/daemon/cluster/executor/container/attachment.go create mode 100644 engine/daemon/cluster/executor/container/container.go create mode 100644 engine/daemon/cluster/executor/container/container_test.go create mode 100644 engine/daemon/cluster/executor/container/controller.go create mode 100644 engine/daemon/cluster/executor/container/errors.go create mode 100644 engine/daemon/cluster/executor/container/executor.go 
create mode 100644 engine/daemon/cluster/executor/container/health_test.go create mode 100644 engine/daemon/cluster/executor/container/validate.go create mode 100644 engine/daemon/cluster/executor/container/validate_test.go create mode 100644 engine/daemon/cluster/executor/container/validate_unix_test.go create mode 100644 engine/daemon/cluster/executor/container/validate_windows_test.go create mode 100644 engine/daemon/cluster/filters.go create mode 100644 engine/daemon/cluster/filters_test.go create mode 100644 engine/daemon/cluster/helpers.go create mode 100644 engine/daemon/cluster/listen_addr.go create mode 100644 engine/daemon/cluster/listen_addr_linux.go create mode 100644 engine/daemon/cluster/listen_addr_others.go create mode 100644 engine/daemon/cluster/networks.go create mode 100644 engine/daemon/cluster/noderunner.go create mode 100644 engine/daemon/cluster/nodes.go create mode 100644 engine/daemon/cluster/provider/network.go create mode 100644 engine/daemon/cluster/secrets.go create mode 100644 engine/daemon/cluster/services.go create mode 100644 engine/daemon/cluster/swarm.go create mode 100644 engine/daemon/cluster/tasks.go create mode 100644 engine/daemon/cluster/utils.go create mode 100644 engine/daemon/commit.go create mode 100644 engine/daemon/config/config.go create mode 100644 engine/daemon/config/config_common_unix.go create mode 100644 engine/daemon/config/config_common_unix_test.go create mode 100644 engine/daemon/config/config_test.go create mode 100644 engine/daemon/config/config_unix.go create mode 100644 engine/daemon/config/config_unix_test.go create mode 100644 engine/daemon/config/config_windows.go create mode 100644 engine/daemon/config/config_windows_test.go create mode 100644 engine/daemon/config/opts.go create mode 100644 engine/daemon/configs.go create mode 100644 engine/daemon/configs_linux.go create mode 100644 engine/daemon/configs_unsupported.go create mode 100644 engine/daemon/configs_windows.go create mode 100644 engine/daemon/container.go create mode 100644 engine/daemon/container_linux.go create mode 100644 engine/daemon/container_operations.go create mode 100644 engine/daemon/container_operations_unix.go create mode 100644 engine/daemon/container_operations_windows.go create mode 100644 engine/daemon/container_unix_test.go create mode 100644 engine/daemon/container_windows.go create mode 100644 engine/daemon/create.go create mode 100644 engine/daemon/create_test.go create mode 100644 engine/daemon/create_unix.go create mode 100644 engine/daemon/create_windows.go create mode 100644 engine/daemon/daemon.go create mode 100644 engine/daemon/daemon_linux.go create mode 100644 engine/daemon/daemon_linux_test.go create mode 100644 engine/daemon/daemon_test.go create mode 100644 engine/daemon/daemon_unix.go create mode 100644 engine/daemon/daemon_unix_test.go create mode 100644 engine/daemon/daemon_unsupported.go create mode 100644 engine/daemon/daemon_windows.go create mode 100644 engine/daemon/daemon_windows_test.go create mode 100644 engine/daemon/debugtrap_unix.go create mode 100644 engine/daemon/debugtrap_unsupported.go create mode 100644 engine/daemon/debugtrap_windows.go create mode 100644 engine/daemon/delete.go create mode 100644 engine/daemon/delete_test.go create mode 100644 engine/daemon/dependency.go create mode 100644 engine/daemon/discovery/discovery.go create mode 100644 engine/daemon/discovery/discovery_test.go create mode 100644 engine/daemon/disk_usage.go create mode 100644 engine/daemon/errors.go create mode 100644 
engine/daemon/events.go create mode 100644 engine/daemon/events/events.go create mode 100644 engine/daemon/events/events_test.go create mode 100644 engine/daemon/events/filter.go create mode 100644 engine/daemon/events/metrics.go create mode 100644 engine/daemon/events/testutils/testutils.go create mode 100644 engine/daemon/events_test.go create mode 100644 engine/daemon/exec.go create mode 100644 engine/daemon/exec/exec.go create mode 100644 engine/daemon/exec_linux.go create mode 100644 engine/daemon/exec_linux_test.go create mode 100644 engine/daemon/exec_windows.go create mode 100644 engine/daemon/export.go create mode 100644 engine/daemon/graphdriver/aufs/aufs.go create mode 100644 engine/daemon/graphdriver/aufs/aufs_test.go create mode 100644 engine/daemon/graphdriver/aufs/dirs.go create mode 100644 engine/daemon/graphdriver/aufs/mount.go create mode 100644 engine/daemon/graphdriver/aufs/mount_linux.go create mode 100644 engine/daemon/graphdriver/aufs/mount_unsupported.go create mode 100644 engine/daemon/graphdriver/btrfs/btrfs.go create mode 100644 engine/daemon/graphdriver/btrfs/btrfs_test.go create mode 100644 engine/daemon/graphdriver/btrfs/dummy_unsupported.go create mode 100644 engine/daemon/graphdriver/btrfs/version.go create mode 100644 engine/daemon/graphdriver/btrfs/version_none.go create mode 100644 engine/daemon/graphdriver/btrfs/version_test.go create mode 100644 engine/daemon/graphdriver/copy/copy.go create mode 100644 engine/daemon/graphdriver/copy/copy_test.go create mode 100644 engine/daemon/graphdriver/counter.go create mode 100644 engine/daemon/graphdriver/devmapper/README.md create mode 100644 engine/daemon/graphdriver/devmapper/device_setup.go create mode 100644 engine/daemon/graphdriver/devmapper/deviceset.go create mode 100644 engine/daemon/graphdriver/devmapper/devmapper_doc.go create mode 100644 engine/daemon/graphdriver/devmapper/devmapper_test.go create mode 100644 engine/daemon/graphdriver/devmapper/driver.go create mode 100644 engine/daemon/graphdriver/devmapper/mount.go create mode 100644 engine/daemon/graphdriver/driver.go create mode 100644 engine/daemon/graphdriver/driver_freebsd.go create mode 100644 engine/daemon/graphdriver/driver_linux.go create mode 100644 engine/daemon/graphdriver/driver_test.go create mode 100644 engine/daemon/graphdriver/driver_unsupported.go create mode 100644 engine/daemon/graphdriver/driver_windows.go create mode 100644 engine/daemon/graphdriver/errors.go create mode 100644 engine/daemon/graphdriver/fsdiff.go create mode 100644 engine/daemon/graphdriver/graphtest/graphbench_unix.go create mode 100644 engine/daemon/graphdriver/graphtest/graphtest_unix.go create mode 100644 engine/daemon/graphdriver/graphtest/graphtest_windows.go create mode 100644 engine/daemon/graphdriver/graphtest/testutil.go create mode 100644 engine/daemon/graphdriver/graphtest/testutil_unix.go create mode 100644 engine/daemon/graphdriver/lcow/lcow.go create mode 100644 engine/daemon/graphdriver/lcow/lcow_svm.go create mode 100644 engine/daemon/graphdriver/lcow/remotefs.go create mode 100644 engine/daemon/graphdriver/lcow/remotefs_file.go create mode 100644 engine/daemon/graphdriver/lcow/remotefs_filedriver.go create mode 100644 engine/daemon/graphdriver/lcow/remotefs_pathdriver.go create mode 100644 engine/daemon/graphdriver/overlay/overlay.go create mode 100644 engine/daemon/graphdriver/overlay/overlay_test.go create mode 100644 engine/daemon/graphdriver/overlay/overlay_unsupported.go create mode 100644 engine/daemon/graphdriver/overlay2/check.go 
create mode 100644 engine/daemon/graphdriver/overlay2/mount.go create mode 100644 engine/daemon/graphdriver/overlay2/overlay.go create mode 100644 engine/daemon/graphdriver/overlay2/overlay_test.go create mode 100644 engine/daemon/graphdriver/overlay2/overlay_unsupported.go create mode 100644 engine/daemon/graphdriver/overlay2/randomid.go create mode 100644 engine/daemon/graphdriver/overlayutils/overlayutils.go create mode 100644 engine/daemon/graphdriver/plugin.go create mode 100644 engine/daemon/graphdriver/proxy.go create mode 100644 engine/daemon/graphdriver/quota/errors.go create mode 100644 engine/daemon/graphdriver/quota/projectquota.go create mode 100644 engine/daemon/graphdriver/quota/projectquota_test.go create mode 100644 engine/daemon/graphdriver/register/register_aufs.go create mode 100644 engine/daemon/graphdriver/register/register_btrfs.go create mode 100644 engine/daemon/graphdriver/register/register_devicemapper.go create mode 100644 engine/daemon/graphdriver/register/register_overlay.go create mode 100644 engine/daemon/graphdriver/register/register_overlay2.go create mode 100644 engine/daemon/graphdriver/register/register_vfs.go create mode 100644 engine/daemon/graphdriver/register/register_windows.go create mode 100644 engine/daemon/graphdriver/register/register_zfs.go create mode 100644 engine/daemon/graphdriver/vfs/copy_linux.go create mode 100644 engine/daemon/graphdriver/vfs/copy_unsupported.go create mode 100644 engine/daemon/graphdriver/vfs/driver.go create mode 100644 engine/daemon/graphdriver/vfs/quota_linux.go create mode 100644 engine/daemon/graphdriver/vfs/quota_unsupported.go create mode 100644 engine/daemon/graphdriver/vfs/vfs_test.go create mode 100644 engine/daemon/graphdriver/windows/windows.go create mode 100644 engine/daemon/graphdriver/zfs/MAINTAINERS create mode 100644 engine/daemon/graphdriver/zfs/zfs.go create mode 100644 engine/daemon/graphdriver/zfs/zfs_freebsd.go create mode 100644 engine/daemon/graphdriver/zfs/zfs_linux.go create mode 100644 engine/daemon/graphdriver/zfs/zfs_test.go create mode 100644 engine/daemon/graphdriver/zfs/zfs_unsupported.go create mode 100644 engine/daemon/health.go create mode 100644 engine/daemon/health_test.go create mode 100644 engine/daemon/images/cache.go create mode 100644 engine/daemon/images/image.go create mode 100644 engine/daemon/images/image_builder.go create mode 100644 engine/daemon/images/image_commit.go create mode 100644 engine/daemon/images/image_delete.go create mode 100644 engine/daemon/images/image_events.go create mode 100644 engine/daemon/images/image_exporter.go create mode 100644 engine/daemon/images/image_history.go create mode 100644 engine/daemon/images/image_import.go create mode 100644 engine/daemon/images/image_inspect.go create mode 100644 engine/daemon/images/image_prune.go create mode 100644 engine/daemon/images/image_pull.go create mode 100644 engine/daemon/images/image_push.go create mode 100644 engine/daemon/images/image_search.go create mode 100644 engine/daemon/images/image_search_test.go create mode 100644 engine/daemon/images/image_tag.go create mode 100644 engine/daemon/images/image_unix.go create mode 100644 engine/daemon/images/image_windows.go create mode 100644 engine/daemon/images/images.go create mode 100644 engine/daemon/images/locals.go create mode 100644 engine/daemon/images/service.go create mode 100644 engine/daemon/info.go create mode 100644 engine/daemon/info_unix.go create mode 100644 engine/daemon/info_unix_test.go create mode 100644 engine/daemon/info_windows.go 
create mode 100644 engine/daemon/initlayer/setup_unix.go create mode 100644 engine/daemon/initlayer/setup_windows.go create mode 100644 engine/daemon/inspect.go create mode 100644 engine/daemon/inspect_linux.go create mode 100644 engine/daemon/inspect_test.go create mode 100644 engine/daemon/inspect_windows.go create mode 100644 engine/daemon/keys.go create mode 100644 engine/daemon/keys_unsupported.go create mode 100644 engine/daemon/kill.go create mode 100644 engine/daemon/links.go create mode 100644 engine/daemon/links/links.go create mode 100644 engine/daemon/links/links_test.go create mode 100644 engine/daemon/list.go create mode 100644 engine/daemon/list_test.go create mode 100644 engine/daemon/list_unix.go create mode 100644 engine/daemon/list_windows.go create mode 100644 engine/daemon/listeners/group_unix.go create mode 100644 engine/daemon/listeners/listeners_linux.go create mode 100644 engine/daemon/listeners/listeners_windows.go create mode 100644 engine/daemon/logdrivers_linux.go create mode 100644 engine/daemon/logdrivers_windows.go create mode 100644 engine/daemon/logger/adapter.go create mode 100644 engine/daemon/logger/adapter_test.go create mode 100644 engine/daemon/logger/awslogs/cloudwatchlogs.go create mode 100644 engine/daemon/logger/awslogs/cloudwatchlogs_test.go create mode 100644 engine/daemon/logger/awslogs/cwlogsiface_mock_test.go create mode 100644 engine/daemon/logger/copier.go create mode 100644 engine/daemon/logger/copier_test.go create mode 100644 engine/daemon/logger/etwlogs/etwlogs_windows.go create mode 100644 engine/daemon/logger/factory.go create mode 100644 engine/daemon/logger/fluentd/fluentd.go create mode 100644 engine/daemon/logger/gcplogs/gcplogging.go create mode 100644 engine/daemon/logger/gcplogs/gcplogging_linux.go create mode 100644 engine/daemon/logger/gcplogs/gcplogging_others.go create mode 100644 engine/daemon/logger/gelf/gelf.go create mode 100644 engine/daemon/logger/gelf/gelf_test.go create mode 100644 engine/daemon/logger/journald/journald.go create mode 100644 engine/daemon/logger/journald/journald_test.go create mode 100644 engine/daemon/logger/journald/journald_unsupported.go create mode 100644 engine/daemon/logger/journald/read.go create mode 100644 engine/daemon/logger/journald/read_native.go create mode 100644 engine/daemon/logger/journald/read_native_compat.go create mode 100644 engine/daemon/logger/journald/read_unsupported.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonfilelog.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonfilelog_test.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/jsonlog.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go create mode 100644 engine/daemon/logger/jsonfilelog/read.go create mode 100644 engine/daemon/logger/jsonfilelog/read_test.go create mode 100644 engine/daemon/logger/logentries/logentries.go create mode 100644 engine/daemon/logger/logger.go create mode 100644 engine/daemon/logger/logger_test.go create mode 100644 engine/daemon/logger/loggerutils/log_tag.go create mode 100644 engine/daemon/logger/loggerutils/log_tag_test.go create mode 100644 engine/daemon/logger/loggerutils/logfile.go create mode 100644 engine/daemon/logger/loggerutils/multireader/multireader.go create mode 100644 
engine/daemon/logger/loggerutils/multireader/multireader_test.go create mode 100644 engine/daemon/logger/loginfo.go create mode 100644 engine/daemon/logger/metrics.go create mode 100644 engine/daemon/logger/plugin.go create mode 100644 engine/daemon/logger/plugin_unix.go create mode 100644 engine/daemon/logger/plugin_unsupported.go create mode 100644 engine/daemon/logger/proxy.go create mode 100644 engine/daemon/logger/ring.go create mode 100644 engine/daemon/logger/ring_test.go create mode 100644 engine/daemon/logger/splunk/splunk.go create mode 100644 engine/daemon/logger/splunk/splunk_test.go create mode 100644 engine/daemon/logger/splunk/splunkhecmock_test.go create mode 100644 engine/daemon/logger/syslog/syslog.go create mode 100644 engine/daemon/logger/syslog/syslog_test.go create mode 100644 engine/daemon/logger/templates/templates.go create mode 100644 engine/daemon/logger/templates/templates_test.go create mode 100644 engine/daemon/logs.go create mode 100644 engine/daemon/logs_test.go create mode 100644 engine/daemon/metrics.go create mode 100644 engine/daemon/metrics_unix.go create mode 100644 engine/daemon/metrics_unsupported.go create mode 100644 engine/daemon/monitor.go create mode 100644 engine/daemon/mounts.go create mode 100644 engine/daemon/names.go create mode 100644 engine/daemon/names/names.go create mode 100644 engine/daemon/network.go create mode 100644 engine/daemon/network/settings.go create mode 100644 engine/daemon/oci.go create mode 100644 engine/daemon/oci_linux.go create mode 100644 engine/daemon/oci_linux_test.go create mode 100644 engine/daemon/oci_windows.go create mode 100644 engine/daemon/pause.go create mode 100644 engine/daemon/prune.go create mode 100644 engine/daemon/reload.go create mode 100644 engine/daemon/reload_test.go create mode 100644 engine/daemon/reload_unix.go create mode 100644 engine/daemon/reload_windows.go create mode 100644 engine/daemon/rename.go create mode 100644 engine/daemon/resize.go create mode 100644 engine/daemon/resize_test.go create mode 100644 engine/daemon/restart.go create mode 100644 engine/daemon/seccomp_disabled.go create mode 100644 engine/daemon/seccomp_linux.go create mode 100644 engine/daemon/seccomp_unsupported.go create mode 100644 engine/daemon/secrets.go create mode 100644 engine/daemon/secrets_linux.go create mode 100644 engine/daemon/secrets_unsupported.go create mode 100644 engine/daemon/secrets_windows.go create mode 100644 engine/daemon/selinux_linux.go create mode 100644 engine/daemon/selinux_unsupported.go create mode 100644 engine/daemon/start.go create mode 100644 engine/daemon/start_unix.go create mode 100644 engine/daemon/start_windows.go create mode 100644 engine/daemon/stats.go create mode 100644 engine/daemon/stats/collector.go create mode 100644 engine/daemon/stats/collector_unix.go create mode 100644 engine/daemon/stats/collector_windows.go create mode 100644 engine/daemon/stats_collector.go create mode 100644 engine/daemon/stats_unix.go create mode 100644 engine/daemon/stats_windows.go create mode 100644 engine/daemon/stop.go create mode 100644 engine/daemon/testdata/keyfile create mode 100644 engine/daemon/top_unix.go create mode 100644 engine/daemon/top_unix_test.go create mode 100644 engine/daemon/top_windows.go create mode 100644 engine/daemon/trustkey.go create mode 100644 engine/daemon/trustkey_test.go create mode 100644 engine/daemon/unpause.go create mode 100644 engine/daemon/update.go create mode 100644 engine/daemon/update_linux.go create mode 100644 engine/daemon/update_windows.go 
create mode 100644 engine/daemon/util_test.go create mode 100644 engine/daemon/volumes.go create mode 100644 engine/daemon/volumes_linux.go create mode 100644 engine/daemon/volumes_linux_test.go create mode 100644 engine/daemon/volumes_unit_test.go create mode 100644 engine/daemon/volumes_unix.go create mode 100644 engine/daemon/volumes_unix_test.go create mode 100644 engine/daemon/volumes_windows.go create mode 100644 engine/daemon/wait.go create mode 100644 engine/daemon/workdir.go create mode 100644 engine/distribution/config.go create mode 100644 engine/distribution/errors.go create mode 100644 engine/distribution/errors_test.go create mode 100644 engine/distribution/fixtures/validate_manifest/bad_manifest create mode 100644 engine/distribution/fixtures/validate_manifest/extra_data_manifest create mode 100644 engine/distribution/fixtures/validate_manifest/good_manifest create mode 100644 engine/distribution/metadata/metadata.go create mode 100644 engine/distribution/metadata/v1_id_service.go create mode 100644 engine/distribution/metadata/v1_id_service_test.go create mode 100644 engine/distribution/metadata/v2_metadata_service.go create mode 100644 engine/distribution/metadata/v2_metadata_service_test.go create mode 100644 engine/distribution/oci.go create mode 100644 engine/distribution/pull.go create mode 100644 engine/distribution/pull_v1.go create mode 100644 engine/distribution/pull_v2.go create mode 100644 engine/distribution/pull_v2_test.go create mode 100644 engine/distribution/pull_v2_unix.go create mode 100644 engine/distribution/pull_v2_windows.go create mode 100644 engine/distribution/push.go create mode 100644 engine/distribution/push_v1.go create mode 100644 engine/distribution/push_v2.go create mode 100644 engine/distribution/push_v2_test.go create mode 100644 engine/distribution/registry.go create mode 100644 engine/distribution/registry_unit_test.go create mode 100644 engine/distribution/utils/progress.go create mode 100644 engine/distribution/xfer/download.go create mode 100644 engine/distribution/xfer/download_test.go create mode 100644 engine/distribution/xfer/transfer.go create mode 100644 engine/distribution/xfer/transfer_test.go create mode 100644 engine/distribution/xfer/upload.go create mode 100644 engine/distribution/xfer/upload_test.go create mode 100644 engine/dockerversion/useragent.go create mode 100644 engine/dockerversion/version_lib.go create mode 100644 engine/docs/api/v1.18.md create mode 100644 engine/docs/api/v1.19.md create mode 100644 engine/docs/api/v1.20.md create mode 100644 engine/docs/api/v1.21.md create mode 100644 engine/docs/api/v1.22.md create mode 100644 engine/docs/api/v1.23.md create mode 100644 engine/docs/api/v1.24.md create mode 100644 engine/docs/api/version-history.md create mode 100644 engine/docs/contributing/README.md create mode 100644 engine/docs/contributing/images/branch-sig.png create mode 100644 engine/docs/contributing/images/contributor-edit.png create mode 100644 engine/docs/contributing/images/copy_url.png create mode 100644 engine/docs/contributing/images/fork_docker.png create mode 100644 engine/docs/contributing/images/git_bash.png create mode 100644 engine/docs/contributing/images/list_example.png create mode 100644 engine/docs/contributing/set-up-dev-env.md create mode 100644 engine/docs/contributing/set-up-git.md create mode 100644 engine/docs/contributing/software-req-win.md create mode 100644 engine/docs/contributing/software-required.md create mode 100644 engine/docs/contributing/test.md create mode 100644 
engine/docs/contributing/who-written-for.md create mode 100644 engine/docs/static_files/contributors.png create mode 100644 engine/docs/static_files/moby-project-logo.png create mode 100644 engine/errdefs/defs.go create mode 100644 engine/errdefs/doc.go create mode 100644 engine/errdefs/helpers.go create mode 100644 engine/errdefs/helpers_test.go create mode 100644 engine/errdefs/is.go create mode 100644 engine/hack/README.md create mode 100755 engine/hack/ci/arm create mode 100755 engine/hack/ci/experimental create mode 100755 engine/hack/ci/janky create mode 100755 engine/hack/ci/powerpc create mode 100755 engine/hack/ci/z create mode 100755 engine/hack/dind create mode 100755 engine/hack/dockerfile/install/containerd.installer create mode 100755 engine/hack/dockerfile/install/dockercli.installer create mode 100755 engine/hack/dockerfile/install/gometalinter.installer create mode 100755 engine/hack/dockerfile/install/install.sh create mode 100755 engine/hack/dockerfile/install/proxy.installer create mode 100755 engine/hack/dockerfile/install/runc.installer create mode 100755 engine/hack/dockerfile/install/tini.installer create mode 100755 engine/hack/dockerfile/install/tomlv.installer create mode 100755 engine/hack/dockerfile/install/vndr.installer create mode 100755 engine/hack/generate-authors.sh create mode 100755 engine/hack/generate-swagger-api.sh create mode 100644 engine/hack/integration-cli-on-swarm/README.md create mode 100644 engine/hack/integration-cli-on-swarm/agent/Dockerfile create mode 100644 engine/hack/integration-cli-on-swarm/agent/master/call.go create mode 100644 engine/hack/integration-cli-on-swarm/agent/master/master.go create mode 100644 engine/hack/integration-cli-on-swarm/agent/master/set.go create mode 100644 engine/hack/integration-cli-on-swarm/agent/master/set_test.go create mode 100644 engine/hack/integration-cli-on-swarm/agent/types/types.go create mode 100644 engine/hack/integration-cli-on-swarm/agent/vendor.conf create mode 100644 engine/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE create mode 100644 engine/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go create mode 100644 engine/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go create mode 100644 engine/hack/integration-cli-on-swarm/agent/worker/executor.go create mode 100644 engine/hack/integration-cli-on-swarm/agent/worker/worker.go create mode 100644 engine/hack/integration-cli-on-swarm/host/compose.go create mode 100644 engine/hack/integration-cli-on-swarm/host/dockercmd.go create mode 100644 engine/hack/integration-cli-on-swarm/host/enumerate.go create mode 100644 engine/hack/integration-cli-on-swarm/host/enumerate_test.go create mode 100644 engine/hack/integration-cli-on-swarm/host/host.go create mode 100644 engine/hack/integration-cli-on-swarm/host/volume.go create mode 100644 engine/hack/make.ps1 create mode 100755 engine/hack/make.sh create mode 100644 engine/hack/make/.binary create mode 100644 engine/hack/make/.binary-setup create mode 100644 engine/hack/make/.detect-daemon-osarch create mode 100644 engine/hack/make/.ensure-emptyfs create mode 100644 engine/hack/make/.go-autogen create mode 100644 engine/hack/make/.go-autogen.ps1 create mode 100644 engine/hack/make/.integration-daemon-setup create mode 100644 engine/hack/make/.integration-daemon-start create mode 100644 engine/hack/make/.integration-daemon-stop create mode 100644 engine/hack/make/.integration-test-helpers create mode 100644 
engine/hack/make/.resources-windows/common.rc create mode 100644 engine/hack/make/.resources-windows/docker.exe.manifest create mode 100644 engine/hack/make/.resources-windows/docker.ico create mode 100644 engine/hack/make/.resources-windows/docker.png create mode 100644 engine/hack/make/.resources-windows/docker.rc create mode 100644 engine/hack/make/.resources-windows/dockerd.rc create mode 100644 engine/hack/make/.resources-windows/event_messages.mc create mode 100644 engine/hack/make/.resources-windows/resources.go create mode 100644 engine/hack/make/README.md create mode 100644 engine/hack/make/binary create mode 100644 engine/hack/make/binary-daemon create mode 100755 engine/hack/make/build-integration-test-binary create mode 100644 engine/hack/make/cross create mode 100644 engine/hack/make/dynbinary create mode 100644 engine/hack/make/dynbinary-daemon create mode 100644 engine/hack/make/install-binary create mode 100644 engine/hack/make/run create mode 100644 engine/hack/make/test-docker-py create mode 100755 engine/hack/make/test-integration create mode 100755 engine/hack/make/test-integration-cli create mode 100644 engine/hack/make/test-integration-shell create mode 100755 engine/hack/test/e2e-run.sh create mode 100755 engine/hack/test/unit create mode 100644 engine/hack/validate/.swagger-yamllint create mode 100644 engine/hack/validate/.validate create mode 100755 engine/hack/validate/all create mode 100755 engine/hack/validate/changelog-date-descending create mode 100755 engine/hack/validate/changelog-well-formed create mode 100755 engine/hack/validate/dco create mode 100755 engine/hack/validate/default create mode 100755 engine/hack/validate/default-seccomp create mode 100755 engine/hack/validate/deprecate-integration-cli create mode 100755 engine/hack/validate/gometalinter create mode 100644 engine/hack/validate/gometalinter.json create mode 100755 engine/hack/validate/pkg-imports create mode 100755 engine/hack/validate/swagger create mode 100755 engine/hack/validate/swagger-gen create mode 100755 engine/hack/validate/test-imports create mode 100755 engine/hack/validate/toml create mode 100755 engine/hack/validate/vendor create mode 100755 engine/hack/vendor.sh create mode 100644 engine/image/cache/cache.go create mode 100644 engine/image/cache/compare.go create mode 100644 engine/image/cache/compare_test.go create mode 100644 engine/image/fs.go create mode 100644 engine/image/fs_test.go create mode 100644 engine/image/image.go create mode 100644 engine/image/image_test.go create mode 100644 engine/image/rootfs.go create mode 100644 engine/image/spec/README.md create mode 100644 engine/image/spec/v1.1.md create mode 100644 engine/image/spec/v1.2.md create mode 100644 engine/image/spec/v1.md create mode 100644 engine/image/store.go create mode 100644 engine/image/store_test.go create mode 100644 engine/image/tarexport/load.go create mode 100644 engine/image/tarexport/save.go create mode 100644 engine/image/tarexport/tarexport.go create mode 100644 engine/image/v1/imagev1.go create mode 100644 engine/image/v1/imagev1_test.go create mode 100644 engine/integration-cli/benchmark_test.go create mode 100644 engine/integration-cli/check_test.go create mode 100644 engine/integration-cli/checker/checker.go create mode 100644 engine/integration-cli/cli/build/build.go create mode 100644 engine/integration-cli/cli/cli.go create mode 100644 engine/integration-cli/daemon/daemon.go create mode 100644 engine/integration-cli/daemon/daemon_swarm.go create mode 100644 
engine/integration-cli/daemon_swarm_hack_test.go create mode 100644 engine/integration-cli/docker_api_attach_test.go create mode 100644 engine/integration-cli/docker_api_build_test.go create mode 100644 engine/integration-cli/docker_api_build_windows_test.go create mode 100644 engine/integration-cli/docker_api_containers_test.go create mode 100644 engine/integration-cli/docker_api_containers_windows_test.go create mode 100644 engine/integration-cli/docker_api_create_test.go create mode 100644 engine/integration-cli/docker_api_exec_resize_test.go create mode 100644 engine/integration-cli/docker_api_exec_test.go create mode 100644 engine/integration-cli/docker_api_images_test.go create mode 100644 engine/integration-cli/docker_api_inspect_test.go create mode 100644 engine/integration-cli/docker_api_ipcmode_test.go create mode 100644 engine/integration-cli/docker_api_logs_test.go create mode 100644 engine/integration-cli/docker_api_network_test.go create mode 100644 engine/integration-cli/docker_api_stats_test.go create mode 100644 engine/integration-cli/docker_api_swarm_node_test.go create mode 100644 engine/integration-cli/docker_api_swarm_service_test.go create mode 100644 engine/integration-cli/docker_api_swarm_test.go create mode 100644 engine/integration-cli/docker_api_test.go create mode 100644 engine/integration-cli/docker_cli_attach_test.go create mode 100644 engine/integration-cli/docker_cli_attach_unix_test.go create mode 100644 engine/integration-cli/docker_cli_build_test.go create mode 100644 engine/integration-cli/docker_cli_build_unix_test.go create mode 100644 engine/integration-cli/docker_cli_by_digest_test.go create mode 100644 engine/integration-cli/docker_cli_commit_test.go create mode 100644 engine/integration-cli/docker_cli_config_create_test.go create mode 100644 engine/integration-cli/docker_cli_cp_from_container_test.go create mode 100644 engine/integration-cli/docker_cli_cp_test.go create mode 100644 engine/integration-cli/docker_cli_cp_to_container_test.go create mode 100644 engine/integration-cli/docker_cli_cp_to_container_unix_test.go create mode 100644 engine/integration-cli/docker_cli_cp_utils_test.go create mode 100644 engine/integration-cli/docker_cli_create_test.go create mode 100644 engine/integration-cli/docker_cli_daemon_plugins_test.go create mode 100644 engine/integration-cli/docker_cli_daemon_test.go create mode 100644 engine/integration-cli/docker_cli_events_test.go create mode 100644 engine/integration-cli/docker_cli_events_unix_test.go create mode 100644 engine/integration-cli/docker_cli_exec_test.go create mode 100644 engine/integration-cli/docker_cli_exec_unix_test.go create mode 100644 engine/integration-cli/docker_cli_export_import_test.go create mode 100644 engine/integration-cli/docker_cli_external_volume_driver_unix_test.go create mode 100644 engine/integration-cli/docker_cli_health_test.go create mode 100644 engine/integration-cli/docker_cli_history_test.go create mode 100644 engine/integration-cli/docker_cli_images_test.go create mode 100644 engine/integration-cli/docker_cli_import_test.go create mode 100644 engine/integration-cli/docker_cli_info_test.go create mode 100644 engine/integration-cli/docker_cli_info_unix_test.go create mode 100644 engine/integration-cli/docker_cli_inspect_test.go create mode 100644 engine/integration-cli/docker_cli_links_test.go create mode 100644 engine/integration-cli/docker_cli_login_test.go create mode 100644 engine/integration-cli/docker_cli_logout_test.go create mode 100644 
engine/integration-cli/docker_cli_logs_bench_test.go create mode 100644 engine/integration-cli/docker_cli_logs_test.go create mode 100644 engine/integration-cli/docker_cli_netmode_test.go create mode 100644 engine/integration-cli/docker_cli_network_unix_test.go create mode 100644 engine/integration-cli/docker_cli_plugins_logdriver_test.go create mode 100644 engine/integration-cli/docker_cli_plugins_test.go create mode 100644 engine/integration-cli/docker_cli_port_test.go create mode 100644 engine/integration-cli/docker_cli_proxy_test.go create mode 100644 engine/integration-cli/docker_cli_prune_unix_test.go create mode 100644 engine/integration-cli/docker_cli_ps_test.go create mode 100644 engine/integration-cli/docker_cli_pull_local_test.go create mode 100644 engine/integration-cli/docker_cli_pull_test.go create mode 100644 engine/integration-cli/docker_cli_push_test.go create mode 100644 engine/integration-cli/docker_cli_registry_user_agent_test.go create mode 100644 engine/integration-cli/docker_cli_restart_test.go create mode 100644 engine/integration-cli/docker_cli_rmi_test.go create mode 100644 engine/integration-cli/docker_cli_run_test.go create mode 100644 engine/integration-cli/docker_cli_run_unix_test.go create mode 100644 engine/integration-cli/docker_cli_save_load_test.go create mode 100644 engine/integration-cli/docker_cli_save_load_unix_test.go create mode 100644 engine/integration-cli/docker_cli_search_test.go create mode 100644 engine/integration-cli/docker_cli_secret_create_test.go create mode 100644 engine/integration-cli/docker_cli_service_create_test.go create mode 100644 engine/integration-cli/docker_cli_service_health_test.go create mode 100644 engine/integration-cli/docker_cli_service_logs_test.go create mode 100644 engine/integration-cli/docker_cli_service_scale_test.go create mode 100644 engine/integration-cli/docker_cli_service_update_test.go create mode 100644 engine/integration-cli/docker_cli_sni_test.go create mode 100644 engine/integration-cli/docker_cli_start_test.go create mode 100644 engine/integration-cli/docker_cli_stats_test.go create mode 100644 engine/integration-cli/docker_cli_swarm_test.go create mode 100644 engine/integration-cli/docker_cli_swarm_unix_test.go create mode 100644 engine/integration-cli/docker_cli_top_test.go create mode 100644 engine/integration-cli/docker_cli_update_unix_test.go create mode 100644 engine/integration-cli/docker_cli_userns_test.go create mode 100644 engine/integration-cli/docker_cli_v2_only_test.go create mode 100644 engine/integration-cli/docker_cli_volume_test.go create mode 100644 engine/integration-cli/docker_cli_wait_test.go create mode 100644 engine/integration-cli/docker_deprecated_api_v124_test.go create mode 100644 engine/integration-cli/docker_deprecated_api_v124_unix_test.go create mode 100644 engine/integration-cli/docker_hub_pull_suite_test.go create mode 100644 engine/integration-cli/docker_utils_test.go create mode 100644 engine/integration-cli/environment/environment.go create mode 100644 engine/integration-cli/events_utils_test.go create mode 100755 engine/integration-cli/fixtures/auth/docker-credential-shell-test create mode 100644 engine/integration-cli/fixtures/credentialspecs/valid.json create mode 120000 engine/integration-cli/fixtures/https/ca.pem create mode 120000 engine/integration-cli/fixtures/https/client-cert.pem create mode 120000 engine/integration-cli/fixtures/https/client-key.pem create mode 100644 engine/integration-cli/fixtures/https/client-rogue-cert.pem create mode 100644 
engine/integration-cli/fixtures/https/client-rogue-key.pem create mode 120000 engine/integration-cli/fixtures/https/server-cert.pem create mode 120000 engine/integration-cli/fixtures/https/server-key.pem create mode 100644 engine/integration-cli/fixtures/https/server-rogue-cert.pem create mode 100644 engine/integration-cli/fixtures/https/server-rogue-key.pem create mode 100644 engine/integration-cli/fixtures/registry/cert.pem create mode 100644 engine/integration-cli/fixtures_linux_daemon_test.go create mode 100644 engine/integration-cli/requirement/requirement.go create mode 100644 engine/integration-cli/requirements_test.go create mode 100644 engine/integration-cli/requirements_unix_test.go create mode 100644 engine/integration-cli/test_vars_exec_test.go create mode 100644 engine/integration-cli/test_vars_noexec_test.go create mode 100644 engine/integration-cli/test_vars_noseccomp_test.go create mode 100644 engine/integration-cli/test_vars_seccomp_test.go create mode 100644 engine/integration-cli/test_vars_test.go create mode 100644 engine/integration-cli/test_vars_unix_test.go create mode 100644 engine/integration-cli/test_vars_windows_test.go create mode 100644 engine/integration-cli/testdata/emptyLayer.tar create mode 100644 engine/integration-cli/utils_test.go create mode 100644 engine/integration/build/build_session_test.go create mode 100644 engine/integration/build/build_squash_test.go create mode 100644 engine/integration/build/build_test.go create mode 100644 engine/integration/build/main_test.go create mode 100644 engine/integration/config/config_test.go create mode 100644 engine/integration/config/main_test.go create mode 100644 engine/integration/container/copy_test.go create mode 100644 engine/integration/container/create_test.go create mode 100644 engine/integration/container/daemon_linux_test.go create mode 100644 engine/integration/container/diff_test.go create mode 100644 engine/integration/container/exec_test.go create mode 100644 engine/integration/container/export_test.go create mode 100644 engine/integration/container/health_test.go create mode 100644 engine/integration/container/inspect_test.go create mode 100644 engine/integration/container/kill_test.go create mode 100644 engine/integration/container/links_linux_test.go create mode 100644 engine/integration/container/logs_test.go create mode 100644 engine/integration/container/main_test.go create mode 100644 engine/integration/container/mounts_linux_test.go create mode 100644 engine/integration/container/nat_test.go create mode 100644 engine/integration/container/pause_test.go create mode 100644 engine/integration/container/ps_test.go create mode 100644 engine/integration/container/remove_test.go create mode 100644 engine/integration/container/rename_test.go create mode 100644 engine/integration/container/resize_test.go create mode 100644 engine/integration/container/restart_test.go create mode 100644 engine/integration/container/stats_test.go create mode 100644 engine/integration/container/stop_test.go create mode 100644 engine/integration/container/update_linux_test.go create mode 100644 engine/integration/container/update_test.go create mode 100644 engine/integration/doc.go create mode 100644 engine/integration/image/commit_test.go create mode 100644 engine/integration/image/import_test.go create mode 100644 engine/integration/image/main_test.go create mode 100644 engine/integration/image/remove_test.go create mode 100644 engine/integration/image/tag_test.go create mode 100644 
engine/integration/internal/container/container.go create mode 100644 engine/integration/internal/container/exec.go create mode 100644 engine/integration/internal/container/ops.go create mode 100644 engine/integration/internal/container/states.go create mode 100644 engine/integration/internal/network/network.go create mode 100644 engine/integration/internal/network/ops.go create mode 100644 engine/integration/internal/requirement/requirement.go create mode 100644 engine/integration/internal/swarm/service.go create mode 100644 engine/integration/network/delete_test.go create mode 100644 engine/integration/network/helpers.go create mode 100644 engine/integration/network/inspect_test.go create mode 100644 engine/integration/network/ipvlan/ipvlan_test.go create mode 100644 engine/integration/network/ipvlan/main_test.go create mode 100644 engine/integration/network/macvlan/macvlan_test.go create mode 100644 engine/integration/network/macvlan/main_test.go create mode 100644 engine/integration/network/main_test.go create mode 100644 engine/integration/network/service_test.go create mode 100644 engine/integration/plugin/authz/authz_plugin_test.go create mode 100644 engine/integration/plugin/authz/authz_plugin_v2_test.go create mode 100644 engine/integration/plugin/authz/main_test.go create mode 100644 engine/integration/plugin/graphdriver/external_test.go create mode 100644 engine/integration/plugin/graphdriver/main_test.go create mode 100644 engine/integration/plugin/logging/cmd/close_on_start/main.go create mode 100644 engine/integration/plugin/logging/cmd/close_on_start/main_test.go create mode 100644 engine/integration/plugin/logging/cmd/cmd_test.go create mode 100644 engine/integration/plugin/logging/cmd/dummy/main.go create mode 100644 engine/integration/plugin/logging/cmd/dummy/main_test.go create mode 100644 engine/integration/plugin/logging/helpers_test.go create mode 100644 engine/integration/plugin/logging/logging_test.go create mode 100644 engine/integration/plugin/logging/main_test.go create mode 100644 engine/integration/plugin/logging/validation_test.go create mode 100644 engine/integration/plugin/pkg_test.go create mode 100644 engine/integration/plugin/volumes/cmd/cmd_test.go create mode 100644 engine/integration/plugin/volumes/cmd/dummy/main.go create mode 100644 engine/integration/plugin/volumes/cmd/dummy/main_test.go create mode 100644 engine/integration/plugin/volumes/helpers_test.go create mode 100644 engine/integration/plugin/volumes/main_test.go create mode 100644 engine/integration/plugin/volumes/mounts_test.go create mode 100644 engine/integration/secret/main_test.go create mode 100644 engine/integration/secret/secret_test.go create mode 100644 engine/integration/service/create_test.go create mode 100644 engine/integration/service/inspect_test.go create mode 100644 engine/integration/service/main_test.go create mode 100644 engine/integration/service/network_test.go create mode 100644 engine/integration/service/plugin_test.go create mode 100644 engine/integration/session/main_test.go create mode 100644 engine/integration/session/session_test.go create mode 100644 engine/integration/system/cgroupdriver_systemd_test.go create mode 100644 engine/integration/system/event_test.go create mode 100644 engine/integration/system/info_linux_test.go create mode 100644 engine/integration/system/info_test.go create mode 100644 engine/integration/system/login_test.go create mode 100644 engine/integration/system/main_test.go create mode 100644 engine/integration/system/version_test.go 
create mode 100644 engine/integration/testdata/https/ca.pem create mode 100644 engine/integration/testdata/https/client-cert.pem create mode 100644 engine/integration/testdata/https/client-key.pem create mode 100644 engine/integration/testdata/https/server-cert.pem create mode 100644 engine/integration/testdata/https/server-key.pem create mode 100644 engine/integration/volume/main_test.go create mode 100644 engine/integration/volume/volume_test.go create mode 100644 engine/internal/test/daemon/config.go create mode 100644 engine/internal/test/daemon/container.go create mode 100644 engine/internal/test/daemon/daemon.go create mode 100644 engine/internal/test/daemon/daemon_unix.go create mode 100644 engine/internal/test/daemon/daemon_windows.go create mode 100644 engine/internal/test/daemon/node.go create mode 100644 engine/internal/test/daemon/ops.go create mode 100644 engine/internal/test/daemon/plugin.go create mode 100644 engine/internal/test/daemon/secret.go create mode 100644 engine/internal/test/daemon/service.go create mode 100644 engine/internal/test/daemon/swarm.go create mode 100644 engine/internal/test/environment/clean.go create mode 100644 engine/internal/test/environment/environment.go create mode 100644 engine/internal/test/environment/protect.go create mode 100644 engine/internal/test/fakecontext/context.go create mode 100644 engine/internal/test/fakegit/fakegit.go create mode 100644 engine/internal/test/fakestorage/fixtures.go create mode 100644 engine/internal/test/fakestorage/storage.go create mode 100644 engine/internal/test/fixtures/load/frozen.go create mode 100644 engine/internal/test/fixtures/plugin/basic/basic.go create mode 100644 engine/internal/test/fixtures/plugin/plugin.go create mode 100644 engine/internal/test/helper.go create mode 100644 engine/internal/test/registry/ops.go create mode 100644 engine/internal/test/registry/registry.go create mode 100644 engine/internal/test/registry/registry_mock.go create mode 100644 engine/internal/test/request/npipe.go create mode 100644 engine/internal/test/request/npipe_windows.go create mode 100644 engine/internal/test/request/ops.go create mode 100644 engine/internal/test/request/request.go create mode 100644 engine/internal/testutil/helpers.go create mode 100644 engine/internal/testutil/stringutils.go create mode 100644 engine/internal/testutil/stringutils_test.go create mode 100644 engine/layer/empty.go create mode 100644 engine/layer/empty_test.go create mode 100644 engine/layer/filestore.go create mode 100644 engine/layer/filestore_test.go create mode 100644 engine/layer/filestore_unix.go create mode 100644 engine/layer/filestore_windows.go create mode 100644 engine/layer/layer.go create mode 100644 engine/layer/layer_store.go create mode 100644 engine/layer/layer_store_windows.go create mode 100644 engine/layer/layer_test.go create mode 100644 engine/layer/layer_unix.go create mode 100644 engine/layer/layer_unix_test.go create mode 100644 engine/layer/layer_windows.go create mode 100644 engine/layer/migration.go create mode 100644 engine/layer/migration_test.go create mode 100644 engine/layer/mount_test.go create mode 100644 engine/layer/mounted_layer.go create mode 100644 engine/layer/ro_layer.go create mode 100644 engine/layer/ro_layer_windows.go create mode 100644 engine/libcontainerd/client_daemon.go create mode 100644 engine/libcontainerd/client_daemon_linux.go create mode 100644 engine/libcontainerd/client_daemon_windows.go create mode 100644 engine/libcontainerd/client_local_windows.go create mode 100644 
engine/libcontainerd/errors.go create mode 100644 engine/libcontainerd/process_windows.go create mode 100644 engine/libcontainerd/queue.go create mode 100644 engine/libcontainerd/queue_test.go create mode 100644 engine/libcontainerd/remote_daemon.go create mode 100644 engine/libcontainerd/remote_daemon_linux.go create mode 100644 engine/libcontainerd/remote_daemon_options.go create mode 100644 engine/libcontainerd/remote_daemon_options_linux.go create mode 100644 engine/libcontainerd/remote_daemon_windows.go create mode 100644 engine/libcontainerd/remote_local.go create mode 100644 engine/libcontainerd/types.go create mode 100644 engine/libcontainerd/types_linux.go create mode 100644 engine/libcontainerd/types_windows.go create mode 100644 engine/libcontainerd/utils_linux.go create mode 100644 engine/libcontainerd/utils_windows.go create mode 100644 engine/libcontainerd/utils_windows_test.go create mode 100644 engine/migrate/v1/migratev1.go create mode 100644 engine/migrate/v1/migratev1_test.go create mode 100644 engine/oci/defaults.go create mode 100644 engine/oci/devices_linux.go create mode 100644 engine/oci/devices_unsupported.go create mode 100644 engine/oci/namespaces.go create mode 100644 engine/opts/address_pools.go create mode 100644 engine/opts/address_pools_test.go create mode 100644 engine/opts/env.go create mode 100644 engine/opts/env_test.go create mode 100644 engine/opts/hosts.go create mode 100644 engine/opts/hosts_test.go create mode 100644 engine/opts/hosts_unix.go create mode 100644 engine/opts/hosts_windows.go create mode 100644 engine/opts/ip.go create mode 100644 engine/opts/ip_test.go create mode 100644 engine/opts/opts.go create mode 100644 engine/opts/opts_test.go create mode 100644 engine/opts/opts_unix.go create mode 100644 engine/opts/opts_windows.go create mode 100644 engine/opts/quotedstring.go create mode 100644 engine/opts/quotedstring_test.go create mode 100644 engine/opts/runtime.go create mode 100644 engine/opts/ulimit.go create mode 100644 engine/opts/ulimit_test.go create mode 100644 engine/pkg/README.md create mode 100644 engine/pkg/aaparser/aaparser.go create mode 100644 engine/pkg/aaparser/aaparser_test.go create mode 100644 engine/pkg/archive/README.md create mode 100644 engine/pkg/archive/archive.go create mode 100644 engine/pkg/archive/archive_linux.go create mode 100644 engine/pkg/archive/archive_linux_test.go create mode 100644 engine/pkg/archive/archive_other.go create mode 100644 engine/pkg/archive/archive_test.go create mode 100644 engine/pkg/archive/archive_unix.go create mode 100644 engine/pkg/archive/archive_unix_test.go create mode 100644 engine/pkg/archive/archive_windows.go create mode 100644 engine/pkg/archive/archive_windows_test.go create mode 100644 engine/pkg/archive/changes.go create mode 100644 engine/pkg/archive/changes_linux.go create mode 100644 engine/pkg/archive/changes_other.go create mode 100644 engine/pkg/archive/changes_posix_test.go create mode 100644 engine/pkg/archive/changes_test.go create mode 100644 engine/pkg/archive/changes_unix.go create mode 100644 engine/pkg/archive/changes_windows.go create mode 100644 engine/pkg/archive/copy.go create mode 100644 engine/pkg/archive/copy_unix.go create mode 100644 engine/pkg/archive/copy_unix_test.go create mode 100644 engine/pkg/archive/copy_windows.go create mode 100644 engine/pkg/archive/diff.go create mode 100644 engine/pkg/archive/diff_test.go create mode 100644 engine/pkg/archive/example_changes.go create mode 100644 engine/pkg/archive/testdata/broken.tar create mode 
100644 engine/pkg/archive/time_linux.go create mode 100644 engine/pkg/archive/time_unsupported.go create mode 100644 engine/pkg/archive/utils_test.go create mode 100644 engine/pkg/archive/whiteouts.go create mode 100644 engine/pkg/archive/wrap.go create mode 100644 engine/pkg/archive/wrap_test.go create mode 100644 engine/pkg/authorization/api.go create mode 100644 engine/pkg/authorization/api_test.go create mode 100644 engine/pkg/authorization/authz.go create mode 100644 engine/pkg/authorization/authz_unix_test.go create mode 100644 engine/pkg/authorization/middleware.go create mode 100644 engine/pkg/authorization/middleware_test.go create mode 100644 engine/pkg/authorization/middleware_unix_test.go create mode 100644 engine/pkg/authorization/plugin.go create mode 100644 engine/pkg/authorization/response.go create mode 100644 engine/pkg/broadcaster/unbuffered.go create mode 100644 engine/pkg/broadcaster/unbuffered_test.go create mode 100644 engine/pkg/chrootarchive/archive.go create mode 100644 engine/pkg/chrootarchive/archive_test.go create mode 100644 engine/pkg/chrootarchive/archive_unix.go create mode 100644 engine/pkg/chrootarchive/archive_windows.go create mode 100644 engine/pkg/chrootarchive/chroot_linux.go create mode 100644 engine/pkg/chrootarchive/chroot_unix.go create mode 100644 engine/pkg/chrootarchive/diff.go create mode 100644 engine/pkg/chrootarchive/diff_unix.go create mode 100644 engine/pkg/chrootarchive/diff_windows.go create mode 100644 engine/pkg/chrootarchive/init_unix.go create mode 100644 engine/pkg/chrootarchive/init_windows.go create mode 100644 engine/pkg/containerfs/archiver.go create mode 100644 engine/pkg/containerfs/containerfs.go create mode 100644 engine/pkg/containerfs/containerfs_unix.go create mode 100644 engine/pkg/containerfs/containerfs_windows.go create mode 100644 engine/pkg/devicemapper/devmapper.go create mode 100644 engine/pkg/devicemapper/devmapper_log.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper_dynamic.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go create mode 100644 engine/pkg/devicemapper/ioctl.go create mode 100644 engine/pkg/devicemapper/log.go create mode 100644 engine/pkg/directory/directory.go create mode 100644 engine/pkg/directory/directory_test.go create mode 100644 engine/pkg/directory/directory_unix.go create mode 100644 engine/pkg/directory/directory_windows.go create mode 100644 engine/pkg/discovery/README.md create mode 100644 engine/pkg/discovery/backends.go create mode 100644 engine/pkg/discovery/discovery.go create mode 100644 engine/pkg/discovery/discovery_test.go create mode 100644 engine/pkg/discovery/entry.go create mode 100644 engine/pkg/discovery/file/file.go create mode 100644 engine/pkg/discovery/file/file_test.go create mode 100644 engine/pkg/discovery/generator.go create mode 100644 engine/pkg/discovery/generator_test.go create mode 100644 engine/pkg/discovery/kv/kv.go create mode 100644 engine/pkg/discovery/kv/kv_test.go create mode 100644 engine/pkg/discovery/memory/memory.go create mode 100644 engine/pkg/discovery/memory/memory_test.go create mode 100644 engine/pkg/discovery/nodes/nodes.go create mode 100644 engine/pkg/discovery/nodes/nodes_test.go create mode 100644 engine/pkg/dmesg/dmesg_linux.go create mode 
100644 engine/pkg/dmesg/dmesg_linux_test.go create mode 100644 engine/pkg/filenotify/filenotify.go create mode 100644 engine/pkg/filenotify/fsnotify.go create mode 100644 engine/pkg/filenotify/poller.go create mode 100644 engine/pkg/filenotify/poller_test.go create mode 100644 engine/pkg/fileutils/fileutils.go create mode 100644 engine/pkg/fileutils/fileutils_darwin.go create mode 100644 engine/pkg/fileutils/fileutils_test.go create mode 100644 engine/pkg/fileutils/fileutils_unix.go create mode 100644 engine/pkg/fileutils/fileutils_windows.go create mode 100644 engine/pkg/fsutils/fsutils_linux.go create mode 100644 engine/pkg/fsutils/fsutils_linux_test.go create mode 100644 engine/pkg/homedir/homedir_linux.go create mode 100644 engine/pkg/homedir/homedir_others.go create mode 100644 engine/pkg/homedir/homedir_test.go create mode 100644 engine/pkg/homedir/homedir_unix.go create mode 100644 engine/pkg/homedir/homedir_windows.go create mode 100644 engine/pkg/idtools/idtools.go create mode 100644 engine/pkg/idtools/idtools_unix.go create mode 100644 engine/pkg/idtools/idtools_unix_test.go create mode 100644 engine/pkg/idtools/idtools_windows.go create mode 100644 engine/pkg/idtools/usergroupadd_linux.go create mode 100644 engine/pkg/idtools/usergroupadd_unsupported.go create mode 100644 engine/pkg/idtools/utils_unix.go create mode 100644 engine/pkg/ioutils/buffer.go create mode 100644 engine/pkg/ioutils/buffer_test.go create mode 100644 engine/pkg/ioutils/bytespipe.go create mode 100644 engine/pkg/ioutils/bytespipe_test.go create mode 100644 engine/pkg/ioutils/fswriters.go create mode 100644 engine/pkg/ioutils/fswriters_test.go create mode 100644 engine/pkg/ioutils/readers.go create mode 100644 engine/pkg/ioutils/readers_test.go create mode 100644 engine/pkg/ioutils/temp_unix.go create mode 100644 engine/pkg/ioutils/temp_windows.go create mode 100644 engine/pkg/ioutils/writeflusher.go create mode 100644 engine/pkg/ioutils/writers.go create mode 100644 engine/pkg/ioutils/writers_test.go create mode 100644 engine/pkg/jsonmessage/jsonmessage.go create mode 100644 engine/pkg/jsonmessage/jsonmessage_test.go create mode 100644 engine/pkg/locker/README.md create mode 100644 engine/pkg/locker/locker.go create mode 100644 engine/pkg/locker/locker_test.go create mode 100644 engine/pkg/longpath/longpath.go create mode 100644 engine/pkg/longpath/longpath_test.go create mode 100644 engine/pkg/loopback/attach_loopback.go create mode 100644 engine/pkg/loopback/ioctl.go create mode 100644 engine/pkg/loopback/loop_wrapper.go create mode 100644 engine/pkg/loopback/loopback.go create mode 100644 engine/pkg/mount/flags.go create mode 100644 engine/pkg/mount/flags_freebsd.go create mode 100644 engine/pkg/mount/flags_linux.go create mode 100644 engine/pkg/mount/flags_unsupported.go create mode 100644 engine/pkg/mount/mount.go create mode 100644 engine/pkg/mount/mount_unix_test.go create mode 100644 engine/pkg/mount/mounter_freebsd.go create mode 100644 engine/pkg/mount/mounter_linux.go create mode 100644 engine/pkg/mount/mounter_linux_test.go create mode 100644 engine/pkg/mount/mounter_unsupported.go create mode 100644 engine/pkg/mount/mountinfo.go create mode 100644 engine/pkg/mount/mountinfo_freebsd.go create mode 100644 engine/pkg/mount/mountinfo_linux.go create mode 100644 engine/pkg/mount/mountinfo_linux_test.go create mode 100644 engine/pkg/mount/mountinfo_unsupported.go create mode 100644 engine/pkg/mount/mountinfo_windows.go create mode 100644 engine/pkg/mount/sharedsubtree_linux.go create mode 100644 
engine/pkg/mount/sharedsubtree_linux_test.go create mode 100644 engine/pkg/namesgenerator/cmd/names-generator/main.go create mode 100644 engine/pkg/namesgenerator/names-generator.go create mode 100644 engine/pkg/namesgenerator/names-generator_test.go create mode 100644 engine/pkg/parsers/kernel/kernel.go create mode 100644 engine/pkg/parsers/kernel/kernel_darwin.go create mode 100644 engine/pkg/parsers/kernel/kernel_unix.go create mode 100644 engine/pkg/parsers/kernel/kernel_unix_test.go create mode 100644 engine/pkg/parsers/kernel/kernel_windows.go create mode 100644 engine/pkg/parsers/kernel/uname_linux.go create mode 100644 engine/pkg/parsers/kernel/uname_solaris.go create mode 100644 engine/pkg/parsers/kernel/uname_unsupported.go create mode 100644 engine/pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 engine/pkg/parsers/operatingsystem/operatingsystem_unix.go create mode 100644 engine/pkg/parsers/operatingsystem/operatingsystem_unix_test.go create mode 100644 engine/pkg/parsers/operatingsystem/operatingsystem_windows.go create mode 100644 engine/pkg/parsers/parsers.go create mode 100644 engine/pkg/parsers/parsers_test.go create mode 100644 engine/pkg/pidfile/pidfile.go create mode 100644 engine/pkg/pidfile/pidfile_darwin.go create mode 100644 engine/pkg/pidfile/pidfile_test.go create mode 100644 engine/pkg/pidfile/pidfile_unix.go create mode 100644 engine/pkg/pidfile/pidfile_windows.go create mode 100644 engine/pkg/platform/architecture_linux.go create mode 100644 engine/pkg/platform/architecture_unix.go create mode 100644 engine/pkg/platform/architecture_windows.go create mode 100644 engine/pkg/platform/platform.go create mode 100644 engine/pkg/plugingetter/getter.go create mode 100644 engine/pkg/plugins/client.go create mode 100644 engine/pkg/plugins/client_test.go create mode 100644 engine/pkg/plugins/discovery.go create mode 100644 engine/pkg/plugins/discovery_test.go create mode 100644 engine/pkg/plugins/discovery_unix.go create mode 100644 engine/pkg/plugins/discovery_unix_test.go create mode 100644 engine/pkg/plugins/discovery_windows.go create mode 100644 engine/pkg/plugins/errors.go create mode 100644 engine/pkg/plugins/plugin_test.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/README.md create mode 100644 engine/pkg/plugins/pluginrpc-gen/fixtures/foo.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/main.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/parser.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/parser_test.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/template.go create mode 100644 engine/pkg/plugins/plugins.go create mode 100644 engine/pkg/plugins/plugins_unix.go create mode 100644 engine/pkg/plugins/plugins_windows.go create mode 100644 engine/pkg/plugins/transport/http.go create mode 100644 engine/pkg/plugins/transport/http_test.go create mode 100644 engine/pkg/plugins/transport/transport.go create mode 100644 engine/pkg/pools/pools.go create mode 100644 engine/pkg/pools/pools_test.go create mode 100644 engine/pkg/progress/progress.go create mode 100644 engine/pkg/progress/progressreader.go create mode 100644 engine/pkg/progress/progressreader_test.go create mode 100644 engine/pkg/pubsub/publisher.go create mode 100644 engine/pkg/pubsub/publisher_test.go create mode 100644 engine/pkg/reexec/README.md create mode 100644 engine/pkg/reexec/command_linux.go create mode 100644 engine/pkg/reexec/command_unix.go create mode 
100644 engine/pkg/reexec/command_unsupported.go create mode 100644 engine/pkg/reexec/command_windows.go create mode 100644 engine/pkg/reexec/reexec.go create mode 100644 engine/pkg/reexec/reexec_test.go create mode 100644 engine/pkg/signal/README.md create mode 100644 engine/pkg/signal/signal.go create mode 100644 engine/pkg/signal/signal_darwin.go create mode 100644 engine/pkg/signal/signal_freebsd.go create mode 100644 engine/pkg/signal/signal_linux.go create mode 100644 engine/pkg/signal/signal_linux_test.go create mode 100644 engine/pkg/signal/signal_test.go create mode 100644 engine/pkg/signal/signal_unix.go create mode 100644 engine/pkg/signal/signal_unsupported.go create mode 100644 engine/pkg/signal/signal_windows.go create mode 100644 engine/pkg/signal/testfiles/main.go create mode 100644 engine/pkg/signal/trap.go create mode 100644 engine/pkg/signal/trap_linux_test.go create mode 100644 engine/pkg/stdcopy/stdcopy.go create mode 100644 engine/pkg/stdcopy/stdcopy_test.go create mode 100644 engine/pkg/streamformatter/streamformatter.go create mode 100644 engine/pkg/streamformatter/streamformatter_test.go create mode 100644 engine/pkg/streamformatter/streamwriter.go create mode 100644 engine/pkg/streamformatter/streamwriter_test.go create mode 100644 engine/pkg/stringid/README.md create mode 100644 engine/pkg/stringid/stringid.go create mode 100644 engine/pkg/stringid/stringid_test.go create mode 100644 engine/pkg/symlink/LICENSE.APACHE create mode 100644 engine/pkg/symlink/LICENSE.BSD create mode 100644 engine/pkg/symlink/README.md create mode 100644 engine/pkg/symlink/fs.go create mode 100644 engine/pkg/symlink/fs_unix.go create mode 100644 engine/pkg/symlink/fs_unix_test.go create mode 100644 engine/pkg/symlink/fs_windows.go create mode 100644 engine/pkg/sysinfo/README.md create mode 100644 engine/pkg/sysinfo/numcpu.go create mode 100644 engine/pkg/sysinfo/numcpu_linux.go create mode 100644 engine/pkg/sysinfo/numcpu_windows.go create mode 100644 engine/pkg/sysinfo/sysinfo.go create mode 100644 engine/pkg/sysinfo/sysinfo_linux.go create mode 100644 engine/pkg/sysinfo/sysinfo_linux_test.go create mode 100644 engine/pkg/sysinfo/sysinfo_test.go create mode 100644 engine/pkg/sysinfo/sysinfo_unix.go create mode 100644 engine/pkg/sysinfo/sysinfo_windows.go create mode 100644 engine/pkg/system/chtimes.go create mode 100644 engine/pkg/system/chtimes_test.go create mode 100644 engine/pkg/system/chtimes_unix.go create mode 100644 engine/pkg/system/chtimes_unix_test.go create mode 100644 engine/pkg/system/chtimes_windows.go create mode 100644 engine/pkg/system/chtimes_windows_test.go create mode 100644 engine/pkg/system/errors.go create mode 100644 engine/pkg/system/exitcode.go create mode 100644 engine/pkg/system/filesys.go create mode 100644 engine/pkg/system/filesys_windows.go create mode 100644 engine/pkg/system/init.go create mode 100644 engine/pkg/system/init_unix.go create mode 100644 engine/pkg/system/init_windows.go create mode 100644 engine/pkg/system/lcow.go create mode 100644 engine/pkg/system/lcow_unix.go create mode 100644 engine/pkg/system/lcow_windows.go create mode 100644 engine/pkg/system/lstat_unix.go create mode 100644 engine/pkg/system/lstat_unix_test.go create mode 100644 engine/pkg/system/lstat_windows.go create mode 100644 engine/pkg/system/meminfo.go create mode 100644 engine/pkg/system/meminfo_linux.go create mode 100644 engine/pkg/system/meminfo_unix_test.go create mode 100644 engine/pkg/system/meminfo_unsupported.go create mode 100644 
engine/pkg/system/meminfo_windows.go create mode 100644 engine/pkg/system/mknod.go create mode 100644 engine/pkg/system/mknod_windows.go create mode 100644 engine/pkg/system/path.go create mode 100644 engine/pkg/system/path_windows_test.go create mode 100644 engine/pkg/system/process_unix.go create mode 100644 engine/pkg/system/process_windows.go create mode 100644 engine/pkg/system/rm.go create mode 100644 engine/pkg/system/rm_test.go create mode 100644 engine/pkg/system/stat_darwin.go create mode 100644 engine/pkg/system/stat_freebsd.go create mode 100644 engine/pkg/system/stat_linux.go create mode 100644 engine/pkg/system/stat_openbsd.go create mode 100644 engine/pkg/system/stat_solaris.go create mode 100644 engine/pkg/system/stat_unix.go create mode 100644 engine/pkg/system/stat_unix_test.go create mode 100644 engine/pkg/system/stat_windows.go create mode 100644 engine/pkg/system/syscall_unix.go create mode 100644 engine/pkg/system/syscall_windows.go create mode 100644 engine/pkg/system/syscall_windows_test.go create mode 100644 engine/pkg/system/umask.go create mode 100644 engine/pkg/system/umask_windows.go create mode 100644 engine/pkg/system/utimes_freebsd.go create mode 100644 engine/pkg/system/utimes_linux.go create mode 100644 engine/pkg/system/utimes_unix_test.go create mode 100644 engine/pkg/system/utimes_unsupported.go create mode 100644 engine/pkg/system/xattrs_linux.go create mode 100644 engine/pkg/system/xattrs_unsupported.go create mode 100644 engine/pkg/tailfile/tailfile.go create mode 100644 engine/pkg/tailfile/tailfile_test.go create mode 100644 engine/pkg/tarsum/builder_context.go create mode 100644 engine/pkg/tarsum/builder_context_test.go create mode 100644 engine/pkg/tarsum/fileinfosums.go create mode 100644 engine/pkg/tarsum/fileinfosums_test.go create mode 100644 engine/pkg/tarsum/tarsum.go create mode 100644 engine/pkg/tarsum/tarsum_spec.md create mode 100644 engine/pkg/tarsum/tarsum_test.go create mode 100644 engine/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 engine/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 engine/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 engine/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 engine/pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 engine/pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 engine/pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 engine/pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 engine/pkg/tarsum/testdata/xattr/json create mode 100644 engine/pkg/tarsum/testdata/xattr/layer.tar create mode 100644 engine/pkg/tarsum/versioning.go create mode 100644 engine/pkg/tarsum/versioning_test.go create mode 100644 engine/pkg/tarsum/writercloser.go create mode 100644 engine/pkg/term/ascii.go create mode 100644 engine/pkg/term/ascii_test.go create mode 100644 engine/pkg/term/proxy.go create mode 100644 engine/pkg/term/proxy_test.go create mode 100644 engine/pkg/term/tc.go create mode 100644 engine/pkg/term/term.go create mode 100644 engine/pkg/term/term_linux_test.go create mode 100644 engine/pkg/term/term_windows.go create mode 100644 engine/pkg/term/termios_bsd.go create mode 100644 engine/pkg/term/termios_linux.go create mode 100644 engine/pkg/term/windows/ansi_reader.go create mode 100644 
engine/pkg/term/windows/ansi_writer.go create mode 100644 engine/pkg/term/windows/console.go create mode 100644 engine/pkg/term/windows/windows.go create mode 100644 engine/pkg/term/windows/windows_test.go create mode 100644 engine/pkg/term/winsize.go create mode 100644 engine/pkg/truncindex/truncindex.go create mode 100644 engine/pkg/truncindex/truncindex_test.go create mode 100644 engine/pkg/urlutil/urlutil.go create mode 100644 engine/pkg/urlutil/urlutil_test.go create mode 100644 engine/pkg/useragent/README.md create mode 100644 engine/pkg/useragent/useragent.go create mode 100644 engine/pkg/useragent/useragent_test.go create mode 100644 engine/plugin/backend_linux.go create mode 100644 engine/plugin/backend_linux_test.go create mode 100644 engine/plugin/backend_unsupported.go create mode 100644 engine/plugin/blobstore.go create mode 100644 engine/plugin/defs.go create mode 100644 engine/plugin/errors.go create mode 100644 engine/plugin/events.go create mode 100644 engine/plugin/executor/containerd/containerd.go create mode 100644 engine/plugin/executor/containerd/containerd_test.go create mode 100644 engine/plugin/manager.go create mode 100644 engine/plugin/manager_linux.go create mode 100644 engine/plugin/manager_linux_test.go create mode 100644 engine/plugin/manager_test.go create mode 100644 engine/plugin/manager_windows.go create mode 100644 engine/plugin/store.go create mode 100644 engine/plugin/store_test.go create mode 100644 engine/plugin/v2/plugin.go create mode 100644 engine/plugin/v2/plugin_linux.go create mode 100644 engine/plugin/v2/plugin_unsupported.go create mode 100644 engine/plugin/v2/settable.go create mode 100644 engine/plugin/v2/settable_test.go create mode 100644 engine/poule.yml create mode 100644 engine/profiles/apparmor/apparmor.go create mode 100644 engine/profiles/apparmor/template.go create mode 100755 engine/profiles/seccomp/default.json create mode 100755 engine/profiles/seccomp/fixtures/example.json create mode 100644 engine/profiles/seccomp/generate.go create mode 100644 engine/profiles/seccomp/seccomp.go create mode 100644 engine/profiles/seccomp/seccomp_default.go create mode 100644 engine/profiles/seccomp/seccomp_test.go create mode 100644 engine/profiles/seccomp/seccomp_unsupported.go create mode 100644 engine/project/ARM.md create mode 100644 engine/project/BRANCHES-AND-TAGS.md create mode 120000 engine/project/CONTRIBUTING.md create mode 100644 engine/project/GOVERNANCE.md create mode 100644 engine/project/IRC-ADMINISTRATION.md create mode 100644 engine/project/ISSUE-TRIAGE.md create mode 100644 engine/project/PACKAGE-REPO-MAINTENANCE.md create mode 100644 engine/project/PACKAGERS.md create mode 100644 engine/project/PATCH-RELEASES.md create mode 100644 engine/project/PRINCIPLES.md create mode 100644 engine/project/README.md create mode 100644 engine/project/RELEASE-PROCESS.md create mode 100644 engine/project/REVIEWING.md create mode 100644 engine/project/TOOLS.md create mode 100644 engine/reference/errors.go create mode 100644 engine/reference/store.go create mode 100644 engine/reference/store_test.go create mode 100644 engine/registry/auth.go create mode 100644 engine/registry/auth_test.go create mode 100644 engine/registry/config.go create mode 100644 engine/registry/config_test.go create mode 100644 engine/registry/config_unix.go create mode 100644 engine/registry/config_windows.go create mode 100644 engine/registry/endpoint_test.go create mode 100644 engine/registry/endpoint_v1.go create mode 100644 engine/registry/errors.go create mode 
100644 engine/registry/registry.go create mode 100644 engine/registry/registry_mock_test.go create mode 100644 engine/registry/registry_test.go create mode 100644 engine/registry/resumable/resumablerequestreader.go create mode 100644 engine/registry/resumable/resumablerequestreader_test.go create mode 100644 engine/registry/service.go create mode 100644 engine/registry/service_v1.go create mode 100644 engine/registry/service_v1_test.go create mode 100644 engine/registry/service_v2.go create mode 100644 engine/registry/session.go create mode 100644 engine/registry/types.go create mode 100644 engine/reports/2017-05-01.md create mode 100644 engine/reports/2017-05-08.md create mode 100644 engine/reports/2017-05-15.md create mode 100644 engine/reports/2017-06-05.md create mode 100644 engine/reports/2017-06-12.md create mode 100644 engine/reports/2017-06-26.md create mode 100644 engine/reports/builder/2017-05-01.md create mode 100644 engine/reports/builder/2017-05-08.md create mode 100644 engine/reports/builder/2017-05-15.md create mode 100644 engine/reports/builder/2017-05-22.md create mode 100644 engine/reports/builder/2017-05-29.md create mode 100644 engine/reports/builder/2017-06-05.md create mode 100644 engine/reports/builder/2017-06-12.md create mode 100644 engine/reports/builder/2017-06-26.md create mode 100644 engine/reports/builder/2017-07-10.md create mode 100644 engine/reports/builder/2017-07-17.md create mode 100644 engine/restartmanager/restartmanager.go create mode 100644 engine/restartmanager/restartmanager_test.go create mode 100644 engine/runconfig/config.go create mode 100644 engine/runconfig/config_test.go create mode 100644 engine/runconfig/config_unix.go create mode 100644 engine/runconfig/config_windows.go create mode 100644 engine/runconfig/errors.go create mode 100644 engine/runconfig/fixtures/unix/container_config_1_14.json create mode 100644 engine/runconfig/fixtures/unix/container_config_1_17.json create mode 100644 engine/runconfig/fixtures/unix/container_config_1_19.json create mode 100644 engine/runconfig/fixtures/unix/container_hostconfig_1_14.json create mode 100644 engine/runconfig/fixtures/unix/container_hostconfig_1_19.json create mode 100644 engine/runconfig/fixtures/windows/container_config_1_19.json create mode 100644 engine/runconfig/hostconfig.go create mode 100644 engine/runconfig/hostconfig_test.go create mode 100644 engine/runconfig/hostconfig_unix.go create mode 100644 engine/runconfig/hostconfig_windows.go create mode 100644 engine/runconfig/hostconfig_windows_test.go create mode 100644 engine/runconfig/opts/parse.go create mode 100644 engine/vendor.conf create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/LICENSE create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/README.md create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/gelf/message.go create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/gelf/reader.go create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/gelf/udpwriter.go create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/gelf/utils.go create mode 100644 engine/vendor/github.com/Graylog2/go-gelf/gelf/writer.go create mode 100644 engine/vendor/github.com/Nvveen/Gotty/LICENSE create mode 100644 engine/vendor/github.com/Nvveen/Gotty/README create mode 100644 engine/vendor/github.com/Nvveen/Gotty/attributes.go create mode 100644 
engine/vendor/github.com/Nvveen/Gotty/gotty.go create mode 100644 engine/vendor/github.com/Nvveen/Gotty/parser.go create mode 100644 engine/vendor/github.com/Nvveen/Gotty/types.go create mode 100644 engine/vendor/github.com/containerd/continuity/LICENSE create mode 100644 engine/vendor/github.com/containerd/continuity/README.md create mode 100644 engine/vendor/github.com/containerd/continuity/devices/devices.go create mode 100644 engine/vendor/github.com/containerd/continuity/devices/devices_unix.go create mode 100644 engine/vendor/github.com/containerd/continuity/devices/devices_windows.go create mode 100644 engine/vendor/github.com/containerd/continuity/driver/driver.go create mode 100644 engine/vendor/github.com/containerd/continuity/driver/driver_unix.go create mode 100644 engine/vendor/github.com/containerd/continuity/driver/driver_windows.go create mode 100644 engine/vendor/github.com/containerd/continuity/driver/utils.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/copy.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/copy_linux.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/copy_unix.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/copy_windows.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/diff.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/diff_unix.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/diff_windows.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/dtype_linux.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/du.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/du_unix.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/du_windows.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/hardlink.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/hardlink_unix.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/hardlink_windows.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/path.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/stat_bsd.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/stat_linux.go create mode 100644 engine/vendor/github.com/containerd/continuity/fs/time.go create mode 100644 engine/vendor/github.com/containerd/continuity/pathdriver/path_driver.go create mode 100644 engine/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go create mode 100644 engine/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/asm.s create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/chmod_linux.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/file_posix.go create mode 100644 
engine/vendor/github.com/containerd/continuity/sysx/nodata_linux.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/nodata_unix.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/sys.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr_linux.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go create mode 100644 engine/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go create mode 100644 engine/vendor/github.com/containerd/continuity/vendor.conf create mode 100644 engine/vendor/github.com/containerd/go-runc/LICENSE create mode 100644 engine/vendor/github.com/containerd/go-runc/README.md create mode 100644 engine/vendor/github.com/containerd/go-runc/command_linux.go create mode 100644 engine/vendor/github.com/containerd/go-runc/command_other.go create mode 100644 engine/vendor/github.com/containerd/go-runc/console.go create mode 100644 engine/vendor/github.com/containerd/go-runc/container.go create mode 100644 engine/vendor/github.com/containerd/go-runc/events.go create mode 100644 engine/vendor/github.com/containerd/go-runc/io.go create mode 100644 engine/vendor/github.com/containerd/go-runc/monitor.go create mode 100644 engine/vendor/github.com/containerd/go-runc/runc.go create mode 100644 engine/vendor/github.com/containerd/go-runc/utils.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/LICENSE create mode 100644 engine/vendor/github.com/containerd/ttrpc/README.md create mode 100644 engine/vendor/github.com/containerd/ttrpc/channel.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/client.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/codec.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/config.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/handshake.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/server.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/services.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/types.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/unixcreds_linux.go create mode 100644 engine/vendor/github.com/fernet/fernet-go/License create mode 100644 engine/vendor/github.com/fernet/fernet-go/Readme create mode 100644 engine/vendor/github.com/fernet/fernet-go/fernet.go create mode 100644 engine/vendor/github.com/fernet/fernet-go/key.go create mode 100644 engine/vendor/github.com/golang/gddo/LICENSE create mode 100644 engine/vendor/github.com/golang/gddo/README.markdown create mode 100644 engine/vendor/github.com/golang/gddo/httputil/buster.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/header/header.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/httputil.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/negotiate.go create mode 100644 
engine/vendor/github.com/golang/gddo/httputil/respbuf.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/static.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/transport.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/README.rst create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/README.md create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/protos/command_line.proto create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/protos/store.proto create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/LICENSE create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/README.md create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/edges.go create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/iradix.go create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/iter.go create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/node.go create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go create mode 100644 engine/vendor/github.com/ishidawataru/sctp/LICENSE create mode 100644 engine/vendor/github.com/ishidawataru/sctp/README.md create mode 100644 engine/vendor/github.com/ishidawataru/sctp/sctp.go create mode 100644 engine/vendor/github.com/ishidawataru/sctp/sctp_linux.go create mode 100644 engine/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go create mode 100644 engine/vendor/github.com/moby/buildkit/LICENSE create mode 100644 engine/vendor/github.com/moby/buildkit/README.md create mode 100644 engine/vendor/github.com/moby/buildkit/api/services/control/control.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/api/services/control/control.proto create mode 100644 engine/vendor/github.com/moby/buildkit/api/services/control/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/api/types/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/api/types/worker.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/api/types/worker.proto create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go create mode 100644 
engine/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/fsutil.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/gc.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/manager.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/metadata.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/metadata/metadata.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/refs.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/export.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/import.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/client.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/client_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/client_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/diskusage.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/exporters.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/graph.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/exec.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/marshal.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/meta.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/resolver.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/source.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/state.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/prune.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/solve.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/workers.go create mode 100644 engine/vendor/github.com/moby/buildkit/control/control.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/executor.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/hosts.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/mounts.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/user.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go create mode 100644 engine/vendor/github.com/moby/buildkit/exporter/exporter.go create mode 100644 
engine/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/frontend.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/identity/randomid.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/auth/auth.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/auth/auth.pb.go create mode 100644 
engine/vendor/github.com/moby/buildkit/session/auth/auth.proto create mode 100644 engine/vendor/github.com/moby/buildkit/session/auth/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/context.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/filesync.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/filesync.proto create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/grpc.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/grpchijack/dial.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/manager.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/session.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/blobmapping/snapshotter.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/localmounter.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/snapshotter.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/boltdbcachestorage/storage.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/cachekey.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/cachemanager.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/cachestorage.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/combinedcache.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/edge.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/exporter.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/index.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/jobs.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/memorycachestorage.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/attr.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/const.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/ops.proto create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/platform.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/progress.go create mode 
100644 engine/vendor/github.com/moby/buildkit/solver/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/scheduler.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/types.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/git/gitsource.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/gitidentifier.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/http/httpsource.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/identifier.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/local/local.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/manager.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/apicaps/caps.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto create mode 100644 engine/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/cond/cond.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/buffer.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/copy.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/pusher.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/imageutil/config.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/progress/logs/logs.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/progress/multireader.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/progress/multiwriter.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/progress/progress.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/path_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/path_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/tracing/tracing.go create mode 100644 engine/vendor/github.com/moby/buildkit/vendor.conf create mode 100644 engine/vendor/github.com/moby/buildkit/worker/filter.go create mode 100644 engine/vendor/github.com/moby/buildkit/worker/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/worker/worker.go create mode 100644 engine/vendor/github.com/moby/buildkit/worker/workercontroller.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/LICENSE create mode 100644 
engine/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diff.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diff_containerd.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diskwriter.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/followlinks.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/generate.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/hardlinks.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/readme.md create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/receive.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/send.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/stat.pb.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/stat.proto create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/validator.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/walker.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/walker_unix.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/walker_windows.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/wire.pb.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/wire.proto create mode 100644 engine/volume/drivers/adapter.go create mode 100644 engine/volume/drivers/extpoint.go create mode 100644 engine/volume/drivers/extpoint_test.go create mode 100644 engine/volume/drivers/proxy.go create mode 100644 engine/volume/drivers/proxy_test.go create mode 100644 engine/volume/local/local.go create mode 100644 engine/volume/local/local_test.go create mode 100644 engine/volume/local/local_unix.go create mode 100644 engine/volume/local/local_windows.go create mode 100644 engine/volume/mounts/lcow_parser.go create mode 100644 engine/volume/mounts/linux_parser.go create mode 100644 engine/volume/mounts/mounts.go create mode 100644 engine/volume/mounts/parser.go create mode 100644 engine/volume/mounts/parser_test.go create mode 100644 engine/volume/mounts/validate.go create mode 100644 engine/volume/mounts/validate_test.go create mode 100644 engine/volume/mounts/validate_unix_test.go create mode 100644 engine/volume/mounts/validate_windows_test.go create mode 100644 engine/volume/mounts/volume_copy.go create mode 100644 engine/volume/mounts/volume_unix.go create mode 100644 engine/volume/mounts/volume_windows.go create mode 100644 engine/volume/mounts/windows_parser.go create mode 100644 engine/volume/service/by.go create mode 100644 engine/volume/service/convert.go create mode 100644 engine/volume/service/db.go create mode 100644 engine/volume/service/db_test.go create mode 100644 engine/volume/service/default_driver.go create mode 100644 engine/volume/service/default_driver_stubs.go create mode 100644 engine/volume/service/errors.go create mode 100644 engine/volume/service/opts/opts.go create mode 100644 engine/volume/service/restore.go create mode 100644 engine/volume/service/restore_test.go create mode 100644 engine/volume/service/service.go create mode 100644 engine/volume/service/service_linux_test.go create mode 100644 
engine/volume/service/service_test.go create mode 100644 engine/volume/service/store.go create mode 100644 engine/volume/service/store_test.go create mode 100644 engine/volume/service/store_unix.go create mode 100644 engine/volume/service/store_windows.go create mode 100644 engine/volume/testutils/testutils.go create mode 100644 engine/volume/volume.go diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..6f285a85 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,203 @@ +# Changelog + +For more information on the list of deprecated flags and APIs, have a look at +https://docs.docker.com/engine/deprecated/ where you can find the target removal dates + +## 18.06.1-ce (2018-08-21) + +### Builder + +- Fix no error if build args are missing during docker build. [docker/engine#25](https://github.com/docker/engine/pull/25) ++ Set BuildKit's ExportedProduct variable to show useful errors. [docker/engine#21](https://github.com/docker/engine/pull/21) + +### Client + ++ Various shell completion script updates. [docker/cli#1229](https://github.com/docker/cli/pull/1229) [docker/cli#1268](https://github.com/docker/cli/pull/1268) [docker/cli#1272](https://github.com/docker/cli/pull/1272) +- Fix `DOCKER_CONFIG` warning message and fallback search. [docker/cli#1241](https://github.com/docker/cli/pull/1241) +- Fix help message flags on `docker stack` commands and sub-commands. [docker/cli#1267](https://github.com/docker/cli/pull/1267) + +### Runtime + +* Disable CRI plugin listening on port 10010 by default. [docker/engine#29](https://github.com/docker/engine/pull/29) +* Update containerd to v1.1.2. [docker/engine#33](https://github.com/docker/engine/pull/33) +- Windows: Do not invoke HCS shutdown if terminate called. [docker/engine#31](https://github.com/docker/engine/pull/31) +* Windows: Select polling-based watcher for Windows log watcher. [docker/engine#34](https://github.com/docker/engine/pull/34) + +### Swarm Mode + +- Fix the condition used for skipping over running tasks. [docker/swarmkit#2677](https://github.com/docker/swarmkit/pull/2677) +- Fix task sorting. [docker/swarmkit#2712](https://github.com/docker/swarmkit/pull/2712) + +## 18.06.0-ce (2018-07-18) + +### Important notes about this release + +- Docker 18.06 CE will be the last release with a 4-month maintenance lifecycle. The planned Docker 18.09 CE release will be supported for 7 months with Docker 19.03 CE being the next release in line. More details about the release process can be found [here](https://docs.docker.com/install/). + +### Builder + +* Builder: fix layer leak on multi-stage wildcard copy. [moby/moby#37178](https://github.com/moby/moby/pull/37178) +* Fix parsing of invalid environment variable substitution . [moby/moby#37134](https://github.com/moby/moby/pull/37134) +* Builder: use the arch info from base image. [moby/moby#36816](https://github.com/moby/moby/pull/36816) [moby/moby#37197](https://github.com/moby/moby/pull/37197) ++ New experimental builder backend based on [BuildKit](https://github.com/moby/buildkit). To enable, run daemon in experimental mode and set `DOCKER_BUILDKIT=1` environment variable on the docker CLI. [moby/moby#37151](https://github.com/moby/moby/pull/37151) [docker/cli#1111](https://github.com/docker/cli/pull/1111) +- Fix handling uppercase targets names in multi-stage builds. [moby/moby#36960](https://github.com/moby/moby/pull/36960) + +### Client + +* Bump spf13/cobra to v0.0.3, pflag to v1.0.1. 
[moby/moby#37106](https://github.com/moby/moby/pull/37106) +* Add support for the new Stack API for Kubernetes v1beta2. [docker/cli#899](https://github.com/docker/cli/pull/899) +* K8s: more robust stack error detection on deploy. [docker/cli#948](https://github.com/docker/cli/pull/948) +* Support for rollback config in compose 3.7. [docker/cli#409](https://github.com/docker/cli/pull/409) +* Update Cobra and pflag, and use built-in --version feature. [docker/cli#1069](https://github.com/docker/cli/pull/1069) +* Fix `docker stack deploy --prune` with empty name removing all services. [docker/cli#1088](https://github.com/docker/cli/pull/1088) +* [Kubernetes] stack services filters. [docker/cli#1023](https://github.com/docker/cli/pull/1023) ++ Only show orchestrator flag in root, stack and version commands in help. [docker/cli#1106](https://github.com/docker/cli/pull/1106) ++ Add an `Extras` field on the compose config types. [docker/cli#1126](https://github.com/docker/cli/pull/1126) ++ Add options to the compose loader. [docker/cli#1128](https://github.com/docker/cli/pull/1128) +- Fix always listing nodes in docker stack ps command on Kubernetes. [docker/cli#1093](https://github.com/docker/cli/pull/1093) +- Fix output being shown twice on stack rm error message. [docker/cli#1093](https://github.com/docker/cli/pull/1093) +* Extend client API with custom HTTP requests. [moby/moby#37071](https://github.com/moby/moby/pull/37071) +* Changed error message for unreadable files to clarify possibility of a .Dockerignore entry. [docker/cli#1053](https://github.com/docker/cli/pull/1053) +* Restrict kubernetes.allNamespaces value to 'enabled' or 'disabled' in configuration file. [docker/cli#1087](https://github.com/docker/cli/pull/1087) +* Check errors when initializing the docker client in the help command. [docker/cli#1119](https://github.com/docker/cli/pull/1119) +* Better namespace experience with Kubernetes. Fix using namespace defined in ~/.kube/config for stack commands. Add a NAMESPACE column for docker stack ls command. Add a --all-namespaces flag for docker stack ls command. [docker/cli#991](https://github.com/docker/cli/pull/991) +* Export Push and Save. [docker/cli#1123](https://github.com/docker/cli/pull/1123) +* Export pull as a public function. [docker/cli#1026](https://github.com/docker/cli/pull/1026) +* Remove Kubernetes commands from experimental. [docker/cli#1068](https://github.com/docker/cli/pull/1068) +* Adding configs/secrets to service inspect pretty. [docker/cli#1006](https://github.com/docker/cli/pull/1006) +- Fix service filtering by name on Kubernetes. [docker/cli#1101](https://github.com/docker/cli/pull/1101) +- Fix component information alignment in `docker version`. [docker/cli#1065](https://github.com/docker/cli/pull/1065) +- Fix cpu/memory limits and reservations being reset on service update. [docker/cli#1079](https://github.com/docker/cli/pull/1079) +* Manifest list: request specific permissions. [docker/cli#1024](https://github.com/docker/cli/pull/1024) +* Setting --orchestrator=all also sets --all-namespaces unless specific --namespace are set. [docker/cli#1059](https://github.com/docker/cli/pull/1059) +- Fix panics when --compress and --stream are used together. [docker/cli#1105](https://github.com/docker/cli/pull/1105) +* Switch from x/net/context to context. [docker/cli#1038](https://github.com/docker/cli/pull/1038) ++ Add --init option to `docker service create`. 
[docker/cli#479](https://github.com/docker/cli/pull/479) ++ Fixed bug displaying garbage output for build command when the --stream and --quiet flags are combined. [docker/cli#1090](https://github.com/docker/cli/pull/1090) ++ Add `init` support in 3.7 schema. [docker/cli#1129](https://github.com/docker/cli/pull/1129) +- Fix docker trust signer removal. [docker/cli#1112](https://github.com/docker/cli/pull/1112) +- Fix error message from docker inspect. [docker/cli#1071](https://github.com/docker/cli/pull/1071) +* Allow `x-*` extension on 3rd level objects. [docker/cli#1097](https://github.com/docker/cli/pull/1097) +* An invalid orchestrator now generates an error instead of being silently ignored. [docker/cli#1055](https://github.com/docker/cli/pull/1055) +* Added ORCHESTRATOR column to docker stack ls command. [docker/cli#973](https://github.com/docker/cli/pull/973) +* Warn when using host-ip for published ports for services. [docker/cli#1017](https://github.com/docker/cli/pull/1017) ++ Added the option to enable experimental CLI features through the `DOCKER_CLI_EXPERIMENTAL` environment variable. [docker/cli#1138](https://github.com/docker/cli/pull/1138) ++ Add exec_die to the list of known container events. [docker/cli#1028](https://github.com/docker/cli/pull/1028) +* [K8s] Do env-variable expansion on the uninterpreted Config files. [docker/cli#974](https://github.com/docker/cli/pull/974) ++ Print warnings on stderr for each unsupported feature while parsing a compose file for deployment on Kubernetes. [docker/cli#903](https://github.com/docker/cli/pull/903) ++ Added description about pids count. [docker/cli#1045](https://github.com/docker/cli/pull/1045) +- Warn user of filter when pruning. [docker/cli#1043](https://github.com/docker/cli/pull/1043) +- Fix `--rollback-*` options overwriting `--update-*` options. [docker/cli#1052](https://github.com/docker/cli/pull/1052) +* Update Attach, Build, Commit, Cp, Create subcommand fish completions. [docker/cli#1005](https://github.com/docker/cli/pull/1005) ++ Add bash completion for `dockerd --default-address-pool`. [docker/cli#1173](https://github.com/docker/cli/pull/1173) ++ Add bash completion for `exec_die` event. [docker/cli#1173](https://github.com/docker/cli/pull/1173) +* Update docker-credential-helper so `pass` is not called on every docker command. [docker/cli#1184](https://github.com/docker/cli/pull/1184) +* Fix for rotating swarm external CA. [docker/cli#1199](https://github.com/docker/cli/pull/1199) +* Improve version output alignment. [docker/cli#1207](https://github.com/docker/cli/pull/1207) ++ Add bash completion for `service create|update --init`. [docker/cli#1210](https://github.com/docker/cli/pull/1210) + +### Deprecation + +* Document reserved namespaces deprecation. [docker/cli#1040](https://github.com/docker/cli/pull/1040) + +### Logging + +* Allow awslogs to use non-blocking mode. [moby/moby#36522](https://github.com/moby/moby/pull/36522) +* Improve logging of long log lines on fluentd log driver. [moby/moby#36159](https://github.com/moby/moby/pull/36159) +* Re-order CHANGELOG.md to pass `make validate` test. [moby/moby#37047](https://github.com/moby/moby/pull/37047) +* Update Events, Exec, Export, History, Images, Import, Inspect, Load, and Login subcommand fish completions. [docker/cli#1061](https://github.com/docker/cli/pull/1061) +* Update documentation for RingLogger's ring buffer. [moby/moby#37084](https://github.com/moby/moby/pull/37084) ++ Add metrics for log failures/partials. 
[moby/moby#37034](https://github.com/moby/moby/pull/37034) +- Fix logging plugin crash unrecoverable. [moby/moby#37028](https://github.com/moby/moby/pull/37028) +- Fix logging test type. [moby/moby#37070](https://github.com/moby/moby/pull/37070) +- Fix race conditions in logs API. [moby/moby#37062](https://github.com/moby/moby/pull/37062) +- Fix some issues in logfile reader and rotation. [moby/moby#37063](https://github.com/moby/moby/pull/37063) + +### Networking + +* Allow user to specify default address pools for docker networks. [moby/moby#36396](https://github.com/moby/moby/pull/36396) [docker/cli#818](https://github.com/docker/cli/pull/818) +* Adding logs for IPAM state. [docker/libnetwork#2147](https://github.com/docker/libnetwork/pull/2147) +* Fix race conditions in the overlay network driver. [docker/libnetwork#2143](https://github.com/docker/libnetwork/pull/2143) +* Add wait time into xtables lock warning. [docker/libnetwork#2142](https://github.com/docker/libnetwork/pull/2142) +* Filter xtables lock warnings when firewalld is active. [docker/libnetwork#2135](https://github.com/docker/libnetwork/pull/2135) +* Switch from x/net/context to context. [docker/libnetwork#2140](https://github.com/docker/libnetwork/pull/2140) +* Adding a recovery mechanism for a split gossip cluster. [docker/libnetwork#2134](https://github.com/docker/libnetwork/pull/2134) +* Running docker inspect on network attachment tasks now returns a full task object. [moby/moby#35246](https://github.com/moby/moby/pull/35246) +* Some container/network cleanups. [moby/moby#37033](https://github.com/moby/moby/pull/37033) +- Fix network inspect for overlay network. [moby/moby#37045](https://github.com/moby/moby/pull/37045) +* Improve scalability of the Linux load balancing. [docker/engine#16](https://github.com/docker/engine/pull/16) +* Change log level from error to warning. [docker/engine#19](https://github.com/docker/engine/pull/19) + +### Runtime + +* Aufs: log why aufs is not supported. [moby/moby#36995](https://github.com/moby/moby/pull/36995) +* Hide experimental checkpoint features on Windows. [docker/cli#1094](https://github.com/docker/cli/pull/1094) +* Lcow: Allow the client to customize capabilities and device cgroup rules for LCOW containers. [moby/moby#37294](https://github.com/moby/moby/pull/37294) +* Changed path given for executable output in Windows to actual location of executable output. [moby/moby#37295](https://github.com/moby/moby/pull/37295) ++ Add Windows recycle bin test and update hcsshim to v0.6.11. [moby/moby#36994](https://github.com/moby/moby/pull/36994) +* Allow to add any args when doing a make run. [moby/moby#37190](https://github.com/moby/moby/pull/37190) +* Optimize ContainerTop() aka docker top. [moby/moby#37131](https://github.com/moby/moby/pull/37131) +- Fix compilation on 32-bit machines. [moby/moby#37292](https://github.com/moby/moby/pull/37292) +* Update API version to v1.38. [moby/moby#37141](https://github.com/moby/moby/pull/37141) +- Fix `docker service update --host-add` not updating an existing host entry. [docker/cli#1054](https://github.com/docker/cli/pull/1054) +- Fix swagger file type for ExecIds. [moby/moby#36962](https://github.com/moby/moby/pull/36962) +- Fix swagger volume type generation. [moby/moby#37060](https://github.com/moby/moby/pull/37060) +- Fix wrong assertion in volume/service package. [moby/moby#37211](https://github.com/moby/moby/pull/37211) +- Fix daemon panic on restart when a plugin is running. 
[moby/moby#37234](https://github.com/moby/moby/pull/37234) +* Construct and add 'LABEL' command from 'label' option to last stage. [moby/moby#37011](https://github.com/moby/moby/pull/37011) +- Fix race condition between exec start and resize.. [moby/moby#37172](https://github.com/moby/moby/pull/37172) +* Alternative failure mitigation of `TestExecInteractiveStdinClose`. [moby/moby#37143](https://github.com/moby/moby/pull/37143) +* RawAccess allows a set of paths to be not set as masked or readonly. [moby/moby#36644](https://github.com/moby/moby/pull/36644) +* Be explicit about github.com prefix being a legacy feature. [moby/moby#37174](https://github.com/moby/moby/pull/37174) +* Bump Golang to 1.10.3. [docker/cli#1122](https://github.com/docker/cli/pull/1122) +* Close ReadClosers to prevent xz zombies. [moby/moby#34218](https://github.com/moby/moby/pull/34218) +* Daemon.ContainerStop(): fix for a negative timeout. [moby/moby#36874](https://github.com/moby/moby/pull/36874) +* Daemon.setMounts(): copy slice in place. [moby/moby#36991](https://github.com/moby/moby/pull/36991) +* Describe IP field of swagger Port definition. [moby/moby#36971](https://github.com/moby/moby/pull/36971) +* Extract volume interaction to a volumes service. [moby/moby#36688](https://github.com/moby/moby/pull/36688) +* Fixed markdown formatting in docker image v1, v1.1, and v1.2 spec. [moby/moby#37051](https://github.com/moby/moby/pull/37051) +* Improve GetTimestamp parsing. [moby/moby#35402](https://github.com/moby/moby/pull/35402) +* Jsonmessage: pass message to aux callback. [moby/moby#37064](https://github.com/moby/moby/pull/37064) +* Overlay2: remove unused cdMountFrom() helper function. [moby/moby#37041](https://github.com/moby/moby/pull/37041) +- Overlay: Fix overlay storage-driver silently ignoring unknown storage-driver options. [moby/moby#37040](https://github.com/moby/moby/pull/37040) +* Remove some unused contrib items. [moby/moby#36977](https://github.com/moby/moby/pull/36977) +* Restartmanager: do not apply restart policy on created containers. [moby/moby#36924](https://github.com/moby/moby/pull/36924) +* Set item-type for ExecIDs. [moby/moby#37121](https://github.com/moby/moby/pull/37121) +* Use go-systemd const instead of magic string in Linux version of dockerd. [moby/moby#37136](https://github.com/moby/moby/pull/37136) +* Use stdlib TLS dialer. [moby/moby#36687](https://github.com/moby/moby/pull/36687) +* Warn when an engine label using a reserved namespace (com.docker.\*, io.docker.\*, or org.dockerproject.\*) is configured, as per https://docs.docker.com/config/labels-custom-metadata/. [moby/moby#36921](https://github.com/moby/moby/pull/36921) +- Fix missing plugin name in message. [moby/moby#37052](https://github.com/moby/moby/pull/37052) +- Fix link anchors in CONTRIBUTING.md. [moby/moby#37276](https://github.com/moby/moby/pull/37276) +- Fix link to Docker Toolbox. [moby/moby#37240](https://github.com/moby/moby/pull/37240) +- Fix mis-used skip condition. [moby/moby#37179](https://github.com/moby/moby/pull/37179) +- Fix bind mounts not working in some cases. [moby/moby#37031](https://github.com/moby/moby/pull/37031) +- Fix fd leak on attach. [moby/moby#37184](https://github.com/moby/moby/pull/37184) +- Fix fluentd partial detection. [moby/moby#37029](https://github.com/moby/moby/pull/37029) +- Fix incorrect link in version-history.md. [moby/moby#37049](https://github.com/moby/moby/pull/37049) +* Allow vim to be case insensitive for D in dockerfile. 
[moby/moby#37235](https://github.com/moby/moby/pull/37235) ++ Add `t.Name()` to tests so that service names are unique. [moby/moby#37166](https://github.com/moby/moby/pull/37166) ++ Add additional message when backendfs is extfs without d_type support. [moby/moby#37022](https://github.com/moby/moby/pull/37022) ++ Add API version checking for tests from new feature. [moby/moby#37169](https://github.com/moby/moby/pull/37169) ++ Add image metrics for push and pull. [moby/moby#37233](https://github.com/moby/moby/pull/37233) ++ Add support for `init` on services. [moby/moby#37183](https://github.com/moby/moby/pull/37183) ++ Add verification of escapeKeys array length in pkg/term/proxy.go. [moby/moby#36918](https://github.com/moby/moby/pull/36918) +* When link id is empty for overlay2, do not remove this link. [moby/moby#36161](https://github.com/moby/moby/pull/36161) +- Fix build on OpenBSD by defining Self(). [moby/moby#37301](https://github.com/moby/moby/pull/37301) +- Windows: Fix named pipe support for Hyper-V isolated containers. [docker/engine#2](https://github.com/docker/engine/pull/2) [docker/cli#1165](https://github.com/docker/cli/pull/1165) +- Fix manifest lists to always use correct size. [docker/cli#1183](https://github.com/docker/cli/pull/1183) +* Register OCI media types. [docker/engine#4](https://github.com/docker/engine/pull/4) +* Update containerd to v1.1.1. [docker/engine#17](https://github.com/docker/engine/pull/17) +* LCOW: Prefer Windows over Linux in a manifest list. [docker/engine#3](https://github.com/docker/engine/pull/3) +* Add updated `MaskPaths` that are used in code paths directly using containerd to address [CVE-2018-10892](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10892). [docker/engine#15](https://github.com/docker/engine/pull/15) +* Add `/proc/acpi` to masked paths to address [CVE-2018-10892](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10892). [docker/engine#14](https://github.com/docker/engine/pull/14) +- Fix bindmount autocreate race. [docker/engine#11](https://github.com/docker/engine/pull/11) + +### Swarm Mode + +* List stacks for both Swarm and Kubernetes with --orchestrator=all in docker stack ls. Allow several occurrences of --namespace for Kubernetes with docker stack ls. [docker/cli#1031](https://github.com/docker/cli/pull/1031) +* Bump SwarmKit to remove deprecated gRPC metadata wrappers. [moby/moby#36905](https://github.com/moby/moby/pull/36905) +* Issue an error for --orchestrator=all when working on mismatched Swarm and Kubernetes hosts. [docker/cli#1035](https://github.com/docker/cli/pull/1035) +- Fix broken swarm commands with Kubernetes defined as orchestrator. The `--orchestrator` flag is no longer global but local to stack commands and subcommands. [docker/cli#1137](https://github.com/docker/cli/pull/1137) [docker/cli#1139](https://github.com/docker/cli/pull/1139) +* Bump SwarmKit to include task reaper fixes and more metrics. [docker/engine#13](https://github.com/docker/engine/pull/13) +- Avoid a leak when a service with unassigned tasks is deleted. [docker/engine#27](https://github.com/docker/engine/pull/27) +- Fix racy batching on the dispatcher. [docker/engine#27](https://github.com/docker/engine/pull/27) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..737db485 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,126 @@ +# Contributing to Docker CE + +Want to contribute to Docker CE? Awesome! 
+ +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Reporting Issues](#reporting-other-issues) +* [Submitting Pull Requests](#submitting-pull-requests) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + +## Reporting other issues + +There are separate issue-tracking repos for the end user Docker CE +products specialized for a platform. Find your issue or file a new issue +for the platform you are using: + +* https://github.com/docker/for-linux +* https://github.com/docker/for-mac +* https://github.com/docker/for-win +* https://github.com/docker/for-aws +* https://github.com/docker/for-azure + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +If presented with a template when creating an issue, please follow its directions. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). + +## Submitting pull requests + +Please see the corresponding `CONTRIBUTING.md` file of each component for more information: + +* Changes to the `engine` should be directed upstream to https://github.com/moby/moby +* Changes to the `cli` should be directed upstream to https://github.com/docker/cli +* Changes to the `packaging` should be directed upstream to https://github.com/docker/docker-ce-packaging + +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members. + Regional, racial, gender, or other abuse will not be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember when you update an issue or respond + to an email you are potentially sending to a large number of people. Please + consider this before you update. 
Also remember that nobody likes spam. + +* Don't send email to the maintainers: There's no need to send email to the + maintainers to ask them to investigate an issue or to take a look at a + pull request. Instead of sending an email, GitHub mentions should be + used to ping maintainers to review a pull request, a proposal or an + issue. + +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..8c30ea60 --- /dev/null +++ b/Makefile @@ -0,0 +1,56 @@ +CLI_DIR:=$(CURDIR)/components/cli +ENGINE_DIR:=$(CURDIR)/components/engine +PACKAGING_DIR:=$(CURDIR)/components/packaging +MOBY_COMPONENTS_SHA=ab7c118272b02d8672dc0255561d0c4015979780 +MOBY_COMPONENTS_URL=https://raw.githubusercontent.com/docker/moby-extras/$(MOBY_COMPONENTS_SHA)/cmd/moby-components +MOBY_COMPONENTS=.helpers/moby-components-$(MOBY_COMPONENTS_SHA) +VERSION=$(shell cat VERSION) + +.PHONY: help +help: ## show make targets + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf " \033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +.PHONY: test-integration-cli +test-integration-cli: $(CLI_DIR)/build/docker ## test integration of cli and engine + $(MAKE) -C $(ENGINE_DIR) DOCKER_CLI_PATH=$< test-integration-cli + +$(CLI_DIR)/build/docker: + $(MAKE) -C $(CLI_DIR) -f docker.Makefile build + +.PHONY: deb +deb: ## build deb packages + $(MAKE) VERSION=$(VERSION) CLI_DIR=$(CLI_DIR) ENGINE_DIR=$(ENGINE_DIR) -C $(PACKAGING_DIR) deb + +.PHONY: rpm +rpm: ## build rpm packages + $(MAKE) VERSION=$(VERSION) CLI_DIR=$(CLI_DIR) ENGINE_DIR=$(ENGINE_DIR) -C $(PACKAGING_DIR) rpm + +.PHONY: static +static: ## build static packages + $(MAKE) VERSION=$(VERSION) CLI_DIR=$(CLI_DIR) ENGINE_DIR=$(ENGINE_DIR) -C $(PACKAGING_DIR) static + +.PHONY: clean +clean: ## clean the build artifacts + -$(MAKE) -C $(CLI_DIR) clean + -$(MAKE) -C $(ENGINE_DIR) clean + -$(MAKE) -C $(PACKAGING_DIR) clean + +$(MOBY_COMPONENTS): + mkdir -p .helpers + curl -fsSL $(MOBY_COMPONENTS_URL) > $(MOBY_COMPONENTS) + chmod +x $(MOBY_COMPONENTS) + +.PHONY: update-components +update-components: update-components-cli 
update-components-engine update-components-packaging ## udpate components using moby extra tool + +.PHONY: update-components-cli +update-components-cli: $(MOBY_COMPONENTS) + $(MOBY_COMPONENTS) update cli + +.PHONY: update-components-engine +update-components-engine: $(MOBY_COMPONENTS) + $(MOBY_COMPONENTS) update engine + +.PHONY: update-components-packaging +update-components-packaging: $(MOBY_COMPONENTS) + $(MOBY_COMPONENTS) update packaging diff --git a/README.md b/README.md new file mode 100644 index 00000000..18cbd391 --- /dev/null +++ b/README.md @@ -0,0 +1,94 @@ +# Docker CE + +This repository hosts open source components of Docker CE products. The +`master` branch serves to unify the upstream components on a regular +basis. Long-lived release branches host the code that goes into a product +version for the lifetime of the product. + +This repository is solely maintained by Docker, Inc. + +## Issues + +There are separate issue-tracking repos for the end user Docker CE +products specialized for a platform. Find your issue or file a new issue +for the platform you are using: + +* https://github.com/docker/for-linux +* https://github.com/docker/for-mac +* https://github.com/docker/for-win +* https://github.com/docker/for-aws +* https://github.com/docker/for-azure + +## Unifying upstream sources + +The `master` branch is a combination of components adapted from +different upstream git repos into a unified directory structure using the +[moby-components](https://github.com/shykes/moby-extras/blob/master/cmd/moby-components) +tool. + +You can view the upstream git repos in the +[components.conf](components.conf) file. Each component is isolated into +its own directory under the [components](components) directory. + +The tool will import each component git history within the appropriate path. + +For example, this shows a commit +is imported into the component `engine` from +[moby/moby@a27b4b8](https://github.com/moby/moby/commit/a27b4b8cb8e838d03a99b6d2b30f76bdaf2f9e5d) +into the `components/engine` directory. + +``` +commit 5c70746915d4589a692cbe50a43cf619ed0b7152 +Author: Andrea Luzzardi +Date: Sat Jan 19 00:13:39 2013 + + Initial commit + Upstream-commit: a27b4b8cb8e838d03a99b6d2b30f76bdaf2f9e5d + Component: engine + + components/engine/container.go | 203 ++++++++++++++++++++++++++++... + components/engine/container_test.go | 186 ++++++++++++++++++++++++++++... + components/engine/docker.go | 112 ++++++++++++++++++++++++++++... + components/engine/docker_test.go | 175 ++++++++++++++++++++++++++++... + components/engine/filesystem.go | 52 ++++++++++++++++++++++++++++... + components/engine/filesystem_test.go | 35 +++++++++++++++++++++++++++ + components/engine/lxc_template.go | 94 ++++++++++++++++++++++++++++... + components/engine/state.go | 48 ++++++++++++++++++++++++++++... + components/engine/utils.go | 115 ++++++++++++++++++++++++++++... + components/engine/utils_test.go | 126 ++++++++++++++++++++++++++++... + 10 files changed, 1146 insertions(+) +``` + +## Updates to `master` branch + +Main development of new features should be directed towards the upstream +git repos. The `master` branch of this repo will periodically pull in new +changes from upstream to provide a point for integration. + +## Branching for release + +When a release is started for Docker CE, a new branch will be created +from `master`. Branch names will be `YY.MM` to represent the time-based +release version of the product, e.g. `17.06`. 
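For illustration only, here is a hypothetical sketch of how such a branch could be cut. The `YY.MM` naming and the branch-from-`master` rule come from the paragraph above; the specific commands and the `18.06` name are assumptions for the example, not a description of Docker's actual release tooling:

```
# Hypothetical example: cut a time-based release branch (YY.MM) from master
git checkout master
git pull origin master
git checkout -b 18.06
git push origin 18.06
```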
+ +## Adding fixes to release branch + +Note: every commit of a fix should affect files only within one component +directory. + +### Fix available upstream + +A PR cherry-picking the necessary commits should be created against +the release branch. If the the cherry-pick cannot be applied cleanly, +the logic of the fix should be ported manually. + +### No fix yet + +First create the PR with the fix for the release branch. Once the fix has +been merged, be sure to port the fix to the respective upstream git repo. + +## Release tags + +There will be a git tag for each release candidate (RC) and general +availablilty (GA) release. The tag will only point to commits on release +branches. diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..cf091aa3 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +18.06.1-ce diff --git a/cli/.dockerignore b/cli/.dockerignore new file mode 100644 index 00000000..b0e1ba59 --- /dev/null +++ b/cli/.dockerignore @@ -0,0 +1,2 @@ +.git +build \ No newline at end of file diff --git a/cli/.mailmap b/cli/.mailmap new file mode 100644 index 00000000..990fd0ea --- /dev/null +++ b/cli/.mailmap @@ -0,0 +1,477 @@ +# Generate AUTHORS: scripts/docs/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Aaron L. Xu +Abhinandan Prativadi +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +AJ Bowen +AJ Bowen +Akihiro Matsushima +Akihiro Suda +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Aleksandrs Fadins +Alessandro Boch +Alex Chen +Alex Ellis +Alexander Larsson +Alexander Morozov +Alexander Morozov +Alexandre Beslic +Alicia Lauerman +Allen Sun +Allen Sun +Andrew Weiss +Andrew Weiss +André Martins +Andy Rothfusz +Andy Smith +Ankush Agarwal +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Anuj Bahuguna +Anuj Bahuguna +Anusha Ragunathan +Arnaud Porterie +Arnaud Porterie +Arthur Gautier +Avi Miller +Ben Bonnefoy +Ben Golub +Ben Toews +Benoit Chesneau +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Bin Liu +Bin Liu +Bingshen Wang +Boaz Shuster +Brandon Philips +Brandon Philips +Brent Salisbury +Brian Goff +Brian Goff +Brian Goff +Chander Govindarajan +Chao Wang +Charles Hooper +Chen Chao +Chen Chuanliang +Chen Mingjie +Chen Qiu +Chen Qiu <21321229@zju.edu.cn> +Chris Dias +Chris McKinnel +Christopher Biscardi +Christopher Latham +Chun Chen +Corbin Coleman +Cristian Staretu +Cristian Staretu +Cristian Staretu +CUI Wei cuiwei13 +Daehyeok Mun +Daehyeok Mun +Daehyeok Mun +Dan Feldman +Daniel Dao +Daniel Dao +Daniel Garcia +Daniel Gasienica +Daniel Goosen +Daniel Grunwell +Daniel J Walsh +Daniel Mizyrycki +Daniel Mizyrycki +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Watkins +Danny Yates +Darren Shepherd +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David M. Karr +David Sheets +David Sissitka +David Williamson +Deshi Xiao +Deshi Xiao +Diego Siqueira +Diogo Monica +Dominik Honnef +Doug Davis +Doug Tangren +Elan Ruusamäe +Elan Ruusamäe +Eric G. Noriega +Eric Hanchrow +Eric Rosenberg +Erica Windisch +Erica Windisch +Erik Hollensbe +Erwin van der Koogh +Euan Kemp +Eugen Krizo +Evan Hazlett +Evelyn Xu +Evgeny Shmarnev +Faiz Khan +Felix Hupfeld +Felix Ruess +Feng Yan +Fengtu Wang +Francisco Carriedo +Frank Rosquin +Frederick F. 
Kautz IV +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao <1373319223@qq.com> +George Kontridze +Gerwim Feiken +Giampaolo Mancini +Gopikannan Venugopalsamy +Gou Rao +Greg Stephens +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. Charmes +Gurjeet Singh +Gustav Sinder +Günther Jungbluth +Hakan Özler +Hao Shu Wei +Hao Shu Wei +Harald Albers +Harold Cooper +Harry Zhang +Harry Zhang +Harry Zhang +Harry Zhang +Harshal Patil +Helen Xie +Hollie Teal +Hollie Teal +Hollie Teal +Hu Keping +Huu Nguyen +Hyzhou Zhy +Hyzhou Zhy <1187766782@qq.com> +Ilya Khlopotov +Jack Laxson +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jamie Hannaford +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Nickoloff +Jeroen Franse +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jim Galasyn +Jiuyue Ma +Joey Geiger +Joffrey F +Joffrey F +Joffrey F +Johan Euphrosine +John Harris +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Stephens +Jordan Arentsen +Jordan Jennings +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Soref +Josh Wilson +Joyce Jang +Julien Bordellier +Julien Bordellier +Justin Cormack +Justin Cormack +Justin Cormack +Justin Simonelis +Jérôme Petazzoni +Jérôme Petazzoni +Jérôme Petazzoni +K. Heller +Kai Qiang Wu (Kennan) +Kai Qiang Wu (Kennan) +Kamil Domański +Kamjar Gerami +Kat Samperi +Ken Cochrane +Ken Herner +Kenfe-Mickaël Laventure +Kevin Feyrer +Kevin Kern +Kevin Meredith +Kir Kolyshkin +Kir Kolyshkin +Kir Kolyshkin +Konrad Kleine +Konstantin Gribov +Konstantin Pelykh +Kotaro Yoshimatsu +Kunal Kushwaha +Lajos Papp +Lei Jitang +Lei Jitang +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Linus Heckemann +Linus Heckemann +Lokesh Mandvekar +Lorenzo Fontana +Louis Opter +Louis Opter +Luca Favatella +Luke Marsden +Lyn +Lynda O'Leary +Lynda O'Leary +Ma Müller +Madhan Raj Mookkandy +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mansi Nahar +Mansi Nahar +Marc Abramowitz +Marcelo Horacio Fortino +Marcus Linke +Marianna Tessel +Mark Oates +Markan Patel +Markus Kortlang +Martin Redmond +Martin Redmond +Mary Anthony +Mary Anthony +Mary Anthony moxiegirl +Mateusz Major +Matt Bentley +Matt Schurenko +Matt Williams +Matt Williams +Matthew Heon +Matthew Mosesohn +Matthew Mueller +Matthias Kühnle +Mauricio Garavaglia +Michael Crosby +Michael Crosby +Michael Crosby +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Spetsiotis +Michal Minář +Miguel Angel Alvarez Cabrerizo <30386061+doncicuto@users.noreply.github.com> +Miguel Angel Fernández +Mihai Borobocea +Mike Casas +Mike Goelzer +Milind Chawre +Misty Stanley-Jones +Mohit Soni +Moorthy RS +Moysés Borges +Moysés Borges +Nace Oroz +Nathan LeClaire +Nathan LeClaire +Neil Horman +Nick Russo +Nicolas Borboën +Nigel Poulton +Nik Nyby +Nolan Darilek +O.S. Tezer +O.S. Tezer +Oh Jinkyun +Ouyang Liduo +Patrick Stapleton +Paul Liljenberg +Pavel Tikhomirov +Pawel Konczalski +Peter Choi +Peter Dave Hello +Peter Hsu +Peter Jaffe +Peter Nagy +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Qiang Huang +Qiang Huang +Ray Tsang +Renaud Gaubert +Robert Terhaar +Roberto G. 
Hashioka +Roberto Muñoz Fernández +Roman Dudin +Ross Boucher +Runshen Zhu +Ryan Stelly +Sakeven Jiang +Sandeep Bansal +Sandeep Bansal +Sargun Dhillon +Sean Lee +Sebastiaan van Stijn +Sebastiaan van Stijn +Shaun Kaasten +Shawn Landden +Shengbo Song +Shengbo Song +Shih-Yuan Lee +Shishir Mahajan +Shukui Yang +Shuwei Hao +Shuwei Hao +Sidhartha Mani +Sjoerd Langkemper +Solomon Hykes +Solomon Hykes +Solomon Hykes +Soshi Katsuta +Soshi Katsuta +Sridhar Ratnakumar +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Stefan Berger +Stefan Berger +Stefan J. Wernli +Stefan S. +Stephen Day +Stephen Day +Stephen Day +Steve Desmond +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sun Jianbo +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sylvain Bellemare +Sylvain Bellemare +Tangi Colin +Tejesh Mehta +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens +Thomas Gazagnaire +Thomas Krzero +Thomas Léveil +Thomas Léveil +Tibor Vass +Tibor Vass +Tim Bart +Tim Bosse +Tim Ruffles +Tim Terhorst +Tim Zju <21651152@zju.edu.cn> +Timothy Hobbs +Toli Kuznets +Tom Barlow +Tom Sweeney +Tõnis Tiigi +Trishna Guha +Tristan Carel +Tristan Carel +Umesh Yadav +Umesh Yadav +Victor Lyuboslavsky +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Viktor Vojnovski +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Demeester +Vincent Demeester +Vishnu Kannan +Vladimir Rutsky +Walter Stanish +Wang Guoliang +Wang Jie +Wang Ping +Wang Xing +Wang Yuexiao +Wayne Chang +Wayne Song +Wei Wu cizixs +Wenjun Tang +Wewang Xiaorenfine +Will Weaver +Xianglin Gao +Xianlu Bird +Xiaoyu Zhang +Xuecong Liao +Yamasaki Masahide +Yao Zaiyong +Yassine Tijani +Yazhong Liu +Yestin Sun +Yi EungJun +Ying Li +Ying Li +Yong Tang +Yosef Fertel +Yu Changchun +Yu Chengxia +Yu Peng +Yu Peng +Zachary Jaffee +Zachary Jaffee +ZhangHang +Zhenkun Bi +Zhou Hao +Zhu Kunjia +Zou Yu + diff --git a/cli/AUTHORS b/cli/AUTHORS new file mode 100644 index 00000000..bc1edce7 --- /dev/null +++ b/cli/AUTHORS @@ -0,0 +1,648 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `scripts/docs/generate-authors.sh`. + +Aanand Prasad +Aaron L. 
Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Addam Hardy +Adolfo Ochagavía +Adrien Duermael +Adrien Folie +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akihiro Suda +Akim Demaille +Alan Thompson +Albert Callarisa +Aleksa Sarai +Alessandro Boch +Alex Mavrogiannis +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alfred Landrum +Alicia Lauerman +Allen Sun +Alvin Deng +Amen Belayneh +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Andrea Luzzardi +Andreas Köhler +Andrew France +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrey Petrov +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Arash Deshmeh +Arnaud Porterie +Ashwini Oruganti +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bonnefoy +Ben Firshman +Benjamin Boudreau +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Boaz Shuster +Bogdan Anton +Boris Pruessmann +Bradley Cicenas +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +ChaYoung You +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Jones +Christy Perez +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Corey Farrell +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +dalanlan +Damien Nadé +Dan Cotora +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Lechner +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek McGowan +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Don Kjer +Dong Chen +Doug Davis +Drew Erny +Ed Costello +Eli Uriegas +Eli Uriegas +Elias Faxö +Eric G. Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik St. Martin +Ethan Haynes +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Hupfeld +Felix Rabe +Flavio Crisciani +Florian Klein +Foysal Iqbal +Fred Lifton +Frederick F. Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Xie +Gianluca Borello +Gildas Cuisinier +Gou Rao +Grant Reaber +Greg Pflaum +Guilhem Lettron +Guillaume J. 
Charmes +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hu Keping +Huayi Zhang +huqun +Huu Nguyen +Hyzhou Zhy +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Isabel Jimenez +Ivan Grcic +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Sanders +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Heiss +Jason Plum +Jay Kamat +Jean-Pierre Huynh +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jie Luo +Jilles Oldenbeuving +Jim Galasyn +Jimmy Leger +Jimmy Song +jimmyxian +Joao Fernandes +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard (VM) +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. Martinez +John Willis +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonh Wendell +Jordan Jennings +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julien Barbier +Julien Kassar +Julien Maitrehenry +Justas Brazauskas +Justin Cormack +Justin Simonelis +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +khaled souf +Kim Eik +Kir Kolyshkin +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Kyle Spiers +Lachlan Cooper +Lai Jiangshan +Lars Kellogg-Stedman +Laura Frank +Laurent Erignoux +Lei Jitang +Lennie +Leo Gallucci +Lewis Daly +Li Yi +Li Yi +Liang-Chi Hsieh +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Lucas Chan +Luka Hartwig +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marco Mariani +Marcus Martins +Marianna Tessel +Marius Sturm +Mark Oates +Martin Mosegaard Amdisen +Mary Anthony +Mason Malone +Mateusz Major +Matt Gucci +Matt Robenolt +Matthew Heon +Matthieu Hauglustaine +Max Shytikov +Maxime Petazzoni +Mei ChunTao +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Misty Stanley-Jones +Mohammad Banikazemi +Mohammed Aaqib Ansari +Moorthy RS +Morgan Bauer +Moysés Borges +Mrunal Patel +muicoder +Muthukumar R +Máximo Cuadros +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +O.S. Tezer +ohmystack +Olle Jonsson +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Hemmer +Patrick Lang +Paul +Paul Kehrer +Paul Lietar +Paul Weaver +Pavel Pospisil +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Edge +Peter Hsu +Peter Jaffe +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +pidster +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +qudongfang +Raghavendra K T +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Rodolfo Ortiz +Rogelio Canedo +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Sambuddha Basu +Samuel Karp +Santhosh Manohar +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +squeegels <1674195+squeegels@users.noreply.github.com> +Srini Brahmaroutu +Stefan S. +Stefan Scherer +Stefan Weil +Stephen Day +Stephen Rust +Steve Durrheimer +Steven Burgess +Subhajit Ghosh +Sun Jianbo +Sungwon Han +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +Taylor Jones +Thatcher Peskens +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Smith +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Umesh Yadav +Valentin Lorentz +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +William Henry +Xianglin Gao +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yunxiang Huang +zebrilee +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhou Hao +Zhu Guihua +Álex González +Álvaro Lázaro +Átila Camurça Alves +徐俊杰 diff --git a/cli/CONTRIBUTING.md b/cli/CONTRIBUTING.md new file mode 100644 index 00000000..245d3a3a --- /dev/null +++ b/cli/CONTRIBUTING.md @@ -0,0 +1,365 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/docker/cli/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified on +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolving it. However, if you +have ways to reproduce the issue or have additional information that may help +resolving the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). 
+Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). + +## Quick contribution tips and guidelines + +This section gives the experienced contributor some tips and guidelines. + +### Pull requests are always welcome + +Not sure if that typo is worth a pull request? Found a bug and know how to fix +it? Do it! We will appreciate it. Any significant improvement should be +documented as [a GitHub issue](https://github.com/docker/cli/issues) before +anybody starts working on it. + +We are always thrilled to receive pull requests. We do our best to process them +quickly. If your pull request is not accepted on the first try, +don't get discouraged! Our contributor's guide explains [the review process we +use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + +
+| Channel | Description |
+| ------- | ----------- |
+| Forums | A public forum for users to discuss questions and explore current design patterns and best practices about Docker and related projects in the Docker Ecosystem. To participate, just log in with your Docker Hub account on https://forums.docker.com. |
+| Community Slack | The Docker Community has a dedicated Slack chat to discuss features and issues. You can sign up with this link. |
+| Twitter | You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. |
+| Stack Overflow | Stack Overflow has over 17000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users. |
+ + +### Conventions + +Fork the repository and make changes on your fork in a feature branch: + +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of + the issue. +- If it's a feature branch, create an enhancement issue to announce + your intentions, and name it XXXX-something where XXXX is the number of the + issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. [Run the full test +suite](README.md) on your branch before +submitting a pull request. + +Update the documentation when creating or modifying features. Test your +documentation changes for clarity, concision, and correctness, as well as a +clean documentation build. See our contributors guide for [our style +guide](https://docs.docker.com/opensource/doc-style) and instructions on [building +the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. + +Pull request descriptions should be as clear as possible and include a reference +to all the issues that they address. + +Commit messages must start with a capitalized and short summary (max. 50 chars) +written in the imperative, followed by an optional, more detailed explanatory +text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Post +a comment after pushing. New commits show up in the pull request automatically, +but the reviewers are notified only when you comment. + +Pull requests must be cleanly rebased on top of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + +Before you make a pull request, squash your commits into logical units of work +using `git rebase -i` and `git push -f`. A logical unit of work is a consistent +set of patches that should be reviewed together: for example, upgrading the +version of a vendored dependency and taking advantage of its now available new +feature constitute two separate units of work. Implementing a new function and +calling it in another file constitute a single logical unit of work. The very +high majority of submissions should have a single commit, so if in doubt: squash +down to one. + +After every commit, make sure the test suite passes. Include documentation +changes in the same pull request so that a revert would remove all traces of +the feature or fix. + +Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in the pull request +description that close an issue. Including references automatically closes the issue +on a merge. + +Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly +from the Git history. + +Please see the [Coding Style](#coding-style) for further guidelines. + +### Merge approval + +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to +indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. 
For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. + +For more details, see the [MAINTAINERS](MAINTAINERS) page. + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +### How can I become a maintainer? + +The procedures for adding new maintainers are explained in the +global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS) +file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/) +repository. + +Don't forget: being a maintainer is a time investment. Make sure you +will have time to make yourself available. You don't have to be a +maintainer to make a difference on the project! + +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: + no regional, racial, gender, or other abuse will be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. 
Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember when you update an issue or respond + to an email you are potentially sending to a large number of people. Please + consider this before you update. Also remember that nobody likes spam. + +* Don't send email to the maintainers: There's no need to send email to the + maintainers to ask them to investigate an issue or to take a look at a + pull request. Instead of sending an email, GitHub mentions should be + used to ping maintainers to review a pull request, a proposal or an + issue. + +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. 
Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](https://golang.org/doc/effective_go.html). The +[Go Blog](https://blog.golang.org) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. \ No newline at end of file diff --git a/cli/Jenkinsfile b/cli/Jenkinsfile new file mode 100644 index 00000000..c5fd5055 --- /dev/null +++ b/cli/Jenkinsfile @@ -0,0 +1,12 @@ +wrappedNode(label: 'linux && x86_64', cleanWorkspace: true) { + timeout(time: 60, unit: 'MINUTES') { + stage "Git Checkout" + checkout scm + + stage "Run end-to-end test suite" + sh "docker version" + sh "E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \ + IMAGE_TAG=clie2e${BUILD_NUMBER} \ + make -f docker.Makefile test-e2e" + } +} diff --git a/cli/LICENSE b/cli/LICENSE new file mode 100644 index 00000000..9c8e20ab --- /dev/null +++ b/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/cli/MAINTAINERS b/cli/MAINTAINERS new file mode 100644 index 00000000..c0b64509 --- /dev/null +++ b/cli/MAINTAINERS @@ -0,0 +1,136 @@ +# Docker maintainers file +# +# This file describes who runs the docker/cli project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + people = [ + "aaronlehmann", + "albers", + "cpuguy83", + "dnephin", + "justincormack", + "silvin-lubecki", + "stevvooe", + "thajeztah", + "tibor", + "tonistiigi", + "vdemeester", + "vieux", + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. 
+ + people = [ + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "programmerq", + "thajeztah" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.albers] + Name = "Harald Albers" + Email = "github@albersweb.de" + GitHub = "albers" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + GitHub = "cpuguy83" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + + [people.programmerq] + Name = "Jeff Anderson" + Email = "jeff@docker.com" + GitHub = "programmerq" + + [people.silvin-lubecki] + Name = "Silvin Lubecki" + Email = "silvin.lubecki@docker.com" + GitHub = "silvin-lubecki" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stevvooe@gmail.com" + GitHub = "stevvooe" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + diff --git a/cli/Makefile b/cli/Makefile new file mode 100644 index 00000000..b56befa3 --- /dev/null +++ b/cli/Makefile @@ -0,0 +1,90 @@ +# +# github.com/docker/cli +# +all: binary + + +_:=$(shell ./scripts/warn-outside-container $(MAKECMDGOALS)) + +.PHONY: clean +clean: ## remove build artifacts + rm -rf ./build/* cli/winresources/rsrc_* ./man/man[1-9] docs/yaml/gen + +.PHONY: test-unit +test-unit: ## run unit test + ./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/') + +.PHONY: test +test: test-unit ## run tests + +.PHONY: test-coverage +test-coverage: ## run test coverage + ./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/') + +.PHONY: lint +lint: ## run all the lint tools + gometalinter --config gometalinter.json ./... + +.PHONY: binary +binary: ## build executable for Linux + @echo "WARNING: binary creates a Linux executable. Use cross for macOS or Windows." 
+ ./scripts/build/binary + +.PHONY: cross +cross: ## build executable for macOS and Windows + ./scripts/build/cross + +.PHONY: binary-windows +binary-windows: ## build executable for Windows + ./scripts/build/windows + +.PHONY: binary-osx +binary-osx: ## build executable for macOS + ./scripts/build/osx + +.PHONY: dynbinary +dynbinary: ## build dynamically linked binary + ./scripts/build/dynbinary + +.PHONY: watch +watch: ## monitor file changes and run go test + ./scripts/test/watch + +vendor: vendor.conf ## check that vendor matches vendor.conf + rm -rf vendor + bash -c 'vndr |& grep -v -i clone' + scripts/validate/check-git-diff vendor + +.PHONY: authors +authors: ## generate AUTHORS file from git history + scripts/docs/generate-authors.sh + +.PHONY: manpages +manpages: ## generate man pages from go source and markdown + scripts/docs/generate-man.sh + +.PHONY: yamldocs +yamldocs: ## generate documentation YAML files consumed by docs repo + scripts/docs/generate-yaml.sh + +.PHONY: shellcheck +shellcheck: ## run shellcheck validation + scripts/validate/shellcheck + +.PHONY: help +help: ## print this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + + +cli/compose/schema/bindata.go: cli/compose/schema/data/*.json + go generate github.com/docker/cli/cli/compose/schema + +compose-jsonschema: cli/compose/schema/bindata.go + scripts/validate/check-git-diff cli/compose/schema/bindata.go + +.PHONY: ci-validate +ci-validate: + time make -B vendor + time make -B compose-jsonschema + time make manpages + time make yamldocs diff --git a/cli/NOTICE b/cli/NOTICE new file mode 100644 index 00000000..0c74e15b --- /dev/null +++ b/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/cli/README.md b/cli/README.md new file mode 100644 index 00000000..2964377e --- /dev/null +++ b/cli/README.md @@ -0,0 +1,69 @@ +[![build status](https://circleci.com/gh/docker/cli.svg?style=shield)](https://circleci.com/gh/docker/cli/tree/master) [![Build Status](https://jenkins.dockerproject.org/job/docker/job/cli/job/master/badge/icon)](https://jenkins.dockerproject.org/job/docker/job/cli/job/master/) + +docker/cli +========== + +This repository is the home of the cli used in the Docker CE and +Docker EE products. + +Development +=========== + +`docker/cli` is developed using Docker. 
+ +Build a linux binary: + +``` +$ make -f docker.Makefile binary +``` + +Build binaries for all supported platforms: + +``` +$ make -f docker.Makefile cross +``` + +Run all linting: + +``` +$ make -f docker.Makefile lint +``` + +List all the available targets: + +``` +$ make help +``` + +### In-container development environment + +Start an interactive development environment: + +``` +$ make -f docker.Makefile shell +``` + +In the development environment you can run many tasks, including build binaries: + +``` +$ make binary +``` + +Legal +===== +*Brought to you courtesy of our legal counsel. For more context, +please see the [NOTICE](https://github.com/docker/cli/blob/master/NOTICE) document in this repo.* + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. + +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +Licensing +========= +docker/cli is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. diff --git a/cli/TESTING.md b/cli/TESTING.md new file mode 100644 index 00000000..63bba1d3 --- /dev/null +++ b/cli/TESTING.md @@ -0,0 +1,87 @@ +# Testing + +The following guidelines summarize the testing policy for docker/cli. + +## Unit Test Suite + +All code changes should have unit test coverage. + +Error cases should be tested with unit tests. + +Bug fixes should be covered by new unit tests or additional assertions in +existing unit tests. + +### Details + +The unit test suite follows the standard Go testing convention. Tests are +located in the package directory in `_test.go` files. + +Unit tests should be named using the convention: + +``` +Test +``` + +[Table tests](https://github.com/golang/go/wiki/TableDrivenTests) should be used +where appropriate, but may not be appropriate in all cases. + +Assertions should be made using +[testify/assert](https://godoc.org/github.com/stretchr/testify/assert) and test +requirements should be verified using +[testify/require](https://godoc.org/github.com/stretchr/testify/require). + +Fakes, and testing utilities can be found in +[internal/test](https://godoc.org/github.com/docker/cli/internal/test) and +[gotestyourself](https://godoc.org/github.com/gotestyourself/gotestyourself). + +## End-to-End Test Suite + +The end-to-end test suite tests a cli binary against a real API backend. + +### Guidelines + +Each feature (subcommand) should have a single end-to-end test for +the success case. The test should include all (or most) flags/options supported +by that feature. + +In some rare cases a couple additional end-to-end tests may be written for a +sufficiently complex and critical feature (ex: `container run`, `service +create`, `service update`, and `docker build` may have ~3-5 cases each). + +In some rare cases a sufficiently critical error paths may have a single +end-to-end test case. + +In all other cases the behaviour should be covered by unit tests. + +If a code change adds a new flag, that flag should be added to the existing +"success case" end-to-end test. + +If a code change fixes a bug, that bug fix should be covered either by adding +assertions to the existing end-to-end test, or with one or more unit test. + +### Details + +The end-to-end test suite is located in +[./e2e](https://github.com/docker/cli/tree/master/e2e). 
Each directory in `e2e` +corresponds to a directory in `cli/command` and contains the tests for that +subcommand. Files in each directory should be named `_test.go` where +command is the basename of the command (ex: the test for `docker stack deploy` +is found in `e2e/stack/deploy_test.go`). + +Tests should be named using the convention: + +``` +Test[] +``` + +where the test case name is only required when there are multiple test cases for +a single command. + +End-to-end test should run the `docker` binary using +[gotestyourself/icmd](https://godoc.org/github.com/gotestyourself/gotestyourself/icmd) +and make assertions about the exit code, stdout, stderr, and local file system. + +Any Docker image or registry operations should use `registry:5000/` +to communicate with the local instance of the Docker registry. To load +additional fixture images to the registry see +[scripts/test/e2e/run](https://github.com/docker/cli/blob/master/scripts/test/e2e/run). diff --git a/cli/VERSION b/cli/VERSION new file mode 100644 index 00000000..944d763c --- /dev/null +++ b/cli/VERSION @@ -0,0 +1 @@ +18.06.0-dev diff --git a/cli/appveyor.yml b/cli/appveyor.yml new file mode 100644 index 00000000..bb1e7d47 --- /dev/null +++ b/cli/appveyor.yml @@ -0,0 +1,23 @@ +version: "{build}" + +clone_folder: c:\gopath\src\github.com\docker\cli + +environment: + GOPATH: c:\gopath + GOVERSION: 1.10.3 + DEPVERSION: v0.4.1 + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - go version + - go env + +deploy: false + +build_script: + - ps: .\scripts\make.ps1 -Binary + +test_script: + - ps: .\scripts\make.ps1 -TestUnit \ No newline at end of file diff --git a/cli/circle.yml b/cli/circle.yml new file mode 100644 index 00000000..53fcac73 --- /dev/null +++ b/cli/circle.yml @@ -0,0 +1,116 @@ +version: 2 + +jobs: + + lint: + working_directory: /work + docker: [{image: 'docker:17.06-git'}] + steps: + - checkout + - setup_remote_docker: + reusable: true + exclusive: false + - run: + command: docker version + - run: + name: "Lint" + command: | + dockerfile=dockerfiles/Dockerfile.lint + echo "COPY . ." >> $dockerfile + docker build -f $dockerfile --tag cli-linter:$CIRCLE_BUILD_NUM . + docker run --rm cli-linter:$CIRCLE_BUILD_NUM + + cross: + working_directory: /work + docker: [{image: 'docker:17.06-git'}] + parallelism: 3 + steps: + - checkout + - setup_remote_docker: + reusable: true + exclusive: false + - run: + name: "Cross" + command: | + dockerfile=dockerfiles/Dockerfile.cross + echo "COPY . ." >> $dockerfile + docker build -f $dockerfile --tag cli-builder:$CIRCLE_BUILD_NUM . + name=cross-$CIRCLE_BUILD_NUM-$CIRCLE_NODE_INDEX + docker run \ + -e CROSS_GROUP=$CIRCLE_NODE_INDEX \ + --name $name cli-builder:$CIRCLE_BUILD_NUM \ + make cross + docker cp \ + $name:/go/src/github.com/docker/cli/build \ + /work/build + - store_artifacts: + path: /work/build + + test: + working_directory: /work + docker: [{image: 'docker:17.06-git'}] + steps: + - checkout + - setup_remote_docker: + reusable: true + exclusive: false + - run: + name: "Unit Test with Coverage" + command: | + dockerfile=dockerfiles/Dockerfile.dev + echo "COPY . ." >> $dockerfile + docker build -f $dockerfile --tag cli-builder:$CIRCLE_BUILD_NUM . 
+ docker run --name \ + test-$CIRCLE_BUILD_NUM cli-builder:$CIRCLE_BUILD_NUM \ + make test-coverage + + - run: + name: "Upload to Codecov" + command: | + docker cp \ + test-$CIRCLE_BUILD_NUM:/go/src/github.com/docker/cli/coverage.txt \ + coverage.txt + apk add -U bash curl + curl -s https://codecov.io/bash | bash || \ + echo 'Codecov failed to upload' + + validate: + working_directory: /work + docker: [{image: 'docker:17.06-git'}] + steps: + - checkout + - setup_remote_docker: + reusable: true + exclusive: false + - run: + name: "Validate Vendor, Docs, and Code Generation" + command: | + dockerfile=dockerfiles/Dockerfile.dev + echo "COPY . ." >> $dockerfile + rm -f .dockerignore # include .git + docker build -f $dockerfile --tag cli-builder-with-git:$CIRCLE_BUILD_NUM . + docker run --rm cli-builder-with-git:$CIRCLE_BUILD_NUM \ + make ci-validate + shellcheck: + working_directory: /work + docker: [{image: 'docker:17.06-git'}] + steps: + - checkout + - setup_remote_docker + - run: + name: "Run shellcheck" + command: | + dockerfile=dockerfiles/Dockerfile.shellcheck + echo "COPY . ." >> $dockerfile + docker build -f $dockerfile --tag cli-validator:$CIRCLE_BUILD_NUM . + docker run --rm cli-validator:$CIRCLE_BUILD_NUM \ + make shellcheck +workflows: + version: 2 + ci: + jobs: + - lint + - cross + - test + - validate + - shellcheck diff --git a/cli/cli/cobra.go b/cli/cli/cobra.go new file mode 100644 index 00000000..03cdfcc5 --- /dev/null +++ b/cli/cli/cobra.go @@ -0,0 +1,152 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. +func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + rootCmd.SetVersionTemplate("Docker version {{.Version}}\n") + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") + rootCmd.PersistentFlags().Lookup("help").Hidden = true +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/cli/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return nil + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return 
len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func wrappedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}}{{- if .HasAvailableFlags}} [OPTIONS]{{end}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasAvailableFlags}} + +Options: +{{ wrappedFlagUsages . | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. +{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/cli/cli/command/bundlefile/bundlefile.go b/cli/cli/command/bundlefile/bundlefile.go new file mode 100644 index 00000000..07e2c8b0 --- /dev/null +++ b/cli/cli/command/bundlefile/bundlefile.go @@ -0,0 +1,70 @@ +package bundlefile + +import ( + "encoding/json" + "io" + + "github.com/pkg/errors" +) + +// Bundlefile stores the contents of a bundlefile +type Bundlefile struct { + Version string + Services map[string]Service +} + +// Service is a service from a bundlefile +type Service struct { + Image string + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Env []string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Ports []Port `json:",omitempty"` + WorkingDir *string `json:",omitempty"` + User *string `json:",omitempty"` + Networks []string `json:",omitempty"` +} + +// Port is a port as defined in a bundlefile +type Port struct { + Protocol string + Port uint32 +} + +// LoadFile loads a bundlefile from a path to the file +func LoadFile(reader io.Reader) (*Bundlefile, error) { + bundlefile := &Bundlefile{} + + decoder := json.NewDecoder(reader) + if err := decoder.Decode(bundlefile); err != nil { + switch jsonErr := err.(type) { + case *json.SyntaxError: + return nil, errors.Errorf( + "JSON syntax error at byte %v: %s", + jsonErr.Offset, + jsonErr.Error()) + case *json.UnmarshalTypeError: + return nil, errors.Errorf( + "Unexpected type at byte %v. 
Expected %s but received %s.", + jsonErr.Offset, + jsonErr.Type, + jsonErr.Value) + } + return nil, err + } + + return bundlefile, nil +} + +// Print writes the contents of the bundlefile to the output writer +// as human readable json +func Print(out io.Writer, bundle *Bundlefile) error { + bytes, err := json.MarshalIndent(*bundle, "", " ") + if err != nil { + return err + } + + _, err = out.Write(bytes) + return err +} diff --git a/cli/cli/command/bundlefile/bundlefile_test.go b/cli/cli/command/bundlefile/bundlefile_test.go new file mode 100644 index 00000000..cbaa341c --- /dev/null +++ b/cli/cli/command/bundlefile/bundlefile_test.go @@ -0,0 +1,78 @@ +package bundlefile + +import ( + "bytes" + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestLoadFileV01Success(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "redis": { + "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", + "Networks": ["default"] + }, + "web": { + "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", + "Networks": ["default"], + "User": "web" + } + } + }`) + + bundle, err := LoadFile(reader) + assert.NilError(t, err) + assert.Check(t, is.Equal("0.1", bundle.Version)) + assert.Check(t, is.Len(bundle.Services, 2)) +} + +func TestLoadFileSyntaxError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": unquoted string + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "JSON syntax error at byte 37: invalid character 'u' looking for beginning of value") +} + +func TestLoadFileTypeError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "web": { + "Image": "redis", + "Networks": "none" + } + } + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "Unexpected type at byte 94. 
Expected []string but received string.") +} + +func TestPrint(t *testing.T) { + var buffer bytes.Buffer + bundle := &Bundlefile{ + Version: "0.1", + Services: map[string]Service{ + "web": { + Image: "image", + Command: []string{"echo", "something"}, + }, + }, + } + assert.Check(t, Print(&buffer, bundle)) + output := buffer.String() + assert.Check(t, is.Contains(output, "\"Image\": \"image\"")) + assert.Check(t, is.Contains(output, + `"Command": [ + "echo", + "something" + ]`)) +} diff --git a/cli/cli/command/checkpoint/client_test.go b/cli/cli/command/checkpoint/client_test.go new file mode 100644 index 00000000..c8fe190e --- /dev/null +++ b/cli/cli/command/checkpoint/client_test.go @@ -0,0 +1,36 @@ +package checkpoint + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + checkpointCreateFunc func(container string, options types.CheckpointCreateOptions) error + checkpointDeleteFunc func(container string, options types.CheckpointDeleteOptions) error + checkpointListFunc func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} + +func (cli *fakeClient) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + if cli.checkpointCreateFunc != nil { + return cli.checkpointCreateFunc(container, options) + } + return nil +} + +func (cli *fakeClient) CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error { + if cli.checkpointDeleteFunc != nil { + return cli.checkpointDeleteFunc(container, options) + } + return nil +} + +func (cli *fakeClient) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + if cli.checkpointListFunc != nil { + return cli.checkpointListFunc(container, options) + } + return []types.Checkpoint{}, nil +} diff --git a/cli/cli/command/checkpoint/cmd.go b/cli/cli/command/checkpoint/cmd.go new file mode 100644 index 00000000..2a698e74 --- /dev/null +++ b/cli/cli/command/checkpoint/cmd.go @@ -0,0 +1,28 @@ +package checkpoint + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental) +func NewCheckpointCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "checkpoint", + Short: "Manage checkpoints", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "experimental": "", + "ostype": "linux", + "version": "1.25", + }, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/checkpoint/create.go b/cli/cli/command/checkpoint/create.go new file mode 100644 index 00000000..45b4bd63 --- /dev/null +++ b/cli/cli/command/checkpoint/create.go @@ -0,0 +1,57 @@ +package checkpoint + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type createOptions struct { + container string + checkpoint string + checkpointDir string + leaveRunning bool +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + var opts createOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] CONTAINER CHECKPOINT", + Short: "Create a checkpoint from a running container", + Args: 
cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.checkpoint = args[1] + return runCreate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint") + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runCreate(dockerCli command.Cli, opts createOptions) error { + client := dockerCli.Client() + + checkpointOpts := types.CheckpointCreateOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + Exit: !opts.leaveRunning, + } + + err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint) + return nil +} diff --git a/cli/cli/command/checkpoint/create_test.go b/cli/cli/command/checkpoint/create_test.go new file mode 100644 index 00000000..70c6aad7 --- /dev/null +++ b/cli/cli/command/checkpoint/create_test.go @@ -0,0 +1,72 @@ +package checkpoint + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCheckpointCreateErrors(t *testing.T) { + testCases := []struct { + args []string + checkpointCreateFunc func(container string, options types.CheckpointCreateOptions) error + expectedError string + }{ + { + args: []string{"too-few-arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"foo", "bar"}, + checkpointCreateFunc: func(container string, options types.CheckpointCreateOptions) error { + return errors.Errorf("error creating checkpoint for container foo") + }, + expectedError: "error creating checkpoint for container foo", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + checkpointCreateFunc: tc.checkpointCreateFunc, + }) + cmd := newCreateCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestCheckpointCreateWithOptions(t *testing.T) { + var containerID, checkpointID, checkpointDir string + var exit bool + cli := test.NewFakeCli(&fakeClient{ + checkpointCreateFunc: func(container string, options types.CheckpointCreateOptions) error { + containerID = container + checkpointID = options.CheckpointID + checkpointDir = options.CheckpointDir + exit = options.Exit + return nil + }, + }) + cmd := newCreateCommand(cli) + checkpoint := "checkpoint-bar" + cmd.SetArgs([]string{"container-foo", checkpoint}) + cmd.Flags().Set("leave-running", "true") + cmd.Flags().Set("checkpoint-dir", "/dir/foo") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("container-foo", containerID)) + assert.Check(t, is.Equal(checkpoint, checkpointID)) + assert.Check(t, is.Equal("/dir/foo", checkpointDir)) + assert.Check(t, is.Equal(false, exit)) + assert.Check(t, is.Equal(checkpoint, strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/checkpoint/list.go b/cli/cli/command/checkpoint/list.go new file mode 100644 index 00000000..7b041cfe --- /dev/null +++ b/cli/cli/command/checkpoint/list.go @@ -0,0 +1,54 @@ +package checkpoint + +import ( + "context" + + "github.com/docker/cli/cli" + 
"github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type listOptions struct { + checkpointDir string +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + var opts listOptions + + cmd := &cobra.Command{ + Use: "ls [OPTIONS] CONTAINER", + Aliases: []string{"list"}, + Short: "List checkpoints for a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, args[0], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd + +} + +func runList(dockerCli command.Cli, container string, opts listOptions) error { + client := dockerCli.Client() + + listOpts := types.CheckpointListOptions{ + CheckpointDir: opts.checkpointDir, + } + + checkpoints, err := client.CheckpointList(context.Background(), container, listOpts) + if err != nil { + return err + } + + cpCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewCheckpointFormat(formatter.TableFormatKey), + } + return formatter.CheckpointWrite(cpCtx, checkpoints) +} diff --git a/cli/cli/command/checkpoint/list_test.go b/cli/cli/command/checkpoint/list_test.go new file mode 100644 index 00000000..986d3ee4 --- /dev/null +++ b/cli/cli/command/checkpoint/list_test.go @@ -0,0 +1,67 @@ +package checkpoint + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestCheckpointListErrors(t *testing.T) { + testCases := []struct { + args []string + checkpointListFunc func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) + expectedError string + }{ + { + args: []string{}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"foo"}, + checkpointListFunc: func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + return []types.Checkpoint{}, errors.Errorf("error getting checkpoints for container foo") + }, + expectedError: "error getting checkpoints for container foo", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + checkpointListFunc: tc.checkpointListFunc, + }) + cmd := newListCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestCheckpointListWithOptions(t *testing.T) { + var containerID, checkpointDir string + cli := test.NewFakeCli(&fakeClient{ + checkpointListFunc: func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + containerID = container + checkpointDir = options.CheckpointDir + return []types.Checkpoint{ + {Name: "checkpoint-foo"}, + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.SetArgs([]string{"container-foo"}) + cmd.Flags().Set("checkpoint-dir", "/dir/foo") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("container-foo", containerID)) + assert.Check(t, is.Equal("/dir/foo", checkpointDir)) + golden.Assert(t, cli.OutBuffer().String(), "checkpoint-list-with-options.golden") +} diff --git a/cli/cli/command/checkpoint/remove.go b/cli/cli/command/checkpoint/remove.go new file mode 100644 index 
00000000..3f894421 --- /dev/null +++ b/cli/cli/command/checkpoint/remove.go @@ -0,0 +1,44 @@ +package checkpoint + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type removeOptions struct { + checkpointDir string +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER CHECKPOINT", + Aliases: []string{"remove"}, + Short: "Remove a checkpoint", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args[0], args[1], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runRemove(dockerCli command.Cli, container string, checkpoint string, opts removeOptions) error { + client := dockerCli.Client() + + removeOpts := types.CheckpointDeleteOptions{ + CheckpointID: checkpoint, + CheckpointDir: opts.checkpointDir, + } + + return client.CheckpointDelete(context.Background(), container, removeOpts) +} diff --git a/cli/cli/command/checkpoint/remove_test.go b/cli/cli/command/checkpoint/remove_test.go new file mode 100644 index 00000000..d1a9ac4b --- /dev/null +++ b/cli/cli/command/checkpoint/remove_test.go @@ -0,0 +1,65 @@ +package checkpoint + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCheckpointRemoveErrors(t *testing.T) { + testCases := []struct { + args []string + checkpointDeleteFunc func(container string, options types.CheckpointDeleteOptions) error + expectedError string + }{ + { + args: []string{"too-few-arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"foo", "bar"}, + checkpointDeleteFunc: func(container string, options types.CheckpointDeleteOptions) error { + return errors.Errorf("error deleting checkpoint") + }, + expectedError: "error deleting checkpoint", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + checkpointDeleteFunc: tc.checkpointDeleteFunc, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestCheckpointRemoveWithOptions(t *testing.T) { + var containerID, checkpointID, checkpointDir string + cli := test.NewFakeCli(&fakeClient{ + checkpointDeleteFunc: func(container string, options types.CheckpointDeleteOptions) error { + containerID = container + checkpointID = options.CheckpointID + checkpointDir = options.CheckpointDir + return nil + }, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs([]string{"container-foo", "checkpoint-bar"}) + cmd.Flags().Set("checkpoint-dir", "/dir/foo") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("container-foo", containerID)) + assert.Check(t, is.Equal("checkpoint-bar", checkpointID)) + assert.Check(t, is.Equal("/dir/foo", checkpointDir)) +} diff --git a/cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden b/cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden new file mode 100644 index 00000000..f53f016a --- /dev/null +++ 
b/cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden @@ -0,0 +1,2 @@ +CHECKPOINT NAME +checkpoint-foo diff --git a/cli/cli/command/cli.go b/cli/cli/command/cli.go new file mode 100644 index 00000000..b17aaf23 --- /dev/null +++ b/cli/cli/command/cli.go @@ -0,0 +1,320 @@ +package command + +import ( + "context" + "io" + "net" + "net/http" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/config" + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + cliflags "github.com/docker/cli/cli/flags" + manifeststore "github.com/docker/cli/cli/manifest/store" + registryclient "github.com/docker/cli/cli/registry/client" + "github.com/docker/cli/cli/trust" + dopts "github.com/docker/cli/opts" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary" + notaryclient "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" +) + +// Streams is an interface which exposes the standard input and output streams +type Streams interface { + In() *InStream + Out() *OutStream + Err() io.Writer +} + +// Cli represents the docker command line client. +type Cli interface { + Client() client.APIClient + Out() *OutStream + Err() io.Writer + In() *InStream + SetIn(in *InStream) + ConfigFile() *configfile.ConfigFile + ServerInfo() ServerInfo + ClientInfo() ClientInfo + NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) + DefaultVersion() string + ManifestStore() manifeststore.Store + RegistryClient(bool) registryclient.RegistryClient + ContentTrustEnabled() bool +} + +// DockerCli is an instance the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + configFile *configfile.ConfigFile + in *InStream + out *OutStream + err io.Writer + client client.APIClient + serverInfo ServerInfo + clientInfo ClientInfo + contentTrust bool +} + +// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified. +func (cli *DockerCli) DefaultVersion() string { + return cli.clientInfo.DefaultVersion +} + +// Client returns the APIClient +func (cli *DockerCli) Client() client.APIClient { + return cli.client +} + +// Out returns the writer used for stdout +func (cli *DockerCli) Out() *OutStream { + return cli.out +} + +// Err returns the writer used for stderr +func (cli *DockerCli) Err() io.Writer { + return cli.err +} + +// SetIn sets the reader used for stdin +func (cli *DockerCli) SetIn(in *InStream) { + cli.in = in +} + +// In returns the reader used for stdin +func (cli *DockerCli) In() *InStream { + return cli.in +} + +// ShowHelp shows the command help. 
+func ShowHelp(err io.Writer) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + cmd.SetOutput(err) + cmd.HelpFunc()(cmd, args) + return nil + } +} + +// ConfigFile returns the ConfigFile +func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { + return cli.configFile +} + +// ServerInfo returns the server version details for the host this client is +// connected to +func (cli *DockerCli) ServerInfo() ServerInfo { + return cli.serverInfo +} + +// ClientInfo returns the client details for the cli +func (cli *DockerCli) ClientInfo() ClientInfo { + return cli.clientInfo +} + +// ContentTrustEnabled returns whether content trust has been enabled by an +// environment variable. +func (cli *DockerCli) ContentTrustEnabled() bool { + return cli.contentTrust +} + +// ManifestStore returns a store for local manifests +func (cli *DockerCli) ManifestStore() manifeststore.Store { + // TODO: support override default location from config file + return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests")) +} + +// RegistryClient returns a client for communicating with a Docker distribution +// registry +func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient { + resolver := func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { + return ResolveAuthConfig(ctx, cli, index) + } + return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure) +} + +// Initialize the dockerCli runs initialization that must happen after command +// line flags are parsed. +func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { + cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err) + + var err error + cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) + if tlsconfig.IsErrEncryptedKey(err) { + passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil) + newClient := func(password string) (client.APIClient, error) { + opts.Common.TLSOptions.Passphrase = password + return NewAPIClientFromFlags(opts.Common, cli.configFile) + } + cli.client, err = getClientWithPassword(passRetriever, newClient) + } + if err != nil { + return err + } + var experimentalValue string + // Environment variable always overrides configuration + if experimentalValue = os.Getenv("DOCKER_CLI_EXPERIMENTAL"); experimentalValue == "" { + experimentalValue = cli.configFile.Experimental + } + hasExperimental, err := isEnabled(experimentalValue) + if err != nil { + return errors.Wrap(err, "Experimental field") + } + cli.clientInfo = ClientInfo{ + DefaultVersion: cli.client.ClientVersion(), + HasExperimental: hasExperimental, + } + cli.initializeFromClient() + return nil +} + +func isEnabled(value string) (bool, error) { + switch value { + case "enabled": + return true, nil + case "", "disabled": + return false, nil + default: + return false, errors.Errorf("%q is not valid, should be either enabled or disabled", value) + } +} + +func (cli *DockerCli) initializeFromClient() { + ping, err := cli.client.Ping(context.Background()) + if err != nil { + // Default to true if we fail to connect to daemon + cli.serverInfo = ServerInfo{HasExperimental: true} + + if ping.APIVersion != "" { + cli.client.NegotiateAPIVersionPing(ping) + } + return + } + + cli.serverInfo = ServerInfo{ + HasExperimental: ping.Experimental, + OSType: ping.OSType, + } + cli.client.NegotiateAPIVersionPing(ping) +} + +func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) 
(client.APIClient, error)) (client.APIClient, error) { + for attempts := 0; ; attempts++ { + passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts) + if giveup || err != nil { + return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase") + } + + apiclient, err := newClient(passwd) + if !tlsconfig.IsErrEncryptedKey(err) { + return apiclient, err + } + } +} + +// NotaryClient provides a Notary Repository to interact with signed metadata for an image +func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) { + return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...) +} + +// ServerInfo stores details about the supported features and platform of the +// server +type ServerInfo struct { + HasExperimental bool + OSType string +} + +// ClientInfo stores details about the supported features of the client +type ClientInfo struct { + HasExperimental bool + DefaultVersion string +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool) *DockerCli { + return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted} +} + +// NewAPIClientFromFlags creates a new APIClient from command line flags +func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { + host, err := getServerHost(opts.Hosts, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + customHeaders := configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = UserAgent() + + verStr := api.DefaultVersion + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + return client.NewClientWithOpts( + withHTTPClient(opts.TLSOptions), + client.WithHTTPHeaders(customHeaders), + client.WithVersion(verStr), + client.WithHost(host), + ) +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) { + var host string + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + return dopts.ParseHost(tlsOptions != nil, host) +} + +func withHTTPClient(tlsOpts *tlsconfig.Options) func(*client.Client) error { + return func(c *client.Client) error { + if tlsOpts == nil { + // Use the default HTTPClient + return nil + } + + opts := *tlsOpts + opts.ExclusiveRootPools = true + tlsConfig, err := tlsconfig.Client(opts) + if err != nil { + return err + } + + httpClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + DialContext: (&net.Dialer{ + KeepAlive: 30 * time.Second, + Timeout: 30 * time.Second, + }).DialContext, + }, + CheckRedirect: client.CheckRedirect, + } + return client.WithHTTPClient(httpClient)(c) + } +} + +// UserAgent returns the user agent string used for making API requests +func UserAgent() string { + return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")" +} diff --git a/cli/cli/command/cli_test.go b/cli/cli/command/cli_test.go new file mode 100644 index 00000000..a4b06e69 --- /dev/null +++ b/cli/cli/command/cli_test.go @@ -0,0 +1,228 @@ +package command + +import ( + "context" + "crypto/x509" + "os" + "runtime" + "testing" + + cliconfig 
"github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/flags" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/env" + "gotest.tools/fs" +) + +func TestNewAPIClientFromFlags(t *testing.T) { + host := "unix://path" + if runtime.GOOS == "windows" { + host = "npipe://./" + } + opts := &flags.CommonOptions{Hosts: []string{host}} + configFile := &configfile.ConfigFile{ + HTTPHeaders: map[string]string{ + "My-Header": "Custom-Value", + }, + } + apiclient, err := NewAPIClientFromFlags(opts, configFile) + assert.NilError(t, err) + assert.Check(t, is.Equal(host, apiclient.DaemonHost())) + + expectedHeaders := map[string]string{ + "My-Header": "Custom-Value", + "User-Agent": UserAgent(), + } + assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders())) + assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion())) +} + +func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) { + customVersion := "v3.3.3" + defer env.Patch(t, "DOCKER_API_VERSION", customVersion)() + + opts := &flags.CommonOptions{} + configFile := &configfile.ConfigFile{} + apiclient, err := NewAPIClientFromFlags(opts, configFile) + assert.NilError(t, err) + assert.Check(t, is.Equal(customVersion, apiclient.ClientVersion())) +} + +type fakeClient struct { + client.Client + pingFunc func() (types.Ping, error) + version string + negotiated bool +} + +func (c *fakeClient) Ping(_ context.Context) (types.Ping, error) { + return c.pingFunc() +} + +func (c *fakeClient) ClientVersion() string { + return c.version +} + +func (c *fakeClient) NegotiateAPIVersionPing(types.Ping) { + c.negotiated = true +} + +func TestInitializeFromClient(t *testing.T) { + defaultVersion := "v1.55" + + var testcases = []struct { + doc string + pingFunc func() (types.Ping, error) + expectedServer ServerInfo + negotiated bool + }{ + { + doc: "successful ping", + pingFunc: func() (types.Ping, error) { + return types.Ping{Experimental: true, OSType: "linux", APIVersion: "v1.30"}, nil + }, + expectedServer: ServerInfo{HasExperimental: true, OSType: "linux"}, + negotiated: true, + }, + { + doc: "failed ping, no API version", + pingFunc: func() (types.Ping, error) { + return types.Ping{}, errors.New("failed") + }, + expectedServer: ServerInfo{HasExperimental: true}, + }, + { + doc: "failed ping, with API version", + pingFunc: func() (types.Ping, error) { + return types.Ping{APIVersion: "v1.33"}, errors.New("failed") + }, + expectedServer: ServerInfo{HasExperimental: true}, + negotiated: true, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + apiclient := &fakeClient{ + pingFunc: testcase.pingFunc, + version: defaultVersion, + } + + cli := &DockerCli{client: apiclient} + cli.initializeFromClient() + assert.Check(t, is.DeepEqual(testcase.expectedServer, cli.serverInfo)) + assert.Check(t, is.Equal(testcase.negotiated, apiclient.negotiated)) + }) + } +} + +func TestExperimentalCLI(t *testing.T) { + defaultVersion := "v1.55" + + var testcases = []struct { + doc string + configfile string + expectedExperimentalCLI bool + }{ + { + doc: "default", + configfile: `{}`, + expectedExperimentalCLI: false, + }, + { + doc: "experimental", + configfile: `{ + "experimental": "enabled" +}`, + expectedExperimentalCLI: true, + }, + } + + for _, testcase := range testcases { + 
t.Run(testcase.doc, func(t *testing.T) { + dir := fs.NewDir(t, testcase.doc, fs.WithFile("config.json", testcase.configfile)) + defer dir.Remove() + apiclient := &fakeClient{ + version: defaultVersion, + } + + cli := &DockerCli{client: apiclient, err: os.Stderr} + cliconfig.SetDir(dir.Path()) + err := cli.Initialize(flags.NewClientOptions()) + assert.NilError(t, err) + assert.Check(t, is.Equal(testcase.expectedExperimentalCLI, cli.ClientInfo().HasExperimental)) + }) + } +} + +func TestGetClientWithPassword(t *testing.T) { + expected := "password" + + var testcases = []struct { + doc string + password string + retrieverErr error + retrieverGiveup bool + newClientErr error + expectedErr string + }{ + { + doc: "successful connect", + password: expected, + }, + { + doc: "password retriever exhausted", + retrieverGiveup: true, + retrieverErr: errors.New("failed"), + expectedErr: "private key is encrypted, but could not get passphrase", + }, + { + doc: "password retriever error", + retrieverErr: errors.New("failed"), + expectedErr: "failed", + }, + { + doc: "newClient error", + newClientErr: errors.New("failed to connect"), + expectedErr: "failed to connect", + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + passRetriever := func(_, _ string, _ bool, attempts int) (passphrase string, giveup bool, err error) { + // Always return an invalid pass first to test iteration + switch attempts { + case 0: + return "something else", false, nil + default: + return testcase.password, testcase.retrieverGiveup, testcase.retrieverErr + } + } + + newClient := func(currentPassword string) (client.APIClient, error) { + if testcase.newClientErr != nil { + return nil, testcase.newClientErr + } + if currentPassword == expected { + return &client.Client{}, nil + } + return &client.Client{}, x509.IncorrectPasswordError + } + + _, err := getClientWithPassword(passRetriever, newClient) + if testcase.expectedErr != "" { + assert.ErrorContains(t, err, testcase.expectedErr) + return + } + + assert.NilError(t, err) + }) + } +} diff --git a/cli/cli/command/commands/commands.go b/cli/cli/command/commands/commands.go new file mode 100644 index 00000000..87808305 --- /dev/null +++ b/cli/cli/command/commands/commands.go @@ -0,0 +1,133 @@ +package commands + +import ( + "os" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/checkpoint" + "github.com/docker/cli/cli/command/config" + "github.com/docker/cli/cli/command/container" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/command/manifest" + "github.com/docker/cli/cli/command/network" + "github.com/docker/cli/cli/command/node" + "github.com/docker/cli/cli/command/plugin" + "github.com/docker/cli/cli/command/registry" + "github.com/docker/cli/cli/command/secret" + "github.com/docker/cli/cli/command/service" + "github.com/docker/cli/cli/command/stack" + "github.com/docker/cli/cli/command/swarm" + "github.com/docker/cli/cli/command/system" + "github.com/docker/cli/cli/command/trust" + "github.com/docker/cli/cli/command/volume" + "github.com/spf13/cobra" +) + +// AddCommands adds all the commands from cli/command to the root command +func AddCommands(cmd *cobra.Command, dockerCli command.Cli) { + cmd.AddCommand( + // checkpoint + checkpoint.NewCheckpointCommand(dockerCli), + + // config + config.NewConfigCommand(dockerCli), + + // container + container.NewContainerCommand(dockerCli), + container.NewRunCommand(dockerCli), + + // image + image.NewImageCommand(dockerCli), + 
image.NewBuildCommand(dockerCli), + + // manifest + manifest.NewManifestCommand(dockerCli), + + // network + network.NewNetworkCommand(dockerCli), + + // node + node.NewNodeCommand(dockerCli), + + // plugin + plugin.NewPluginCommand(dockerCli), + + // registry + registry.NewLoginCommand(dockerCli), + registry.NewLogoutCommand(dockerCli), + registry.NewSearchCommand(dockerCli), + + // secret + secret.NewSecretCommand(dockerCli), + + // service + service.NewServiceCommand(dockerCli), + + // system + system.NewSystemCommand(dockerCli), + system.NewVersionCommand(dockerCli), + + // stack + stack.NewStackCommand(dockerCli), + stack.NewTopLevelDeployCommand(dockerCli), + + // swarm + swarm.NewSwarmCommand(dockerCli), + + // trust + trust.NewTrustCommand(dockerCli), + + // volume + volume.NewVolumeCommand(dockerCli), + + // legacy commands may be hidden + hide(system.NewEventsCommand(dockerCli)), + hide(system.NewInfoCommand(dockerCli)), + hide(system.NewInspectCommand(dockerCli)), + hide(container.NewAttachCommand(dockerCli)), + hide(container.NewCommitCommand(dockerCli)), + hide(container.NewCopyCommand(dockerCli)), + hide(container.NewCreateCommand(dockerCli)), + hide(container.NewDiffCommand(dockerCli)), + hide(container.NewExecCommand(dockerCli)), + hide(container.NewExportCommand(dockerCli)), + hide(container.NewKillCommand(dockerCli)), + hide(container.NewLogsCommand(dockerCli)), + hide(container.NewPauseCommand(dockerCli)), + hide(container.NewPortCommand(dockerCli)), + hide(container.NewPsCommand(dockerCli)), + hide(container.NewRenameCommand(dockerCli)), + hide(container.NewRestartCommand(dockerCli)), + hide(container.NewRmCommand(dockerCli)), + hide(container.NewStartCommand(dockerCli)), + hide(container.NewStatsCommand(dockerCli)), + hide(container.NewStopCommand(dockerCli)), + hide(container.NewTopCommand(dockerCli)), + hide(container.NewUnpauseCommand(dockerCli)), + hide(container.NewUpdateCommand(dockerCli)), + hide(container.NewWaitCommand(dockerCli)), + hide(image.NewHistoryCommand(dockerCli)), + hide(image.NewImagesCommand(dockerCli)), + hide(image.NewImportCommand(dockerCli)), + hide(image.NewLoadCommand(dockerCli)), + hide(image.NewPullCommand(dockerCli)), + hide(image.NewPushCommand(dockerCli)), + hide(image.NewRemoveCommand(dockerCli)), + hide(image.NewSaveCommand(dockerCli)), + hide(image.NewTagCommand(dockerCli)), + ) + +} + +func hide(cmd *cobra.Command) *cobra.Command { + // If the environment variable with name "DOCKER_HIDE_LEGACY_COMMANDS" is not empty, + // these legacy commands (such as `docker ps`, `docker exec`, etc) + // will not be shown in output console. 
+ if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" { + return cmd + } + cmdCopy := *cmd + cmdCopy.Hidden = true + cmdCopy.Aliases = []string{} + return &cmdCopy +} diff --git a/cli/cli/command/config/client_test.go b/cli/cli/command/config/client_test.go new file mode 100644 index 00000000..2e19b775 --- /dev/null +++ b/cli/cli/command/config/client_test.go @@ -0,0 +1,45 @@ +package config + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + configCreateFunc func(swarm.ConfigSpec) (types.ConfigCreateResponse, error) + configInspectFunc func(string) (swarm.Config, []byte, error) + configListFunc func(types.ConfigListOptions) ([]swarm.Config, error) + configRemoveFunc func(string) error +} + +func (c *fakeClient) ConfigCreate(ctx context.Context, spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + if c.configCreateFunc != nil { + return c.configCreateFunc(spec) + } + return types.ConfigCreateResponse{}, nil +} + +func (c *fakeClient) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if c.configInspectFunc != nil { + return c.configInspectFunc(id) + } + return swarm.Config{}, nil, nil +} + +func (c *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if c.configListFunc != nil { + return c.configListFunc(options) + } + return []swarm.Config{}, nil +} + +func (c *fakeClient) ConfigRemove(ctx context.Context, name string) error { + if c.configRemoveFunc != nil { + return c.configRemoveFunc(name) + } + return nil +} diff --git a/cli/cli/command/config/cmd.go b/cli/cli/command/config/cmd.go new file mode 100644 index 00000000..7defe2a6 --- /dev/null +++ b/cli/cli/command/config/cmd.go @@ -0,0 +1,29 @@ +package config + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewConfigCommand returns a cobra command for `config` subcommands +func NewConfigCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "config", + Short: "Manage Docker configs", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.30", + "swarm": "", + }, + } + cmd.AddCommand( + newConfigListCommand(dockerCli), + newConfigCreateCommand(dockerCli), + newConfigInspectCommand(dockerCli), + newConfigRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/config/create.go b/cli/cli/command/config/create.go new file mode 100644 index 00000000..04130313 --- /dev/null +++ b/cli/cli/command/config/create.go @@ -0,0 +1,86 @@ +package config + +import ( + "context" + "fmt" + "io" + "io/ioutil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + templateDriver string + file string + labels opts.ListOpts +} + +func newConfigCreateCommand(dockerCli command.Cli) *cobra.Command { + createOpts := createOptions{ + labels: opts.NewListOpts(opts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] CONFIG file|-", + Short: "Create a config from a file or STDIN", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + createOpts.name = args[0] + createOpts.file = args[1] 
+ return runConfigCreate(dockerCli, createOpts) + }, + } + flags := cmd.Flags() + flags.VarP(&createOpts.labels, "label", "l", "Config labels") + flags.StringVar(&createOpts.templateDriver, "template-driver", "", "Template driver") + flags.SetAnnotation("driver", "version", []string{"1.37"}) + + return cmd +} + +func runConfigCreate(dockerCli command.Cli, options createOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var in io.Reader = dockerCli.In() + if options.file != "-" { + file, err := system.OpenSequential(options.file) + if err != nil { + return err + } + in = file + defer file.Close() + } + + configData, err := ioutil.ReadAll(in) + if err != nil { + return errors.Errorf("Error reading content from %q: %v", options.file, err) + } + + spec := swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: options.name, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + }, + Data: configData, + } + if options.templateDriver != "" { + spec.Templating = &swarm.Driver{ + Name: options.templateDriver, + } + } + r, err := client.ConfigCreate(ctx, spec) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), r.ID) + return nil +} diff --git a/cli/cli/command/config/create_test.go b/cli/cli/command/config/create_test.go new file mode 100644 index 00000000..bb2ea946 --- /dev/null +++ b/cli/cli/command/config/create_test.go @@ -0,0 +1,143 @@ +package config + +import ( + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +const configDataFile = "config-create-with-name.golden" + +func TestConfigCreateErrors(t *testing.T) { + testCases := []struct { + args []string + configCreateFunc func(swarm.ConfigSpec) (types.ConfigCreateResponse, error) + expectedError string + }{ + { + args: []string{"too_few"}, + expectedError: "requires exactly 2 arguments", + }, + {args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"name", filepath.Join("testdata", configDataFile)}, + configCreateFunc: func(configSpec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + return types.ConfigCreateResponse{}, errors.Errorf("error creating config") + }, + expectedError: "error creating config", + }, + } + for _, tc := range testCases { + cmd := newConfigCreateCommand( + test.NewFakeCli(&fakeClient{ + configCreateFunc: tc.configCreateFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestConfigCreateWithName(t *testing.T) { + name := "foo" + var actual []byte + cli := test.NewFakeCli(&fakeClient{ + configCreateFunc: func(spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + if spec.Name != name { + return types.ConfigCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + actual = spec.Data + + return types.ConfigCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newConfigCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, string(actual), configDataFile) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestConfigCreateWithLabels(t *testing.T) { + expectedLabels 
:= map[string]string{ + "lbl1": "Label-foo", + "lbl2": "Label-bar", + } + name := "foo" + + data, err := ioutil.ReadFile(filepath.Join("testdata", configDataFile)) + assert.NilError(t, err) + + expected := swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: expectedLabels, + }, + Data: data, + } + + cli := test.NewFakeCli(&fakeClient{ + configCreateFunc: func(spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + if !reflect.DeepEqual(spec, expected) { + return types.ConfigCreateResponse{}, errors.Errorf("expected %+v, got %+v", expected, spec) + } + + return types.ConfigCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newConfigCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)}) + cmd.Flags().Set("label", "lbl1=Label-foo") + cmd.Flags().Set("label", "lbl2=Label-bar") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestConfigCreateWithTemplatingDriver(t *testing.T) { + expectedDriver := &swarm.Driver{ + Name: "template-driver", + } + name := "foo" + + cli := test.NewFakeCli(&fakeClient{ + configCreateFunc: func(spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + if spec.Name != name { + return types.ConfigCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + if spec.Templating.Name != expectedDriver.Name { + return types.ConfigCreateResponse{}, errors.Errorf("expected driver %v, got %v", expectedDriver, spec.Labels) + } + + return types.ConfigCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newConfigCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)}) + cmd.Flags().Set("template-driver", expectedDriver.Name) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/config/inspect.go b/cli/cli/command/config/inspect.go new file mode 100644 index 00000000..d1515ec9 --- /dev/null +++ b/cli/cli/command/config/inspect.go @@ -0,0 +1,66 @@ +package config + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + names []string + format string + pretty bool +} + +func newConfigInspectCommand(dockerCli command.Cli) *cobra.Command { + opts := inspectOptions{} + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] CONFIG [CONFIG...]", + Short: "Display detailed information on one or more configs", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runConfigInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + cmd.Flags().BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format") + return cmd +} + +func runConfigInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(id string) (interface{}, []byte, error) { + return client.ConfigInspectWithRaw(ctx, id) + } + f := opts.format + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return 
fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + configCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewConfigFormat(f, false), + } + + if err := formatter.ConfigInspectWrite(configCtx, opts.names, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil + +} diff --git a/cli/cli/command/config/inspect_test.go b/cli/cli/command/config/inspect_test.go new file mode 100644 index 00000000..1b4f275c --- /dev/null +++ b/cli/cli/command/config/inspect_test.go @@ -0,0 +1,172 @@ +package config + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestConfigInspectErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + configInspectFunc func(configID string) (swarm.Config, []byte, error) + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"foo"}, + configInspectFunc: func(configID string) (swarm.Config, []byte, error) { + return swarm.Config{}, nil, errors.Errorf("error while inspecting the config") + }, + expectedError: "error while inspecting the config", + }, + { + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template parsing error", + }, + { + args: []string{"foo", "bar"}, + configInspectFunc: func(configID string) (swarm.Config, []byte, error) { + if configID == "foo" { + return *Config(ConfigName("foo")), nil, nil + } + return swarm.Config{}, nil, errors.Errorf("error while inspecting the config") + }, + expectedError: "error while inspecting the config", + }, + } + for _, tc := range testCases { + cmd := newConfigInspectCommand( + test.NewFakeCli(&fakeClient{ + configInspectFunc: tc.configInspectFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestConfigInspectWithoutFormat(t *testing.T) { + testCases := []struct { + name string + args []string + configInspectFunc func(configID string) (swarm.Config, []byte, error) + }{ + { + name: "single-config", + args: []string{"foo"}, + configInspectFunc: func(name string) (swarm.Config, []byte, error) { + if name != "foo" { + return swarm.Config{}, nil, errors.Errorf("Invalid name, expected %s, got %s", "foo", name) + } + return *Config(ConfigID("ID-foo"), ConfigName("foo")), nil, nil + }, + }, + { + name: "multiple-configs-with-labels", + args: []string{"foo", "bar"}, + configInspectFunc: func(name string) (swarm.Config, []byte, error) { + return *Config(ConfigID("ID-"+name), ConfigName(name), ConfigLabels(map[string]string{ + "label1": "label-foo", + })), nil, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{configInspectFunc: tc.configInspectFunc}) + cmd := newConfigInspectCommand(cli) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("config-inspect-without-format.%s.golden", tc.name)) + } +} + +func TestConfigInspectWithFormat(t *testing.T) { + configInspectFunc := func(name string) (swarm.Config, []byte, error) { + return 
*Config(ConfigName("foo"), ConfigLabels(map[string]string{ + "label1": "label-foo", + })), nil, nil + } + testCases := []struct { + name string + format string + args []string + configInspectFunc func(name string) (swarm.Config, []byte, error) + }{ + { + name: "simple-template", + format: "{{.Spec.Name}}", + args: []string{"foo"}, + configInspectFunc: configInspectFunc, + }, + { + name: "json-template", + format: "{{json .Spec.Labels}}", + args: []string{"foo"}, + configInspectFunc: configInspectFunc, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + configInspectFunc: tc.configInspectFunc, + }) + cmd := newConfigInspectCommand(cli) + cmd.SetArgs(tc.args) + cmd.Flags().Set("format", tc.format) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("config-inspect-with-format.%s.golden", tc.name)) + } +} + +func TestConfigInspectPretty(t *testing.T) { + testCases := []struct { + name string + configInspectFunc func(string) (swarm.Config, []byte, error) + }{ + { + name: "simple", + configInspectFunc: func(id string) (swarm.Config, []byte, error) { + return *Config( + ConfigLabels(map[string]string{ + "lbl1": "value1", + }), + ConfigID("configID"), + ConfigName("configName"), + ConfigCreatedAt(time.Time{}), + ConfigUpdatedAt(time.Time{}), + ConfigData([]byte("payload here")), + ), []byte{}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + configInspectFunc: tc.configInspectFunc, + }) + cmd := newConfigInspectCommand(cli) + + cmd.SetArgs([]string{"configID"}) + cmd.Flags().Set("pretty", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("config-inspect-pretty.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/config/ls.go b/cli/cli/command/config/ls.go new file mode 100644 index 00000000..dd2d89ed --- /dev/null +++ b/cli/cli/command/config/ls.go @@ -0,0 +1,77 @@ +package config + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type byConfigName []swarm.Config + +func (r byConfigName) Len() int { return len(r) } +func (r byConfigName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byConfigName) Less(i, j int) bool { + return sortorder.NaturalLess(r[i].Spec.Name, r[j].Spec.Name) +} + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newConfigListCommand(dockerCli command.Cli) *cobra.Command { + listOpts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List configs", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runConfigList(dockerCli, listOpts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&listOpts.quiet, "quiet", "q", false, "Only display IDs") + flags.StringVarP(&listOpts.format, "format", "", "", "Pretty-print configs using a Go template") + flags.VarP(&listOpts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runConfigList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + configs, err := client.ConfigList(ctx, types.ConfigListOptions{Filters: options.filter.Value()}) + if 
err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ConfigFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().ConfigFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byConfigName(configs)) + + configCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewConfigFormat(format, options.quiet), + } + return formatter.ConfigWrite(configCtx, configs) +} diff --git a/cli/cli/command/config/ls_test.go b/cli/cli/command/config/ls_test.go new file mode 100644 index 00000000..d3055b4a --- /dev/null +++ b/cli/cli/command/config/ls_test.go @@ -0,0 +1,158 @@ +package config + +import ( + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestConfigListErrors(t *testing.T) { + testCases := []struct { + args []string + configListFunc func(types.ConfigListOptions) ([]swarm.Config, error) + expectedError string + }{ + { + args: []string{"foo"}, + expectedError: "accepts no argument", + }, + { + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return []swarm.Config{}, errors.Errorf("error listing configs") + }, + expectedError: "error listing configs", + }, + } + for _, tc := range testCases { + cmd := newConfigListCommand( + test.NewFakeCli(&fakeClient{ + configListFunc: tc.configListFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestConfigList(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return []swarm.Config{ + *Config(ConfigID("ID-1-foo"), + ConfigName("1-foo"), + ConfigVersion(swarm.Version{Index: 10}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Config(ConfigID("ID-10-foo"), + ConfigName("10-foo"), + ConfigVersion(swarm.Version{Index: 11}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Config(ConfigID("ID-2-foo"), + ConfigName("2-foo"), + ConfigVersion(swarm.Version{Index: 11}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + }, nil + }, + }) + cmd := newConfigListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-sort.golden") +} + +func TestConfigListWithQuietOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return []swarm.Config{ + *Config(ConfigID("ID-foo"), ConfigName("foo")), + *Config(ConfigID("ID-bar"), ConfigName("bar"), ConfigLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cmd := newConfigListCommand(cli) + cmd.Flags().Set("quiet", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-with-quiet-option.golden") +} + +func TestConfigListWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) 
([]swarm.Config, error) { + return []swarm.Config{ + *Config(ConfigID("ID-foo"), ConfigName("foo")), + *Config(ConfigID("ID-bar"), ConfigName("bar"), ConfigLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + ConfigFormat: "{{ .Name }} {{ .Labels }}", + }) + cmd := newConfigListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-with-config-format.golden") +} + +func TestConfigListWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return []swarm.Config{ + *Config(ConfigID("ID-foo"), ConfigName("foo")), + *Config(ConfigID("ID-bar"), ConfigName("bar"), ConfigLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cmd := newConfigListCommand(cli) + cmd.Flags().Set("format", "{{ .Name }} {{ .Labels }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-with-format.golden") +} + +func TestConfigListWithFilter(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + assert.Check(t, is.Equal("foo", options.Filters.Get("name")[0])) + assert.Check(t, is.Equal("lbl1=Label-bar", options.Filters.Get("label")[0])) + return []swarm.Config{ + *Config(ConfigID("ID-foo"), + ConfigName("foo"), + ConfigVersion(swarm.Version{Index: 10}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Config(ConfigID("ID-bar"), + ConfigName("bar"), + ConfigVersion(swarm.Version{Index: 11}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + }, nil + }, + }) + cmd := newConfigListCommand(cli) + cmd.Flags().Set("filter", "name=foo") + cmd.Flags().Set("filter", "label=lbl1=Label-bar") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-with-filter.golden") +} diff --git a/cli/cli/command/config/remove.go b/cli/cli/command/config/remove.go new file mode 100644 index 00000000..3240a5a3 --- /dev/null +++ b/cli/cli/command/config/remove.go @@ -0,0 +1,53 @@ +package config + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + names []string +} + +func newConfigRemoveCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "rm CONFIG [CONFIG...]", + Aliases: []string{"remove"}, + Short: "Remove one or more configs", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts := removeOptions{ + names: args, + } + return runConfigRemove(dockerCli, opts) + }, + } +} + +func runConfigRemove(dockerCli command.Cli, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, name := range opts.names { + if err := client.ConfigRemove(ctx, name); err != nil { + errs = append(errs, err.Error()) + continue + } + + fmt.Fprintln(dockerCli.Out(), name) + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/cli/cli/command/config/remove_test.go b/cli/cli/command/config/remove_test.go new file mode 100644 index 00000000..4a1980bc --- /dev/null +++ b/cli/cli/command/config/remove_test.go @@ -0,0 +1,79 @@ +package 
config + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestConfigRemoveErrors(t *testing.T) { + testCases := []struct { + args []string + configRemoveFunc func(string) error + expectedError string + }{ + { + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + { + args: []string{"foo"}, + configRemoveFunc: func(name string) error { + return errors.Errorf("error removing config") + }, + expectedError: "error removing config", + }, + } + for _, tc := range testCases { + cmd := newConfigRemoveCommand( + test.NewFakeCli(&fakeClient{ + configRemoveFunc: tc.configRemoveFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestConfigRemoveWithName(t *testing.T) { + names := []string{"foo", "bar"} + var removedConfigs []string + cli := test.NewFakeCli(&fakeClient{ + configRemoveFunc: func(name string) error { + removedConfigs = append(removedConfigs, name) + return nil + }, + }) + cmd := newConfigRemoveCommand(cli) + cmd.SetArgs(names) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(names, strings.Split(strings.TrimSpace(cli.OutBuffer().String()), "\n"))) + assert.Check(t, is.DeepEqual(names, removedConfigs)) +} + +func TestConfigRemoveContinueAfterError(t *testing.T) { + names := []string{"foo", "bar"} + var removedConfigs []string + + cli := test.NewFakeCli(&fakeClient{ + configRemoveFunc: func(name string) error { + removedConfigs = append(removedConfigs, name) + if name == "foo" { + return errors.Errorf("error removing config: %s", name) + } + return nil + }, + }) + + cmd := newConfigRemoveCommand(cli) + cmd.SetArgs(names) + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), "error removing config: foo") + assert.Check(t, is.DeepEqual(names, removedConfigs)) +} diff --git a/cli/cli/command/config/testdata/config-create-with-name.golden b/cli/cli/command/config/testdata/config-create-with-name.golden new file mode 100644 index 00000000..7b28bb3f --- /dev/null +++ b/cli/cli/command/config/testdata/config-create-with-name.golden @@ -0,0 +1 @@ +config_foo_bar diff --git a/cli/cli/command/config/testdata/config-inspect-pretty.simple.golden b/cli/cli/command/config/testdata/config-inspect-pretty.simple.golden new file mode 100644 index 00000000..60b5c7fa --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-pretty.simple.golden @@ -0,0 +1,8 @@ +ID: configID +Name: configName +Labels: + - lbl1=value1 +Created at: 0001-01-01 00:00:00 +0000 utc +Updated at: 0001-01-01 00:00:00 +0000 utc +Data: +payload here diff --git a/cli/cli/command/config/testdata/config-inspect-with-format.json-template.golden b/cli/cli/command/config/testdata/config-inspect-with-format.json-template.golden new file mode 100644 index 00000000..aab678f8 --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-with-format.json-template.golden @@ -0,0 +1 @@ +{"label1":"label-foo"} diff --git a/cli/cli/command/config/testdata/config-inspect-with-format.simple-template.golden b/cli/cli/command/config/testdata/config-inspect-with-format.simple-template.golden new file mode 100644 index 00000000..257cc564 --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-with-format.simple-template.golden @@ -0,0 +1 @@ +foo diff --git a/cli/cli/command/config/testdata/config-inspect-without-format.multiple-configs-with-labels.golden 
b/cli/cli/command/config/testdata/config-inspect-without-format.multiple-configs-with-labels.golden new file mode 100644 index 00000000..b01a400c --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-without-format.multiple-configs-with-labels.golden @@ -0,0 +1,26 @@ +[ + { + "ID": "ID-foo", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "foo", + "Labels": { + "label1": "label-foo" + } + } + }, + { + "ID": "ID-bar", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "bar", + "Labels": { + "label1": "label-foo" + } + } + } +] diff --git a/cli/cli/command/config/testdata/config-inspect-without-format.single-config.golden b/cli/cli/command/config/testdata/config-inspect-without-format.single-config.golden new file mode 100644 index 00000000..c4f41c10 --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-without-format.single-config.golden @@ -0,0 +1,12 @@ +[ + { + "ID": "ID-foo", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "foo", + "Labels": null + } + } +] diff --git a/cli/cli/command/config/testdata/config-list-sort.golden b/cli/cli/command/config/testdata/config-list-sort.golden new file mode 100644 index 00000000..141057c3 --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-sort.golden @@ -0,0 +1,4 @@ +ID NAME CREATED UPDATED +ID-1-foo 1-foo 2 hours ago About an hour ago +ID-2-foo 2-foo 2 hours ago About an hour ago +ID-10-foo 10-foo 2 hours ago About an hour ago diff --git a/cli/cli/command/config/testdata/config-list-with-config-format.golden b/cli/cli/command/config/testdata/config-list-with-config-format.golden new file mode 100644 index 00000000..a64bb595 --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-with-config-format.golden @@ -0,0 +1,2 @@ +bar label=label-bar +foo diff --git a/cli/cli/command/config/testdata/config-list-with-filter.golden b/cli/cli/command/config/testdata/config-list-with-filter.golden new file mode 100644 index 00000000..6fdc13b8 --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-with-filter.golden @@ -0,0 +1,3 @@ +ID NAME CREATED UPDATED +ID-bar bar 2 hours ago About an hour ago +ID-foo foo 2 hours ago About an hour ago diff --git a/cli/cli/command/config/testdata/config-list-with-format.golden b/cli/cli/command/config/testdata/config-list-with-format.golden new file mode 100644 index 00000000..a64bb595 --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-with-format.golden @@ -0,0 +1,2 @@ +bar label=label-bar +foo diff --git a/cli/cli/command/config/testdata/config-list-with-quiet-option.golden b/cli/cli/command/config/testdata/config-list-with-quiet-option.golden new file mode 100644 index 00000000..145fc38d --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-with-quiet-option.golden @@ -0,0 +1,2 @@ +ID-bar +ID-foo diff --git a/cli/cli/command/container/attach.go b/cli/cli/command/container/attach.go new file mode 100644 index 00000000..de96a3b7 --- /dev/null +++ b/cli/cli/command/container/attach.go @@ -0,0 +1,181 @@ +package container + +import ( + "context" + "fmt" + "io" + "net/http/httputil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/signal" + "github.com/pkg/errors" + 
"github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +type attachOptions struct { + noStdin bool + proxy bool + detachKeys string + + container string +} + +func inspectContainerAndCheckState(ctx context.Context, cli client.APIClient, args string) (*types.ContainerJSON, error) { + c, err := cli.ContainerInspect(ctx, args) + if err != nil { + return nil, err + } + if !c.State.Running { + return nil, errors.New("You cannot attach to a stopped container, start it first") + } + if c.State.Paused { + return nil, errors.New("You cannot attach to a paused container, unpause it first") + } + if c.State.Restarting { + return nil, errors.New("You cannot attach to a restarting container, wait until it is running") + } + + return &c, nil +} + +// NewAttachCommand creates a new cobra.Command for `docker attach` +func NewAttachCommand(dockerCli command.Cli) *cobra.Command { + var opts attachOptions + + cmd := &cobra.Command{ + Use: "attach [OPTIONS] CONTAINER", + Short: "Attach local standard input, output, and error streams to a running container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runAttach(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") + flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + return cmd +} + +func runAttach(dockerCli command.Cli, opts *attachOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + // request channel to wait for client + resultC, errC := client.ContainerWait(ctx, opts.container, "") + + c, err := inspectContainerAndCheckState(ctx, client, opts.container) + if err != nil { + return err + } + + if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil { + return err + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: !opts.noStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = dockerCli.In() + } + + if opts.proxy && !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, opts.container) + defer signal.StopCatch(sigc) + } + + resp, errAttach := client.ContainerAttach(ctx, opts.container, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return errAttach + } + defer resp.Close() + + // If use docker attach command to attach to a stop container, it will return + // "You cannot attach to a stopped container" error, it's ok, but when + // attach to a running container, it(docker attach) use inspect to check + // the container's state, if it pass the state check on the client side, + // and then the container is stopped, docker attach command still attach to + // the container and not exit. + // + // Recheck the container's state to avoid attach block. 
+ _, err = inspectContainerAndCheckState(ctx, client, opts.container) + if err != nil { + return err + } + + if c.Config.Tty && dockerCli.Out().IsTerminal() { + resizeTTY(ctx, dockerCli, opts.container) + } + + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: dockerCli.Out(), + errorStream: dockerCli.Err(), + resp: resp, + tty: c.Config.Tty, + detachKeys: options.DetachKeys, + } + + if err := streamer.stream(ctx); err != nil { + return err + } + + if errAttach != nil { + return errAttach + } + + return getExitStatus(errC, resultC) +} + +func getExitStatus(errC <-chan error, resultC <-chan container.ContainerWaitOKBody) error { + select { + case result := <-resultC: + if result.Error != nil { + return fmt.Errorf(result.Error.Message) + } + if result.StatusCode != 0 { + return cli.StatusError{StatusCode: int(result.StatusCode)} + } + case err := <-errC: + return err + } + + return nil +} + +func resizeTTY(ctx context.Context, dockerCli command.Cli, containerID string) { + height, width := dockerCli.Out().GetTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + resizeTtyTo(ctx, dockerCli.Client(), containerID, height+1, width+1, false) + + // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back + // to the actual size. + if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } +} diff --git a/cli/cli/command/container/attach_test.go b/cli/cli/command/container/attach_test.go new file mode 100644 index 00000000..7d8d3f6e --- /dev/null +++ b/cli/cli/command/container/attach_test.go @@ -0,0 +1,129 @@ +package container + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestNewAttachCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + containerInspectFunc func(img string) (types.ContainerJSON, error) + }{ + { + name: "client-error", + args: []string{"5cb5bb5e4a3b"}, + expectedError: "something went wrong", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{}, errors.Errorf("something went wrong") + }, + }, + { + name: "client-stopped", + args: []string{"5cb5bb5e4a3b"}, + expectedError: "You cannot attach to a stopped container", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + c := types.ContainerJSON{} + c.ContainerJSONBase = &types.ContainerJSONBase{} + c.ContainerJSONBase.State = &types.ContainerState{Running: false} + return c, nil + }, + }, + { + name: "client-paused", + args: []string{"5cb5bb5e4a3b"}, + expectedError: "You cannot attach to a paused container", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + c := types.ContainerJSON{} + c.ContainerJSONBase = &types.ContainerJSONBase{} + c.ContainerJSONBase.State = &types.ContainerState{ + Running: true, + Paused: true, + } + return c, nil + }, + }, + { + name: "client-restarting", + args: []string{"5cb5bb5e4a3b"}, 
+ expectedError: "You cannot attach to a restarting container", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + c := types.ContainerJSON{} + c.ContainerJSONBase = &types.ContainerJSONBase{} + c.ContainerJSONBase.State = &types.ContainerState{ + Running: true, + Paused: false, + Restarting: true, + } + return c, nil + }, + }, + } + for _, tc := range testCases { + cmd := NewAttachCommand(test.NewFakeCli(&fakeClient{inspectFunc: tc.containerInspectFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestGetExitStatus(t *testing.T) { + var ( + expectedErr = fmt.Errorf("unexpected error") + errC = make(chan error, 1) + resultC = make(chan container.ContainerWaitOKBody, 1) + ) + + testcases := []struct { + result *container.ContainerWaitOKBody + err error + expectedError error + }{ + { + result: &container.ContainerWaitOKBody{ + StatusCode: 0, + }, + }, + { + err: expectedErr, + expectedError: expectedErr, + }, + { + result: &container.ContainerWaitOKBody{ + Error: &container.ContainerWaitOKBodyError{Message: expectedErr.Error()}, + }, + expectedError: expectedErr, + }, + { + result: &container.ContainerWaitOKBody{ + StatusCode: 15, + }, + expectedError: cli.StatusError{StatusCode: 15}, + }, + } + + for _, testcase := range testcases { + if testcase.err != nil { + errC <- testcase.err + } + if testcase.result != nil { + resultC <- *testcase.result + } + err := getExitStatus(errC, resultC) + if testcase.expectedError == nil { + assert.NilError(t, err) + } else { + assert.Error(t, err, testcase.expectedError.Error()) + } + } +} diff --git a/cli/cli/command/container/client_test.go b/cli/cli/command/container/client_test.go new file mode 100644 index 00000000..a2c39bc6 --- /dev/null +++ b/cli/cli/command/container/client_test.go @@ -0,0 +1,126 @@ +package container + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + inspectFunc func(string) (types.ContainerJSON, error) + execInspectFunc func(execID string) (types.ContainerExecInspect, error) + execCreateFunc func(container string, config types.ExecConfig) (types.IDResponse, error) + createContainerFunc func(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + containerStartFunc func(container string, options types.ContainerStartOptions) error + imageCreateFunc func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + infoFunc func() (types.Info, error) + containerStatPathFunc func(container, path string) (types.ContainerPathStat, error) + containerCopyFromFunc func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + logFunc func(string, types.ContainerLogsOptions) (io.ReadCloser, error) + waitFunc func(string) (<-chan container.ContainerWaitOKBody, <-chan error) + containerListFunc func(types.ContainerListOptions) ([]types.Container, error) + Version string +} + +func (f *fakeClient) ContainerList(_ context.Context, options types.ContainerListOptions) ([]types.Container, error) { + if f.containerListFunc != nil { + return f.containerListFunc(options) + } + return []types.Container{}, nil +} + +func (f *fakeClient) ContainerInspect(_ context.Context, containerID 
string) (types.ContainerJSON, error) { + if f.inspectFunc != nil { + return f.inspectFunc(containerID) + } + return types.ContainerJSON{}, nil +} + +func (f *fakeClient) ContainerExecCreate(_ context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + if f.execCreateFunc != nil { + return f.execCreateFunc(container, config) + } + return types.IDResponse{}, nil +} + +func (f *fakeClient) ContainerExecInspect(_ context.Context, execID string) (types.ContainerExecInspect, error) { + if f.execInspectFunc != nil { + return f.execInspectFunc(execID) + } + return types.ContainerExecInspect{}, nil +} + +func (f *fakeClient) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + return nil +} + +func (f *fakeClient) ContainerCreate( + _ context.Context, + config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, +) (container.ContainerCreateCreatedBody, error) { + if f.createContainerFunc != nil { + return f.createContainerFunc(config, hostConfig, networkingConfig, containerName) + } + return container.ContainerCreateCreatedBody{}, nil +} + +func (f *fakeClient) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + if f.imageCreateFunc != nil { + return f.imageCreateFunc(parentReference, options) + } + return nil, nil +} + +func (f *fakeClient) Info(_ context.Context) (types.Info, error) { + if f.infoFunc != nil { + return f.infoFunc() + } + return types.Info{}, nil +} + +func (f *fakeClient) ContainerStatPath(_ context.Context, container, path string) (types.ContainerPathStat, error) { + if f.containerStatPathFunc != nil { + return f.containerStatPathFunc(container, path) + } + return types.ContainerPathStat{}, nil +} + +func (f *fakeClient) CopyFromContainer(_ context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + if f.containerCopyFromFunc != nil { + return f.containerCopyFromFunc(container, srcPath) + } + return nil, types.ContainerPathStat{}, nil +} + +func (f *fakeClient) ContainerLogs(_ context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + if f.logFunc != nil { + return f.logFunc(container, options) + } + return nil, nil +} + +func (f *fakeClient) ClientVersion() string { + return f.Version +} + +func (f *fakeClient) ContainerWait(_ context.Context, container string, _ container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if f.waitFunc != nil { + return f.waitFunc(container) + } + return nil, nil +} + +func (f *fakeClient) ContainerStart(_ context.Context, container string, options types.ContainerStartOptions) error { + if f.containerStartFunc != nil { + return f.containerStartFunc(container, options) + } + return nil +} diff --git a/cli/cli/command/container/cmd.go b/cli/cli/command/container/cmd.go new file mode 100644 index 00000000..dcf8116e --- /dev/null +++ b/cli/cli/command/container/cmd.go @@ -0,0 +1,45 @@ +package container + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewContainerCommand returns a cobra command for `container` subcommands +func NewContainerCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "container", + Short: "Manage containers", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewAttachCommand(dockerCli), + 
NewCommitCommand(dockerCli), + NewCopyCommand(dockerCli), + NewCreateCommand(dockerCli), + NewDiffCommand(dockerCli), + NewExecCommand(dockerCli), + NewExportCommand(dockerCli), + NewKillCommand(dockerCli), + NewLogsCommand(dockerCli), + NewPauseCommand(dockerCli), + NewPortCommand(dockerCli), + NewRenameCommand(dockerCli), + NewRestartCommand(dockerCli), + NewRmCommand(dockerCli), + NewRunCommand(dockerCli), + NewStartCommand(dockerCli), + NewStatsCommand(dockerCli), + NewStopCommand(dockerCli), + NewTopCommand(dockerCli), + NewUnpauseCommand(dockerCli), + NewUpdateCommand(dockerCli), + NewWaitCommand(dockerCli), + newListCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/container/commit.go b/cli/cli/command/container/commit.go new file mode 100644 index 00000000..0a30f55d --- /dev/null +++ b/cli/cli/command/container/commit.go @@ -0,0 +1,75 @@ +package container + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type commitOptions struct { + container string + reference string + + pause bool + comment string + author string + changes opts.ListOpts +} + +// NewCommitCommand creates a new cobra.Command for `docker commit` +func NewCommitCommand(dockerCli command.Cli) *cobra.Command { + var options commitOptions + + cmd := &cobra.Command{ + Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", + Short: "Create a new image from a container's changes", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.container = args[0] + if len(args) > 1 { + options.reference = args[1] + } + return runCommit(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.BoolVarP(&options.pause, "pause", "p", true, "Pause container during commit") + flags.StringVarP(&options.comment, "message", "m", "", "Commit message") + flags.StringVarP(&options.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") + + options.changes = opts.NewListOpts(nil) + flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image") + + return cmd +} + +func runCommit(dockerCli command.Cli, options *commitOptions) error { + ctx := context.Background() + + name := options.container + reference := options.reference + + commitOptions := types.ContainerCommitOptions{ + Reference: reference, + Comment: options.comment, + Author: options.author, + Changes: options.changes.GetAll(), + Pause: options.pause, + } + + response, err := dockerCli.Client().ContainerCommit(ctx, name, commitOptions) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} diff --git a/cli/cli/command/container/cp.go b/cli/cli/command/container/cp.go new file mode 100644 index 00000000..ffb9a211 --- /dev/null +++ b/cli/cli/command/container/cp.go @@ -0,0 +1,304 @@ +package container + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type copyOptions struct { + source string + destination string + followLink bool + copyUIDGID bool +} + +type copyDirection int + +const ( + fromContainer copyDirection = 
1 << iota + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool + copyUIDGID bool + sourcePath string + destPath string + container string +} + +// NewCopyCommand creates a new `docker cp` command +func NewCopyCommand(dockerCli command.Cli) *cobra.Command { + var opts copyOptions + + cmd := &cobra.Command{ + Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, + Short: "Copy files/folders between a container and the local filesystem", + Long: strings.Join([]string{ + "Copy files/folders between a container and the local filesystem\n", + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + if args[0] == "" { + return errors.New("source can not be empty") + } + if args[1] == "" { + return errors.New("destination can not be empty") + } + opts.source = args[0] + opts.destination = args[1] + return runCopy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH") + flags.BoolVarP(&opts.copyUIDGID, "archive", "a", false, "Archive mode (copy all uid/gid information)") + return cmd +} + +func runCopy(dockerCli command.Cli, opts copyOptions) error { + srcContainer, srcPath := splitCpArg(opts.source) + destContainer, destPath := splitCpArg(opts.destination) + + copyConfig := cpConfig{ + followLink: opts.followLink, + copyUIDGID: opts.copyUIDGID, + sourcePath: srcPath, + destPath: destPath, + } + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + copyConfig.container = srcContainer + } + if destContainer != "" { + direction |= toContainer + copyConfig.container = destContainer + } + + ctx := context.Background() + + switch direction { + case fromContainer: + return copyFromContainer(ctx, dockerCli, copyConfig) + case toContainer: + return copyToContainer(ctx, dockerCli, copyConfig) + case acrossContainers: + return errors.New("copying between containers is not supported") + default: + return errors.New("must specify at least one container source") + } +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + return archive.PreserveTrailingDotOrSeparator(absPath, localPath, filepath.Separator), nil +} + +func copyFromContainer(ctx context.Context, dockerCli command.Cli, copyConfig cpConfig) (err error) { + dstPath := copyConfig.destPath + srcPath := copyConfig.sourcePath + + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + client := dockerCli.Client() + // if client requests to follow symbol link, then must decide target file to be copied + var rebaseName string + if copyConfig.followLink { + srcStat, err := client.ContainerStatPath(ctx, copyConfig.container, srcPath) + + // If the destination is a symbolic link, we should follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. 
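// For illustration (hypothetical paths): a source path "/data/current" whose
// link target is "releases/v2" resolves here to "/data/releases/v2"; the
// rebase below then makes the copied entry keep the name "current".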
+ srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := client.CopyFromContainer(ctx, copyConfig.container, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + _, err = io.Copy(dockerCli.Out(), content) + return err + } + + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +// In order to get the copy behavior right, we need to know information +// about both the source and destination. The API is a simple tar +// archive/extract API but we can use the stat info header about the +// destination to be more informed about exactly what the destination is. +func copyToContainer(ctx context.Context, dockerCli command.Cli, copyConfig cpConfig) (err error) { + srcPath := copyConfig.sourcePath + dstPath := copyConfig.destPath + + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + client := dockerCli.Client() + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := client.ContainerStatPath(ctx, copyConfig.container, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = client.ContainerStatPath(ctx, copyConfig.container, linkTarget) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed. If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return errors.Errorf("destination \"%s:%s\" must be a directory", copyConfig.container, dstPath) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, copyConfig.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. 
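// As an illustration (hypothetical paths): copying a local directory "./conf"
// to an existing container directory "/etc/app" extracts it as "/etc/app/conf",
// while copying it to a non-existent "/etc/app-new" (whose parent exists)
// creates "/etc/app-new" containing the directory's contents.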
+ + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + CopyUIDGID: copyConfig.copyUIDGID, + } + return client.CopyToContainer(ctx, copyConfig.container, resolvedDstPath, content, options) +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately proceeded by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. 
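// For illustration (cases mirrored in the tests below): "relative/path" has no
// colon and "./file:name.txt" is an explicit relative path, so both return
// ("", arg) from this branch, while "container:/opt/foo" falls through and is
// split into ("container", "/opt/foo").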
+ return "", arg + } + + return parts[0], parts[1] +} diff --git a/cli/cli/command/container/cp_test.go b/cli/cli/command/container/cp_test.go new file mode 100644 index 00000000..67cdaf15 --- /dev/null +++ b/cli/cli/command/container/cp_test.go @@ -0,0 +1,192 @@ +package container + +import ( + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" + "gotest.tools/skip" +) + +func TestRunCopyWithInvalidArguments(t *testing.T) { + var testcases = []struct { + doc string + options copyOptions + expectedErr string + }{ + { + doc: "copy between container", + options: copyOptions{ + source: "first:/path", + destination: "second:/path", + }, + expectedErr: "copying between containers is not supported", + }, + { + doc: "copy without a container", + options: copyOptions{ + source: "./source", + destination: "./dest", + }, + expectedErr: "must specify at least one container source", + }, + } + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + err := runCopy(test.NewFakeCli(nil), testcase.options) + assert.Error(t, err, testcase.expectedErr) + }) + } +} + +func TestRunCopyFromContainerToStdout(t *testing.T) { + tarContent := "the tar content" + + fakeClient := &fakeClient{ + containerCopyFromFunc: func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + assert.Check(t, is.Equal("container", container)) + return ioutil.NopCloser(strings.NewReader(tarContent)), types.ContainerPathStat{}, nil + }, + } + options := copyOptions{source: "container:/path", destination: "-"} + cli := test.NewFakeCli(fakeClient) + err := runCopy(cli, options) + assert.NilError(t, err) + assert.Check(t, is.Equal(tarContent, cli.OutBuffer().String())) + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) +} + +func TestRunCopyFromContainerToFilesystem(t *testing.T) { + destDir := fs.NewDir(t, "cp-test", + fs.WithFile("file1", "content\n")) + defer destDir.Remove() + + fakeClient := &fakeClient{ + containerCopyFromFunc: func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + assert.Check(t, is.Equal("container", container)) + readCloser, err := archive.TarWithOptions(destDir.Path(), &archive.TarOptions{}) + return readCloser, types.ContainerPathStat{}, err + }, + } + options := copyOptions{source: "container:/path", destination: destDir.Path()} + cli := test.NewFakeCli(fakeClient) + err := runCopy(cli, options) + assert.NilError(t, err) + assert.Check(t, is.Equal("", cli.OutBuffer().String())) + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) + + content, err := ioutil.ReadFile(destDir.Join("file1")) + assert.NilError(t, err) + assert.Check(t, is.Equal("content\n", string(content))) +} + +func TestRunCopyFromContainerToFilesystemMissingDestinationDirectory(t *testing.T) { + destDir := fs.NewDir(t, "cp-test", + fs.WithFile("file1", "content\n")) + defer destDir.Remove() + + fakeClient := &fakeClient{ + containerCopyFromFunc: func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + assert.Check(t, is.Equal("container", container)) + readCloser, err := archive.TarWithOptions(destDir.Path(), &archive.TarOptions{}) + return readCloser, types.ContainerPathStat{}, err + }, + } + + options := copyOptions{ + source: "container:/path", + destination: destDir.Join("missing", "foo"), + } + cli := 
test.NewFakeCli(fakeClient) + err := runCopy(cli, options) + assert.ErrorContains(t, err, destDir.Join("missing")) +} + +func TestRunCopyToContainerFromFileWithTrailingSlash(t *testing.T) { + srcFile := fs.NewFile(t, t.Name()) + defer srcFile.Remove() + + options := copyOptions{ + source: srcFile.Path() + string(os.PathSeparator), + destination: "container:/path", + } + cli := test.NewFakeCli(&fakeClient{}) + err := runCopy(cli, options) + + expectedError := "not a directory" + if runtime.GOOS == "windows" { + expectedError = "The filename, directory name, or volume label syntax is incorrect" + } + assert.ErrorContains(t, err, expectedError) +} + +func TestRunCopyToContainerSourceDoesNotExist(t *testing.T) { + options := copyOptions{ + source: "/does/not/exist", + destination: "container:/path", + } + cli := test.NewFakeCli(&fakeClient{}) + err := runCopy(cli, options) + expected := "no such file or directory" + if runtime.GOOS == "windows" { + expected = "cannot find the file specified" + } + assert.ErrorContains(t, err, expected) +} + +func TestSplitCpArg(t *testing.T) { + var testcases = []struct { + doc string + path string + os string + expectedContainer string + expectedPath string + }{ + { + doc: "absolute path with colon", + os: "linux", + path: "/abs/path:withcolon", + expectedPath: "/abs/path:withcolon", + }, + { + doc: "relative path with colon", + path: "./relative:path", + expectedPath: "./relative:path", + }, + { + doc: "absolute path with drive", + os: "windows", + path: `d:\abs\path`, + expectedPath: `d:\abs\path`, + }, + { + doc: "no separator", + path: "relative/path", + expectedPath: "relative/path", + }, + { + doc: "with separator", + path: "container:/opt/foo", + expectedPath: "/opt/foo", + expectedContainer: "container", + }, + } + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + skip.If(t, testcase.os != "" && testcase.os != runtime.GOOS) + + container, path := splitCpArg(testcase.path) + assert.Check(t, is.Equal(testcase.expectedContainer, container)) + assert.Check(t, is.Equal(testcase.expectedPath, path)) + }) + } +} diff --git a/cli/cli/command/container/create.go b/cli/cli/command/container/create.go new file mode 100644 index 00000000..62d1a088 --- /dev/null +++ b/cli/cli/command/container/create.go @@ -0,0 +1,229 @@ +package container + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + apiclient "github.com/docker/docker/client" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type createOptions struct { + name string + platform string + untrusted bool +} + +// NewCreateCommand creates a new cobra.Command for `docker create` +func NewCreateCommand(dockerCli command.Cli) *cobra.Command { + var opts createOptions + var copts *containerOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runCreate(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVar(&opts.name, "name", 
"", "Assign a name to the container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddPlatformFlag(flags, &opts.platform) + command.AddTrustVerificationFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + copts = addFlags(flags) + return cmd +} + +func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, opts *createOptions, copts *containerOptions) error { + containerConfig, err := parse(flags, copts) + if err != nil { + reportError(dockerCli.Err(), "create", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + response, err := createContainer(context.Background(), dockerCli, containerConfig, opts) + if err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} + +func pullImage(ctx context.Context, dockerCli command.Cli, image string, platform string, out io.Writer) error { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageCreateOptions{ + RegistryAuth: encodedAuth, + Platform: platform, + } + + responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream( + responseBody, + out, + dockerCli.Out().FD(), + dockerCli.Out().IsTerminal(), + nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func (cid *cidFile) Close() error { + if cid.file == nil { + return nil + } + cid.file.Close() + + if cid.written { + return nil + } + if err := os.Remove(cid.path); err != nil { + return errors.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if cid.file == nil { + return nil + } + if _, err := cid.file.Write([]byte(id)); err != nil { + return errors.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +func newCIDFile(path string) (*cidFile, error) { + if path == "" { + return &cidFile{}, nil + } + if _, err := os.Stat(path); err == nil { + return nil, errors.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, errors.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig *containerConfig, opts *createOptions) (*container.ContainerCreateCreatedBody, error) { + config := containerConfig.Config + hostConfig := containerConfig.HostConfig + networkingConfig := containerConfig.NetworkingConfig + stderr := dockerCli.Err() + + var ( + trustedRef reference.Canonical + namedRef reference.Named + ) + + containerIDFile, err := newCIDFile(hostConfig.ContainerIDFile) + if err != nil { + return nil, err + } + defer containerIDFile.Close() + + ref, err := reference.ParseAnyReference(config.Image) + if err != nil { + return nil, err + } + if named, ok := ref.(reference.Named); ok { + namedRef = reference.TagNameOnly(named) + + if 
taggedRef, ok := namedRef.(reference.NamedTagged); ok && !opts.untrusted { + var err error + trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef, nil) + if err != nil { + return nil, err + } + config.Image = reference.FamiliarString(trustedRef) + } + } + + //create the container + response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, opts.name) + + //if image not found try to pull it + if err != nil { + if apiclient.IsErrNotFound(err) && namedRef != nil { + fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef)) + + // we don't want to write to stdout anything apart from container.ID + if err := pullImage(ctx, dockerCli, config.Image, opts.platform, stderr); err != nil { + return nil, err + } + if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil { + if err := image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef); err != nil { + return nil, err + } + } + // Retry + var retryErr error + response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, opts.name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(stderr, "WARNING: %s\n", warning) + } + err = containerIDFile.Write(response.ID) + return &response, err +} diff --git a/cli/cli/command/container/create_test.go b/cli/cli/command/container/create_test.go new file mode 100644 index 00000000..1df09c7d --- /dev/null +++ b/cli/cli/command/container/create_test.go @@ -0,0 +1,172 @@ +package container + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +func TestCIDFileNoOPWithNoFilename(t *testing.T) { + file, err := newCIDFile("") + assert.NilError(t, err) + assert.DeepEqual(t, &cidFile{}, file, cmp.AllowUnexported(cidFile{})) + + assert.NilError(t, file.Write("id")) + assert.NilError(t, file.Close()) +} + +func TestNewCIDFileWhenFileAlreadyExists(t *testing.T) { + tempfile := fs.NewFile(t, "test-cid-file") + defer tempfile.Remove() + + _, err := newCIDFile(tempfile.Path()) + assert.ErrorContains(t, err, "Container ID file found") +} + +func TestCIDFileCloseWithNoWrite(t *testing.T) { + tempdir := fs.NewDir(t, "test-cid-file") + defer tempdir.Remove() + + path := tempdir.Join("cidfile") + file, err := newCIDFile(path) + assert.NilError(t, err) + assert.Check(t, is.Equal(file.path, path)) + + assert.NilError(t, file.Close()) + _, err = os.Stat(path) + assert.Check(t, os.IsNotExist(err)) +} + +func TestCIDFileCloseWithWrite(t *testing.T) { + tempdir := fs.NewDir(t, "test-cid-file") + defer tempdir.Remove() + + path := tempdir.Join("cidfile") + file, err := newCIDFile(path) + assert.NilError(t, err) + + content := "id" + assert.NilError(t, file.Write(content)) + + actual, err := ioutil.ReadFile(path) + assert.NilError(t, err) + assert.Check(t, is.Equal(content, string(actual))) + + assert.NilError(t, file.Close()) + _, err = os.Stat(path) + assert.NilError(t, err) +} + +func TestCreateContainerPullsImageIfMissing(t *testing.T) { + imageName := "does-not-exist-locally" + responseCounter := 0 
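// The fake client below fails the first ContainerCreate with a not-found
// error and succeeds on the second call, exercising the pull-and-retry path
// of createContainer above.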
+ containerID := "abcdef" + + client := &fakeClient{ + createContainerFunc: func( + config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, + ) (container.ContainerCreateCreatedBody, error) { + defer func() { responseCounter++ }() + switch responseCounter { + case 0: + return container.ContainerCreateCreatedBody{}, fakeNotFound{} + case 1: + return container.ContainerCreateCreatedBody{ID: containerID}, nil + default: + return container.ContainerCreateCreatedBody{}, errors.New("unexpected") + } + }, + imageCreateFunc: func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{IndexServerAddress: "http://indexserver"}, nil + }, + } + cli := test.NewFakeCli(client) + config := &containerConfig{ + Config: &container.Config{ + Image: imageName, + }, + HostConfig: &container.HostConfig{}, + } + body, err := createContainer(context.Background(), cli, config, &createOptions{ + name: "name", + platform: runtime.GOOS, + untrusted: true, + }) + assert.NilError(t, err) + expected := container.ContainerCreateCreatedBody{ID: containerID} + assert.Check(t, is.DeepEqual(expected, *body)) + stderr := cli.ErrBuffer().String() + assert.Check(t, is.Contains(stderr, "Unable to find image 'does-not-exist-locally:latest' locally")) +} + +func TestNewCreateCommandWithContentTrustErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + notaryFunc test.NotaryClientFuncType + }{ + { + name: "offline-notary-server", + notaryFunc: notary.GetOfflineNotaryRepository, + expectedError: "client is offline", + args: []string{"image:tag"}, + }, + { + name: "uninitialized-notary-server", + notaryFunc: notary.GetUninitializedNotaryRepository, + expectedError: "remote trust data does not exist", + args: []string{"image:tag"}, + }, + { + name: "empty-notary-server", + notaryFunc: notary.GetEmptyTargetsNotaryRepository, + expectedError: "No valid trust data for tag", + args: []string{"image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + createContainerFunc: func(config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, + ) (container.ContainerCreateCreatedBody, error) { + return container.ContainerCreateCreatedBody{}, fmt.Errorf("shouldn't try to pull image") + }, + }, test.EnableContentTrust) + cli.SetNotaryClient(tc.notaryFunc) + cmd := NewCreateCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.ErrorContains(t, err, tc.expectedError) + } +} + +type fakeNotFound struct{} + +func (f fakeNotFound) NotFound() bool { return true } +func (f fakeNotFound) Error() string { return "error fake not found" } diff --git a/cli/cli/command/container/diff.go b/cli/cli/command/container/diff.go new file mode 100644 index 00000000..39b71c80 --- /dev/null +++ b/cli/cli/command/container/diff.go @@ -0,0 +1,47 @@ +package container + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type diffOptions struct { + container string +} + +// NewDiffCommand creates a new cobra.Command for `docker diff` +func NewDiffCommand(dockerCli command.Cli) *cobra.Command { + var opts 
diffOptions + + return &cobra.Command{ + Use: "diff CONTAINER", + Short: "Inspect changes to files or directories on a container's filesystem", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runDiff(dockerCli, &opts) + }, + } +} + +func runDiff(dockerCli command.Cli, opts *diffOptions) error { + if opts.container == "" { + return errors.New("Container name cannot be empty") + } + ctx := context.Background() + + changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) + if err != nil { + return err + } + diffCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewDiffFormat("{{.Type}} {{.Path}}"), + } + return formatter.DiffWrite(diffCtx, changes) +} diff --git a/cli/cli/command/container/exec.go b/cli/cli/command/container/exec.go new file mode 100644 index 00000000..c96f4055 --- /dev/null +++ b/cli/cli/command/container/exec.go @@ -0,0 +1,214 @@ +package container + +import ( + "context" + "fmt" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +type execOptions struct { + detachKeys string + interactive bool + tty bool + detach bool + user string + privileged bool + env opts.ListOpts + workdir string + container string + command []string +} + +func newExecOptions() execOptions { + return execOptions{env: opts.NewListOpts(opts.ValidateEnv)} +} + +// NewExecCommand creates a new cobra.Command for `docker exec` +func NewExecCommand(dockerCli command.Cli) *cobra.Command { + options := newExecOptions() + + cmd := &cobra.Command{ + Use: "exec [OPTIONS] CONTAINER COMMAND [ARG...]", + Short: "Run a command in a running container", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.container = args[0] + options.command = args[1:] + return runExec(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVarP(&options.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container") + flags.BoolVarP(&options.interactive, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.BoolVarP(&options.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.BoolVarP(&options.detach, "detach", "d", false, "Detached mode: run command in the background") + flags.StringVarP(&options.user, "user", "u", "", "Username or UID (format: [:])") + flags.BoolVarP(&options.privileged, "privileged", "", false, "Give extended privileges to the command") + flags.VarP(&options.env, "env", "e", "Set environment variables") + flags.SetAnnotation("env", "version", []string{"1.25"}) + flags.StringVarP(&options.workdir, "workdir", "w", "", "Working directory inside the container") + flags.SetAnnotation("workdir", "version", []string{"1.35"}) + + return cmd +} + +func runExec(dockerCli command.Cli, options execOptions) error { + execConfig := parseExec(options, dockerCli.ConfigFile()) + ctx := context.Background() + client := dockerCli.Client() + + // We need to check the tty _before_ we do the ContainerExecCreate, because + // otherwise if we error out we will leak execIDs on the server (and + // there's no easy way to clean those up). 
But also in order to make "not + // exist" errors take precedence we do a dummy inspect first. + if _, err := client.ContainerInspect(ctx, options.container); err != nil { + return err + } + if !execConfig.Detach { + if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } + + response, err := client.ContainerExecCreate(ctx, options.container, *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + return errors.New("exec ID empty") + } + + if execConfig.Detach { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + return client.ContainerExecStart(ctx, execID, execStartCheck) + } + return interactiveExec(ctx, dockerCli, execConfig, execID) +} + +func interactiveExec(ctx context.Context, dockerCli command.Cli, execConfig *types.ExecConfig, execID string) error { + // Interactive exec requested. + var ( + out, stderr io.Writer + in io.ReadCloser + ) + + if execConfig.AttachStdin { + in = dockerCli.In() + } + if execConfig.AttachStdout { + out = dockerCli.Out() + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = dockerCli.Out() + } else { + stderr = dockerCli.Err() + } + } + + client := dockerCli.Client() + execStartCheck := types.ExecStartCheck{ + Tty: execConfig.Tty, + } + resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck) + if err != nil { + return err + } + defer resp.Close() + + errCh := make(chan error, 1) + + go func() { + defer close(errCh) + errCh <- func() error { + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: out, + errorStream: stderr, + resp: resp, + tty: execConfig.Tty, + detachKeys: execConfig.DetachKeys, + } + + return streamer.stream(ctx) + }() + }() + + if execConfig.Tty && dockerCli.In().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil { + fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + return getExecExitStatus(ctx, client, execID) +} + +func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error { + resp, err := client.ContainerExecInspect(ctx, execID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !apiclient.IsErrConnectionFailed(err) { + return err + } + return cli.StatusError{StatusCode: -1} + } + status := resp.ExitCode + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +// parseExec parses the specified args for the specified command and generates +// an ExecConfig from it. 
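
Before the function itself, a minimal standalone sketch of the attach-defaulting
rule it applies may help; `detach` and `interactive` stand in for the -d and -i
flags, and the sketch is an illustration rather than the CLI's own code:

    package main

    import "fmt"

    // attachDefaults mirrors the rule below: without -d the exec session always
    // attaches stdout and stderr, and attaches stdin only when -i was given;
    // with -d nothing is attached.
    func attachDefaults(detach, interactive bool) (stdin, stdout, stderr bool) {
    	if detach {
    		return false, false, false
    	}
    	return interactive, true, true
    }

    func main() {
    	fmt.Println(attachDefaults(false, true))  // docker exec -i ... -> true true true
    	fmt.Println(attachDefaults(false, false)) // docker exec ...    -> false true true
    	fmt.Println(attachDefaults(true, false))  // docker exec -d ... -> false false false
    }
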
+func parseExec(opts execOptions, configFile *configfile.ConfigFile) *types.ExecConfig { + execConfig := &types.ExecConfig{ + User: opts.user, + Privileged: opts.privileged, + Tty: opts.tty, + Cmd: opts.command, + Detach: opts.detach, + Env: opts.env.GetAll(), + WorkingDir: opts.workdir, + } + + // If -d is not set, attach to everything by default + if !opts.detach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if opts.interactive { + execConfig.AttachStdin = true + } + } + + if opts.detachKeys != "" { + execConfig.DetachKeys = opts.detachKeys + } else { + execConfig.DetachKeys = configFile.DetachKeys + } + return execConfig +} diff --git a/cli/cli/command/container/exec_test.go b/cli/cli/command/container/exec_test.go new file mode 100644 index 00000000..0c6e2614 --- /dev/null +++ b/cli/cli/command/container/exec_test.go @@ -0,0 +1,227 @@ +package container + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func withDefaultOpts(options execOptions) execOptions { + options.env = opts.NewListOpts(opts.ValidateEnv) + if len(options.command) == 0 { + options.command = []string{"command"} + } + return options +} + +func TestParseExec(t *testing.T) { + testcases := []struct { + options execOptions + configFile configfile.ConfigFile + expected types.ExecConfig + }{ + { + expected: types.ExecConfig{ + Cmd: []string{"command"}, + AttachStdout: true, + AttachStderr: true, + }, + options: withDefaultOpts(execOptions{}), + }, + { + expected: types.ExecConfig{ + Cmd: []string{"command1", "command2"}, + AttachStdout: true, + AttachStderr: true, + }, + options: withDefaultOpts(execOptions{ + command: []string{"command1", "command2"}, + }), + }, + { + options: withDefaultOpts(execOptions{ + interactive: true, + tty: true, + user: "uid", + }), + expected: types.ExecConfig{ + User: "uid", + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + Tty: true, + Cmd: []string{"command"}, + }, + }, + { + options: withDefaultOpts(execOptions{detach: true}), + expected: types.ExecConfig{ + Detach: true, + Cmd: []string{"command"}, + }, + }, + { + options: withDefaultOpts(execOptions{ + tty: true, + interactive: true, + detach: true, + }), + expected: types.ExecConfig{ + Detach: true, + Tty: true, + Cmd: []string{"command"}, + }, + }, + { + options: withDefaultOpts(execOptions{detach: true}), + configFile: configfile.ConfigFile{DetachKeys: "de"}, + expected: types.ExecConfig{ + Cmd: []string{"command"}, + DetachKeys: "de", + Detach: true, + }, + }, + { + options: withDefaultOpts(execOptions{ + detach: true, + detachKeys: "ab", + }), + configFile: configfile.ConfigFile{DetachKeys: "de"}, + expected: types.ExecConfig{ + Cmd: []string{"command"}, + DetachKeys: "ab", + Detach: true, + }, + }, + } + + for _, testcase := range testcases { + execConfig := parseExec(testcase.options, &testcase.configFile) + assert.Check(t, is.DeepEqual(testcase.expected, *execConfig)) + } +} + +func TestRunExec(t *testing.T) { + var testcases = []struct { + doc string + options execOptions + client fakeClient + expectedError string + expectedOut string + expectedErr string + }{ + { + doc: "successful detach", + options: withDefaultOpts(execOptions{ + container: "thecontainer", + detach: true, + }), + client: fakeClient{execCreateFunc: 
execCreateWithID}, + }, + { + doc: "inspect error", + options: newExecOptions(), + client: fakeClient{ + inspectFunc: func(string) (types.ContainerJSON, error) { + return types.ContainerJSON{}, errors.New("failed inspect") + }, + }, + expectedError: "failed inspect", + }, + { + doc: "missing exec ID", + options: newExecOptions(), + expectedError: "exec ID empty", + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + cli := test.NewFakeCli(&testcase.client) + + err := runExec(cli, testcase.options) + if testcase.expectedError != "" { + assert.ErrorContains(t, err, testcase.expectedError) + } else { + if !assert.Check(t, err) { + return + } + } + assert.Check(t, is.Equal(testcase.expectedOut, cli.OutBuffer().String())) + assert.Check(t, is.Equal(testcase.expectedErr, cli.ErrBuffer().String())) + }) + } +} + +func execCreateWithID(_ string, _ types.ExecConfig) (types.IDResponse, error) { + return types.IDResponse{ID: "execid"}, nil +} + +func TestGetExecExitStatus(t *testing.T) { + execID := "the exec id" + expecatedErr := errors.New("unexpected error") + + testcases := []struct { + inspectError error + exitCode int + expectedError error + }{ + { + inspectError: nil, + exitCode: 0, + }, + { + inspectError: expecatedErr, + expectedError: expecatedErr, + }, + { + exitCode: 15, + expectedError: cli.StatusError{StatusCode: 15}, + }, + } + + for _, testcase := range testcases { + client := &fakeClient{ + execInspectFunc: func(id string) (types.ContainerExecInspect, error) { + assert.Check(t, is.Equal(execID, id)) + return types.ContainerExecInspect{ExitCode: testcase.exitCode}, testcase.inspectError + }, + } + err := getExecExitStatus(context.Background(), client, execID) + assert.Check(t, is.Equal(testcase.expectedError, err)) + } +} + +func TestNewExecCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + containerInspectFunc func(img string) (types.ContainerJSON, error) + }{ + { + name: "client-error", + args: []string{"5cb5bb5e4a3b", "-t", "-i", "bash"}, + expectedError: "something went wrong", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{inspectFunc: tc.containerInspectFunc}) + cmd := NewExecCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} diff --git a/cli/cli/command/container/export.go b/cli/cli/command/container/export.go new file mode 100644 index 00000000..f0f67373 --- /dev/null +++ b/cli/cli/command/container/export.go @@ -0,0 +1,58 @@ +package container + +import ( + "context" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type exportOptions struct { + container string + output string +} + +// NewExportCommand creates a new `docker export` command +func NewExportCommand(dockerCli command.Cli) *cobra.Command { + var opts exportOptions + + cmd := &cobra.Command{ + Use: "export [OPTIONS] CONTAINER", + Short: "Export a container's filesystem as a tar archive", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runExport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return 
cmd +} + +func runExport(dockerCli command.Cli, opts exportOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("cowardly refusing to save to a terminal. Use the -o flag or redirect") + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ContainerExport(context.Background(), opts.container) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/cli/cli/command/container/hijack.go b/cli/cli/command/container/hijack.go new file mode 100644 index 00000000..78fbebe0 --- /dev/null +++ b/cli/cli/command/container/hijack.go @@ -0,0 +1,208 @@ +package container + +import ( + "context" + "fmt" + "io" + "runtime" + "sync" + + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" + "github.com/sirupsen/logrus" +) + +// The default escape key sequence: ctrl-p, ctrl-q +// TODO: This could be moved to `pkg/term`. +var defaultEscapeKeys = []byte{16, 17} + +// A hijackedIOStreamer handles copying input to and output from streams to the +// connection. +type hijackedIOStreamer struct { + streams command.Streams + inputStream io.ReadCloser + outputStream io.Writer + errorStream io.Writer + + resp types.HijackedResponse + + tty bool + detachKeys string +} + +// stream handles setting up the IO and then begins streaming stdin/stdout +// to/from the hijacked connection, blocking until it is either done reading +// output, the user inputs the detach key sequence when in TTY mode, or when +// the given context is cancelled. +func (h *hijackedIOStreamer) stream(ctx context.Context) error { + restoreInput, err := h.setupInput() + if err != nil { + return fmt.Errorf("unable to setup input stream: %s", err) + } + + defer restoreInput() + + outputDone := h.beginOutputStream(restoreInput) + inputDone, detached := h.beginInputStream(restoreInput) + + select { + case err := <-outputDone: + return err + case <-inputDone: + // Input stream has closed. + if h.outputStream != nil || h.errorStream != nil { + // Wait for output to complete streaming. + select { + case err := <-outputDone: + return err + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + case err := <-detached: + // Got a detach key sequence. + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +func (h *hijackedIOStreamer) setupInput() (restore func(), err error) { + if h.inputStream == nil || !h.tty { + // No need to setup input TTY. + // The restore func is a nop. + return func() {}, nil + } + + if err := setRawTerminal(h.streams); err != nil { + return nil, fmt.Errorf("unable to set IO streams as raw terminal: %s", err) + } + + // Use sync.Once so we may call restore multiple times but ensure we + // only restore the terminal once. + var restoreOnce sync.Once + restore = func() { + restoreOnce.Do(func() { + restoreTerminal(h.streams, h.inputStream) + }) + } + + // Wrap the input to detect detach escape sequence. + // Use default escape keys if an invalid sequence is given. 
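    // (term.ToBytes turns a comma-separated key description such as
    // "ctrl-p,ctrl-q" into the raw bytes to watch for; that default sequence
    // is exactly the []byte{16, 17} declared as defaultEscapeKeys at the top
    // of this file.)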
+ escapeKeys := defaultEscapeKeys + if h.detachKeys != "" { + customEscapeKeys, err := term.ToBytes(h.detachKeys) + if err != nil { + logrus.Warnf("invalid detach escape keys, using default: %s", err) + } else { + escapeKeys = customEscapeKeys + } + } + + h.inputStream = ioutils.NewReadCloserWrapper(term.NewEscapeProxy(h.inputStream, escapeKeys), h.inputStream.Close) + + return restore, nil +} + +func (h *hijackedIOStreamer) beginOutputStream(restoreInput func()) <-chan error { + if h.outputStream == nil && h.errorStream == nil { + // There is no need to copy output. + return nil + } + + outputDone := make(chan error) + go func() { + var err error + + // When TTY is ON, use regular copy + if h.outputStream != nil && h.tty { + _, err = io.Copy(h.outputStream, h.resp.Reader) + // We should restore the terminal as soon as possible + // once the connection ends so any following print + // messages will be in normal type. + restoreInput() + } else { + _, err = stdcopy.StdCopy(h.outputStream, h.errorStream, h.resp.Reader) + } + + logrus.Debug("[hijack] End of stdout") + + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + } + + outputDone <- err + }() + + return outputDone +} + +func (h *hijackedIOStreamer) beginInputStream(restoreInput func()) (doneC <-chan struct{}, detachedC <-chan error) { + inputDone := make(chan struct{}) + detached := make(chan error) + + go func() { + if h.inputStream != nil { + _, err := io.Copy(h.resp.Conn, h.inputStream) + // We should restore the terminal as soon as possible + // once the connection ends so any following print + // messages will be in normal type. + restoreInput() + + logrus.Debug("[hijack] End of stdin") + + if _, ok := err.(term.EscapeError); ok { + detached <- err + return + } + + if err != nil { + // This error will also occur on the receive + // side (from stdout) where it will be + // propagated back to the caller. + logrus.Debugf("Error sendStdin: %s", err) + } + } + + if err := h.resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + + close(inputDone) + }() + + return inputDone, detached +} + +func setRawTerminal(streams command.Streams) error { + if err := streams.In().SetRawTerminal(); err != nil { + return err + } + return streams.Out().SetRawTerminal() +} + +// nolint: unparam +func restoreTerminal(streams command.Streams, in io.Closer) error { + streams.In().RestoreTerminal() + streams.Out().RestoreTerminal() + // WARNING: DO NOT REMOVE THE OS CHECKS !!! + // For some reason this Close call blocks on darwin.. + // As the client exits right after, simply discard the close + // until we find a better solution. + // + // This can also cause the client on Windows to get stuck in Win32 CloseHandle() + // in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442 + // Tracked internally at Microsoft by VSO #11352156. In the + // Windows case, you hit this if you are using the native/v2 console, + // not the "legacy" console, and you start the client in a new window. eg + // `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar` + // will hang. Remove start, and it won't repro. 
+ if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" { + return in.Close() + } + return nil +} diff --git a/cli/cli/command/container/inspect.go b/cli/cli/command/container/inspect.go new file mode 100644 index 00000000..4f50e2a0 --- /dev/null +++ b/cli/cli/command/container/inspect.go @@ -0,0 +1,47 @@ +package container + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + size bool + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker container inspect` +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Display detailed information on one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ContainerInspectWithRaw(ctx, ref, opts.size) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/cli/cli/command/container/kill.go b/cli/cli/command/container/kill.go new file mode 100644 index 00000000..feedbc01 --- /dev/null +++ b/cli/cli/command/container/kill.go @@ -0,0 +1,56 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type killOptions struct { + signal string + + containers []string +} + +// NewKillCommand creates a new cobra.Command for `docker kill` +func NewKillCommand(dockerCli command.Cli) *cobra.Command { + var opts killOptions + + cmd := &cobra.Command{ + Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Kill one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runKill(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") + return cmd +} + +func runKill(dockerCli command.Cli, opts *killOptions) error { + var errs []string + ctx := context.Background() + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + return dockerCli.Client().ContainerKill(ctx, container, opts.signal) + }) + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintln(dockerCli.Out(), name) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/list.go b/cli/cli/command/container/list.go new file mode 100644 index 00000000..a79507e7 --- /dev/null +++ b/cli/cli/command/container/list.go @@ -0,0 +1,140 @@ +package container + +import ( + "context" + "io/ioutil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + 
"github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/cli/templates" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type psOptions struct { + quiet bool + size bool + all bool + noTrunc bool + nLatest bool + last int + format string + filter opts.FilterOpt +} + +// NewPsCommand creates a new cobra.Command for `docker ps` +func NewPsCommand(dockerCli command.Cli) *cobra.Command { + options := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS]", + Short: "List containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPs(dockerCli, &options) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display numeric IDs") + flags.BoolVarP(&options.size, "size", "s", false, "Display total file sizes") + flags.BoolVarP(&options.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVarP(&options.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") + flags.IntVarP(&options.last, "last", "n", -1, "Show n last created containers (includes all states)") + flags.StringVarP(&options.format, "format", "", "", "Pretty-print containers using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + cmd := *NewPsCommand(dockerCli) + cmd.Aliases = []string{"ps", "list"} + cmd.Use = "ls [OPTIONS]" + return &cmd +} + +// listOptionsProcessor is used to set any container list options which may only +// be embedded in the format template. +// This is passed directly into tmpl.Execute in order to allow the preprocessor +// to set any list options that were not provided by flags (e.g. `.Size`). +// It is using a `map[string]bool` so that unknown fields passed into the +// template format do not cause errors. These errors will get picked up when +// running through the actual template processor. +type listOptionsProcessor map[string]bool + +// Size sets the size of the map when called by a template execution. +func (o listOptionsProcessor) Size() bool { + o["size"] = true + return true +} + +// Label is needed here as it allows the correct pre-processing +// because Label() is a method with arguments +func (o listOptionsProcessor) Label(name string) string { + return "" +} + +func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) { + options := &types.ContainerListOptions{ + All: opts.all, + Limit: opts.last, + Size: opts.size, + Filters: opts.filter.Value(), + } + + if opts.nLatest && opts.last == -1 { + options.Limit = 1 + } + + tmpl, err := templates.Parse(opts.format) + + if err != nil { + return nil, err + } + + optionsProcessor := listOptionsProcessor{} + // This shouldn't error out but swallowing the error makes it harder + // to track down if preProcessor issues come up. 
Ref #24696 + if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil { + return nil, err + } + // At the moment all we need is to capture .Size for preprocessor + options.Size = opts.size || optionsProcessor["size"] + + return options, nil +} + +func runPs(dockerCli command.Cli, options *psOptions) error { + ctx := context.Background() + + listOptions, err := buildContainerListOptions(options) + if err != nil { + return err + } + + containers, err := dockerCli.Client().ContainerList(ctx, *listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().PsFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().PsFormat + } else { + format = formatter.TableFormatKey + } + } + + containerCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewContainerFormat(format, options.quiet, listOptions.Size), + Trunc: !options.noTrunc, + } + return formatter.ContainerWrite(containerCtx, containers) +} diff --git a/cli/cli/command/container/list_test.go b/cli/cli/command/container/list_test.go new file mode 100644 index 00000000..2bc1949a --- /dev/null +++ b/cli/cli/command/container/list_test.go @@ -0,0 +1,164 @@ +package container + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestContainerListErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + containerListFunc func(types.ContainerListOptions) ([]types.Container, error) + expectedError string + }{ + { + flags: map[string]string{ + "format": "{{invalid}}", + }, + expectedError: `function "invalid" not defined`, + }, + { + flags: map[string]string{ + "format": "{{join}}", + }, + expectedError: `wrong number of args for join`, + }, + { + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return nil, fmt.Errorf("error listing containers") + }, + expectedError: "error listing containers", + }, + } + for _, tc := range testCases { + cmd := newListCommand( + test.NewFakeCli(&fakeClient{ + containerListFunc: tc.containerListFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestContainerListWithoutFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1"), + *Container("c2", WithName("foo")), + *Container("c3", WithPort(80, 80, TCP), WithPort(81, 81, TCP), WithPort(82, 82, TCP)), + *Container("c4", WithPort(81, 81, UDP)), + *Container("c5", WithPort(82, 82, IP("8.8.8.8"), TCP)), + }, nil + }, + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-without-format.golden") +} + +func TestContainerListNoTrunc(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1"), + *Container("c2", WithName("foo/bar")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("no-trunc", 
"true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-without-format-no-trunc.golden") +} + +// Test for GitHub issue docker/docker#21772 +func TestContainerListNamesMultipleTime(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1"), + *Container("c2", WithName("foo/bar")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{.Names}} {{.Names}}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-format-name-name.golden") +} + +// Test for GitHub issue docker/docker#30291 +func TestContainerListFormatTemplateWithArg(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1", WithLabel("some.label", "value")), + *Container("c2", WithName("foo/bar"), WithLabel("foo", "bar")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", `{{.Names}} {{.Label "some.label"}}`) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-format-with-arg.golden") +} + +func TestContainerListFormatSizeSetsOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(options types.ContainerListOptions) ([]types.Container, error) { + assert.Check(t, options.Size) + return []types.Container{}, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", `{{.Size}}`) + assert.NilError(t, cmd.Execute()) +} + +func TestContainerListWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1", WithLabel("some.label", "value")), + *Container("c2", WithName("foo/bar"), WithLabel("foo", "bar")), + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + PsFormat: "{{ .Names }} {{ .Image }} {{ .Labels }}", + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-with-config-format.golden") +} + +func TestContainerListWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1", WithLabel("some.label", "value")), + *Container("c2", WithName("foo/bar"), WithLabel("foo", "bar")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{ .Names }} {{ .Image }} {{ .Labels }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-with-format.golden") +} diff --git a/cli/cli/command/container/logs.go b/cli/cli/command/container/logs.go new file mode 100644 index 00000000..b5b526f2 --- /dev/null +++ b/cli/cli/command/container/logs.go @@ -0,0 +1,80 @@ +package container + +import ( + "context" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" +) + +type logsOptions struct { + follow bool + since string + until string + timestamps bool + details bool + tail string + + container string +} + +// NewLogsCommand creates a new cobra.Command for `docker logs` +func NewLogsCommand(dockerCli command.Cli) *cobra.Command { 
+ var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] CONTAINER", + Short: "Fetch the logs of a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runLogs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)") + flags.StringVar(&opts.until, "until", "", "Show logs before a timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)") + flags.SetAnnotation("until", "version", []string{"1.35"}) + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli command.Cli, opts *logsOptions) error { + ctx := context.Background() + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Until: opts.until, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) + if err != nil { + return err + } + defer responseBody.Close() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if c.Config.Tty { + _, err = io.Copy(dockerCli.Out(), responseBody) + } else { + _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) + } + return err +} diff --git a/cli/cli/command/container/logs_test.go b/cli/cli/command/container/logs_test.go new file mode 100644 index 00000000..a618ad5e --- /dev/null +++ b/cli/cli/command/container/logs_test.go @@ -0,0 +1,62 @@ +package container + +import ( + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +var logFn = func(expectedOut string) func(string, types.ContainerLogsOptions) (io.ReadCloser, error) { + return func(container string, opts types.ContainerLogsOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader(expectedOut)), nil + } +} + +func TestRunLogs(t *testing.T) { + inspectFn := func(containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{ + Config: &container.Config{Tty: true}, + ContainerJSONBase: &types.ContainerJSONBase{State: &types.ContainerState{Running: false}}, + }, nil + } + + var testcases = []struct { + doc string + options *logsOptions + client fakeClient + expectedError string + expectedOut string + expectedErr string + }{ + { + doc: "successful logs", + expectedOut: "foo", + options: &logsOptions{}, + client: fakeClient{logFunc: logFn("foo"), inspectFunc: inspectFn}, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + cli := test.NewFakeCli(&testcase.client) + + err := runLogs(cli, testcase.options) + if testcase.expectedError != "" { + assert.ErrorContains(t, err, testcase.expectedError) + } else { + if !assert.Check(t, err) { + return + } + } + assert.Check(t, is.Equal(testcase.expectedOut, cli.OutBuffer().String())) + assert.Check(t, is.Equal(testcase.expectedErr, 
cli.ErrBuffer().String())) + }) + } +} diff --git a/cli/cli/command/container/opts.go b/cli/cli/command/container/opts.go new file mode 100644 index 00000000..b8ff5e4a --- /dev/null +++ b/cli/cli/command/container/opts.go @@ -0,0 +1,836 @@ +package container + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "path" + "regexp" + "strconv" + "strings" + "time" + + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" +) + +var ( + deviceCgroupRuleRegexp = regexp.MustCompile(`^[acb] ([0-9]+|\*):([0-9]+|\*) [rwm]{1,3}$`) +) + +// containerOptions is a data object with all the options for creating a container +type containerOptions struct { + attach opts.ListOpts + volumes opts.ListOpts + tmpfs opts.ListOpts + mounts opts.MountOpt + blkioWeightDevice opts.WeightdeviceOpt + deviceReadBps opts.ThrottledeviceOpt + deviceWriteBps opts.ThrottledeviceOpt + links opts.ListOpts + aliases opts.ListOpts + linkLocalIPs opts.ListOpts + deviceReadIOps opts.ThrottledeviceOpt + deviceWriteIOps opts.ThrottledeviceOpt + env opts.ListOpts + labels opts.ListOpts + deviceCgroupRules opts.ListOpts + devices opts.ListOpts + ulimits *opts.UlimitOpt + sysctls *opts.MapOpts + publish opts.ListOpts + expose opts.ListOpts + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOptions opts.ListOpts + extraHosts opts.ListOpts + volumesFrom opts.ListOpts + envFile opts.ListOpts + capAdd opts.ListOpts + capDrop opts.ListOpts + groupAdd opts.ListOpts + securityOpt opts.ListOpts + storageOpt opts.ListOpts + labelsFile opts.ListOpts + loggingOpts opts.ListOpts + privileged bool + pidMode string + utsMode string + usernsMode string + publishAll bool + stdin bool + tty bool + oomKillDisable bool + oomScoreAdj int + containerIDFile string + entrypoint string + hostname string + memory opts.MemBytes + memoryReservation opts.MemBytes + memorySwap opts.MemSwapBytes + kernelMemory opts.MemBytes + user string + workingDir string + cpuCount int64 + cpuShares int64 + cpuPercent int64 + cpuPeriod int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpuQuota int64 + cpus opts.NanoCPUs + cpusetCpus string + cpusetMems string + blkioWeight uint16 + ioMaxBandwidth opts.MemBytes + ioMaxIOps uint64 + swappiness int64 + netMode string + macAddress string + ipv4Address string + ipv6Address string + ipcMode string + pidsLimit int64 + restartPolicy string + readonlyRootfs bool + loggingDriver string + cgroupParent string + volumeDriver string + stopSignal string + stopTimeout int + isolation string + shmSize opts.MemBytes + noHealthcheck bool + healthCmd string + healthInterval time.Duration + healthTimeout time.Duration + healthStartPeriod time.Duration + healthRetries int + runtime string + autoRemove bool + init bool + + Image string + Args []string +} + +// addFlags adds all command line flags that will be used by parse to the FlagSet +func addFlags(flags *pflag.FlagSet) *containerOptions { + copts := &containerOptions{ + aliases: opts.NewListOpts(nil), + attach: opts.NewListOpts(validateAttach), + blkioWeightDevice: opts.NewWeightdeviceOpt(opts.ValidateWeightDevice), + capAdd: opts.NewListOpts(nil), + capDrop: opts.NewListOpts(nil), + dns: opts.NewListOpts(opts.ValidateIPAddress), + 
dnsOptions: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + deviceCgroupRules: opts.NewListOpts(validateDeviceCgroupRule), + deviceReadBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), + deviceReadIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), + deviceWriteBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), + deviceWriteIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), + devices: opts.NewListOpts(validateDevice), + env: opts.NewListOpts(opts.ValidateEnv), + envFile: opts.NewListOpts(nil), + expose: opts.NewListOpts(nil), + extraHosts: opts.NewListOpts(opts.ValidateExtraHost), + groupAdd: opts.NewListOpts(nil), + labels: opts.NewListOpts(nil), + labelsFile: opts.NewListOpts(nil), + linkLocalIPs: opts.NewListOpts(nil), + links: opts.NewListOpts(opts.ValidateLink), + loggingOpts: opts.NewListOpts(nil), + publish: opts.NewListOpts(nil), + securityOpt: opts.NewListOpts(nil), + storageOpt: opts.NewListOpts(nil), + sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), + tmpfs: opts.NewListOpts(nil), + ulimits: opts.NewUlimitOpt(nil), + volumes: opts.NewListOpts(nil), + volumesFrom: opts.NewListOpts(nil), + } + + // General purpose flags + flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") + flags.Var(&copts.deviceCgroupRules, "device-cgroup-rule", "Add a rule to the cgroup allowed devices list") + flags.Var(&copts.devices, "device", "Add a host device to the container") + flags.VarP(&copts.env, "env", "e", "Set environment variables") + flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") + flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") + flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") + flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") + flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") + flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") + flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") + flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") + flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, "Signal to stop a container") + flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") + flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) + flags.Var(copts.sysctls, "sysctl", "Sysctl options") + flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.Var(copts.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: [:])") + flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container") + flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") + + // Security + flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") + flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") + flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") + flags.Var(&copts.securityOpt, "security-opt", "Security Options") + flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") + + // Network and port 
publishing flag + flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") + flags.Var(&copts.dns, "dns", "Set custom DNS servers") + // We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way. + // This is to be consistent with service create/update + flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") + flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") + flags.MarkHidden("dns-opt") + flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") + flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") + flags.StringVar(&copts.ipv4Address, "ip", "", "IPv4 address (e.g., 172.30.100.104)") + flags.StringVar(&copts.ipv6Address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)") + flags.Var(&copts.links, "link", "Add link to another container") + flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") + flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g., 92:d0:c6:0a:29:33)") + flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") + flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") + // We allow for both "--net" and "--network", although the latter is the recommended way. + flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network") + flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network") + flags.MarkHidden("net") + // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. + flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") + flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") + flags.MarkHidden("net-alias") + + // Logging and storage + flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") + flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") + flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") + flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") + flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") + flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") + flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") + flags.Var(&copts.mounts, "mount", "Attach a filesystem mount to the container") + + // Health-checking + flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") + flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ms|s|m|h) (default 0s)") + flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") + flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ms|s|m|h) (default 0s)") + flags.DurationVar(&copts.healthStartPeriod, "health-start-period", 0, "Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)") + flags.SetAnnotation("health-start-period", "version", []string{"1.29"}) + flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") + + // Resource management + flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable 
(default 0)") + flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") + flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") + flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") + flags.SetAnnotation("cpu-count", "ostype", []string{"windows"}) + flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") + flags.SetAnnotation("cpu-percent", "ostype", []string{"windows"}) + flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") + flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) + flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") + flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) + flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Var(&copts.cpus, "cpus", "Number of CPUs") + flags.SetAnnotation("cpus", "version", []string{"1.25"}) + flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") + flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") + flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") + flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") + flags.Var(&copts.ioMaxBandwidth, "io-maxbandwidth", "Maximum IO bandwidth limit for the system drive (Windows only)") + flags.SetAnnotation("io-maxbandwidth", "ostype", []string{"windows"}) + flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") + flags.SetAnnotation("io-maxiops", "ostype", []string{"windows"}) + flags.Var(&copts.kernelMemory, "kernel-memory", "Kernel memory limit") + flags.VarP(&copts.memory, "memory", "m", "Memory limit") + flags.Var(&copts.memoryReservation, "memory-reservation", "Memory soft limit") + flags.Var(&copts.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") + flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") + flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") + flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") + + // Low-level execution (cgroups, namespaces, ...) 
+ flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&copts.ipcMode, "ipc", "", "IPC mode to use") + flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") + flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") + flags.Var(&copts.shmSize, "shm-size", "Size of /dev/shm") + flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") + flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") + + flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") + flags.SetAnnotation("init", "version", []string{"1.25"}) + return copts +} + +type containerConfig struct { + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *networktypes.NetworkingConfig +} + +// parse parses the args for the specified command and generates a Config, +// a HostConfig and returns them with the specified command. +// If the specified args are not valid, it will return an error. +// nolint: gocyclo +func parse(flags *pflag.FlagSet, copts *containerOptions) (*containerConfig, error) { + var ( + attachStdin = copts.attach.Get("stdin") + attachStdout = copts.attach.Get("stdout") + attachStderr = copts.attach.Get("stderr") + ) + + // Validate the input mac address + if copts.macAddress != "" { + if _, err := opts.ValidateMACAddress(copts.macAddress); err != nil { + return nil, errors.Errorf("%s is not a valid mac address", copts.macAddress) + } + } + if copts.stdin { + attachStdin = true + } + // If -a is not set, attach to stdout and stderr + if copts.attach.Len() == 0 { + attachStdout = true + attachStderr = true + } + + var err error + + swappiness := copts.swappiness + if swappiness != -1 && (swappiness < 0 || swappiness > 100) { + return nil, errors.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness) + } + + mounts := copts.mounts.Value() + if len(mounts) > 0 && copts.volumeDriver != "" { + logrus.Warn("`--volume-driver` is ignored for volumes specified via `--mount`. Use `--mount type=volume,volume-driver=...` instead.") + } + var binds []string + volumes := copts.volumes.GetMap() + // add any bind targets to the list of container volumes + for bind := range copts.volumes.GetMap() { + parsed, _ := loader.ParseVolume(bind) + if parsed.Source != "" { + // after creating the bind mount we want to delete it from the copts.volumes values because + // we do not want bind mounts being committed to image configs + binds = append(binds, bind) + // We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if + // there are duplicates entries. 
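    // (The bind specs collected here end up in HostConfig.Binds further down;
    // whatever remains in the `volumes` map becomes Config.Volumes, i.e. the
    // container's anonymous volumes.)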
+ delete(volumes, bind) + } + } + + // Can't evaluate options passed into --tmpfs until we actually mount + tmpfs := make(map[string]string) + for _, t := range copts.tmpfs.GetAll() { + if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { + tmpfs[arr[0]] = arr[1] + } else { + tmpfs[arr[0]] = "" + } + } + + var ( + runCmd strslice.StrSlice + entrypoint strslice.StrSlice + ) + + if len(copts.Args) > 0 { + runCmd = strslice.StrSlice(copts.Args) + } + + if copts.entrypoint != "" { + entrypoint = strslice.StrSlice{copts.entrypoint} + } else if flags.Changed("entrypoint") { + // if `--entrypoint=` is parsed then Entrypoint is reset + entrypoint = []string{""} + } + + ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll()) + if err != nil { + return nil, err + } + + // Merge in exposed ports to the map of published ports + for _, e := range copts.expose.GetAll() { + if strings.Contains(e, ":") { + return nil, errors.Errorf("invalid port format for --expose: %s", e) + } + //support two formats for expose, original format /[] or /[] + proto, port := nat.SplitProtoPort(e) + //parse the start and end port and create a sequence of ports to expose + //if expose a port, the start and end port are the same + start, end, err := nat.ParsePortRange(port) + if err != nil { + return nil, errors.Errorf("invalid range format for --expose: %s, error: %s", e, err) + } + for i := start; i <= end; i++ { + p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) + if err != nil { + return nil, err + } + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + } + + // parse device mappings + deviceMappings := []container.DeviceMapping{} + for _, device := range copts.devices.GetAll() { + deviceMapping, err := parseDevice(device) + if err != nil { + return nil, err + } + deviceMappings = append(deviceMappings, deviceMapping) + } + + // collect all the environment variables for the container + envVariables, err := opts.ReadKVEnvStrings(copts.envFile.GetAll(), copts.env.GetAll()) + if err != nil { + return nil, err + } + + // collect all the labels for the container + labels, err := opts.ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll()) + if err != nil { + return nil, err + } + + pidMode := container.PidMode(copts.pidMode) + if !pidMode.Valid() { + return nil, errors.Errorf("--pid: invalid PID mode") + } + + utsMode := container.UTSMode(copts.utsMode) + if !utsMode.Valid() { + return nil, errors.Errorf("--uts: invalid UTS mode") + } + + usernsMode := container.UsernsMode(copts.usernsMode) + if !usernsMode.Valid() { + return nil, errors.Errorf("--userns: invalid USER mode") + } + + restartPolicy, err := opts.ParseRestartPolicy(copts.restartPolicy) + if err != nil { + return nil, err + } + + loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll()) + if err != nil { + return nil, err + } + + securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll()) + if err != nil { + return nil, err + } + + storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll()) + if err != nil { + return nil, err + } + + // Healthcheck + var healthConfig *container.HealthConfig + haveHealthSettings := copts.healthCmd != "" || + copts.healthInterval != 0 || + copts.healthTimeout != 0 || + copts.healthStartPeriod != 0 || + copts.healthRetries != 0 + if copts.noHealthcheck { + if haveHealthSettings { + return nil, errors.Errorf("--no-healthcheck conflicts with --health-* options") + } + test := strslice.StrSlice{"NONE"} + healthConfig = &container.HealthConfig{Test: test} 
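    // (A Test value of ["NONE"] tells the daemon to disable any HEALTHCHECK
    // defined by the image.)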
+ } else if haveHealthSettings { + var probe strslice.StrSlice + if copts.healthCmd != "" { + args := []string{"CMD-SHELL", copts.healthCmd} + probe = strslice.StrSlice(args) + } + if copts.healthInterval < 0 { + return nil, errors.Errorf("--health-interval cannot be negative") + } + if copts.healthTimeout < 0 { + return nil, errors.Errorf("--health-timeout cannot be negative") + } + if copts.healthRetries < 0 { + return nil, errors.Errorf("--health-retries cannot be negative") + } + if copts.healthStartPeriod < 0 { + return nil, fmt.Errorf("--health-start-period cannot be negative") + } + + healthConfig = &container.HealthConfig{ + Test: probe, + Interval: copts.healthInterval, + Timeout: copts.healthTimeout, + StartPeriod: copts.healthStartPeriod, + Retries: copts.healthRetries, + } + } + + resources := container.Resources{ + CgroupParent: copts.cgroupParent, + Memory: copts.memory.Value(), + MemoryReservation: copts.memoryReservation.Value(), + MemorySwap: copts.memorySwap.Value(), + MemorySwappiness: &copts.swappiness, + KernelMemory: copts.kernelMemory.Value(), + OomKillDisable: &copts.oomKillDisable, + NanoCPUs: copts.cpus.Value(), + CPUCount: copts.cpuCount, + CPUPercent: copts.cpuPercent, + CPUShares: copts.cpuShares, + CPUPeriod: copts.cpuPeriod, + CpusetCpus: copts.cpusetCpus, + CpusetMems: copts.cpusetMems, + CPUQuota: copts.cpuQuota, + CPURealtimePeriod: copts.cpuRealtimePeriod, + CPURealtimeRuntime: copts.cpuRealtimeRuntime, + PidsLimit: copts.pidsLimit, + BlkioWeight: copts.blkioWeight, + BlkioWeightDevice: copts.blkioWeightDevice.GetList(), + BlkioDeviceReadBps: copts.deviceReadBps.GetList(), + BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(), + BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), + BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), + IOMaximumIOps: copts.ioMaxIOps, + IOMaximumBandwidth: uint64(copts.ioMaxBandwidth), + Ulimits: copts.ulimits.GetList(), + DeviceCgroupRules: copts.deviceCgroupRules.GetAll(), + Devices: deviceMappings, + } + + config := &container.Config{ + Hostname: copts.hostname, + ExposedPorts: ports, + User: copts.user, + Tty: copts.tty, + // TODO: deprecated, it comes from -n, --networking + // it's still needed internally to set the network to disabled + // if e.g. bridge is none in daemon opts, and in inspect + NetworkDisabled: false, + OpenStdin: copts.stdin, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, + Cmd: runCmd, + Image: copts.Image, + Volumes: volumes, + MacAddress: copts.macAddress, + Entrypoint: entrypoint, + WorkingDir: copts.workingDir, + Labels: opts.ConvertKVStringsToMap(labels), + Healthcheck: healthConfig, + } + if flags.Changed("stop-signal") { + config.StopSignal = copts.stopSignal + } + if flags.Changed("stop-timeout") { + config.StopTimeout = &copts.stopTimeout + } + + hostConfig := &container.HostConfig{ + Binds: binds, + ContainerIDFile: copts.containerIDFile, + OomScoreAdj: copts.oomScoreAdj, + AutoRemove: copts.autoRemove, + Privileged: copts.privileged, + PortBindings: portBindings, + Links: copts.links.GetAll(), + PublishAllPorts: copts.publishAll, + // Make sure the dns fields are never nil. + // New containers don't ever have those fields nil, + // but pre created containers can still have those nil values. + // See https://github.com/docker/docker/pull/17779 + // for a more detailed explanation on why we don't want that. 
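    // (GetAllOrEmpty returns an empty, non-nil slice when the flag was never
    // used, which is what keeps the fields below from being nil.)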
+ DNS: copts.dns.GetAllOrEmpty(), + DNSSearch: copts.dnsSearch.GetAllOrEmpty(), + DNSOptions: copts.dnsOptions.GetAllOrEmpty(), + ExtraHosts: copts.extraHosts.GetAll(), + VolumesFrom: copts.volumesFrom.GetAll(), + NetworkMode: container.NetworkMode(copts.netMode), + IpcMode: container.IpcMode(copts.ipcMode), + PidMode: pidMode, + UTSMode: utsMode, + UsernsMode: usernsMode, + CapAdd: strslice.StrSlice(copts.capAdd.GetAll()), + CapDrop: strslice.StrSlice(copts.capDrop.GetAll()), + GroupAdd: copts.groupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: securityOpts, + StorageOpt: storageOpts, + ReadonlyRootfs: copts.readonlyRootfs, + LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, + VolumeDriver: copts.volumeDriver, + Isolation: container.Isolation(copts.isolation), + ShmSize: copts.shmSize.Value(), + Resources: resources, + Tmpfs: tmpfs, + Sysctls: copts.sysctls.GetAll(), + Runtime: copts.runtime, + Mounts: mounts, + } + + if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, errors.Errorf("Conflicting options: --restart and --rm") + } + + // only set this value if the user provided the flag, else it should default to nil + if flags.Changed("init") { + hostConfig.Init = &copts.init + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + + networkingConfig := &networktypes.NetworkingConfig{ + EndpointsConfig: make(map[string]*networktypes.EndpointSettings), + } + + if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 { + epConfig := &networktypes.EndpointSettings{} + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + + epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: copts.ipv4Address, + IPv6Address: copts.ipv6Address, + } + + if copts.linkLocalIPs.Len() > 0 { + epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len()) + copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll()) + } + } + + if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Links = make([]string, len(hostConfig.Links)) + copy(epConfig.Links, hostConfig.Links) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + if copts.aliases.Len() > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Aliases = make([]string, copts.aliases.Len()) + copy(epConfig.Aliases, copts.aliases.GetAll()) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + return &containerConfig{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + }, nil +} + +func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { + loggingOptsMap := opts.ConvertKVStringsToMap(loggingOpts) + if loggingDriver == "none" && len(loggingOpts) > 0 { + return map[string]string{}, errors.Errorf("invalid logging opts for driver %s", loggingDriver) + } + return loggingOptsMap, nil +} + +// takes a local seccomp daemon, reads the file contents for sending to the daemon +func parseSecurityOpts(securityOpts []string) ([]string, error) { + for key, opt := range securityOpts { + con := 
strings.SplitN(opt, "=", 2) + if len(con) == 1 && con[0] != "no-new-privileges" { + if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + } else { + return securityOpts, errors.Errorf("Invalid --security-opt: %q", opt) + } + } + if con[0] == "seccomp" && con[1] != "unconfined" { + f, err := ioutil.ReadFile(con[1]) + if err != nil { + return securityOpts, errors.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) + } + b := bytes.NewBuffer(nil) + if err := json.Compact(b, f); err != nil { + return securityOpts, errors.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) + } + securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) + } + } + + return securityOpts, nil +} + +// parses storage options per container into a map +func parseStorageOpts(storageOpts []string) (map[string]string, error) { + m := make(map[string]string) + for _, option := range storageOpts { + if strings.Contains(option, "=") { + opt := strings.SplitN(option, "=", 2) + m[opt[0]] = opt[1] + } else { + return nil, errors.Errorf("invalid storage option") + } + } + return m, nil +} + +// parseDevice parses a device mapping string to a container.DeviceMapping struct +func parseDevice(device string) (container.DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + if validDeviceMode(arr[1]) { + permissions = arr[1] + } else { + dst = arr[1] + } + fallthrough + case 1: + src = arr[0] + default: + return container.DeviceMapping{}, errors.Errorf("invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + + deviceMapping := container.DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, + } + return deviceMapping, nil +} + +// validateDeviceCgroupRule validates a device cgroup rule string format +// It will make sure 'val' is in the form: +// 'type major:minor mode' +func validateDeviceCgroupRule(val string) (string, error) { + if deviceCgroupRuleRegexp.MatchString(val) { + return val, nil + } + + return val, errors.Errorf("invalid device cgroup format '%s'", val) +} + +// validDeviceMode checks if the mode for device is valid or not. +// Valid mode is a composition of r (read), w (write), and m (mknod). +func validDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +// validateDevice validates a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +// It also validates the device mode. 
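+// For example: "/dev/snd" or "/dev/snd:/dev/snd:rwm".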
+func validateDevice(val string) (string, error) { + return validatePath(val, validDeviceMode) +} + +func validatePath(val string, validator func(string) bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, errors.Errorf("bad format for path: %s", val) + } + + split := strings.SplitN(val, ":", 3) + if split[0] == "" { + return val, errors.Errorf("bad format for path: %s", val) + } + switch len(split) { + case 1: + containerPath = split[0] + val = path.Clean(containerPath) + case 2: + if isValid := validator(split[1]); isValid { + containerPath = split[0] + mode = split[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = split[1] + val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) + } + case 3: + containerPath = split[1] + mode = split[2] + if isValid := validator(split[2]); !isValid { + return val, errors.Errorf("bad mode specified: %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) + } + + if !path.IsAbs(containerPath) { + return val, errors.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// validateAttach validates that the specified string is a valid attach option. +func validateAttach(val string) (string, error) { + s := strings.ToLower(val) + for _, str := range []string{"stdin", "stdout", "stderr"} { + if s == str { + return s, nil + } + } + return val, errors.Errorf("valid streams are STDIN, STDOUT and STDERR") +} diff --git a/cli/cli/command/container/opts_test.go b/cli/cli/command/container/opts_test.go new file mode 100644 index 00000000..67f0cad4 --- /dev/null +++ b/cli/cli/command/container/opts_test.go @@ -0,0 +1,646 @@ +package container + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/spf13/pflag" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestValidateAttach(t *testing.T) { + valid := []string{ + "stdin", + "stdout", + "stderr", + "STDIN", + "STDOUT", + "STDERR", + } + if _, err := validateAttach("invalid"); err == nil { + t.Fatal("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") + } + + for _, attach := range valid { + value, err := validateAttach(attach) + if err != nil { + t.Fatal(err) + } + if value != strings.ToLower(attach) { + t.Fatalf("Expected [%v], got [%v]", attach, value) + } + } +} + +// nolint: unparam +func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + flags, copts := setupRunFlags() + if err := flags.Parse(args); err != nil { + return nil, nil, nil, err + } + // TODO: fix tests to accept ContainerConfig + containerConfig, err := parse(flags, copts) + if err != nil { + return nil, nil, nil, err + } + return containerConfig.Config, containerConfig.HostConfig, containerConfig.NetworkingConfig, err +} + +func setupRunFlags() (*pflag.FlagSet, *containerOptions) { + flags := pflag.NewFlagSet("run", pflag.ContinueOnError) + flags.SetOutput(ioutil.Discard) + flags.Usage = nil + copts := addFlags(flags) + return flags, copts +} + +func parseMustError(t *testing.T, args string) { + _, _, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) + assert.ErrorContains(t, err, "", args) +} + +func mustParse(t *testing.T, args string) 
(*container.Config, *container.HostConfig) { + config, hostConfig, _, err := parseRun(append(strings.Split(args, " "), "ubuntu", "bash")) + assert.NilError(t, err) + return config, hostConfig +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } +} + +func TestParseRunWithInvalidArgs(t *testing.T) { + parseMustError(t, "-a") + parseMustError(t, "-a invalid") + parseMustError(t, "-a invalid -a stdout") + parseMustError(t, "-a stdout -a stderr -d") + parseMustError(t, "-a stdin -d") + parseMustError(t, "-a stdout -d") + parseMustError(t, "-a stderr -d") + parseMustError(t, "-d --rm") +} + +// nolint: gocyclo +func TestParseWithVolumes(t *testing.T) { + + // A single volume + arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) + } + + // Two volumes + arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. 
Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) + } + + // A single bind mount + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) + } + + // Two bind mounts. + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Two bind mounts, first read-only, second read-write. + // TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4 + arr, tryit = setupPlatformVolume( + []string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, + []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Similar to previous test but with alternate modes which are only supported by Linux + if runtime.GOOS != "windows" { + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + } + + // One bind mount and one volume + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags %s and %s. 
%s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) + } + + // Root to non-c: drive letter (Windows specific) + if runtime.GOOS == "windows" { + arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { + t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0]) + } + } + +} + +// setupPlatformVolume takes two arrays of volume specs - a Unix style +// spec and a Windows style spec. Depending on the platform being unit tested, +// it returns one of them, along with a volume string that would be passed +// on the docker CLI (e.g. -v /bar -v /foo). +func setupPlatformVolume(u []string, w []string) ([]string, string) { + var a []string + if runtime.GOOS == "windows" { + a = w + } else { + a = u + } + s := "" + for _, v := range a { + s = s + "-v " + v + " " + } + return a, s +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return errors.Errorf("strings don't match") +} + +// Simple parse with MacAddress validation +func TestParseWithMacAddress(t *testing.T) { + invalidMacAddress := "--mac-address=invalidMacAddress" + validMacAddress := "--mac-address=92:d0:c6:0a:29:33" + if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { + t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) + } + if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { + t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) + } +} + +func TestRunFlagsParseWithMemory(t *testing.T) { + flags, _ := setupRunFlags() + args := []string{"--memory=invalid", "img", "cmd"} + err := flags.Parse(args) + assert.ErrorContains(t, err, `invalid argument "invalid" for "-m, --memory" flag`) + + _, hostconfig := mustParse(t, "--memory=1G") + assert.Check(t, is.Equal(int64(1073741824), hostconfig.Memory)) +} + +func TestParseWithMemorySwap(t *testing.T) { + flags, _ := setupRunFlags() + args := []string{"--memory-swap=invalid", "img", "cmd"} + err := flags.Parse(args) + assert.ErrorContains(t, err, `invalid argument "invalid" for "--memory-swap" flag`) + + _, hostconfig := mustParse(t, "--memory-swap=1G") + assert.Check(t, is.Equal(int64(1073741824), hostconfig.MemorySwap)) + + _, hostconfig = mustParse(t, "--memory-swap=-1") + assert.Check(t, is.Equal(int64(-1), hostconfig.MemorySwap)) +} + +func TestParseHostname(t *testing.T) { + validHostnames := map[string]string{ + "hostname": "hostname", + "host-name": "host-name", + "hostname123": "hostname123", + "123hostname": "123hostname", + "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", + } + hostnameWithDomain := "--hostname=hostname.domainname" + hostnameWithDomainTld := "--hostname=hostname.domainname.tld" + for hostname, expectedHostname := range validHostnames { + if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { + t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) + } + } + if 
config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname) + } + if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got '%v'", config.Hostname) + } +} + +func TestParseWithExpose(t *testing.T) { + invalids := map[string]string{ + ":": "invalid port format for --expose: :", + "8080:9090": "invalid port format for --expose: 8080:9090", + "/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.", + "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", + "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, + } + valids := map[string][]nat.Port{ + "8080/tcp": {"8080/tcp"}, + "8080/udp": {"8080/udp"}, + "8080/ncp": {"8080/ncp"}, + "8080-8080/udp": {"8080/udp"}, + "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, + } + for expose, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) + } + } + for expose, exposedPorts := range valids { + config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != len(exposedPorts) { + t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) + } + for _, port := range exposedPorts { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) + } + } + } + // Merge with actual published port + config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != 2 { + t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) + } + ports := []nat.Port{"80/tcp", "81/tcp"} + for _, port := range ports { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) + } + } +} + +func TestParseDevice(t *testing.T) { + valids := map[string]container.DeviceMapping{ + "/dev/snd": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rwm", + }, + "/dev/snd:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rw", + }, + "/dev/snd:/something": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rw", + }, + } + for device, deviceMapping := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if 
len(hostconfig.Devices) != 1 { + t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) + } + if hostconfig.Devices[0] != deviceMapping { + t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) + } + } + +} + +func TestParseModes(t *testing.T) { + // pid ko + flags, copts := setupRunFlags() + args := []string{"--pid=container:", "img", "cmd"} + assert.NilError(t, flags.Parse(args)) + _, err := parse(flags, copts) + assert.ErrorContains(t, err, "--pid: invalid PID mode") + + // pid ok + _, hostconfig, _, err := parseRun([]string{"--pid=host", "img", "cmd"}) + assert.NilError(t, err) + if !hostconfig.PidMode.Valid() { + t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) + } + + // uts ko + _, _, _, err = parseRun([]string{"--uts=container:", "img", "cmd"}) + assert.ErrorContains(t, err, "--uts: invalid UTS mode") + + // uts ok + _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) + assert.NilError(t, err) + if !hostconfig.UTSMode.Valid() { + t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) + } +} + +func TestRunFlagsParseShmSize(t *testing.T) { + // shm-size ko + flags, _ := setupRunFlags() + args := []string{"--shm-size=a128m", "img", "cmd"} + expectedErr := `invalid argument "a128m" for "--shm-size" flag: invalid size: 'a128m'` + err := flags.Parse(args) + assert.ErrorContains(t, err, expectedErr) + + // shm-size ok + _, hostconfig, _, err := parseRun([]string{"--shm-size=128m", "img", "cmd"}) + assert.NilError(t, err) + if hostconfig.ShmSize != 134217728 { + t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) + } +} + +func TestParseRestartPolicy(t *testing.T) { + invalids := map[string]string{ + "always:2:3": "invalid restart policy format", + "on-failure:invalid": "maximum retry count must be an integer", + } + valids := map[string]container.RestartPolicy{ + "": {}, + "always": { + Name: "always", + MaximumRetryCount: 0, + }, + "on-failure:1": { + Name: "on-failure", + MaximumRetryCount: 1, + }, + } + for restart, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) + } + } + for restart, expected := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.RestartPolicy != expected { + t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) + } + } +} + +func TestParseRestartPolicyAutoRemove(t *testing.T) { + expected := "Conflicting options: --restart and --rm" + _, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"}) + if err == nil || err.Error() != expected { + t.Fatalf("Expected error %v, but got none", expected) + } +} + +func TestParseHealth(t *testing.T) { + checkOk := func(args ...string) *container.HealthConfig { + config, _, _, err := parseRun(args) + if err != nil { + t.Fatalf("%#v: %v", args, err) + } + return config.Healthcheck + } + checkError := func(expected string, args ...string) { + config, _, _, err := parseRun(args) + if err == nil { + t.Fatalf("Expected error, but got %#v", config) + } + if err.Error() != expected { + t.Fatalf("Expected %#v, got %#v", expected, err) + } + } + health := checkOk("--no-healthcheck", "img", "cmd") + if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { + t.Fatalf("--no-healthcheck failed: 
%#v", health) + } + + health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") + if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { + t.Fatalf("--health-cmd: got %#v", health.Test) + } + if health.Timeout != 0 { + t.Fatalf("--health-cmd: timeout = %s", health.Timeout) + } + + checkError("--no-healthcheck conflicts with --health-* options", + "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") + + health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "--health-start-period=5s", "img", "cmd") + if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond || health.StartPeriod != 5*time.Second { + t.Fatalf("--health-*: got %#v", health) + } +} + +func TestParseLoggingOpts(t *testing.T) { + // logging opts ko + if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { + t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) + } + // logging opts ok + _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { + t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) + } +} + +func TestParseEnvfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } + // env ko + if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // env ok + config, _, _, err := parseRun([]string{"--env-file=testdata/valid.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { + t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) + } + config, _, _, err = parseRun([]string{"--env-file=testdata/valid.env", "--env=ENV2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { + t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) + } +} + +func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) { + // UTF8 with BOM + config, _, _, err := parseRun([]string{"--env-file=testdata/utf8.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"} + if len(config.Env) != len(env) { + t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env) + } + for i, v := range env { + if config.Env[i] != v { + t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i])) + } + } + + // UTF16 with BOM + e := "contains invalid utf8 bytes at line" + if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // UTF16BE with BOM + if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message 
'%s', got %v", e, err) + } +} + +func TestParseLabelfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } + // label ko + if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // label ok + config, _, _, err := parseRun([]string{"--label-file=testdata/valid.label", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { + t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) + } + config, _, _, err = parseRun([]string{"--label-file=testdata/valid.label", "--label=LABEL2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { + t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) + } +} + +func TestParseEntryPoint(t *testing.T) { + config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) + if err != nil { + t.Fatal(err) + } + if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { + t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) + } +} + +func TestValidateDevice(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:r", + "/hostPath:/containerPath:rw", + "/hostPath:/containerPath:mrw", + } + invalid := map[string]string{ + "": "bad format for path: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for path: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for path: :test", + ":/test": "bad format for path: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for path: :test:", + "::": "bad format for path: ::", + ":::": "bad format for path: :::", + "/tmp:::": "bad format for path: /tmp:::", + ":/tmp::": "bad format for path: :/tmp::", + "path:ro": "ro is not an absolute path", + "path:rr": "rr is not an absolute path", + "a:/b:ro": "bad mode specified: ro", + "a:/b:rr": "bad mode specified: rr", + } + + for _, path := range valid { + if _, err := validateDevice(path); err != nil { + t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := validateDevice(path); err == nil { + t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} diff --git a/cli/cli/command/container/pause.go b/cli/cli/command/container/pause.go new file mode 100644 index 00000000..1118b7f0 --- /dev/null +++ b/cli/cli/command/container/pause.go @@ -0,0 +1,49 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type pauseOptions struct { + containers []string +} + +// NewPauseCommand creates a new cobra.Command for `docker pause` +func NewPauseCommand(dockerCli command.Cli) 
*cobra.Command { + var opts pauseOptions + + return &cobra.Command{ + Use: "pause CONTAINER [CONTAINER...]", + Short: "Pause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runPause(dockerCli, &opts) + }, + } +} + +func runPause(dockerCli command.Cli, opts *pauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/port.go b/cli/cli/command/container/port.go new file mode 100644 index 00000000..83e16a98 --- /dev/null +++ b/cli/cli/command/container/port.go @@ -0,0 +1,78 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type portOptions struct { + container string + + port string +} + +// NewPortCommand creates a new cobra.Command for `docker port` +func NewPortCommand(dockerCli command.Cli) *cobra.Command { + var opts portOptions + + cmd := &cobra.Command{ + Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", + Short: "List port mappings or a specific mapping for the container", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.port = args[1] + } + return runPort(dockerCli, &opts) + }, + } + return cmd +} + +func runPort(dockerCli command.Cli, opts *portOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if opts.port != "" { + port := opts.port + proto := "tcp" + parts := strings.SplitN(port, "/", 2) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return errors.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/cli/cli/command/container/prune.go b/cli/cli/command/container/prune.go new file mode 100644 index 00000000..ba5f8055 --- /dev/null +++ b/cli/cli/command/container/prune.go @@ -0,0 +1,78 @@ +package container + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for containers +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := 
&cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all stopped containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const warning = `WARNING! This will remove all stopped containers. +Are you sure you want to continue?` + +func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := command.PruneFilters(dockerCli, options.filter.Value()) + + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return 0, "", nil + } + + report, err := dockerCli.Client().ContainersPrune(context.Background(), pruneFilters) + if err != nil { + return 0, "", err + } + + if len(report.ContainersDeleted) > 0 { + output = "Deleted Containers:\n" + for _, id := range report.ContainersDeleted { + output += id + "\n" + } + spaceReclaimed = report.SpaceReclaimed + } + + return spaceReclaimed, output, nil +} + +// RunPrune calls the Container Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, filter: filter}) +} diff --git a/cli/cli/command/container/ps_test.go b/cli/cli/command/container/ps_test.go new file mode 100644 index 00000000..28853576 --- /dev/null +++ b/cli/cli/command/container/ps_test.go @@ -0,0 +1,119 @@ +package container + +import ( + "testing" + + "github.com/docker/cli/opts" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestBuildContainerListOptions(t *testing.T) { + filters := opts.NewFilterOpt() + assert.NilError(t, filters.Set("foo=bar")) + assert.NilError(t, filters.Set("baz=foo")) + + contexts := []struct { + psOpts *psOptions + expectedAll bool + expectedSize bool + expectedLimit int + expectedFilters map[string]string + }{ + { + psOpts: &psOptions{ + all: true, + size: true, + last: 5, + filter: filters, + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: true, + last: -1, + nLatest: true, + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 1, + expectedFilters: make(map[string]string), + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}} {{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 
5, + filter: filters, + // Without .Size, size should be false + format: "{{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: false, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + } + + for _, c := range contexts { + options, err := buildContainerListOptions(c.psOpts) + assert.NilError(t, err) + + assert.Check(t, is.Equal(c.expectedAll, options.All)) + assert.Check(t, is.Equal(c.expectedSize, options.Size)) + assert.Check(t, is.Equal(c.expectedLimit, options.Limit)) + assert.Check(t, is.Equal(len(c.expectedFilters), options.Filters.Len())) + + for k, v := range c.expectedFilters { + f := options.Filters + if !f.ExactMatch(k, v) { + t.Fatalf("Expected filter with key %s to be %s but got %s", k, v, f.Get(k)) + } + } + } +} diff --git a/cli/cli/command/container/rename.go b/cli/cli/command/container/rename.go new file mode 100644 index 00000000..bc58ea20 --- /dev/null +++ b/cli/cli/command/container/rename.go @@ -0,0 +1,51 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type renameOptions struct { + oldName string + newName string +} + +// NewRenameCommand creates a new cobra.Command for `docker rename` +func NewRenameCommand(dockerCli command.Cli) *cobra.Command { + var opts renameOptions + + cmd := &cobra.Command{ + Use: "rename CONTAINER NEW_NAME", + Short: "Rename a container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.oldName = args[0] + opts.newName = args[1] + return runRename(dockerCli, &opts) + }, + } + return cmd +} + +func runRename(dockerCli command.Cli, opts *renameOptions) error { + ctx := context.Background() + + oldName := strings.TrimSpace(opts.oldName) + newName := strings.TrimSpace(opts.newName) + + if oldName == "" || newName == "" { + return errors.New("Error: Neither old nor new names may be empty") + } + + if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return errors.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/cli/cli/command/container/restart.go b/cli/cli/command/container/restart.go new file mode 100644 index 00000000..6e02ee46 --- /dev/null +++ b/cli/cli/command/container/restart.go @@ -0,0 +1,62 @@ +package container + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type restartOptions struct { + nSeconds int + nSecondsChanged bool + + containers []string +} + +// NewRestartCommand creates a new cobra.Command for `docker restart` +func NewRestartCommand(dockerCli command.Cli) *cobra.Command { + var opts restartOptions + + cmd := &cobra.Command{ + Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Restart one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.nSecondsChanged = cmd.Flags().Changed("time") + return runRestart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") + return cmd +} + +func runRestart(dockerCli command.Cli, opts *restartOptions) error { + ctx := context.Background() + var errs []string + var timeout *time.Duration + if 
opts.nSecondsChanged { + timeoutValue := time.Duration(opts.nSeconds) * time.Second + timeout = &timeoutValue + } + + for _, name := range opts.containers { + if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/rm.go b/cli/cli/command/container/rm.go new file mode 100644 index 00000000..2dcd4b6a --- /dev/null +++ b/cli/cli/command/container/rm.go @@ -0,0 +1,73 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type rmOptions struct { + rmVolumes bool + rmLink bool + force bool + + containers []string +} + +// NewRmCommand creates a new cobra.Command for `docker rm` +func NewRmCommand(dockerCli command.Cli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Remove one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runRm(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") + flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") + return cmd +} + +func runRm(dockerCli command.Cli, opts *rmOptions) error { + ctx := context.Background() + + var errs []string + options := types.ContainerRemoveOptions{ + RemoveVolumes: opts.rmVolumes, + RemoveLinks: opts.rmLink, + Force: opts.force, + } + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + container = strings.Trim(container, "/") + if container == "" { + return errors.New("Container name cannot be empty") + } + return dockerCli.Client().ContainerRemove(ctx, container, options) + }) + + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/run.go b/cli/cli/command/container/run.go new file mode 100644 index 00000000..d2ca5873 --- /dev/null +++ b/cli/cli/command/container/run.go @@ -0,0 +1,341 @@ +package container + +import ( + "context" + "fmt" + "io" + "net/http/httputil" + "os" + "regexp" + "runtime" + "strings" + "syscall" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type runOptions struct { + createOptions + detach bool + sigProxy bool + detachKeys string +} + +// NewRunCommand create a new `docker run` command +func NewRunCommand(dockerCli command.Cli) *cobra.Command { + var opts runOptions + var copts *containerOptions + + cmd := &cobra.Command{ + Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", + 
Short: "Run a command in a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runRun(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + // These are flags not stored in Config/HostConfig + flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") + flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddPlatformFlag(flags, &opts.platform) + command.AddTrustVerificationFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + copts = addFlags(flags) + return cmd +} + +func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) { + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintln(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.") + } +} + +// check the DNS settings passed via --dns against localhost regexp to warn if +// they are trying to set a DNS to a localhost address +func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) { + for _, dnsIP := range hostConfig.DNS { + if isLocalhost(dnsIP) { + fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + return + } + } +} + +// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range. +const ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` + +var localhostIPRegexp = regexp.MustCompile(ipLocalhost) + +// IsLocalhost returns true if ip matches the localhost IP regular expression. 
+// Used for determining if nameserver settings are being passed which are +// localhost addresses +func isLocalhost(ip string) bool { + return localhostIPRegexp.MatchString(ip) +} + +func runRun(dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copts *containerOptions) error { + proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), copts.env.GetAll()) + newEnv := []string{} + for k, v := range proxyConfig { + if v == nil { + newEnv = append(newEnv, k) + } else { + newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v)) + } + } + copts.env = *opts.NewListOptsRef(&newEnv, nil) + containerConfig, err := parse(flags, copts) + // just in case the parse does not exit + if err != nil { + reportError(dockerCli.Err(), "run", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + return runContainer(dockerCli, ropts, copts, containerConfig) +} + +// nolint: gocyclo +func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptions, containerConfig *containerConfig) error { + config := containerConfig.Config + hostConfig := containerConfig.HostConfig + stdout, stderr := dockerCli.Out(), dockerCli.Err() + client := dockerCli.Client() + + warnOnOomKillDisable(*hostConfig, stderr) + warnOnLocalhostDNS(*hostConfig, stderr) + + config.ArgsEscaped = false + + if !opts.detach { + if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if copts.attach.Len() != 0 { + return errors.New("Conflicting options: -a and -d") + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable sigProxy when in TTY mode + if config.Tty { + opts.sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. + if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize() + } + + ctx, cancelFun := context.WithCancel(context.Background()) + defer cancelFun() + + createResponse, err := createContainer(ctx, dockerCli, containerConfig, &opts.createOptions) + if err != nil { + reportError(stderr, "run", err.Error(), true) + return runStartContainerErr(err) + } + if opts.sigProxy { + sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID) + defer signal.StopCatch(sigc) + } + + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintln(stdout, createResponse.ID) + }() + } + attach := config.AttachStdin || config.AttachStdout || config.AttachStderr + if attach { + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + close, err := attachContainer(ctx, dockerCli, &errCh, config, createResponse.ID) + + if err != nil { + return err + } + defer close() + } + + statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, copts.autoRemove) + + //start the container + if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { + // If we have hijackedIOStreamer, we should notify + // hijackedIOStreamer we are going to exit and wait + // to avoid the terminal are not restored. 
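+ // (cancel the stream and wait for it to finish so the terminal state is restored before returning)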
+ if attach { + cancelFun() + <-errCh + } + + reportError(stderr, "run", err.Error(), false) + if copts.autoRemove { + // wait container to be removed + <-statusChan + } + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil { + fmt.Fprintln(stderr, "Error monitoring TTY size:", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + if _, ok := err.(term.EscapeError); ok { + // The user entered the detach escape sequence. + return nil + } + + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. + if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + status := <-statusChan + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +func attachContainer( + ctx context.Context, + dockerCli command.Cli, + errCh *chan error, + config *container.Config, + containerID string, +) (func(), error) { + stdout, stderr := dockerCli.Out(), dockerCli.Err() + var ( + out, cerr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = dockerCli.In() + } + if config.AttachStdout { + out = stdout + } + if config.AttachStderr { + if config.Tty { + cerr = stdout + } else { + cerr = stderr + } + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return nil, errAttach + } + + ch := make(chan error, 1) + *errCh = ch + + go func() { + ch <- func() error { + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: out, + errorStream: cerr, + resp: resp, + tty: config.Tty, + detachKeys: options.DetachKeys, + } + + if errHijack := streamer.stream(ctx); errHijack != nil { + return errHijack + } + return errAttach + }() + }() + return resp.Close, nil +} + +// reportError is a utility method that prints a user-friendly message +// containing the error that occurred during parsing and a suggestion to get help +func reportError(stderr io.Writer, name string, str string, withHelp bool) { + str = strings.TrimSuffix(str, ".") + "." + if withHelp { + str += "\nSee '" + os.Args[0] + " " + name + " --help'." 
+ } + fmt.Fprintf(stderr, "%s: %s\n", os.Args[0], str) +} + +// if container start fails with 'not found'/'no such' error, return 127 +// if container start fails with 'permission denied' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") + statusError := cli.StatusError{StatusCode: 125} + if strings.Contains(trimmedErr, "executable file not found") || + strings.Contains(trimmedErr, "no such file or directory") || + strings.Contains(trimmedErr, "system cannot find the file specified") { + statusError = cli.StatusError{StatusCode: 127} + } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { + statusError = cli.StatusError{StatusCode: 126} + } + + return statusError +} diff --git a/cli/cli/command/container/run_test.go b/cli/cli/command/container/run_test.go new file mode 100644 index 00000000..4938a564 --- /dev/null +++ b/cli/cli/command/container/run_test.go @@ -0,0 +1,75 @@ +package container + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestRunLabel(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + createContainerFunc: func(_ *container.Config, _ *container.HostConfig, _ *network.NetworkingConfig, _ string) (container.ContainerCreateCreatedBody, error) { + return container.ContainerCreateCreatedBody{ + ID: "id", + }, nil + }, + Version: "1.36", + }) + cmd := NewRunCommand(cli) + cmd.Flags().Set("detach", "true") + cmd.SetArgs([]string{"--label", "foo", "busybox"}) + assert.NilError(t, cmd.Execute()) +} + +func TestRunCommandWithContentTrustErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + notaryFunc test.NotaryClientFuncType + }{ + { + name: "offline-notary-server", + notaryFunc: notary.GetOfflineNotaryRepository, + expectedError: "client is offline", + args: []string{"image:tag"}, + }, + { + name: "uninitialized-notary-server", + notaryFunc: notary.GetUninitializedNotaryRepository, + expectedError: "remote trust data does not exist", + args: []string{"image:tag"}, + }, + { + name: "empty-notary-server", + notaryFunc: notary.GetEmptyTargetsNotaryRepository, + expectedError: "No valid trust data for tag", + args: []string{"image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + createContainerFunc: func(config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, + ) (container.ContainerCreateCreatedBody, error) { + return container.ContainerCreateCreatedBody{}, fmt.Errorf("shouldn't try to pull image") + }, + }, test.EnableContentTrust) + cli.SetNotaryClient(tc.notaryFunc) + cmd := NewRunCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + err := cmd.Execute() + assert.Assert(t, err != nil) + assert.Assert(t, is.Contains(cli.ErrBuffer().String(), tc.expectedError)) + } +} diff --git a/cli/cli/command/container/start.go b/cli/cli/command/container/start.go new file mode 100644 index 00000000..e3883028 --- /dev/null +++ b/cli/cli/command/container/start.go @@ -0,0 +1,202 @@ +package container + +import ( + "context" + "fmt" + "io" + "net/http/httputil" + "strings" + + "github.com/docker/cli/cli" + 
"github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type startOptions struct { + attach bool + openStdin bool + detachKeys string + checkpoint string + checkpointDir string + + containers []string +} + +// NewStartCommand creates a new cobra.Command for `docker start` +func NewStartCommand(dockerCli command.Cli) *cobra.Command { + var opts startOptions + + cmd := &cobra.Command{ + Use: "start [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Start one or more stopped containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") + flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint") + flags.SetAnnotation("checkpoint", "experimental", nil) + flags.SetAnnotation("checkpoint", "ostype", []string{"linux"}) + flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory") + flags.SetAnnotation("checkpoint-dir", "experimental", nil) + flags.SetAnnotation("checkpoint-dir", "ostype", []string{"linux"}) + return cmd +} + +// nolint: gocyclo +func runStart(dockerCli command.Cli, opts *startOptions) error { + ctx, cancelFun := context.WithCancel(context.Background()) + defer cancelFun() + + if opts.attach || opts.openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if len(opts.containers) > 1 { + return errors.New("you cannot start and attach multiple containers at once") + } + + // 2. Attach to the container. + container := opts.containers[0] + c, err := dockerCli.Client().ContainerInspect(ctx, container) + if err != nil { + return err + } + + // We always use c.ID instead of container to maintain consistency during `docker start` + if !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, c.ID) + defer signal.StopCatch(sigc) + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: opts.openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + + if options.Stdin { + in = dockerCli.In() + } + + resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach return an ErrPersistEOF (connection closed) + // means server met an error and already put it in Hijacked connection, + // we would keep the error and read the detailed error message from hijacked connection + return errAttach + } + defer resp.Close() + + cErr := make(chan error, 1) + + go func() { + cErr <- func() error { + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: dockerCli.Out(), + errorStream: dockerCli.Err(), + resp: resp, + tty: c.Config.Tty, + detachKeys: options.DetachKeys, + } + + errHijack := streamer.stream(ctx) + if errHijack == nil { + return errAttach + } + return errHijack + }() + }() + + // 3. 
We should open a channel for receiving status code of the container + // no matter it's detached, removed on daemon side(--rm) or exit normally. + statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove) + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + + // 4. Start the container. + if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil { + cancelFun() + <-cErr + if c.HostConfig.AutoRemove { + // wait container to be removed + <-statusChan + } + return err + } + + // 5. Wait for attachment to break. + if c.Config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil { + fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err) + } + } + if attachErr := <-cErr; attachErr != nil { + if _, ok := err.(term.EscapeError); ok { + // The user entered the detach escape sequence. + return nil + } + return attachErr + } + + if status := <-statusChan; status != 0 { + return cli.StatusError{StatusCode: status} + } + } else if opts.checkpoint != "" { + if len(opts.containers) > 1 { + return errors.New("you cannot restore multiple containers at once") + } + container := opts.containers[0] + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + return dockerCli.Client().ContainerStart(ctx, container, startOptions) + + } else { + // We're not going to attach to anything. + // Start as many containers as we want. + return startContainersWithoutAttachments(ctx, dockerCli, opts.containers) + } + + return nil +} + +func startContainersWithoutAttachments(ctx context.Context, dockerCli command.Cli, containers []string) error { + var failedContainers []string + for _, container := range containers { + if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { + fmt.Fprintln(dockerCli.Err(), err) + failedContainers = append(failedContainers, container) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + + if len(failedContainers) > 0 { + return errors.Errorf("Error: failed to start containers: %s", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/cli/cli/command/container/stats.go b/cli/cli/command/container/stats.go new file mode 100644 index 00000000..4efcb19e --- /dev/null +++ b/cli/cli/command/container/stats.go @@ -0,0 +1,245 @@ +package container + +import ( + "context" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type statsOptions struct { + all bool + noStream bool + noTrunc bool + format string + containers []string +} + +// NewStatsCommand creates a new cobra.Command for `docker stats` +func NewStatsCommand(dockerCli command.Cli) *cobra.Command { + var opts statsOptions + + cmd := &cobra.Command{ + Use: "stats [OPTIONS] [CONTAINER...]", + Short: "Display a live stream of container(s) resource usage statistics", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStats(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just 
running)") + flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + return cmd +} + +// runStats displays a live stream of resource usage statistics for one or more containers. +// This shows real-time information on CPU usage, memory usage, and network I/O. +// nolint: gocyclo +func runStats(dockerCli command.Cli, opts *statsOptions) error { + showAll := len(opts.containers) == 0 + closeChan := make(chan error) + + ctx := context.Background() + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + + eventq, errq := dockerCli.Client().Events(ctx, options) + + // Whether we successfully subscribed to eventq or not, we can now + // unblock the main goroutine. + close(started) + + for { + select { + case event := <-eventq: + c <- event + case err := <-errq: + closeChan <- err + return + } + } + } + + // Get the daemonOSType if not set already + if daemonOSType == "" { + svctx := context.Background() + sv, err := dockerCli.Client().ServerVersion(svctx) + if err != nil { + return err + } + daemonOSType = sv.Os + } + + // waitFirst is a WaitGroup to wait first stat data's reach for each container + waitFirst := &sync.WaitGroup{} + + cStats := stats{} + // getContainerList simulates creation event for all previously existing + // containers (only used when calling `docker stats` without arguments). + getContainerList := func() { + options := types.ContainerListOptions{ + All: opts.all, + } + cs, err := dockerCli.Client().ContainerList(ctx, options) + if err != nil { + closeChan <- err + } + for _, container := range cs { + s := formatter.NewContainerStats(container.ID[:12]) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + } + + if showAll { + // If no names were specified, start a long running goroutine which + // monitors container events. We make sure we're subscribed before + // retrieving the list of running containers to avoid a race where we + // would "miss" a creation. + started := make(chan struct{}) + eh := command.InitEventHandler() + eh.Handle("create", func(e events.Message) { + if opts.all { + s := formatter.NewContainerStats(e.ID[:12]) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + }) + + eh.Handle("start", func(e events.Message) { + s := formatter.NewContainerStats(e.ID[:12]) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + }) + + eh.Handle("die", func(e events.Message) { + if !opts.all { + cStats.remove(e.ID[:12]) + } + }) + + eventChan := make(chan events.Message) + go eh.Watch(eventChan) + go monitorContainerEvents(started, eventChan) + defer close(eventChan) + <-started + + // Start a short-lived goroutine to retrieve the initial list of + // containers. + getContainerList() + } else { + // Artificially send creation events for the containers we were asked to + // monitor (same code path than we use when monitoring all containers). 
+ for _, name := range opts.containers { + s := formatter.NewContainerStats(name) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + + // We don't expect any asynchronous errors: closeChan can be closed. + close(closeChan) + + // Do a quick pause to detect any error with the provided list of + // container names. + time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + if err := c.GetError(); err != nil { + errs = append(errs, err.Error()) + } + } + cStats.mu.Unlock() + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + } + + // before print to screen, make sure each container get at least one valid stat data + waitFirst.Wait() + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().StatsFormat) > 0 { + format = dockerCli.ConfigFile().StatsFormat + } else { + format = formatter.TableFormatKey + } + } + statsCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewStatsFormat(format, daemonOSType), + } + cleanScreen := func() { + if !opts.noStream { + fmt.Fprint(dockerCli.Out(), "\033[2J") + fmt.Fprint(dockerCli.Out(), "\033[H") + } + } + + var err error + for range time.Tick(500 * time.Millisecond) { + cleanScreen() + ccstats := []formatter.StatsEntry{} + cStats.mu.Lock() + for _, c := range cStats.cs { + ccstats = append(ccstats, c.GetStatistics()) + } + cStats.mu.Unlock() + if err = formatter.ContainerStatsWrite(statsCtx, ccstats, daemonOSType, !opts.noTrunc); err != nil { + break + } + if len(cStats.cs) == 0 && !showAll { + break + } + if opts.noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shutdowns cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return err +} diff --git a/cli/cli/command/container/stats_helpers.go b/cli/cli/command/container/stats_helpers.go new file mode 100644 index 00000000..2300ce5c --- /dev/null +++ b/cli/cli/command/container/stats_helpers.go @@ -0,0 +1,239 @@ +package container + +import ( + "context" + "encoding/json" + "io" + "strings" + "sync" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type stats struct { + mu sync.Mutex + cs []*formatter.ContainerStats +} + +// daemonOSType is set once we have at least one stat for a container +// from the daemon. It is used to ensure we print the right header based +// on the daemon platform. +var daemonOSType string + +func (s *stats) add(cs *formatter.ContainerStats) bool { + s.mu.Lock() + defer s.mu.Unlock() + if _, exists := s.isKnownContainer(cs.Container); !exists { + s.cs = append(s.cs, cs) + return true + } + return false +} + +func (s *stats) remove(id string) { + s.mu.Lock() + if i, exists := s.isKnownContainer(id); exists { + s.cs = append(s.cs[:i], s.cs[i+1:]...) 
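+ // The slice re-append keeps the remaining entries in their original order,
+ // so rows keep their position in the rendered output.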
+ } + s.mu.Unlock() +} + +func (s *stats) isKnownContainer(cid string) (int, bool) { + for i, c := range s.cs { + if c.Container == cid { + return i, true + } + } + return -1, false +} + +func collect(ctx context.Context, s *formatter.ContainerStats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { + logrus.Debugf("collecting stats for %s", s.Container) + var ( + getFirst bool + previousCPU uint64 + previousSystem uint64 + u = make(chan error, 1) + ) + + defer func() { + // if error happens and we get nothing of stats, release wait group whatever + if !getFirst { + getFirst = true + waitFirst.Done() + } + }() + + response, err := cli.ContainerStats(ctx, s.Container, streamStats) + if err != nil { + s.SetError(err) + return + } + defer response.Body.Close() + + dec := json.NewDecoder(response.Body) + go func() { + for { + var ( + v *types.StatsJSON + memPercent, cpuPercent float64 + blkRead, blkWrite uint64 // Only used on Linux + mem, memLimit float64 + pidsStatsCurrent uint64 + ) + + if err := dec.Decode(&v); err != nil { + dec = json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body)) + u <- err + if err == io.EOF { + break + } + time.Sleep(100 * time.Millisecond) + continue + } + + daemonOSType = response.OSType + + if daemonOSType != "windows" { + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v) + blkRead, blkWrite = calculateBlockIO(v.BlkioStats) + mem = calculateMemUsageUnixNoCache(v.MemoryStats) + memLimit = float64(v.MemoryStats.Limit) + memPercent = calculateMemPercentUnixNoCache(memLimit, mem) + pidsStatsCurrent = v.PidsStats.Current + } else { + cpuPercent = calculateCPUPercentWindows(v) + blkRead = v.StorageStats.ReadSizeBytes + blkWrite = v.StorageStats.WriteSizeBytes + mem = float64(v.MemoryStats.PrivateWorkingSet) + } + netRx, netTx := calculateNetwork(v.Networks) + s.SetStatistics(formatter.StatsEntry{ + Name: v.Name, + ID: v.ID, + CPUPercentage: cpuPercent, + Memory: mem, + MemoryPercentage: memPercent, + MemoryLimit: memLimit, + NetworkRx: netRx, + NetworkTx: netTx, + BlockRead: float64(blkRead), + BlockWrite: float64(blkWrite), + PidsCurrent: pidsStatsCurrent, + }) + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the specified duration. 
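+ // SetErrorAndReset clears the stale values so the next render does not show
+ // outdated numbers for this container while it stays in the list.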
+ s.SetErrorAndReset(errors.New("timeout waiting for stats")) + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + case err := <-u: + s.SetError(err) + if err == io.EOF { + break + } + if err != nil { + continue + } + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + } + if !streamStats { + return + } + } +} + +func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + onlineCPUs = float64(v.CPUStats.OnlineCPUs) + ) + + if onlineCPUs == 0.0 { + onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage)) + } + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0 + } + return cpuPercent +} + +func calculateCPUPercentWindows(v *types.StatsJSON) float64 { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + return float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + return 0.00 +} + +func calculateBlockIO(blkio types.BlkioStats) (uint64, uint64) { + var blkRead, blkWrite uint64 + for _, bioEntry := range blkio.IoServiceBytesRecursive { + switch strings.ToLower(bioEntry.Op) { + case "read": + blkRead = blkRead + bioEntry.Value + case "write": + blkWrite = blkWrite + bioEntry.Value + } + } + return blkRead, blkWrite +} + +func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} + +// calculateMemUsageUnixNoCache calculate memory usage of the container. +// Page cache is intentionally excluded to avoid misinterpretation of the output. 
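+// For example, a Usage of 500 bytes with a "cache" entry of 400 bytes is reported
+// as 100 bytes in use (see TestCalculateMemUsageUnixNoCache).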
+func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { + return float64(mem.Usage - mem.Stats["cache"]) +} + +func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if limit != 0 { + return usedNoCache / limit * 100.0 + } + return 0 +} diff --git a/cli/cli/command/container/stats_helpers_test.go b/cli/cli/command/container/stats_helpers_test.go new file mode 100644 index 00000000..a9657e2e --- /dev/null +++ b/cli/cli/command/container/stats_helpers_test.go @@ -0,0 +1,47 @@ +package container + +import ( + "fmt" + "testing" + + "github.com/docker/docker/api/types" + "gotest.tools/assert" +) + +func TestCalculateMemUsageUnixNoCache(t *testing.T) { + // Given + stats := types.MemoryStats{Usage: 500, Stats: map[string]uint64{"cache": 400}} + + // When + result := calculateMemUsageUnixNoCache(stats) + + // Then + assert.Assert(t, inDelta(100.0, result, 1e-6)) +} + +func TestCalculateMemPercentUnixNoCache(t *testing.T) { + // Given + someLimit := float64(100.0) + noLimit := float64(0.0) + used := float64(70.0) + + // When and Then + t.Run("Limit is set", func(t *testing.T) { + result := calculateMemPercentUnixNoCache(someLimit, used) + assert.Assert(t, inDelta(70.0, result, 1e-6)) + }) + t.Run("No limit, no cgroup data", func(t *testing.T) { + result := calculateMemPercentUnixNoCache(noLimit, used) + assert.Assert(t, inDelta(0.0, result, 1e-6)) + }) +} + +func inDelta(x, y, delta float64) func() (bool, string) { + return func() (bool, string) { + diff := x - y + if diff < -delta || diff > delta { + return false, fmt.Sprintf("%f != %f within %f", x, y, delta) + } + return true, "" + } +} diff --git a/cli/cli/command/container/stats_unit_test.go b/cli/cli/command/container/stats_unit_test.go new file mode 100644 index 00000000..21e650e2 --- /dev/null +++ b/cli/cli/command/container/stats_unit_test.go @@ -0,0 +1,25 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestCalculateBlockIO(t *testing.T) { + blkio := types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{ + {Major: 8, Minor: 0, Op: "read", Value: 1234}, + {Major: 8, Minor: 1, Op: "read", Value: 4567}, + {Major: 8, Minor: 0, Op: "write", Value: 123}, + {Major: 8, Minor: 1, Op: "write", Value: 456}, + }, + } + blkRead, blkWrite := calculateBlockIO(blkio) + if blkRead != 5801 { + t.Fatalf("blkRead = %d, want 5801", blkRead) + } + if blkWrite != 579 { + t.Fatalf("blkWrite = %d, want 579", blkWrite) + } +} diff --git a/cli/cli/command/container/stop.go b/cli/cli/command/container/stop.go new file mode 100644 index 00000000..e2991754 --- /dev/null +++ b/cli/cli/command/container/stop.go @@ -0,0 +1,67 @@ +package container + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type stopOptions struct { + time int + timeChanged bool + + containers []string +} + +// NewStopCommand creates a new cobra.Command for `docker stop` +func NewStopCommand(dockerCli command.Cli) *cobra.Command { + var opts stopOptions + + cmd := &cobra.Command{ + Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Stop one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.timeChanged = cmd.Flags().Changed("time") + return 
runStop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") + return cmd +} + +func runStop(dockerCli command.Cli, opts *stopOptions) error { + ctx := context.Background() + + var timeout *time.Duration + if opts.timeChanged { + timeoutValue := time.Duration(opts.time) * time.Second + timeout = &timeoutValue + } + + var errs []string + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error { + return dockerCli.Client().ContainerStop(ctx, id, timeout) + }) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/testdata/container-list-format-name-name.golden b/cli/cli/command/container/testdata/container-list-format-name-name.golden new file mode 100644 index 00000000..858ec961 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-format-name-name.golden @@ -0,0 +1,2 @@ +c1 c1 +c2 c2 diff --git a/cli/cli/command/container/testdata/container-list-format-with-arg.golden b/cli/cli/command/container/testdata/container-list-format-with-arg.golden new file mode 100644 index 00000000..782ace94 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-format-with-arg.golden @@ -0,0 +1,2 @@ +c1 value +c2 diff --git a/cli/cli/command/container/testdata/container-list-with-config-format.golden b/cli/cli/command/container/testdata/container-list-with-config-format.golden new file mode 100644 index 00000000..6333bf57 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-with-config-format.golden @@ -0,0 +1,2 @@ +c1 busybox:latest some.label=value +c2 busybox:latest foo=bar diff --git a/cli/cli/command/container/testdata/container-list-with-format.golden b/cli/cli/command/container/testdata/container-list-with-format.golden new file mode 100644 index 00000000..6333bf57 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-with-format.golden @@ -0,0 +1,2 @@ +c1 busybox:latest some.label=value +c2 busybox:latest foo=bar diff --git a/cli/cli/command/container/testdata/container-list-without-format-no-trunc.golden b/cli/cli/command/container/testdata/container-list-without-format-no-trunc.golden new file mode 100644 index 00000000..5b0d652e --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-without-format-no-trunc.golden @@ -0,0 +1,3 @@ +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +container_id busybox:latest "top" Less than a second ago Up 1 second c1 +container_id busybox:latest "top" Less than a second ago Up 1 second c2,foo/bar diff --git a/cli/cli/command/container/testdata/container-list-without-format.golden b/cli/cli/command/container/testdata/container-list-without-format.golden new file mode 100644 index 00000000..7acd4045 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-without-format.golden @@ -0,0 +1,6 @@ +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +container_id busybox:latest "top" Less than a second ago Up 1 second c1 +container_id busybox:latest "top" Less than a second ago Up 1 second c2 +container_id busybox:latest "top" Less than a second ago Up 1 second 80-82/tcp c3 +container_id busybox:latest "top" Less than a second ago Up 1 second 81/udp c4 +container_id busybox:latest "top" Less than a 
second ago Up 1 second 8.8.8.8:82->82/tcp c5 diff --git a/cli/cli/command/container/testdata/utf16.env b/cli/cli/command/container/testdata/utf16.env new file mode 100755 index 0000000000000000000000000000000000000000..3a73358fffbc0d5d3d4df985ccf2f4a1a29cdb2a GIT binary patch literal 54 ucmezW&yB$!2yGdh7#tab7 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/update.go b/cli/cli/command/container/update.go new file mode 100644 index 00000000..d641a503 --- /dev/null +++ b/cli/cli/command/container/update.go @@ -0,0 +1,133 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + containertypes "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type updateOptions struct { + blkioWeight uint16 + cpuPeriod int64 + cpuQuota int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpusetCpus string + cpusetMems string + cpuShares int64 + memory opts.MemBytes + memoryReservation opts.MemBytes + memorySwap opts.MemSwapBytes + kernelMemory opts.MemBytes + restartPolicy string + cpus opts.NanoCPUs + + nFlag int + + containers []string +} + +// NewUpdateCommand creates a new cobra.Command for `docker update` +func NewUpdateCommand(dockerCli command.Cli) *cobra.Command { + var options updateOptions + + cmd := &cobra.Command{ + Use: "update [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Update configuration of one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.containers = args + options.nFlag = cmd.Flags().NFlag() + return runUpdate(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.Uint16Var(&options.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&options.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) + flags.Int64Var(&options.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) + flags.StringVar(&options.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.VarP(&options.memory, "memory", "m", "Memory limit") + flags.Var(&options.memoryReservation, "memory-reservation", "Memory soft limit") + flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Var(&options.kernelMemory, "kernel-memory", "Kernel memory limit") + flags.StringVar(&options.restartPolicy, "restart", "", "Restart policy to apply when a container exits") + + flags.Var(&options.cpus, "cpus", "Number of CPUs") + flags.SetAnnotation("cpus", "version", []string{"1.29"}) + + return cmd +} + +func runUpdate(dockerCli command.Cli, options *updateOptions) error { + var err error + + if options.nFlag == 0 { + return errors.New("you must provide 
one or more flags when using this command") + } + + var restartPolicy containertypes.RestartPolicy + if options.restartPolicy != "" { + restartPolicy, err = opts.ParseRestartPolicy(options.restartPolicy) + if err != nil { + return err + } + } + + resources := containertypes.Resources{ + BlkioWeight: options.blkioWeight, + CpusetCpus: options.cpusetCpus, + CpusetMems: options.cpusetMems, + CPUShares: options.cpuShares, + Memory: options.memory.Value(), + MemoryReservation: options.memoryReservation.Value(), + MemorySwap: options.memorySwap.Value(), + KernelMemory: options.kernelMemory.Value(), + CPUPeriod: options.cpuPeriod, + CPUQuota: options.cpuQuota, + CPURealtimePeriod: options.cpuRealtimePeriod, + CPURealtimeRuntime: options.cpuRealtimeRuntime, + NanoCPUs: options.cpus.Value(), + } + + updateConfig := containertypes.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + ctx := context.Background() + + var ( + warns []string + errs []string + ) + for _, container := range options.containers { + r, err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintln(dockerCli.Out(), container) + } + warns = append(warns, r.Warnings...) + } + if len(warns) > 0 { + fmt.Fprintln(dockerCli.Out(), strings.Join(warns, "\n")) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/utils.go b/cli/cli/command/container/utils.go new file mode 100644 index 00000000..f3292614 --- /dev/null +++ b/cli/cli/command/container/utils.go @@ -0,0 +1,162 @@ +package container + +import ( + "context" + "strconv" + + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/sirupsen/logrus" +) + +func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int { + if len(containerID) == 0 { + // containerID can never be empty + panic("Internal Error: waitExitOrRemoved needs a containerID as parameter") + } + + // Older versions used the Events API, and even older versions did not + // support server-side removal. This legacyWaitExitOrRemoved method + // preserves that old behavior and any issues it may have. 
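+ // When the negotiated API version is below 1.30, the event-based legacy path
+ // below is used; otherwise ContainerWait with a wait condition handles it server-side.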
+ if versions.LessThan(dockerCli.Client().ClientVersion(), "1.30") { + return legacyWaitExitOrRemoved(ctx, dockerCli, containerID, waitRemove) + } + + condition := container.WaitConditionNextExit + if waitRemove { + condition = container.WaitConditionRemoved + } + + resultC, errC := dockerCli.Client().ContainerWait(ctx, containerID, condition) + + statusC := make(chan int) + go func() { + select { + case result := <-resultC: + if result.Error != nil { + logrus.Errorf("Error waiting for container: %v", result.Error.Message) + statusC <- 125 + } else { + statusC <- int(result.StatusCode) + } + case err := <-errC: + logrus.Errorf("error waiting for container: %v", err) + statusC <- 125 + } + }() + + return statusC +} + +func legacyWaitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int { + var removeErr error + statusChan := make(chan int) + exitCode := 125 + + // Get events via Events API + f := filters.NewArgs() + f.Add("type", "container") + f.Add("container", containerID) + options := types.EventsOptions{ + Filters: f, + } + eventCtx, cancel := context.WithCancel(ctx) + eventq, errq := dockerCli.Client().Events(eventCtx, options) + + eventProcessor := func(e events.Message) bool { + stopProcessing := false + switch e.Status { + case "die": + if v, ok := e.Actor.Attributes["exitCode"]; ok { + code, cerr := strconv.Atoi(v) + if cerr != nil { + logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr) + } else { + exitCode = code + } + } + if !waitRemove { + stopProcessing = true + } else { + // If we are talking to an older daemon, `AutoRemove` is not supported. + // We need to fall back to the old behavior, which is client-side removal + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") { + go func() { + removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true}) + if removeErr != nil { + logrus.Errorf("error removing container: %v", removeErr) + cancel() // cancel the event Q + } + }() + } + } + case "detach": + exitCode = 0 + stopProcessing = true + case "destroy": + stopProcessing = true + } + return stopProcessing + } + + go func() { + defer func() { + statusChan <- exitCode // must always send an exit code or the caller will block + cancel() + }() + + for { + select { + case <-eventCtx.Done(): + if removeErr != nil { + return + } + case evt := <-eventq: + if eventProcessor(evt) { + return + } + case err := <-errq: + logrus.Errorf("error getting events from daemon: %v", err) + return + } + } + }() + + return statusChan +} + +func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error { + if len(containers) == 0 { + return nil + } + const defaultParallel int = 50 + sem := make(chan struct{}, defaultParallel) + errChan := make(chan error) + + // make sure result is printed in correct order + output := map[string]chan error{} + for _, c := range containers { + output[c] = make(chan error, 1) + } + go func() { + for _, c := range containers { + err := <-output[c] + errChan <- err + } + }() + + go func() { + for _, c := range containers { + sem <- struct{}{} // Wait for active queue sem to drain. 
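+ // Concurrency is capped at defaultParallel (50); each goroutine frees its
+ // slot by reading from sem once its operation returns.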
+ go func(container string) { + output[container] <- op(ctx, container) + <-sem + }(c) + } + }() + return errChan +} diff --git a/cli/cli/command/container/utils_test.go b/cli/cli/command/container/utils_test.go new file mode 100644 index 00000000..970549c0 --- /dev/null +++ b/cli/cli/command/container/utils_test.go @@ -0,0 +1,70 @@ +package container + +import ( + "context" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func waitFn(cid string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + var res container.ContainerWaitOKBody + + go func() { + switch { + case strings.Contains(cid, "exit-code-42"): + res.StatusCode = 42 + resC <- res + case strings.Contains(cid, "non-existent"): + err := errors.Errorf("No such container: %v", cid) + errC <- err + case strings.Contains(cid, "wait-error"): + res.Error = &container.ContainerWaitOKBodyError{Message: "removal failed"} + resC <- res + default: + // normal exit + resC <- res + } + }() + + return resC, errC +} + +func TestWaitExitOrRemoved(t *testing.T) { + testcases := []struct { + cid string + exitCode int + }{ + { + cid: "normal-container", + exitCode: 0, + }, + { + cid: "give-me-exit-code-42", + exitCode: 42, + }, + { + cid: "i-want-a-wait-error", + exitCode: 125, + }, + { + cid: "non-existent-container-id", + exitCode: 125, + }, + } + + client := test.NewFakeCli(&fakeClient{waitFunc: waitFn, Version: api.DefaultVersion}) + for _, testcase := range testcases { + statusC := waitExitOrRemoved(context.Background(), client, testcase.cid, true) + exitCode := <-statusC + assert.Check(t, is.Equal(testcase.exitCode, exitCode)) + } +} diff --git a/cli/cli/command/container/wait.go b/cli/cli/command/container/wait.go new file mode 100644 index 00000000..8602e253 --- /dev/null +++ b/cli/cli/command/container/wait.go @@ -0,0 +1,53 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type waitOptions struct { + containers []string +} + +// NewWaitCommand creates a new cobra.Command for `docker wait` +func NewWaitCommand(dockerCli command.Cli) *cobra.Command { + var opts waitOptions + + cmd := &cobra.Command{ + Use: "wait CONTAINER [CONTAINER...]", + Short: "Block until one or more containers stop, then print their exit codes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runWait(dockerCli, &opts) + }, + } + + return cmd +} + +func runWait(dockerCli command.Cli, opts *waitOptions) error { + ctx := context.Background() + + var errs []string + for _, container := range opts.containers { + resultC, errC := dockerCli.Client().ContainerWait(ctx, container, "") + + select { + case result := <-resultC: + fmt.Fprintf(dockerCli.Out(), "%d\n", result.StatusCode) + case err := <-errC: + errs = append(errs, err.Error()) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/events_utils.go b/cli/cli/command/events_utils.go new file mode 100644 index 00000000..16d76892 --- /dev/null +++ b/cli/cli/command/events_utils.go @@ -0,0 +1,47 @@ +package command + +import ( + "sync" + + eventtypes 
"github.com/docker/docker/api/types/events" + "github.com/sirupsen/logrus" +) + +// EventHandler is abstract interface for user to customize +// own handle functions of each type of events +type EventHandler interface { + Handle(action string, h func(eventtypes.Message)) + Watch(c <-chan eventtypes.Message) +} + +// InitEventHandler initializes and returns an EventHandler +func InitEventHandler() EventHandler { + return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} + +func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan. +func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/cli/cli/command/formatter/checkpoint.go b/cli/cli/command/formatter/checkpoint.go new file mode 100644 index 00000000..041fcafb --- /dev/null +++ b/cli/cli/command/formatter/checkpoint.go @@ -0,0 +1,52 @@ +package formatter + +import "github.com/docker/docker/api/types" + +const ( + defaultCheckpointFormat = "table {{.Name}}" + + checkpointNameHeader = "CHECKPOINT NAME" +) + +// NewCheckpointFormat returns a format for use with a checkpoint Context +func NewCheckpointFormat(source string) Format { + switch source { + case TableFormatKey: + return defaultCheckpointFormat + } + return Format(source) +} + +// CheckpointWrite writes formatted checkpoints using the Context +func CheckpointWrite(ctx Context, checkpoints []types.Checkpoint) error { + render := func(format func(subContext subContext) error) error { + for _, checkpoint := range checkpoints { + if err := format(&checkpointContext{c: checkpoint}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newCheckpointContext(), render) +} + +type checkpointContext struct { + HeaderContext + c types.Checkpoint +} + +func newCheckpointContext() *checkpointContext { + cpCtx := checkpointContext{} + cpCtx.header = volumeHeaderContext{ + "Name": checkpointNameHeader, + } + return &cpCtx +} + +func (c *checkpointContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *checkpointContext) Name() string { + return c.c.Name +} diff --git a/cli/cli/command/formatter/checkpoint_test.go b/cli/cli/command/formatter/checkpoint_test.go new file mode 100644 index 00000000..9b8ac5e5 --- /dev/null +++ b/cli/cli/command/formatter/checkpoint_test.go @@ -0,0 +1,52 @@ +package formatter + +import ( + "bytes" + "testing" + + "github.com/docker/docker/api/types" + "gotest.tools/assert" +) + +func TestCheckpointContextFormatWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + { + Context{Format: NewCheckpointFormat(defaultCheckpointFormat)}, + `CHECKPOINT NAME +checkpoint-1 +checkpoint-2 +checkpoint-3 +`, + }, + { + Context{Format: NewCheckpointFormat("{{.Name}}")}, + `checkpoint-1 +checkpoint-2 +checkpoint-3 +`, + }, + { + Context{Format: NewCheckpointFormat("{{.Name}}:")}, + `checkpoint-1: +checkpoint-2: +checkpoint-3: +`, + }, + } + + checkpoints := []types.Checkpoint{ + {Name: "checkpoint-1"}, + {Name: "checkpoint-2"}, + {Name: "checkpoint-3"}, + } 
+ for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + err := CheckpointWrite(testcase.context, checkpoints) + assert.NilError(t, err) + assert.Equal(t, out.String(), testcase.expected) + } +} diff --git a/cli/cli/command/formatter/config.go b/cli/cli/command/formatter/config.go new file mode 100644 index 00000000..72203f35 --- /dev/null +++ b/cli/cli/command/formatter/config.go @@ -0,0 +1,171 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultConfigTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}" + configIDHeader = "ID" + configCreatedHeader = "CREATED" + configUpdatedHeader = "UPDATED" + configInspectPrettyTemplate Format = `ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Created at: {{.CreatedAt}} +Updated at: {{.UpdatedAt}} +Data: +{{.Data}}` +) + +// NewConfigFormat returns a Format for rendering using a config Context +func NewConfigFormat(source string, quiet bool) Format { + switch source { + case PrettyFormatKey: + return configInspectPrettyTemplate + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultConfigTableFormat + } + return Format(source) +} + +// ConfigWrite writes the context +func ConfigWrite(ctx Context, configs []swarm.Config) error { + render := func(format func(subContext subContext) error) error { + for _, config := range configs { + configCtx := &configContext{c: config} + if err := format(configCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(newConfigContext(), render) +} + +func newConfigContext() *configContext { + cCtx := &configContext{} + + cCtx.header = map[string]string{ + "ID": configIDHeader, + "Name": nameHeader, + "CreatedAt": configCreatedHeader, + "UpdatedAt": configUpdatedHeader, + "Labels": labelsHeader, + } + return cCtx +} + +type configContext struct { + HeaderContext + c swarm.Config +} + +func (c *configContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *configContext) ID() string { + return c.c.ID +} + +func (c *configContext) Name() string { + return c.c.Spec.Annotations.Name +} + +func (c *configContext) CreatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.c.Meta.CreatedAt)) + " ago" +} + +func (c *configContext) UpdatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.c.Meta.UpdatedAt)) + " ago" +} + +func (c *configContext) Labels() string { + mapLabels := c.c.Spec.Annotations.Labels + if mapLabels == nil { + return "" + } + var joinLabels []string + for k, v := range mapLabels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *configContext) Label(name string) string { + if c.c.Spec.Annotations.Labels == nil { + return "" + } + return c.c.Spec.Annotations.Labels[name] +} + +// ConfigInspectWrite renders the context for a list of configs +func ConfigInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != configInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + configI, _, err := getRef(ref) + 
if err != nil { + return err + } + config, ok := configI.(swarm.Config) + if !ok { + return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&configInspectContext{Config: config}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&configInspectContext{}, render) +} + +type configInspectContext struct { + swarm.Config + subContext +} + +func (ctx *configInspectContext) ID() string { + return ctx.Config.ID +} + +func (ctx *configInspectContext) Name() string { + return ctx.Config.Spec.Name +} + +func (ctx *configInspectContext) Labels() map[string]string { + return ctx.Config.Spec.Labels +} + +func (ctx *configInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Config.CreatedAt) +} + +func (ctx *configInspectContext) UpdatedAt() string { + return command.PrettyPrint(ctx.Config.UpdatedAt) +} + +func (ctx *configInspectContext) Data() string { + if ctx.Config.Spec.Data == nil { + return "" + } + return string(ctx.Config.Spec.Data) +} diff --git a/cli/cli/command/formatter/config_test.go b/cli/cli/command/formatter/config_test.go new file mode 100644 index 00000000..7cd310c9 --- /dev/null +++ b/cli/cli/command/formatter/config_test.go @@ -0,0 +1,64 @@ +package formatter + +import ( + "bytes" + "testing" + "time" + + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestConfigContextFormatWrite(t *testing.T) { + // Check default output format (verbose and non-verbose mode) for table headers + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + {Context{Format: NewConfigFormat("table", false)}, + `ID NAME CREATED UPDATED +1 passwords Less than a second ago Less than a second ago +2 id_rsa Less than a second ago Less than a second ago +`}, + {Context{Format: NewConfigFormat("table {{.Name}}", true)}, + `NAME +passwords +id_rsa +`}, + {Context{Format: NewConfigFormat("{{.ID}}-{{.Name}}", false)}, + `1-passwords +2-id_rsa +`}, + } + + configs := []swarm.Config{ + {ID: "1", + Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, + Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "passwords"}}}, + {ID: "2", + Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, + Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "id_rsa"}}}, + } + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + if err := ConfigWrite(testcase.context, configs); err != nil { + assert.ErrorContains(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} diff --git a/cli/cli/command/formatter/container.go b/cli/cli/command/formatter/container.go new file mode 100644 index 00000000..028a9f6e --- /dev/null +++ b/cli/cli/command/formatter/container.go @@ -0,0 +1,344 @@ +package formatter + +import ( + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +const ( + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + + containerIDHeader = "CONTAINER 
ID" + namesHeader = "NAMES" + commandHeader = "COMMAND" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + mountsHeader = "MOUNTS" + localVolumes = "LOCAL VOLUMES" + networksHeader = "NETWORKS" +) + +// NewContainerFormat returns a Format for rendering using a Context +func NewContainerFormat(source string, quiet bool, size bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + format := defaultContainerTableFormat + if size { + format += `\t{{.Size}}` + } + return Format(format) + case RawFormatKey: + if quiet { + return `container_id: {{.ID}}` + } + format := `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{- pad .Status 1 0}} +names: {{.Names}} +labels: {{- pad .Labels 1 0}} +ports: {{- pad .Ports 1 0}} +` + if size { + format += `size: {{.Size}}\n` + } + return Format(format) + } + return Format(source) +} + +// ContainerWrite renders the context for a list of containers +func ContainerWrite(ctx Context, containers []types.Container) error { + render := func(format func(subContext subContext) error) error { + for _, container := range containers { + err := format(&containerContext{trunc: ctx.Trunc, c: container}) + if err != nil { + return err + } + } + return nil + } + return ctx.Write(newContainerContext(), render) +} + +type containerHeaderContext map[string]string + +func (c containerHeaderContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + return h +} + +type containerContext struct { + HeaderContext + trunc bool + c types.Container +} + +func newContainerContext() *containerContext { + containerCtx := containerContext{} + containerCtx.header = containerHeaderContext{ + "ID": containerIDHeader, + "Names": namesHeader, + "Image": imageHeader, + "Command": commandHeader, + "CreatedAt": createdAtHeader, + "RunningFor": runningForHeader, + "Ports": portsHeader, + "Status": statusHeader, + "Size": sizeHeader, + "Labels": labelsHeader, + "Mounts": mountsHeader, + "LocalVolumes": localVolumes, + "Networks": networksHeader, + } + return &containerCtx +} + +func (c *containerContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *containerContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + // truncate digest if no-trunc option was not selected + ref, err := reference.ParseNormalizedNamed(c.c.Image) + if err == nil { + if nt, ok := ref.(reference.NamedTagged); ok { + // case for when a tag is provided + if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { + return reference.FamiliarString(namedTagged) + } + } else { + // case for when a tag is not provided + named := reference.TrimNamed(ref) + return reference.FamiliarString(named) + } + } + } + + return c.c.Image +} + +func (c *containerContext) Command() string { + command := c.c.Command + if c.trunc { + command = Ellipsis(command, 20) + } + 
return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + return time.Unix(c.c.Created, 0).String() +} + +func (c *containerContext) RunningFor() string { + createdAt := time.Unix(c.c.Created, 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago" +} + +func (c *containerContext) Ports() string { + return DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + return c.c.Status +} + +func (c *containerContext) Size() string { + srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3) + sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = Ellipsis(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +func (c *containerContext) LocalVolumes() string { + count := 0 + for _, m := range c.c.Mounts { + if m.Driver == "local" { + count++ + } + } + + return fmt.Sprintf("%d", count) +} + +func (c *containerContext) Networks() string { + if c.c.NetworkSettings == nil { + return "" + } + + networks := []string{} + for k := range c.c.NetworkSettings.Networks { + networks = append(networks, k) + } + + return strings.Join(networks, ",") +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first uint16 + last uint16 + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) 
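+ // Consolidated port groups come first in the output; explicit host mappings
+ // (IP:public->private/proto) are appended at the end.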
+ return strings.Join(result, ", ") +} + +func formGroup(key string, start, last uint16) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} diff --git a/cli/cli/command/formatter/container_test.go b/cli/cli/command/formatter/container_test.go new file mode 100644 index 00000000..cafb9abd --- /dev/null +++ b/cli/cli/command/formatter/container_test.go @@ -0,0 +1,658 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestContainerPsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + unix := time.Now().Add(-65 * time.Second).Unix() + + var ctx containerContext + cases := []struct { + container types.Container + trunc bool + expValue string + call func() string + }{ + {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), ctx.ID}, + {types.Container{ID: containerID}, false, containerID, ctx.ID}, + {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", ctx.Names}, + {types.Container{Image: "ubuntu"}, true, "ubuntu", ctx.Image}, + {types.Container{Image: "verylongimagename"}, true, "verylongimagename", ctx.Image}, + {types.Container{Image: "verylongimagename"}, false, "verylongimagename", ctx.Image}, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + true, + "a5a665ff33ec", + ctx.Image, + }, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + false, + "a5a665ff33eced1e0803148700880edab4", + ctx.Image, + }, + {types.Container{Image: ""}, true, "", ctx.Image}, + {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, ctx.Command}, + {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), ctx.CreatedAt}, + {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", ctx.Ports}, + {types.Container{Status: "RUNNING"}, true, "RUNNING", ctx.Status}, + {types.Container{SizeRw: 10}, true, "10B", ctx.Size}, + {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10B (virtual 20B)", ctx.Size}, + {types.Container{}, true, "", ctx.Labels}, + {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", ctx.Labels}, + {types.Container{Created: unix}, true, "About a minute ago", ctx.RunningFor}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: 
"this-is-a-long-volume-name-and-will-be-truncated-if-trunc-is-set", + Driver: "local", + Source: "/a/path", + }, + }, + }, true, "this-is-a-long…", ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "/a/path", ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", ctx.Mounts}, + } + + for _, c := range cases { + ctx = containerContext{c: c.container, trunc: c.trunc} + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } + + c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} + ctx = containerContext{c: c1, trunc: true} + + sid := ctx.Label("com.docker.swarm.swarm-id") + node := ctx.Label("com.docker.swarm.node_name") + if sid != "33" { + t.Fatalf("Expected 33, was %s\n", sid) + } + + if node != "ubuntu" { + t.Fatalf("Expected ubuntu, was %s\n", node) + } + + c2 := types.Container{} + ctx = containerContext{c: c2, trunc: true} + + label := ctx.Label("anything.really") + if label != "" { + t.Fatalf("Expected an empty string, was %s", label) + } +} + +func TestContainerContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + Context{Format: NewContainerFormat("table", false, true)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE +containerID1 ubuntu "" 24 hours ago foobar_baz 0B +containerID2 ubuntu "" 24 hours ago foobar_bar 0B +`, + }, + { + Context{Format: NewContainerFormat("table", false, false)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +containerID1 ubuntu "" 24 hours ago foobar_baz +containerID2 ubuntu "" 24 hours ago foobar_bar +`, + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, true)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", true, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table", true, false)}, + "containerID1\ncontainerID2\n", + }, + // Raw Format + { + Context{Format: NewContainerFormat("raw", false, false)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", false, true)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: +size: 0B + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: 
+size: 0B + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", true, false)}, + "container_id: containerID1\ncontainer_id: containerID2\n", + }, + // Custom Format + { + Context{Format: "{{.Image}}"}, + "ubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("{{.Image}}", false, true)}, + "ubuntu\nubuntu\n", + }, + // Special headers for customized table format + { + Context{Format: NewContainerFormat(`table {{truncate .ID 5}}\t{{json .Image}} {{.RunningFor}}/{{title .Status}}/{{pad .Ports 2 2}}.{{upper .Names}} {{lower .Status}}`, false, true)}, + string(golden.Get(t, "container-context-write-special-headers.golden")), + }, + } + + for _, testcase := range cases { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ContainerWrite(testcase.context, containers) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestContainerContextWriteWithNoContainers(t *testing.T) { + out := bytes.NewBufferString("") + containers := []types.Container{} + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Image}}", + Output: out, + }, + "", + }, + { + Context{ + Format: "table {{.Image}}", + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: NewContainerFormat("{{.Image}}", false, true), + Output: out, + }, + "", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}", false, true), + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: "table {{.Image}}\t{{.Size}}", + Output: out, + }, + "IMAGE SIZE\n", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}\t{{.Size}}", false, true), + Output: out, + }, + "IMAGE SIZE\n", + }, + } + + for _, context := range contexts { + ContainerWrite(context.context, containers) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} + +func TestContainerContextWriteJSON(t *testing.T) { + unix := time.Now().Add(-65 * time.Second).Unix() + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unix}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix}, + } + expectedCreated := time.Unix(unix, 0).String() + expectedJSONs := []map[string]interface{}{ + { + "Command": "\"\"", + "CreatedAt": expectedCreated, + "ID": "containerID1", + "Image": "ubuntu", + "Labels": "", + "LocalVolumes": "0", + "Mounts": "", + "Names": "foobar_baz", + "Networks": "", + "Ports": "", + "RunningFor": "About a minute ago", + "Size": "0B", + "Status": "", + }, + { + "Command": "\"\"", + "CreatedAt": expectedCreated, + "ID": "containerID2", + "Image": "ubuntu", + "Labels": "", + "LocalVolumes": "0", + "Mounts": "", + "Names": "foobar_bar", + "Networks": "", + "Ports": "", + "RunningFor": "About a minute ago", + "Size": "0B", + "Status": "", + }, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), 
&m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg) + } +} + +func TestContainerContextWriteJSONField(t *testing.T) { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"}, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .ID}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(containers[i].ID, s), msg) + } +} + +func TestContainerBackCompat(t *testing.T) { + containers := []types.Container{{ID: "brewhaha"}} + cases := []string{ + "ID", + "Names", + "Image", + "Command", + "CreatedAt", + "RunningFor", + "Ports", + "Status", + "Size", + "Labels", + "Mounts", + } + buf := bytes.NewBuffer(nil) + for _, c := range cases { + ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf} + if err := ContainerWrite(ctx, containers); err != nil { + t.Logf("could not render template for field '%s': %v", c, err) + t.Fail() + } + buf.Reset() + } +} + +type ports struct { + ports []types.Port + expected string +} + +// nolint: lll +func TestDisplayablePorts(t *testing.T) { + cases := []ports{ + { + []types.Port{ + { + PrivatePort: 9988, + Type: "tcp", + }, + }, + "9988/tcp"}, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp", + }, + { + []types.Port{ + { + IP: "0.0.0.0", + PrivatePort: 9988, + Type: "tcp", + }, + }, + "0.0.0.0:0->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "4.3.2.1:8899->9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 9988, + Type: "tcp", + }, + }, + "4.3.2.1:9988->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp, 9988/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 9998, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 9999, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:9998-9999->9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 8887, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 8888, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", + }, + { + []types.Port{ + { + PrivatePort: 9998, + Type: "udp", + }, { + PrivatePort: 9999, + Type: "udp", + }, + }, + "9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, + }, + "9988/udp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + 
IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + PrivatePort: 80, + Type: "tcp", + }, { + PrivatePort: 1024, + Type: "tcp", + }, { + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 12345, + Type: "sctp", + }, + }, + "80/tcp, 80/udp, 1024/tcp, 1024/udp, 12345/sctp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", + }, + } + + for _, port := range cases { + actual := DisplayablePorts(port.ports) + assert.Check(t, is.Equal(port.expected, actual)) + } +} diff --git a/cli/cli/command/formatter/custom.go b/cli/cli/command/formatter/custom.go new file mode 100644 index 00000000..73487f63 --- /dev/null +++ b/cli/cli/command/formatter/custom.go @@ -0,0 +1,35 @@ +package formatter + +const ( + imageHeader = "IMAGE" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + nameHeader = "NAME" + driverHeader = "DRIVER" + scopeHeader = "SCOPE" +) + +type subContext interface { + FullHeader() interface{} +} + +// HeaderContext provides the subContext interface for managing headers +type HeaderContext struct { + header interface{} +} + +// FullHeader returns the header as an interface +func (c *HeaderContext) FullHeader() interface{} { + return c.header +} + +func stripNamePrefix(ss []string) []string { + sss := make([]string, len(ss)) + for i, s := range ss { + sss[i] = s[1:] + } + + return sss +} diff --git a/cli/cli/command/formatter/custom_test.go b/cli/cli/command/formatter/custom_test.go new file mode 100644 index 00000000..6a4bfec7 --- /dev/null +++ b/cli/cli/command/formatter/custom_test.go @@ -0,0 +1,28 @@ +package formatter + +import ( + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func compareMultipleValues(t *testing.T, value, expected string) { + // comma-separated values means probably a map input, which won't + // be guaranteed to have the same order as our expected value + // We'll create maps and use reflect.DeepEquals to check instead: + entriesMap := make(map[string]string) + expMap := make(map[string]string) + entries := strings.Split(value, ",") + expectedEntries := strings.Split(expected, ",") + for _, entry := range entries { + keyval := strings.Split(entry, "=") + entriesMap[keyval[0]] = keyval[1] + } + for _, expected := range expectedEntries { + keyval := strings.Split(expected, "=") + expMap[keyval[0]] = keyval[1] + } + assert.Check(t, is.DeepEqual(expMap, entriesMap)) +} diff --git a/cli/cli/command/formatter/diff.go b/cli/cli/command/formatter/diff.go new file mode 100644 index 
00000000..9b468193 --- /dev/null +++ b/cli/cli/command/formatter/diff.go @@ -0,0 +1,72 @@ +package formatter + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/archive" +) + +const ( + defaultDiffTableFormat = "table {{.Type}}\t{{.Path}}" + + changeTypeHeader = "CHANGE TYPE" + pathHeader = "PATH" +) + +// NewDiffFormat returns a format for use with a diff Context +func NewDiffFormat(source string) Format { + switch source { + case TableFormatKey: + return defaultDiffTableFormat + } + return Format(source) +} + +// DiffWrite writes formatted diff using the Context +func DiffWrite(ctx Context, changes []container.ContainerChangeResponseItem) error { + + render := func(format func(subContext subContext) error) error { + for _, change := range changes { + if err := format(&diffContext{c: change}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newDiffContext(), render) +} + +type diffContext struct { + HeaderContext + c container.ContainerChangeResponseItem +} + +func newDiffContext() *diffContext { + diffCtx := diffContext{} + diffCtx.header = map[string]string{ + "Type": changeTypeHeader, + "Path": pathHeader, + } + return &diffCtx +} + +func (d *diffContext) MarshalJSON() ([]byte, error) { + return marshalJSON(d) +} + +func (d *diffContext) Type() string { + var kind string + switch d.c.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + return kind + +} + +func (d *diffContext) Path() string { + return d.c.Path +} diff --git a/cli/cli/command/formatter/diff_test.go b/cli/cli/command/formatter/diff_test.go new file mode 100644 index 00000000..aad59450 --- /dev/null +++ b/cli/cli/command/formatter/diff_test.go @@ -0,0 +1,60 @@ +package formatter + +import ( + "bytes" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/archive" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDiffContextFormatWrite(t *testing.T) { + // Check default output format (verbose and non-verbose mode) for table headers + cases := []struct { + context Context + expected string + }{ + { + Context{Format: NewDiffFormat("table")}, + `CHANGE TYPE PATH +C /var/log/app.log +A /usr/app/app.js +D /usr/app/old_app.js +`, + }, + { + Context{Format: NewDiffFormat("table {{.Path}}")}, + `PATH +/var/log/app.log +/usr/app/app.js +/usr/app/old_app.js +`, + }, + { + Context{Format: NewDiffFormat("{{.Type}}: {{.Path}}")}, + `C: /var/log/app.log +A: /usr/app/app.js +D: /usr/app/old_app.js +`, + }, + } + + diffs := []container.ContainerChangeResponseItem{ + {Kind: archive.ChangeModify, Path: "/var/log/app.log"}, + {Kind: archive.ChangeAdd, Path: "/usr/app/app.js"}, + {Kind: archive.ChangeDelete, Path: "/usr/app/old_app.js"}, + } + + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + err := DiffWrite(testcase.context, diffs) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} diff --git a/cli/cli/command/formatter/disk_usage.go b/cli/cli/command/formatter/disk_usage.go new file mode 100644 index 00000000..d6389a14 --- /dev/null +++ b/cli/cli/command/formatter/disk_usage.go @@ -0,0 +1,425 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" 
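+ // go-units provides the human-readable size formatting (units.HumanSize)
+ // used by the Size and Reclaimable helpers below.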
+) + +const ( + defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" + defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}" + defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" + defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" + defaultBuildCacheVerboseFormat = ` +ID: {{.ID}} +Description: {{.Description}} +Mutable: {{.Mutable}} +Size: {{.Size}} +CreatedAt: {{.CreatedAt}} +LastUsedAt: {{.LastUsedAt}} +UsageCount: {{.UsageCount}} +` + + typeHeader = "TYPE" + totalHeader = "TOTAL" + activeHeader = "ACTIVE" + reclaimableHeader = "RECLAIMABLE" + containersHeader = "CONTAINERS" + sharedSizeHeader = "SHARED SIZE" + uniqueSizeHeader = "UNIQUE SiZE" +) + +// DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct. +type DiskUsageContext struct { + Context + Verbose bool + LayersSize int64 + Images []*types.ImageSummary + Containers []*types.Container + Volumes []*types.Volume + BuildCache []*types.BuildCache + BuilderSize int64 +} + +func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { + ctx.buffer = bytes.NewBufferString("") + ctx.header = "" + ctx.Format = Format(format) + ctx.preFormat() + + return ctx.parseFormat() +} + +// +// NewDiskUsageFormat returns a format for rendering an DiskUsageContext +func NewDiskUsageFormat(source string) Format { + switch source { + case TableFormatKey: + format := defaultDiskUsageTableFormat + return Format(format) + case RawFormatKey: + format := `type: {{.Type}} +total: {{.TotalCount}} +active: {{.Active}} +size: {{.Size}} +reclaimable: {{.Reclaimable}} +` + return Format(format) + } + return Format(source) +} + +func (ctx *DiskUsageContext) Write() (err error) { + if ctx.Verbose { + return ctx.verboseWrite() + } + ctx.buffer = bytes.NewBufferString("") + ctx.preFormat() + + tmpl, err := ctx.parseFormat() + if err != nil { + return err + } + + err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ + totalSize: ctx.LayersSize, + images: ctx.Images, + }) + if err != nil { + return err + } + err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ + containers: ctx.Containers, + }) + if err != nil { + return err + } + + err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ + volumes: ctx.Volumes, + }) + if err != nil { + return err + } + + err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{ + builderSize: ctx.BuilderSize, + buildCache: ctx.BuildCache, + }) + if err != nil { + return err + } + + diskUsageContainersCtx := diskUsageContainersContext{containers: []*types.Container{}} + diskUsageContainersCtx.header = map[string]string{ + "Type": typeHeader, + "TotalCount": totalHeader, + "Active": activeHeader, + "Size": sizeHeader, + "Reclaimable": reclaimableHeader, + } + ctx.postFormat(tmpl, &diskUsageContainersCtx) + + return err +} + +func (ctx *DiskUsageContext) verboseWrite() error { + // First images + tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat) + if err != nil { + return err + } + + ctx.Output.Write([]byte("Images space usage:\n\n")) + for _, i := range ctx.Images { + repo := "" + tag := "" + if len(i.RepoTags) > 0 && !isDangling(*i) { + // Only show the first tag + ref, err := reference.ParseNormalizedNamed(i.RepoTags[0]) + if err 
!= nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + repo = reference.FamiliarName(ref) + tag = nt.Tag() + } + } + + err := ctx.contextFormat(tmpl, &imageContext{ + repo: repo, + tag: tag, + trunc: true, + i: *i, + }) + if err != nil { + return err + } + } + ctx.postFormat(tmpl, newImageContext()) + + // Now containers + ctx.Output.Write([]byte("\nContainers space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat) + if err != nil { + return err + } + for _, c := range ctx.Containers { + // Don't display the virtual size + c.SizeRootFs = 0 + err := ctx.contextFormat(tmpl, &containerContext{trunc: true, c: *c}) + if err != nil { + return err + } + } + ctx.postFormat(tmpl, newContainerContext()) + + // And volumes + ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat) + if err != nil { + return err + } + for _, v := range ctx.Volumes { + if err := ctx.contextFormat(tmpl, &volumeContext{v: *v}); err != nil { + return err + } + } + ctx.postFormat(tmpl, newVolumeContext()) + + // And build cache + fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize))) + + t := template.Must(template.New("buildcache").Parse(defaultBuildCacheVerboseFormat)) + + for _, v := range ctx.BuildCache { + t.Execute(ctx.Output, *v) + } + + return nil +} + +type diskUsageImagesContext struct { + HeaderContext + totalSize int64 + images []*types.ImageSummary +} + +func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *diskUsageImagesContext) Type() string { + return "Images" +} + +func (c *diskUsageImagesContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.images)) +} + +func (c *diskUsageImagesContext) Active() string { + used := 0 + for _, i := range c.images { + if i.Containers > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageImagesContext) Size() string { + return units.HumanSize(float64(c.totalSize)) + +} + +func (c *diskUsageImagesContext) Reclaimable() string { + var used int64 + + for _, i := range c.images { + if i.Containers != 0 { + if i.VirtualSize == -1 || i.SharedSize == -1 { + continue + } + used += i.VirtualSize - i.SharedSize + } + } + + reclaimable := c.totalSize - used + if c.totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) + } + return units.HumanSize(float64(reclaimable)) +} + +type diskUsageContainersContext struct { + HeaderContext + containers []*types.Container +} + +func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *diskUsageContainersContext) Type() string { + return "Containers" +} + +func (c *diskUsageContainersContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.containers)) +} + +func (c *diskUsageContainersContext) isActive(container types.Container) bool { + return strings.Contains(container.State, "running") || + strings.Contains(container.State, "paused") || + strings.Contains(container.State, "restarting") +} + +func (c *diskUsageContainersContext) Active() string { + used := 0 + for _, container := range c.containers { + if c.isActive(*container) { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageContainersContext) Size() string { + var size int64 + + for _, container := range c.containers { + size += container.SizeRw + } + + return 
units.HumanSize(float64(size)) +} + +func (c *diskUsageContainersContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + for _, container := range c.containers { + if !c.isActive(*container) { + reclaimable += container.SizeRw + } + totalSize += container.SizeRw + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return units.HumanSize(float64(reclaimable)) +} + +type diskUsageVolumesContext struct { + HeaderContext + volumes []*types.Volume +} + +func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *diskUsageVolumesContext) Type() string { + return "Local Volumes" +} + +func (c *diskUsageVolumesContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.volumes)) +} + +func (c *diskUsageVolumesContext) Active() string { + + used := 0 + for _, v := range c.volumes { + if v.UsageData.RefCount > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageVolumesContext) Size() string { + var size int64 + + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + size += v.UsageData.Size + } + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageVolumesContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + if v.UsageData.RefCount == 0 { + reclaimable += v.UsageData.Size + } + totalSize += v.UsageData.Size + } + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return units.HumanSize(float64(reclaimable)) +} + +type diskUsageBuilderContext struct { + HeaderContext + builderSize int64 + buildCache []*types.BuildCache +} + +func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *diskUsageBuilderContext) Type() string { + return "Build Cache" +} + +func (c *diskUsageBuilderContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.buildCache)) +} + +func (c *diskUsageBuilderContext) Active() string { + numActive := 0 + for _, bc := range c.buildCache { + if bc.InUse { + numActive++ + } + } + return fmt.Sprintf("%d", numActive) +} + +func (c *diskUsageBuilderContext) Size() string { + return units.HumanSize(float64(c.builderSize)) +} + +func (c *diskUsageBuilderContext) Reclaimable() string { + var inUseBytes int64 + for _, bc := range c.buildCache { + if bc.InUse { + inUseBytes += bc.Size + } + } + + return units.HumanSize(float64(c.builderSize - inUseBytes)) +} diff --git a/cli/cli/command/formatter/disk_usage_test.go b/cli/cli/command/formatter/disk_usage_test.go new file mode 100644 index 00000000..878aef05 --- /dev/null +++ b/cli/cli/command/formatter/disk_usage_test.go @@ -0,0 +1,110 @@ +package formatter + +import ( + "bytes" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestDiskUsageContextFormatWrite(t *testing.T) { + cases := []struct { + context DiskUsageContext + expected string + }{ + // Check default output format (verbose and non-verbose mode) for table headers + { + DiskUsageContext{ + Context: Context{ + Format: NewDiskUsageFormat("table"), + }, + Verbose: false}, + `TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 0 0 0B 0B +Containers 0 0 0B 0B +Local Volumes 0 0 0B 0B +Build Cache 0 0 0B 0B +`, + }, + { + DiskUsageContext{Verbose: true}, + `Images space usage: + +REPOSITORY TAG IMAGE ID CREATED ago 
SIZE SHARED SIZE UNIQUE SiZE CONTAINERS + +Containers space usage: + +CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED ago STATUS NAMES + +Local Volumes space usage: + +VOLUME NAME LINKS SIZE + +Build cache usage: 0B + +`, + }, + // Errors + { + DiskUsageContext{ + Context: Context{ + Format: "{{InvalidFunction}}", + }, + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + DiskUsageContext{ + Context: Context{ + Format: "{{nil}}", + }, + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + DiskUsageContext{ + Context: Context{ + Format: NewDiskUsageFormat("table"), + }, + }, + `TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 0 0 0B 0B +Containers 0 0 0B 0B +Local Volumes 0 0 0B 0B +Build Cache 0 0 0B 0B +`, + }, + { + DiskUsageContext{ + Context: Context{ + Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}"), + }, + }, + string(golden.Get(t, "disk-usage-context-write-custom.golden")), + }, + // Raw Format + { + DiskUsageContext{ + Context: Context{ + Format: NewDiskUsageFormat("raw"), + }, + }, + string(golden.Get(t, "disk-usage-raw-format.golden")), + }, + } + + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + if err := testcase.context.Write(); err != nil { + assert.Check(t, is.Equal(testcase.expected, err.Error())) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} diff --git a/cli/cli/command/formatter/displayutils.go b/cli/cli/command/formatter/displayutils.go new file mode 100644 index 00000000..0c3b6ebb --- /dev/null +++ b/cli/cli/command/formatter/displayutils.go @@ -0,0 +1,61 @@ +package formatter + +import ( + "unicode/utf8" + + "golang.org/x/text/width" +) + +// charWidth returns the number of horizontal positions a character occupies, +// and is used to account for wide characters when displaying strings. +// +// In a broad sense, wide characters include East Asian Wide, East Asian Full-width, +// (when not in East Asian context) see http://unicode.org/reports/tr11/. +func charWidth(r rune) int { + switch width.LookupRune(r).Kind() { + case width.EastAsianWide, width.EastAsianFullwidth: + return 2 + default: + return 1 + } +} + +// Ellipsis truncates a string to fit within maxDisplayWidth, and appends ellipsis (…). +// For maxDisplayWidth of 1 and lower, no ellipsis is appended. +// For maxDisplayWidth of 1, first char of string will return even if its width > 1. 
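+// Illustrative calls, mirroring the cases in displayutils_test.go:
+//
+//	Ellipsis("t🐳ststring", 2)        // "t…"
+//	Ellipsis("你好世界teststring", 6)  // "你好…"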
+func Ellipsis(s string, maxDisplayWidth int) string { + if maxDisplayWidth <= 0 { + return "" + } + rs := []rune(s) + if maxDisplayWidth == 1 { + return string(rs[0]) + } + + byteLen := len(s) + if byteLen == utf8.RuneCountInString(s) { + if byteLen <= maxDisplayWidth { + return s + } + return string(rs[:maxDisplayWidth-1]) + "…" + } + + var ( + display []int + displayWidth int + ) + for _, r := range rs { + cw := charWidth(r) + displayWidth += cw + display = append(display, displayWidth) + } + if displayWidth <= maxDisplayWidth { + return s + } + for i := range display { + if display[i] <= maxDisplayWidth-1 && display[i+1] > maxDisplayWidth-1 { + return string(rs[:i+1]) + "…" + } + } + return s +} diff --git a/cli/cli/command/formatter/displayutils_test.go b/cli/cli/command/formatter/displayutils_test.go new file mode 100644 index 00000000..db60610b --- /dev/null +++ b/cli/cli/command/formatter/displayutils_test.go @@ -0,0 +1,31 @@ +package formatter + +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestEllipsis(t *testing.T) { + var testcases = []struct { + source string + width int + expected string + }{ + {source: "t🐳ststring", width: 0, expected: ""}, + {source: "t🐳ststring", width: 1, expected: "t"}, + {source: "t🐳ststring", width: 2, expected: "t…"}, + {source: "t🐳ststring", width: 6, expected: "t🐳st…"}, + {source: "t🐳ststring", width: 20, expected: "t🐳ststring"}, + {source: "你好世界teststring", width: 0, expected: ""}, + {source: "你好世界teststring", width: 1, expected: "ä½ "}, + {source: "你好世界teststring", width: 3, expected: "你…"}, + {source: "你好世界teststring", width: 6, expected: "你好…"}, + {source: "你好世界teststring", width: 20, expected: "你好世界teststring"}, + } + + for _, testcase := range testcases { + assert.Check(t, is.Equal(testcase.expected, Ellipsis(testcase.source, testcase.width))) + } +} diff --git a/cli/cli/command/formatter/formatter.go b/cli/cli/command/formatter/formatter.go new file mode 100644 index 00000000..c63e1a49 --- /dev/null +++ b/cli/cli/command/formatter/formatter.go @@ -0,0 +1,119 @@ +package formatter + +import ( + "bytes" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/cli/templates" + "github.com/pkg/errors" +) + +// Format keys used to specify certain kinds of output formats +const ( + TableFormatKey = "table" + RawFormatKey = "raw" + PrettyFormatKey = "pretty" + + defaultQuietFormat = "{{.ID}}" +) + +// Format is the format string rendered using the Context +type Format string + +// IsTable returns true if the format is a table-type format +func (f Format) IsTable() bool { + return strings.HasPrefix(string(f), TableFormatKey) +} + +// Contains returns true if the format contains the substring +func (f Format) Contains(sub string) bool { + return strings.Contains(string(f), sub) +} + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format Format + // Trunc when set to true will truncate the output of certain fields such as Container ID. 
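+ // Truncation itself is implemented by the individual subContexts
+ // (for example, containerContext.ID shortens IDs with stringid.TruncateID).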
+ Trunc bool + + // internal element + finalFormat string + header interface{} + buffer *bytes.Buffer +} + +func (c *Context) preFormat() { + c.finalFormat = string(c.Format) + + // TODO: handle this in the Format type + if c.Format.IsTable() { + c.finalFormat = c.finalFormat[len(TableFormatKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + return tmpl, errors.Errorf("Template parsing error: %v\n", err) + } + return tmpl, err +} + +func (c *Context) postFormat(tmpl *template.Template, subContext subContext) { + if c.Format.IsTable() { + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + buffer := bytes.NewBufferString("") + tmpl.Funcs(templates.HeaderFunctions).Execute(buffer, subContext.FullHeader()) + buffer.WriteTo(t) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + return errors.Errorf("Template parsing error: %v\n", err) + } + if c.Format.IsTable() && c.header != nil { + c.header = subContext.FullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// SubFormat is a function type accepted by Write() +type SubFormat func(func(subContext) error) error + +// Write the template to the buffer using this Context +func (c *Context) Write(sub subContext, f SubFormat) error { + c.buffer = bytes.NewBufferString("") + c.preFormat() + + tmpl, err := c.parseFormat() + if err != nil { + return err + } + + subFormat := func(subContext subContext) error { + return c.contextFormat(tmpl, subContext) + } + if err := f(subFormat); err != nil { + return err + } + + c.postFormat(tmpl, sub) + return nil +} diff --git a/cli/cli/command/formatter/history.go b/cli/cli/command/formatter/history.go new file mode 100644 index 00000000..6ad0a605 --- /dev/null +++ b/cli/cli/command/formatter/history.go @@ -0,0 +1,109 @@ +package formatter + +import ( + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" +) + +const ( + defaultHistoryTableFormat = "table {{.ID}}\t{{.CreatedSince}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}" + nonHumanHistoryTableFormat = "table {{.ID}}\t{{.CreatedAt}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}" + + historyIDHeader = "IMAGE" + createdByHeader = "CREATED BY" + commentHeader = "COMMENT" +) + +// NewHistoryFormat returns a format for rendering an HistoryContext +func NewHistoryFormat(source string, quiet bool, human bool) Format { + switch source { + case TableFormatKey: + switch { + case quiet: + return defaultQuietFormat + case !human: + return nonHumanHistoryTableFormat + default: + return defaultHistoryTableFormat + } + } + + return Format(source) +} + +// HistoryWrite writes the context +func HistoryWrite(ctx Context, human bool, histories []image.HistoryResponseItem) error { + render := func(format func(subContext subContext) error) error { + for _, history := range histories { + historyCtx := &historyContext{trunc: ctx.Trunc, h: history, human: human} + if err := format(historyCtx); err != nil { + return err + } + } + return nil + } + historyCtx := &historyContext{} + historyCtx.header = map[string]string{ + "ID": 
historyIDHeader, + "CreatedSince": createdSinceHeader, + "CreatedAt": createdAtHeader, + "CreatedBy": createdByHeader, + "Size": sizeHeader, + "Comment": commentHeader, + } + return ctx.Write(historyCtx, render) +} + +type historyContext struct { + HeaderContext + trunc bool + human bool + h image.HistoryResponseItem +} + +func (c *historyContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *historyContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.h.ID) + } + return c.h.ID +} + +func (c *historyContext) CreatedAt() string { + return time.Unix(c.h.Created, 0).Format(time.RFC3339) +} + +func (c *historyContext) CreatedSince() string { + if !c.human { + return c.CreatedAt() + } + created := units.HumanDuration(time.Now().UTC().Sub(time.Unix(c.h.Created, 0))) + return created + " ago" +} + +func (c *historyContext) CreatedBy() string { + createdBy := strings.Replace(c.h.CreatedBy, "\t", " ", -1) + if c.trunc { + return Ellipsis(createdBy, 45) + } + return createdBy +} + +func (c *historyContext) Size() string { + if c.human { + return units.HumanSizeWithPrecision(float64(c.h.Size), 3) + } + return strconv.FormatInt(c.h.Size, 10) +} + +func (c *historyContext) Comment() string { + return c.h.Comment +} diff --git a/cli/cli/command/formatter/history_test.go b/cli/cli/command/formatter/history_test.go new file mode 100644 index 00000000..7dfc146b --- /dev/null +++ b/cli/cli/command/formatter/history_test.go @@ -0,0 +1,226 @@ +package formatter + +import ( + "bytes" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +type historyCase struct { + historyCtx historyContext + expValue string + call func() string +} + +func TestHistoryContext_ID(t *testing.T) { + id := stringid.GenerateRandomID() + + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{ID: id}, + trunc: false, + }, id, ctx.ID, + }, + { + historyContext{ + h: image.HistoryResponseItem{ID: id}, + trunc: true, + }, stringid.TruncateID(id), ctx.ID, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_CreatedSince(t *testing.T) { + dateStr := "2009-11-10T23:00:00Z" + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{Created: time.Now().AddDate(0, 0, -7).Unix()}, + trunc: false, + human: true, + }, "7 days ago", ctx.CreatedSince, + }, + { + historyContext{ + h: image.HistoryResponseItem{Created: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()}, + trunc: false, + human: false, + }, dateStr, ctx.CreatedSince, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_CreatedBy(t *testing.T) { + withTabs := `/bin/sh -c apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 && echo "deb http://nginx.org/packages/mainline/debian/ jessie nginx" >> /etc/apt/sources.list && apt-get update && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates 
nginx=${NGINX_VERSION} nginx-module-xslt nginx-module-geoip nginx-module-image-filter nginx-module-perl nginx-module-njs gettext-base && rm -rf /var/lib/apt/lists/*` // nolint: lll + expected := `/bin/sh -c apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 && echo "deb http://nginx.org/packages/mainline/debian/ jessie nginx" >> /etc/apt/sources.list && apt-get update && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates nginx=${NGINX_VERSION} nginx-module-xslt nginx-module-geoip nginx-module-image-filter nginx-module-perl nginx-module-njs gettext-base && rm -rf /var/lib/apt/lists/*` // nolint: lll + + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{CreatedBy: withTabs}, + trunc: false, + }, expected, ctx.CreatedBy, + }, + { + historyContext{ + h: image.HistoryResponseItem{CreatedBy: withTabs}, + trunc: true, + }, Ellipsis(expected, 45), ctx.CreatedBy, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_Size(t *testing.T) { + size := int64(182964289) + expected := "183MB" + + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{Size: size}, + trunc: false, + human: true, + }, expected, ctx.Size, + }, { + historyContext{ + h: image.HistoryResponseItem{Size: size}, + trunc: false, + human: false, + }, strconv.Itoa(182964289), ctx.Size, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_Comment(t *testing.T) { + comment := "Some comment" + + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{Comment: comment}, + trunc: false, + }, comment, ctx.Comment, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_Table(t *testing.T) { + out := bytes.NewBufferString("") + unixTime := time.Now().AddDate(0, 0, -1).Unix() + histories := []image.HistoryResponseItem{ + { + ID: "imageID1", + Created: unixTime, + CreatedBy: "/bin/bash ls && npm i && npm run test && karma -c karma.conf.js start && npm start && more commands here && the list goes on", + Size: int64(182964289), + Comment: "Hi", + Tags: []string{"image:tag2"}, + }, + {ID: "imageID2", Created: unixTime, CreatedBy: "/bin/bash echo", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}}, + {ID: "imageID3", Created: unixTime, CreatedBy: "/bin/bash ls", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}}, + {ID: "imageID4", Created: unixTime, CreatedBy: "/bin/bash grep", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}}, + } + // nolint: lll + expectedNoTrunc := `IMAGE CREATED CREATED BY SIZE COMMENT +imageID1 24 hours ago /bin/bash ls && npm i && npm run test && karma -c karma.conf.js start && npm start && more commands here && the list goes on 183MB Hi +imageID2 24 hours ago /bin/bash echo 183MB Hi +imageID3 24 hours ago /bin/bash ls 
183MB Hi +imageID4 24 hours ago /bin/bash grep 183MB Hi +` + expectedTrunc := `IMAGE CREATED CREATED BY SIZE COMMENT +imageID1 24 hours ago /bin/bash ls && npm i && npm run test && kar… 183MB Hi +imageID2 24 hours ago /bin/bash echo 183MB Hi +imageID3 24 hours ago /bin/bash ls 183MB Hi +imageID4 24 hours ago /bin/bash grep 183MB Hi +` + + contexts := []struct { + context Context + expected string + }{ + {Context{ + Format: NewHistoryFormat("table", false, true), + Trunc: true, + Output: out, + }, + expectedTrunc, + }, + {Context{ + Format: NewHistoryFormat("table", false, true), + Trunc: false, + Output: out, + }, + expectedNoTrunc, + }, + } + + for _, context := range contexts { + HistoryWrite(context.context, true, histories) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} diff --git a/cli/cli/command/formatter/image.go b/cli/cli/command/formatter/image.go new file mode 100644 index 00000000..e94785ef --- /dev/null +++ b/cli/cli/command/formatter/image.go @@ -0,0 +1,272 @@ +package formatter + +import ( + "fmt" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" +) + +const ( + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}" + + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" +) + +// ImageContext contains image specific information required by the formatter, encapsulate a Context struct. +type ImageContext struct { + Context + Digest bool +} + +func isDangling(image types.ImageSummary) bool { + return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" +} + +// NewImageFormat returns a format for rendering an ImageContext +func NewImageFormat(source string, quiet bool, digest bool) Format { + switch source { + case TableFormatKey: + switch { + case quiet: + return defaultQuietFormat + case digest: + return defaultImageTableFormatWithDigest + default: + return defaultImageTableFormat + } + case RawFormatKey: + switch { + case quiet: + return `image_id: {{.ID}}` + case digest: + return `repository: {{ .Repository }} +tag: {{.Tag}} +digest: {{.Digest}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + default: + return `repository: {{ .Repository }} +tag: {{.Tag}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } + } + + format := Format(source) + if format.IsTable() && digest && !format.Contains("{{.Digest}}") { + format += "\t{{.Digest}}" + } + return format +} + +// ImageWrite writes the formatter images using the ImageContext +func ImageWrite(ctx ImageContext, images []types.ImageSummary) error { + render := func(format func(subContext subContext) error) error { + return imageFormat(ctx, images, format) + } + return ctx.Write(newImageContext(), render) +} + +// needDigest determines whether the image digest should be ignored or not when writing image context +func needDigest(ctx ImageContext) bool { + return ctx.Digest || ctx.Format.Contains("{{.Digest}}") +} + +func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext subContext) error) error { + for _, image := range images { + formatted := []*imageContext{} + if isDangling(image) { + 
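+ // Dangling images (single ":" RepoTag and "@" RepoDigest) get one row
+ // with empty repository, tag and digest columns.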
formatted = append(formatted, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: "", + tag: "", + digest: "", + }) + } else { + formatted = imageFormatTaggedAndDigest(ctx, image) + } + for _, imageCtx := range formatted { + if err := format(imageCtx); err != nil { + return err + } + } + } + return nil +} + +func imageFormatTaggedAndDigest(ctx ImageContext, image types.ImageSummary) []*imageContext { + repoTags := map[string][]string{} + repoDigests := map[string][]string{} + images := []*imageContext{} + + for _, refString := range image.RepoTags { + ref, err := reference.ParseNormalizedNamed(refString) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + familiarRef := reference.FamiliarName(ref) + repoTags[familiarRef] = append(repoTags[familiarRef], nt.Tag()) + } + } + for _, refString := range image.RepoDigests { + ref, err := reference.ParseNormalizedNamed(refString) + if err != nil { + continue + } + if c, ok := ref.(reference.Canonical); ok { + familiarRef := reference.FamiliarName(ref) + repoDigests[familiarRef] = append(repoDigests[familiarRef], c.Digest().String()) + } + } + + addImage := func(repo, tag, digest string) { + image := &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: digest, + } + images = append(images, image) + } + + for repo, tags := range repoTags { + digests := repoDigests[repo] + + // Do not display digests as their own row + delete(repoDigests, repo) + + if !needDigest(ctx) { + // Ignore digest references, just show tag once + digests = nil + } + + for _, tag := range tags { + if len(digests) == 0 { + addImage(repo, tag, "") + continue + } + // Display the digests for each tag + for _, dgst := range digests { + addImage(repo, tag, dgst) + } + + } + } + + // Show rows for remaining digest only references + for repo, digests := range repoDigests { + // If digests are displayed, show row per digest + if ctx.Digest { + for _, dgst := range digests { + addImage(repo, "", dgst) + } + } else { + addImage(repo, "", "") + + } + } + return images +} + +type imageContext struct { + HeaderContext + trunc bool + i types.ImageSummary + repo string + tag string + digest string +} + +func newImageContext() *imageContext { + imageCtx := imageContext{} + imageCtx.header = map[string]string{ + "ID": imageIDHeader, + "Repository": repositoryHeader, + "Tag": tagHeader, + "Digest": digestHeader, + "CreatedSince": createdSinceHeader, + "CreatedAt": createdAtHeader, + "Size": sizeHeader, + "Containers": containersHeader, + "VirtualSize": sizeHeader, + "SharedSize": sharedSizeHeader, + "UniqueSize": uniqueSizeHeader, + } + return &imageCtx +} + +func (c *imageContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *imageContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + return c.repo +} + +func (c *imageContext) Tag() string { + return c.tag +} + +func (c *imageContext) Digest() string { + return c.digest +} + +func (c *imageContext) CreatedSince() string { + createdAt := time.Unix(c.i.Created, 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago" +} + +func (c *imageContext) CreatedAt() string { + return time.Unix(c.i.Created, 0).String() +} + +func (c *imageContext) Size() string { + return units.HumanSizeWithPrecision(float64(c.i.Size), 3) +} + +func (c *imageContext) Containers() string { + if c.i.Containers == -1 { + return "N/A" + } + return fmt.Sprintf("%d", 
c.i.Containers) +} + +func (c *imageContext) VirtualSize() string { + return units.HumanSize(float64(c.i.VirtualSize)) +} + +func (c *imageContext) SharedSize() string { + if c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.SharedSize)) +} + +func (c *imageContext) UniqueSize() string { + if c.i.VirtualSize == -1 || c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize)) +} diff --git a/cli/cli/command/formatter/image_test.go b/cli/cli/command/formatter/image_test.go new file mode 100644 index 00000000..7efad0a7 --- /dev/null +++ b/cli/cli/command/formatter/image_test.go @@ -0,0 +1,356 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestImageContext(t *testing.T) { + imageID := stringid.GenerateRandomID() + unix := time.Now().Unix() + + var ctx imageContext + cases := []struct { + imageCtx imageContext + expValue string + call func() string + }{ + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: true, + }, stringid.TruncateID(imageID), ctx.ID}, + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: false, + }, imageID, ctx.ID}, + {imageContext{ + i: types.ImageSummary{Size: 10, VirtualSize: 10}, + trunc: true, + }, "10B", ctx.Size}, + {imageContext{ + i: types.ImageSummary{Created: unix}, + trunc: true, + }, time.Unix(unix, 0).String(), ctx.CreatedAt}, + // FIXME + // {imageContext{ + // i: types.ImageSummary{Created: unix}, + // trunc: true, + // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, + {imageContext{ + i: types.ImageSummary{}, + repo: "busybox", + }, "busybox", ctx.Repository}, + {imageContext{ + i: types.ImageSummary{}, + tag: "latest", + }, "latest", ctx.Tag}, + {imageContext{ + i: types.ImageSummary{}, + digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", + }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", ctx.Digest}, + { + imageContext{ + i: types.ImageSummary{Containers: 10}, + }, "10", ctx.Containers, + }, + { + imageContext{ + i: types.ImageSummary{VirtualSize: 10000}, + }, "10kB", ctx.VirtualSize, + }, + { + imageContext{ + i: types.ImageSummary{SharedSize: 10000}, + }, "10kB", ctx.SharedSize, + }, + { + imageContext{ + i: types.ImageSummary{SharedSize: 5000, VirtualSize: 20000}, + }, "15kB", ctx.UniqueSize, + }, + } + + for _, c := range cases { + ctx = c.imageCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else { + assert.Check(t, is.Equal(c.expValue, v)) + } + } +} + +func TestImageContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context ImageContext + expected string + }{ + // Errors + { + ImageContext{ + Context: Context{ + Format: "{{InvalidFunction}}", + }, + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + ImageContext{ + Context: Context{ + Format: "{{nil}}", + }, + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, false), + }, + }, + `REPOSITORY TAG IMAGE ID CREATED SIZE +image tag1 imageID1 24 hours ago 0B +image tag2 imageID2 24 hours ago 
0B + imageID3 24 hours ago 0B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + }, + Digest: true, + }, + `REPOSITORY DIGEST +image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image + +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", true, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Digest}}", true, false), + }, + }, + "DIGEST\nsha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf\n\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, false), + }, + }, + "imageID1\nimageID2\nimageID3\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, true), + }, + Digest: true, + }, + `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0B +image tag2 imageID2 24 hours ago 0B + imageID3 24 hours ago 0B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, true), + }, + Digest: true, + }, + "imageID1\nimageID2\nimageID3\n", + }, + // Raw Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, false), + }, + }, + fmt.Sprintf(`repository: image +tag: tag1 +image_id: imageID1 +created_at: %s +virtual_size: 0B + +repository: image +tag: tag2 +image_id: imageID2 +created_at: %s +virtual_size: 0B + +repository: +tag: +image_id: imageID3 +created_at: %s +virtual_size: 0B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, true), + }, + Digest: true, + }, + fmt.Sprintf(`repository: image +tag: tag1 +digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image_id: imageID1 +created_at: %s +virtual_size: 0B + +repository: image +tag: tag2 +digest: +image_id: imageID2 +created_at: %s +virtual_size: 0B + +repository: +tag: +digest: +image_id: imageID3 +created_at: %s +virtual_size: 0B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", true, false), + }, + }, + `image_id: imageID1 +image_id: imageID2 +image_id: imageID3 +`, + }, + // Custom Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + }, + }, + "image\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + }, + Digest: true, + }, + "image\nimage\n\n", + }, + } + + for _, testcase := range cases { + images := []types.ImageSummary{ + {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, + {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, + {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ImageWrite(testcase.context, images) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func 
TestImageContextWriteWithNoImage(t *testing.T) { + out := bytes.NewBufferString("") + images := []types.ImageSummary{} + + contexts := []struct { + context ImageContext + expected string + }{ + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + Output: out, + }, + }, + "REPOSITORY\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + Output: out, + }, + }, + "REPOSITORY DIGEST\n", + }, + } + + for _, context := range contexts { + ImageWrite(context.context, images) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} diff --git a/cli/cli/command/formatter/network.go b/cli/cli/command/formatter/network.go new file mode 100644 index 00000000..4aeebd17 --- /dev/null +++ b/cli/cli/command/formatter/network.go @@ -0,0 +1,129 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}" + + networkIDHeader = "NETWORK ID" + ipv6Header = "IPV6" + internalHeader = "INTERNAL" +) + +// NewNetworkFormat returns a Format for rendering using a network Context +func NewNetworkFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultNetworkTableFormat + case RawFormatKey: + if quiet { + return `network_id: {{.ID}}` + } + return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n` + } + return Format(source) +} + +// NetworkWrite writes the context +func NetworkWrite(ctx Context, networks []types.NetworkResource) error { + render := func(format func(subContext subContext) error) error { + for _, network := range networks { + networkCtx := &networkContext{trunc: ctx.Trunc, n: network} + if err := format(networkCtx); err != nil { + return err + } + } + return nil + } + networkCtx := networkContext{} + networkCtx.header = networkHeaderContext{ + "ID": networkIDHeader, + "Name": nameHeader, + "Driver": driverHeader, + "Scope": scopeHeader, + "IPv6": ipv6Header, + "Internal": internalHeader, + "Labels": labelsHeader, + "CreatedAt": createdAtHeader, + } + return ctx.Write(&networkCtx, render) +} + +type networkHeaderContext map[string]string + +func (c networkHeaderContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + return h +} + +type networkContext struct { + HeaderContext + trunc bool + n types.NetworkResource +} + +func (c *networkContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *networkContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.n.ID) + } + return c.n.ID +} + +func (c *networkContext) Name() string { + return c.n.Name +} + +func (c *networkContext) Driver() string { + return c.n.Driver +} + +func (c *networkContext) Scope() string { + return c.n.Scope +} + +func (c *networkContext) IPv6() string { + return fmt.Sprintf("%v", c.n.EnableIPv6) +} + +func (c *networkContext) Internal() string { + return fmt.Sprintf("%v", c.n.Internal) +} + +func (c 
*networkContext) Labels() string { + if c.n.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.n.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *networkContext) Label(name string) string { + if c.n.Labels == nil { + return "" + } + return c.n.Labels[name] +} + +func (c *networkContext) CreatedAt() string { + return c.n.Created.String() +} diff --git a/cli/cli/command/formatter/network_test.go b/cli/cli/command/formatter/network_test.go new file mode 100644 index 00000000..6831926e --- /dev/null +++ b/cli/cli/command/formatter/network_test.go @@ -0,0 +1,213 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNetworkContext(t *testing.T) { + networkID := stringid.GenerateRandomID() + + var ctx networkContext + cases := []struct { + networkCtx networkContext + expValue string + call func() string + }{ + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: false, + }, networkID, ctx.ID}, + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: true, + }, stringid.TruncateID(networkID), ctx.ID}, + {networkContext{ + n: types.NetworkResource{Name: "network_name"}, + }, "network_name", ctx.Name}, + {networkContext{ + n: types.NetworkResource{Driver: "driver_name"}, + }, "driver_name", ctx.Driver}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: true}, + }, "true", ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: false}, + }, "false", ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{Internal: true}, + }, "true", ctx.Internal}, + {networkContext{ + n: types.NetworkResource{Internal: false}, + }, "false", ctx.Internal}, + {networkContext{ + n: types.NetworkResource{}, + }, "", ctx.Labels}, + {networkContext{ + n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", ctx.Labels}, + } + + for _, c := range cases { + ctx = c.networkCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestNetworkContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewNetworkFormat("table", false)}, + `NETWORK ID NAME DRIVER SCOPE +networkID1 foobar_baz foo local +networkID2 foobar_bar bar local +`, + }, + { + Context{Format: NewNetworkFormat("table", true)}, + `networkID1 +networkID2 +`, + }, + { + Context{Format: NewNetworkFormat("table {{.Name}}", false)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewNetworkFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewNetworkFormat("raw", false)}, + `network_id: networkID1 +name: foobar_baz +driver: foo +scope: local + +network_id: networkID2 +name: foobar_bar +driver: bar +scope: local + +`, + }, + { + Context{Format: NewNetworkFormat("raw", true)}, + 
`network_id: networkID1 +network_id: networkID2 +`, + }, + // Custom Format + { + Context{Format: NewNetworkFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + // Custom Format with CreatedAt + { + Context{Format: NewNetworkFormat("{{.Name}} {{.CreatedAt}}", false)}, + `foobar_baz 2016-01-01 00:00:00 +0000 UTC +foobar_bar 2017-01-01 00:00:00 +0000 UTC +`, + }, + } + + timestamp1, _ := time.Parse("2006-01-02", "2016-01-01") + timestamp2, _ := time.Parse("2006-01-02", "2017-01-01") + + for _, testcase := range cases { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz", Driver: "foo", Scope: "local", Created: timestamp1}, + {ID: "networkID2", Name: "foobar_bar", Driver: "bar", Scope: "local", Created: timestamp2}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := NetworkWrite(testcase.context, networks) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestNetworkContextWriteJSON(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"}, + {"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_bar", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"}, + } + + out := bytes.NewBufferString("") + err := NetworkWrite(Context{Format: "{{json .}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg) + } +} + +func TestNetworkContextWriteJSONField(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := NetworkWrite(Context{Format: "{{json .ID}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(networks[i].ID, s), msg) + } +} diff --git a/cli/cli/command/formatter/node.go b/cli/cli/command/formatter/node.go new file mode 100644 index 00000000..6cfc3238 --- /dev/null +++ b/cli/cli/command/formatter/node.go @@ -0,0 +1,336 @@ +package formatter + +import ( + "encoding/base64" + "fmt" + "reflect" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultNodeTableFormat = "table {{.ID}} {{if .Self}}*{{else}} {{ end }}\t{{.Hostname}}\t{{.Status}}\t{{.Availability}}\t{{.ManagerStatus}}\t{{.EngineVersion}}" + nodeInspectPrettyTemplate Format = `ID: {{.ID}} +{{- if .Name }} +Name: {{.Name}} +{{- end }} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Hostname: 
{{.Hostname}} +Joined at: {{.CreatedAt}} +Status: + State: {{.StatusState}} + {{- if .HasStatusMessage}} + Message: {{.StatusMessage}} + {{- end}} + Availability: {{.SpecAvailability}} + {{- if .Status.Addr}} + Address: {{.StatusAddr}} + {{- end}} +{{- if .HasManagerStatus}} +Manager Status: + Address: {{.ManagerStatusAddr}} + Raft Status: {{.ManagerStatusReachability}} + {{- if .IsManagerStatusLeader}} + Leader: Yes + {{- else}} + Leader: No + {{- end}} +{{- end}} +Platform: + Operating System: {{.PlatformOS}} + Architecture: {{.PlatformArchitecture}} +Resources: + CPUs: {{.ResourceNanoCPUs}} + Memory: {{.ResourceMemory}} +{{- if .HasEnginePlugins}} +Plugins: +{{- range $k, $v := .EnginePlugins }} + {{ $k }}:{{if $v }} {{ $v }}{{ end }} +{{- end }} +{{- end }} +Engine Version: {{.EngineVersion}} +{{- if .EngineLabels}} +Engine Labels: +{{- range $k, $v := .EngineLabels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{- end }} +{{- if .HasTLSInfo}} +TLS Info: + TrustRoot: +{{.TLSInfoTrustRoot}} + Issuer Subject: {{.TLSInfoCertIssuerSubject}} + Issuer Public Key: {{.TLSInfoCertIssuerPublicKey}} +{{- end}}` + nodeIDHeader = "ID" + selfHeader = "" + hostnameHeader = "HOSTNAME" + availabilityHeader = "AVAILABILITY" + managerStatusHeader = "MANAGER STATUS" + engineVersionHeader = "ENGINE VERSION" + tlsStatusHeader = "TLS STATUS" +) + +// NewNodeFormat returns a Format for rendering using a node Context +func NewNodeFormat(source string, quiet bool) Format { + switch source { + case PrettyFormatKey: + return nodeInspectPrettyTemplate + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultNodeTableFormat + case RawFormatKey: + if quiet { + return `node_id: {{.ID}}` + } + return `node_id: {{.ID}}\nhostname: {{.Hostname}}\nstatus: {{.Status}}\navailability: {{.Availability}}\nmanager_status: {{.ManagerStatus}}\n` + } + return Format(source) +} + +// NodeWrite writes the context +func NodeWrite(ctx Context, nodes []swarm.Node, info types.Info) error { + render := func(format func(subContext subContext) error) error { + for _, node := range nodes { + nodeCtx := &nodeContext{n: node, info: info} + if err := format(nodeCtx); err != nil { + return err + } + } + return nil + } + header := nodeHeaderContext{ + "ID": nodeIDHeader, + "Self": selfHeader, + "Hostname": hostnameHeader, + "Status": statusHeader, + "Availability": availabilityHeader, + "ManagerStatus": managerStatusHeader, + "EngineVersion": engineVersionHeader, + "TLSStatus": tlsStatusHeader, + } + nodeCtx := nodeContext{} + nodeCtx.header = header + return ctx.Write(&nodeCtx, render) +} + +type nodeHeaderContext map[string]string + +type nodeContext struct { + HeaderContext + n swarm.Node + info types.Info +} + +func (c *nodeContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *nodeContext) ID() string { + return c.n.ID +} + +func (c *nodeContext) Self() bool { + return c.n.ID == c.info.Swarm.NodeID +} + +func (c *nodeContext) Hostname() string { + return c.n.Description.Hostname +} + +func (c *nodeContext) Status() string { + return command.PrettyPrint(string(c.n.Status.State)) +} + +func (c *nodeContext) Availability() string { + return command.PrettyPrint(string(c.n.Spec.Availability)) +} + +func (c *nodeContext) ManagerStatus() string { + reachability := "" + if c.n.ManagerStatus != nil { + if c.n.ManagerStatus.Leader { + reachability = "Leader" + } else { + reachability = string(c.n.ManagerStatus.Reachability) + } + } + return command.PrettyPrint(reachability) +} + +func (c 
*nodeContext) TLSStatus() string { + if c.info.Swarm.Cluster == nil || reflect.DeepEqual(c.info.Swarm.Cluster.TLSInfo, swarm.TLSInfo{}) || reflect.DeepEqual(c.n.Description.TLSInfo, swarm.TLSInfo{}) { + return "Unknown" + } + if reflect.DeepEqual(c.n.Description.TLSInfo, c.info.Swarm.Cluster.TLSInfo) { + return "Ready" + } + return "Needs Rotation" +} + +func (c *nodeContext) EngineVersion() string { + return c.n.Description.Engine.EngineVersion +} + +// NodeInspectWrite renders the context for a list of nodes +func NodeInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != nodeInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + nodeI, _, err := getRef(ref) + if err != nil { + return err + } + node, ok := nodeI.(swarm.Node) + if !ok { + return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&nodeInspectContext{Node: node}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&nodeInspectContext{}, render) +} + +type nodeInspectContext struct { + swarm.Node + subContext +} + +func (ctx *nodeInspectContext) ID() string { + return ctx.Node.ID +} + +func (ctx *nodeInspectContext) Name() string { + return ctx.Node.Spec.Name +} + +func (ctx *nodeInspectContext) Labels() map[string]string { + return ctx.Node.Spec.Labels +} + +func (ctx *nodeInspectContext) Hostname() string { + return ctx.Node.Description.Hostname +} + +func (ctx *nodeInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Node.CreatedAt) +} + +func (ctx *nodeInspectContext) StatusState() string { + return command.PrettyPrint(ctx.Node.Status.State) +} + +func (ctx *nodeInspectContext) HasStatusMessage() bool { + return ctx.Node.Status.Message != "" +} + +func (ctx *nodeInspectContext) StatusMessage() string { + return command.PrettyPrint(ctx.Node.Status.Message) +} + +func (ctx *nodeInspectContext) SpecAvailability() string { + return command.PrettyPrint(ctx.Node.Spec.Availability) +} + +func (ctx *nodeInspectContext) HasStatusAddr() bool { + return ctx.Node.Status.Addr != "" +} + +func (ctx *nodeInspectContext) StatusAddr() string { + return ctx.Node.Status.Addr +} + +func (ctx *nodeInspectContext) HasManagerStatus() bool { + return ctx.Node.ManagerStatus != nil +} + +func (ctx *nodeInspectContext) ManagerStatusAddr() string { + return ctx.Node.ManagerStatus.Addr +} + +func (ctx *nodeInspectContext) ManagerStatusReachability() string { + return command.PrettyPrint(ctx.Node.ManagerStatus.Reachability) +} + +func (ctx *nodeInspectContext) IsManagerStatusLeader() bool { + return ctx.Node.ManagerStatus.Leader +} + +func (ctx *nodeInspectContext) PlatformOS() string { + return ctx.Node.Description.Platform.OS +} + +func (ctx *nodeInspectContext) PlatformArchitecture() string { + return ctx.Node.Description.Platform.Architecture +} + +func (ctx *nodeInspectContext) ResourceNanoCPUs() int { + if ctx.Node.Description.Resources.NanoCPUs == 0 { + return int(0) + } + return int(ctx.Node.Description.Resources.NanoCPUs) / 1e9 +} + +func (ctx *nodeInspectContext) ResourceMemory() string { + if ctx.Node.Description.Resources.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Node.Description.Resources.MemoryBytes)) +} + +func (ctx *nodeInspectContext) HasEnginePlugins() bool { + return len(ctx.Node.Description.Engine.Plugins) > 0 +} + +func (ctx *nodeInspectContext) EnginePlugins() 
map[string]string { + pluginMap := map[string][]string{} + for _, p := range ctx.Node.Description.Engine.Plugins { + pluginMap[p.Type] = append(pluginMap[p.Type], p.Name) + } + + pluginNamesByType := map[string]string{} + for k, v := range pluginMap { + pluginNamesByType[k] = strings.Join(v, ", ") + } + return pluginNamesByType +} + +func (ctx *nodeInspectContext) EngineLabels() map[string]string { + return ctx.Node.Description.Engine.Labels +} + +func (ctx *nodeInspectContext) EngineVersion() string { + return ctx.Node.Description.Engine.EngineVersion +} + +func (ctx *nodeInspectContext) HasTLSInfo() bool { + tlsInfo := ctx.Node.Description.TLSInfo + return !reflect.DeepEqual(tlsInfo, swarm.TLSInfo{}) +} + +func (ctx *nodeInspectContext) TLSInfoTrustRoot() string { + return ctx.Node.Description.TLSInfo.TrustRoot +} + +func (ctx *nodeInspectContext) TLSInfoCertIssuerPublicKey() string { + return base64.StdEncoding.EncodeToString(ctx.Node.Description.TLSInfo.CertIssuerPublicKey) +} + +func (ctx *nodeInspectContext) TLSInfoCertIssuerSubject() string { + return base64.StdEncoding.EncodeToString(ctx.Node.Description.TLSInfo.CertIssuerSubject) +} diff --git a/cli/cli/command/formatter/node_test.go b/cli/cli/command/formatter/node_test.go new file mode 100644 index 00000000..cf9ccbde --- /dev/null +++ b/cli/cli/command/formatter/node_test.go @@ -0,0 +1,348 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNodeContext(t *testing.T) { + nodeID := stringid.GenerateRandomID() + + var ctx nodeContext + cases := []struct { + nodeCtx nodeContext + expValue string + call func() string + }{ + {nodeContext{ + n: swarm.Node{ID: nodeID}, + }, nodeID, ctx.ID}, + {nodeContext{ + n: swarm.Node{Description: swarm.NodeDescription{Hostname: "node_hostname"}}, + }, "node_hostname", ctx.Hostname}, + {nodeContext{ + n: swarm.Node{Status: swarm.NodeStatus{State: swarm.NodeState("foo")}}, + }, "Foo", ctx.Status}, + {nodeContext{ + n: swarm.Node{Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("drain")}}, + }, "Drain", ctx.Availability}, + {nodeContext{ + n: swarm.Node{ManagerStatus: &swarm.ManagerStatus{Leader: true}}, + }, "Leader", ctx.ManagerStatus}, + } + + for _, c := range cases { + ctx = c.nodeCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestNodeContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + clusterInfo swarm.ClusterInfo + }{ + + // Errors + { + context: Context{Format: "{{InvalidFunction}}"}, + expected: `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: Context{Format: "{{nil}}"}, + expected: `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + // Table format + { + context: Context{Format: NewNodeFormat("table", false)}, + expected: `ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION +nodeID1 foobar_baz Foo Drain Leader 18.03.0-ce +nodeID2 foobar_bar Bar Active Reachable 1.2.3 +nodeID3 foobar_boo Boo Active ` + "\n", // (to 
preserve whitespace) + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: Context{Format: NewNodeFormat("table", true)}, + expected: `nodeID1 +nodeID2 +nodeID3 +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: Context{Format: NewNodeFormat("table {{.Hostname}}", false)}, + expected: `HOSTNAME +foobar_baz +foobar_bar +foobar_boo +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: Context{Format: NewNodeFormat("table {{.Hostname}}", true)}, + expected: `HOSTNAME +foobar_baz +foobar_bar +foobar_boo +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: Context{Format: NewNodeFormat("table {{.ID}}\t{{.Hostname}}\t{{.TLSStatus}}", false)}, + expected: `ID HOSTNAME TLS STATUS +nodeID1 foobar_baz Needs Rotation +nodeID2 foobar_bar Ready +nodeID3 foobar_boo Unknown +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { // no cluster TLS status info, TLS status for all nodes is unknown + context: Context{Format: NewNodeFormat("table {{.ID}}\t{{.Hostname}}\t{{.TLSStatus}}", false)}, + expected: `ID HOSTNAME TLS STATUS +nodeID1 foobar_baz Unknown +nodeID2 foobar_bar Unknown +nodeID3 foobar_boo Unknown +`, + clusterInfo: swarm.ClusterInfo{}, + }, + // Raw Format + { + context: Context{Format: NewNodeFormat("raw", false)}, + expected: `node_id: nodeID1 +hostname: foobar_baz +status: Foo +availability: Drain +manager_status: Leader + +node_id: nodeID2 +hostname: foobar_bar +status: Bar +availability: Active +manager_status: Reachable + +node_id: nodeID3 +hostname: foobar_boo +status: Boo +availability: Active +manager_status: ` + "\n\n", // to preserve whitespace + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: Context{Format: NewNodeFormat("raw", true)}, + expected: `node_id: nodeID1 +node_id: nodeID2 +node_id: nodeID3 +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + // Custom Format + { + context: Context{Format: NewNodeFormat("{{.Hostname}} {{.TLSStatus}}", false)}, + expected: `foobar_baz Needs Rotation +foobar_bar Ready +foobar_boo Unknown +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + } + + for _, testcase := range cases { + nodes := []swarm.Node{ + { + ID: "nodeID1", + Description: swarm.NodeDescription{ + Hostname: "foobar_baz", + TLSInfo: swarm.TLSInfo{TrustRoot: "no"}, + Engine: swarm.EngineDescription{EngineVersion: "18.03.0-ce"}, + }, + Status: swarm.NodeStatus{State: swarm.NodeState("foo")}, + Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("drain")}, + ManagerStatus: &swarm.ManagerStatus{Leader: true}, + }, + { + ID: "nodeID2", + Description: swarm.NodeDescription{ + Hostname: "foobar_bar", + TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}, + Engine: swarm.EngineDescription{EngineVersion: "1.2.3"}, + }, + Status: swarm.NodeStatus{State: swarm.NodeState("bar")}, + Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("active")}, + ManagerStatus: &swarm.ManagerStatus{ + Leader: false, + Reachability: swarm.Reachability("Reachable"), + }, + }, + { + ID: "nodeID3", + Description: swarm.NodeDescription{Hostname: "foobar_boo"}, + Status: swarm.NodeStatus{State: swarm.NodeState("boo")}, + Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("active")}, + }, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := 
NodeWrite(testcase.context, nodes, types.Info{Swarm: swarm.Info{Cluster: &testcase.clusterInfo}}) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestNodeContextWriteJSON(t *testing.T) { + cases := []struct { + expected []map[string]interface{} + info types.Info + }{ + { + expected: []map[string]interface{}{ + {"Availability": "", "Hostname": "foobar_baz", "ID": "nodeID1", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "1.2.3"}, + {"Availability": "", "Hostname": "foobar_bar", "ID": "nodeID2", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": ""}, + {"Availability": "", "Hostname": "foobar_boo", "ID": "nodeID3", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "18.03.0-ce"}, + }, + info: types.Info{}, + }, + { + expected: []map[string]interface{}{ + {"Availability": "", "Hostname": "foobar_baz", "ID": "nodeID1", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Ready", "EngineVersion": "1.2.3"}, + {"Availability": "", "Hostname": "foobar_bar", "ID": "nodeID2", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Needs Rotation", "EngineVersion": ""}, + {"Availability": "", "Hostname": "foobar_boo", "ID": "nodeID3", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "18.03.0-ce"}, + }, + info: types.Info{ + Swarm: swarm.Info{ + Cluster: &swarm.ClusterInfo{ + TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}, + RootRotationInProgress: true, + }, + }, + }, + }, + } + + for _, testcase := range cases { + nodes := []swarm.Node{ + {ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz", TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}, Engine: swarm.EngineDescription{EngineVersion: "1.2.3"}}}, + {ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar", TLSInfo: swarm.TLSInfo{TrustRoot: "no"}}}, + {ID: "nodeID3", Description: swarm.NodeDescription{Hostname: "foobar_boo", Engine: swarm.EngineDescription{EngineVersion: "18.03.0-ce"}}}, + } + out := bytes.NewBufferString("") + err := NodeWrite(Context{Format: "{{json .}}", Output: out}, nodes, testcase.info) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(testcase.expected[i], m), msg) + } + } +} + +func TestNodeContextWriteJSONField(t *testing.T) { + nodes := []swarm.Node{ + {ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz"}}, + {ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar"}}, + } + out := bytes.NewBufferString("") + err := NodeWrite(Context{Format: "{{json .ID}}", Output: out}, nodes, types.Info{}) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(nodes[i].ID, s), msg) + } +} + +func TestNodeInspectWriteContext(t *testing.T) { + node := swarm.Node{ + ID: "nodeID1", + Description: swarm.NodeDescription{ + Hostname: "foobar_baz", + TLSInfo: swarm.TLSInfo{ + TrustRoot: "-----BEGIN CERTIFICATE-----\ndata\n-----END 
CERTIFICATE-----\n", + CertIssuerPublicKey: []byte("pubKey"), + CertIssuerSubject: []byte("subject"), + }, + Platform: swarm.Platform{ + OS: "linux", + Architecture: "amd64", + }, + Resources: swarm.Resources{ + MemoryBytes: 1, + }, + Engine: swarm.EngineDescription{ + EngineVersion: "0.1.1", + }, + }, + Status: swarm.NodeStatus{ + State: swarm.NodeState("ready"), + Addr: "1.1.1.1", + }, + Spec: swarm.NodeSpec{ + Availability: swarm.NodeAvailability("drain"), + Role: swarm.NodeRole("manager"), + }, + } + out := bytes.NewBufferString("") + context := Context{ + Format: NewNodeFormat("pretty", false), + Output: out, + } + err := NodeInspectWrite(context, []string{"nodeID1"}, func(string) (interface{}, []byte, error) { + return node, nil, nil + }) + if err != nil { + t.Fatal(err) + } + expected := `ID: nodeID1 +Hostname: foobar_baz +Joined at: 0001-01-01 00:00:00 +0000 utc +Status: + State: Ready + Availability: Drain + Address: 1.1.1.1 +Platform: + Operating System: linux + Architecture: amd64 +Resources: + CPUs: 0 + Memory: 1B +Engine Version: 0.1.1 +TLS Info: + TrustRoot: +-----BEGIN CERTIFICATE----- +data +-----END CERTIFICATE----- + + Issuer Subject: c3ViamVjdA== + Issuer Public Key: cHViS2V5 +` + assert.Check(t, is.Equal(expected, out.String())) +} diff --git a/cli/cli/command/formatter/plugin.go b/cli/cli/command/formatter/plugin.go new file mode 100644 index 00000000..08771149 --- /dev/null +++ b/cli/cli/command/formatter/plugin.go @@ -0,0 +1,94 @@ +package formatter + +import ( + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultPluginTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Description}}\t{{.Enabled}}" + + pluginIDHeader = "ID" + descriptionHeader = "DESCRIPTION" + enabledHeader = "ENABLED" +) + +// NewPluginFormat returns a Format for rendering using a plugin Context +func NewPluginFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultPluginTableFormat + case RawFormatKey: + if quiet { + return `plugin_id: {{.ID}}` + } + return `plugin_id: {{.ID}}\nname: {{.Name}}\ndescription: {{.Description}}\nenabled: {{.Enabled}}\n` + } + return Format(source) +} + +// PluginWrite writes the context +func PluginWrite(ctx Context, plugins []*types.Plugin) error { + render := func(format func(subContext subContext) error) error { + for _, plugin := range plugins { + pluginCtx := &pluginContext{trunc: ctx.Trunc, p: *plugin} + if err := format(pluginCtx); err != nil { + return err + } + } + return nil + } + pluginCtx := pluginContext{} + pluginCtx.header = map[string]string{ + "ID": pluginIDHeader, + "Name": nameHeader, + "Description": descriptionHeader, + "Enabled": enabledHeader, + "PluginReference": imageHeader, + } + return ctx.Write(&pluginCtx, render) +} + +type pluginContext struct { + HeaderContext + trunc bool + p types.Plugin +} + +func (c *pluginContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *pluginContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.p.ID) + } + return c.p.ID +} + +func (c *pluginContext) Name() string { + return c.p.Name +} + +func (c *pluginContext) Description() string { + desc := strings.Replace(c.p.Config.Description, "\n", "", -1) + desc = strings.Replace(desc, "\r", "", -1) + if c.trunc { + desc = Ellipsis(desc, 45) + } + + return desc +} + +func (c *pluginContext) Enabled() bool { + return c.p.Enabled +} + +func (c *pluginContext) PluginReference() string { 
+ return c.p.PluginReference +} diff --git a/cli/cli/command/formatter/plugin_test.go b/cli/cli/command/formatter/plugin_test.go new file mode 100644 index 00000000..b8551589 --- /dev/null +++ b/cli/cli/command/formatter/plugin_test.go @@ -0,0 +1,183 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPluginContext(t *testing.T) { + pluginID := stringid.GenerateRandomID() + + var ctx pluginContext + cases := []struct { + pluginCtx pluginContext + expValue string + call func() string + }{ + {pluginContext{ + p: types.Plugin{ID: pluginID}, + trunc: false, + }, pluginID, ctx.ID}, + {pluginContext{ + p: types.Plugin{ID: pluginID}, + trunc: true, + }, stringid.TruncateID(pluginID), ctx.ID}, + {pluginContext{ + p: types.Plugin{Name: "plugin_name"}, + }, "plugin_name", ctx.Name}, + {pluginContext{ + p: types.Plugin{Config: types.PluginConfig{Description: "plugin_description"}}, + }, "plugin_description", ctx.Description}, + } + + for _, c := range cases { + ctx = c.pluginCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestPluginContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewPluginFormat("table", false)}, + `ID NAME DESCRIPTION ENABLED +pluginID1 foobar_baz description 1 true +pluginID2 foobar_bar description 2 false +`, + }, + { + Context{Format: NewPluginFormat("table", true)}, + `pluginID1 +pluginID2 +`, + }, + { + Context{Format: NewPluginFormat("table {{.Name}}", false)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewPluginFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewPluginFormat("raw", false)}, + `plugin_id: pluginID1 +name: foobar_baz +description: description 1 +enabled: true + +plugin_id: pluginID2 +name: foobar_bar +description: description 2 +enabled: false + +`, + }, + { + Context{Format: NewPluginFormat("raw", true)}, + `plugin_id: pluginID1 +plugin_id: pluginID2 +`, + }, + // Custom Format + { + Context{Format: NewPluginFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + plugins := []*types.Plugin{ + {ID: "pluginID1", Name: "foobar_baz", Config: types.PluginConfig{Description: "description 1"}, Enabled: true}, + {ID: "pluginID2", Name: "foobar_bar", Config: types.PluginConfig{Description: "description 2"}, Enabled: false}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := PluginWrite(testcase.context, plugins) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestPluginContextWriteJSON(t *testing.T) { + plugins := []*types.Plugin{ + {ID: "pluginID1", Name: "foobar_baz"}, + {ID: "pluginID2", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Description": "", "Enabled": false, "ID": "pluginID1", "Name": 
"foobar_baz", "PluginReference": ""}, + {"Description": "", "Enabled": false, "ID": "pluginID2", "Name": "foobar_bar", "PluginReference": ""}, + } + + out := bytes.NewBufferString("") + err := PluginWrite(Context{Format: "{{json .}}", Output: out}, plugins) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.Check(t, is.DeepEqual(expectedJSONs[i], m)) + } +} + +func TestPluginContextWriteJSONField(t *testing.T) { + plugins := []*types.Plugin{ + {ID: "pluginID1", Name: "foobar_baz"}, + {ID: "pluginID2", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := PluginWrite(Context{Format: "{{json .ID}}", Output: out}, plugins) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal(plugins[i].ID, s)) + } +} diff --git a/cli/cli/command/formatter/reflect.go b/cli/cli/command/formatter/reflect.go new file mode 100644 index 00000000..fd59404d --- /dev/null +++ b/cli/cli/command/formatter/reflect.go @@ -0,0 +1,66 @@ +package formatter + +import ( + "encoding/json" + "reflect" + "unicode" + + "github.com/pkg/errors" +) + +func marshalJSON(x interface{}) ([]byte, error) { + m, err := marshalMap(x) + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// marshalMap marshals x to map[string]interface{} +func marshalMap(x interface{}) (map[string]interface{}, error) { + val := reflect.ValueOf(x) + if val.Kind() != reflect.Ptr { + return nil, errors.Errorf("expected a pointer to a struct, got %v", val.Kind()) + } + if val.IsNil() { + return nil, errors.Errorf("expected a pointer to a struct, got nil pointer") + } + valElem := val.Elem() + if valElem.Kind() != reflect.Struct { + return nil, errors.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) + } + typ := val.Type() + m := make(map[string]interface{}) + for i := 0; i < val.NumMethod(); i++ { + k, v, err := marshalForMethod(typ.Method(i), val.Method(i)) + if err != nil { + return nil, err + } + if k != "" { + m[k] = v + } + } + return m, nil +} + +var unmarshallableNames = map[string]struct{}{"FullHeader": {}} + +// marshalForMethod returns the map key and the map value for marshalling the method. +// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()") +func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) { + if val.Kind() != reflect.Func { + return "", nil, errors.Errorf("expected func, got %v", val.Kind()) + } + name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() + _, blackListed := unmarshallableNames[name] + // FIXME: In text/template, (numOut == 2) is marshallable, + // if the type of the second param is error. 
+ marshallable := unicode.IsUpper(rune(name[0])) && !blackListed && + numIn == 0 && numOut == 1 + if !marshallable { + return "", nil, nil + } + result := val.Call(make([]reflect.Value, numIn)) + intf := result[0].Interface() + return name, intf, nil +} diff --git a/cli/cli/command/formatter/reflect_test.go b/cli/cli/command/formatter/reflect_test.go new file mode 100644 index 00000000..ffda51b8 --- /dev/null +++ b/cli/cli/command/formatter/reflect_test.go @@ -0,0 +1,66 @@ +package formatter + +import ( + "reflect" + "testing" +) + +type dummy struct { +} + +func (d *dummy) Func1() string { + return "Func1" +} + +func (d *dummy) func2() string { // nolint: unused + return "func2(should not be marshalled)" +} + +func (d *dummy) Func3() (string, int) { + return "Func3(should not be marshalled)", -42 +} + +func (d *dummy) Func4() int { + return 4 +} + +type dummyType string + +func (d *dummy) Func5() dummyType { + return dummyType("Func5") +} + +func (d *dummy) FullHeader() string { + return "FullHeader(should not be marshalled)" +} + +var dummyExpected = map[string]interface{}{ + "Func1": "Func1", + "Func4": 4, + "Func5": dummyType("Func5"), +} + +func TestMarshalMap(t *testing.T) { + d := dummy{} + m, err := marshalMap(&d) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(dummyExpected, m) { + t.Fatalf("expected %+v, got %+v", + dummyExpected, m) + } +} + +func TestMarshalMapBad(t *testing.T) { + if _, err := marshalMap(nil); err == nil { + t.Fatal("expected an error (argument is nil)") + } + if _, err := marshalMap(dummy{}); err == nil { + t.Fatal("expected an error (argument is non-pointer)") + } + x := 42 + if _, err := marshalMap(&x); err == nil { + t.Fatal("expected an error (argument is a pointer to non-struct)") + } +} diff --git a/cli/cli/command/formatter/search.go b/cli/cli/command/formatter/search.go new file mode 100644 index 00000000..8b5cfc4f --- /dev/null +++ b/cli/cli/command/formatter/search.go @@ -0,0 +1,103 @@ +package formatter + +import ( + "strconv" + "strings" + + registry "github.com/docker/docker/api/types/registry" +) + +const ( + defaultSearchTableFormat = "table {{.Name}}\t{{.Description}}\t{{.StarCount}}\t{{.IsOfficial}}\t{{.IsAutomated}}" + + starsHeader = "STARS" + officialHeader = "OFFICIAL" + automatedHeader = "AUTOMATED" +) + +// NewSearchFormat returns a Format for rendering using a network Context +func NewSearchFormat(source string) Format { + switch source { + case "": + return defaultSearchTableFormat + case TableFormatKey: + return defaultSearchTableFormat + } + return Format(source) +} + +// SearchWrite writes the context +func SearchWrite(ctx Context, results []registry.SearchResult, auto bool, stars int) error { + render := func(format func(subContext subContext) error) error { + for _, result := range results { + // --automated and -s, --stars are deprecated since Docker 1.12 + if (auto && !result.IsAutomated) || (stars > result.StarCount) { + continue + } + searchCtx := &searchContext{trunc: ctx.Trunc, s: result} + if err := format(searchCtx); err != nil { + return err + } + } + return nil + } + searchCtx := searchContext{} + searchCtx.header = map[string]string{ + "Name": nameHeader, + "Description": descriptionHeader, + "StarCount": starsHeader, + "IsOfficial": officialHeader, + "IsAutomated": automatedHeader, + } + return ctx.Write(&searchCtx, render) +} + +type searchContext struct { + HeaderContext + trunc bool + json bool + s registry.SearchResult +} + +func (c *searchContext) MarshalJSON() ([]byte, error) { + c.json = true + 
return marshalJSON(c) +} + +func (c *searchContext) Name() string { + return c.s.Name +} + +func (c *searchContext) Description() string { + desc := strings.Replace(c.s.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if c.trunc { + desc = Ellipsis(desc, 45) + } + return desc +} + +func (c *searchContext) StarCount() string { + return strconv.Itoa(c.s.StarCount) +} + +func (c *searchContext) formatBool(value bool) string { + switch { + case value && c.json: + return "true" + case value: + return "[OK]" + case c.json: + return "false" + default: + return "" + } +} + +func (c *searchContext) IsOfficial() string { + return c.formatBool(c.s.IsOfficial) +} + +func (c *searchContext) IsAutomated() string { + return c.formatBool(c.s.IsAutomated) +} diff --git a/cli/cli/command/formatter/search_test.go b/cli/cli/command/formatter/search_test.go new file mode 100644 index 00000000..2e5bac0f --- /dev/null +++ b/cli/cli/command/formatter/search_test.go @@ -0,0 +1,280 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + registrytypes "github.com/docker/docker/api/types/registry" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestSearchContext(t *testing.T) { + name := "nginx" + starCount := 5000 + + var ctx searchContext + cases := []struct { + searchCtx searchContext + expValue string + call func() string + }{ + {searchContext{ + s: registrytypes.SearchResult{Name: name}, + }, name, ctx.Name}, + {searchContext{ + s: registrytypes.SearchResult{StarCount: starCount}, + }, "5000", ctx.StarCount}, + {searchContext{ + s: registrytypes.SearchResult{IsOfficial: true}, + }, "[OK]", ctx.IsOfficial}, + {searchContext{ + s: registrytypes.SearchResult{IsOfficial: false}, + }, "", ctx.IsOfficial}, + {searchContext{ + s: registrytypes.SearchResult{IsAutomated: true}, + }, "[OK]", ctx.IsAutomated}, + {searchContext{ + s: registrytypes.SearchResult{IsAutomated: false}, + }, "", ctx.IsAutomated}, + } + + for _, c := range cases { + ctx = c.searchCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestSearchContextDescription(t *testing.T) { + shortDescription := "Official build of Nginx." 
+ longDescription := "Automated Nginx reverse proxy for docker containers" + descriptionWReturns := "Automated\nNginx reverse\rproxy\rfor docker\ncontainers" + + var ctx searchContext + cases := []struct { + searchCtx searchContext + expValue string + call func() string + }{ + {searchContext{ + s: registrytypes.SearchResult{Description: shortDescription}, + trunc: true, + }, shortDescription, ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: shortDescription}, + trunc: false, + }, shortDescription, ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: longDescription}, + trunc: false, + }, longDescription, ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: longDescription}, + trunc: true, + }, Ellipsis(longDescription, 45), ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: descriptionWReturns}, + trunc: false, + }, longDescription, ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: descriptionWReturns}, + trunc: true, + }, Ellipsis(longDescription, 45), ctx.Description}, + } + + for _, c := range cases { + ctx = c.searchCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestSearchContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewSearchFormat("table")}, + string(golden.Get(t, "search-context-write-table.golden")), + }, + { + Context{Format: NewSearchFormat("table {{.Name}}")}, + `NAME +result1 +result2 +`, + }, + // Custom Format + { + Context{Format: NewSearchFormat("{{.Name}}")}, + `result1 +result2 +`, + }, + // Custom Format with CreatedAt + { + Context{Format: NewSearchFormat("{{.Name}} {{.StarCount}}")}, + `result1 5000 +result2 5 +`, + }, + } + + for _, testcase := range cases { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := SearchWrite(testcase.context, results, false, 0) + if err != nil { + assert.Check(t, is.ErrorContains(err, testcase.expected)) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} + +func TestSearchContextWriteAutomated(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Table format + { + Context{Format: NewSearchFormat("table")}, + `NAME DESCRIPTION STARS OFFICIAL AUTOMATED +result2 Not official 5 [OK] +`, + }, + { + Context{Format: NewSearchFormat("table {{.Name}}")}, + `NAME +result2 +`, + }, + } + + for _, testcase := range cases { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := 
SearchWrite(testcase.context, results, true, 0) + if err != nil { + assert.Check(t, is.ErrorContains(err, testcase.expected)) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} + +func TestSearchContextWriteStars(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Table format + { + Context{Format: NewSearchFormat("table")}, + string(golden.Get(t, "search-context-write-stars-table.golden")), + }, + { + Context{Format: NewSearchFormat("table {{.Name}}")}, + `NAME +result1 +`, + }, + } + + for _, testcase := range cases { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := SearchWrite(testcase.context, results, false, 6) + if err != nil { + assert.Check(t, is.ErrorContains(err, testcase.expected)) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} + +func TestSearchContextWriteJSON(t *testing.T) { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + expectedJSONs := []map[string]interface{}{ + {"Name": "result1", "Description": "Official build", "StarCount": "5000", "IsOfficial": "true", "IsAutomated": "false"}, + {"Name": "result2", "Description": "Not official", "StarCount": "5", "IsOfficial": "false", "IsAutomated": "true"}, + } + + out := bytes.NewBufferString("") + err := SearchWrite(Context{Format: "{{json .}}", Output: out}, results, false, 0) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.Check(t, is.DeepEqual(m, expectedJSONs[i])) + } +} + +func TestSearchContextWriteJSONField(t *testing.T) { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + out := bytes.NewBufferString("") + err := SearchWrite(Context{Format: "{{json .Name}}", Output: out}, results, false, 0) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal(s, results[i].Name)) + } +} diff --git a/cli/cli/command/formatter/secret.go b/cli/cli/command/formatter/secret.go new file mode 100644 index 00000000..d025cd8f --- /dev/null +++ b/cli/cli/command/formatter/secret.go @@ -0,0 +1,178 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultSecretTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.CreatedAt}}\t{{.UpdatedAt}}" + secretIDHeader = "ID" + secretCreatedHeader = "CREATED" + secretUpdatedHeader = 
"UPDATED" + secretInspectPrettyTemplate Format = `ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Driver: {{.Driver}} +Created at: {{.CreatedAt}} +Updated at: {{.UpdatedAt}}` +) + +// NewSecretFormat returns a Format for rendering using a secret Context +func NewSecretFormat(source string, quiet bool) Format { + switch source { + case PrettyFormatKey: + return secretInspectPrettyTemplate + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultSecretTableFormat + } + return Format(source) +} + +// SecretWrite writes the context +func SecretWrite(ctx Context, secrets []swarm.Secret) error { + render := func(format func(subContext subContext) error) error { + for _, secret := range secrets { + secretCtx := &secretContext{s: secret} + if err := format(secretCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(newSecretContext(), render) +} + +func newSecretContext() *secretContext { + sCtx := &secretContext{} + + sCtx.header = map[string]string{ + "ID": secretIDHeader, + "Name": nameHeader, + "Driver": driverHeader, + "CreatedAt": secretCreatedHeader, + "UpdatedAt": secretUpdatedHeader, + "Labels": labelsHeader, + } + return sCtx +} + +type secretContext struct { + HeaderContext + s swarm.Secret +} + +func (c *secretContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *secretContext) ID() string { + return c.s.ID +} + +func (c *secretContext) Name() string { + return c.s.Spec.Annotations.Name +} + +func (c *secretContext) CreatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.CreatedAt)) + " ago" +} + +func (c *secretContext) Driver() string { + if c.s.Spec.Driver == nil { + return "" + } + return c.s.Spec.Driver.Name +} + +func (c *secretContext) UpdatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.UpdatedAt)) + " ago" +} + +func (c *secretContext) Labels() string { + mapLabels := c.s.Spec.Annotations.Labels + if mapLabels == nil { + return "" + } + var joinLabels []string + for k, v := range mapLabels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *secretContext) Label(name string) string { + if c.s.Spec.Annotations.Labels == nil { + return "" + } + return c.s.Spec.Annotations.Labels[name] +} + +// SecretInspectWrite renders the context for a list of secrets +func SecretInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != secretInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + secretI, _, err := getRef(ref) + if err != nil { + return err + } + secret, ok := secretI.(swarm.Secret) + if !ok { + return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&secretInspectContext{Secret: secret}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&secretInspectContext{}, render) +} + +type secretInspectContext struct { + swarm.Secret + subContext +} + +func (ctx *secretInspectContext) ID() string { + return ctx.Secret.ID +} + +func (ctx *secretInspectContext) Name() string { + return ctx.Secret.Spec.Name +} + +func (ctx *secretInspectContext) Labels() map[string]string { + return ctx.Secret.Spec.Labels +} + +func (ctx *secretInspectContext) Driver() string { + if 
ctx.Secret.Spec.Driver == nil { + return "" + } + return ctx.Secret.Spec.Driver.Name +} + +func (ctx *secretInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Secret.CreatedAt) +} + +func (ctx *secretInspectContext) UpdatedAt() string { + return command.PrettyPrint(ctx.Secret.UpdatedAt) +} diff --git a/cli/cli/command/formatter/secret_test.go b/cli/cli/command/formatter/secret_test.go new file mode 100644 index 00000000..31119786 --- /dev/null +++ b/cli/cli/command/formatter/secret_test.go @@ -0,0 +1,64 @@ +package formatter + +import ( + "bytes" + "testing" + "time" + + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSecretContextFormatWrite(t *testing.T) { + // Check default output format (verbose and non-verbose mode) for table headers + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + {Context{Format: NewSecretFormat("table", false)}, + `ID NAME DRIVER CREATED UPDATED +1 passwords Less than a second ago Less than a second ago +2 id_rsa Less than a second ago Less than a second ago +`}, + {Context{Format: NewSecretFormat("table {{.Name}}", true)}, + `NAME +passwords +id_rsa +`}, + {Context{Format: NewSecretFormat("{{.ID}}-{{.Name}}", false)}, + `1-passwords +2-id_rsa +`}, + } + + secrets := []swarm.Secret{ + {ID: "1", + Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, + Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "passwords"}}}, + {ID: "2", + Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, + Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "id_rsa"}}}, + } + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + if err := SecretWrite(testcase.context, secrets); err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} diff --git a/cli/cli/command/formatter/service.go b/cli/cli/command/formatter/service.go new file mode 100644 index 00000000..5dde8006 --- /dev/null +++ b/cli/cli/command/formatter/service.go @@ -0,0 +1,646 @@ +package formatter + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +const serviceInspectPrettyTemplate Format = ` +ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Service Mode: +{{- if .IsModeGlobal }} Global +{{- else if .IsModeReplicated }} Replicated +{{- if .ModeReplicatedReplicas }} + Replicas: {{ .ModeReplicatedReplicas }} +{{- end }}{{ end }} +{{- if .HasUpdateStatus }} +UpdateStatus: + State: {{ .UpdateStatusState }} +{{- if .HasUpdateStatusStarted }} + Started: {{ .UpdateStatusStarted }} +{{- end }} +{{- if .UpdateIsCompleted }} + Completed: {{ .UpdateStatusCompleted }} +{{- end }} + Message: {{ .UpdateStatusMessage }} +{{- end }} +Placement: +{{- if 
.TaskPlacementConstraints }} + Constraints: {{ .TaskPlacementConstraints }} +{{- end }} +{{- if .TaskPlacementPreferences }} + Preferences: {{ .TaskPlacementPreferences }} +{{- end }} +{{- if .HasUpdateConfig }} +UpdateConfig: + Parallelism: {{ .UpdateParallelism }} +{{- if .HasUpdateDelay}} + Delay: {{ .UpdateDelay }} +{{- end }} + On failure: {{ .UpdateOnFailure }} +{{- if .HasUpdateMonitor}} + Monitoring Period: {{ .UpdateMonitor }} +{{- end }} + Max failure ratio: {{ .UpdateMaxFailureRatio }} + Update order: {{ .UpdateOrder }} +{{- end }} +{{- if .HasRollbackConfig }} +RollbackConfig: + Parallelism: {{ .RollbackParallelism }} +{{- if .HasRollbackDelay}} + Delay: {{ .RollbackDelay }} +{{- end }} + On failure: {{ .RollbackOnFailure }} +{{- if .HasRollbackMonitor}} + Monitoring Period: {{ .RollbackMonitor }} +{{- end }} + Max failure ratio: {{ .RollbackMaxFailureRatio }} + Rollback order: {{ .RollbackOrder }} +{{- end }} +ContainerSpec: + Image: {{ .ContainerImage }} +{{- if .ContainerArgs }} + Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }} +{{- end -}} +{{- if .ContainerEnv }} + Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }} +{{- end -}} +{{- if .ContainerWorkDir }} + Dir: {{ .ContainerWorkDir }} +{{- end -}} +{{- if .HasContainerInit }} + Init: {{ .ContainerInit }} +{{- end -}} +{{- if .ContainerUser }} + User: {{ .ContainerUser }} +{{- end }} +{{- if .ContainerMounts }} +Mounts: +{{- end }} +{{- range $mount := .ContainerMounts }} + Target: {{ $mount.Target }} + Source: {{ $mount.Source }} + ReadOnly: {{ $mount.ReadOnly }} + Type: {{ $mount.Type }} +{{- end -}} +{{- if .Configs}} +Configs: +{{- range $config := .Configs }} + Target: {{$config.File.Name}} + Source: {{$config.ConfigName}} +{{- end }}{{ end }} +{{- if .Secrets }} +Secrets: +{{- range $secret := .Secrets }} + Target: {{$secret.File.Name}} + Source: {{$secret.SecretName}} +{{- end }}{{ end }} +{{- if .HasResources }} +Resources: +{{- if .HasResourceReservations }} + Reservations: +{{- if gt .ResourceReservationNanoCPUs 0.0 }} + CPU: {{ .ResourceReservationNanoCPUs }} +{{- end }} +{{- if .ResourceReservationMemory }} + Memory: {{ .ResourceReservationMemory }} +{{- end }}{{ end }} +{{- if .HasResourceLimits }} + Limits: +{{- if gt .ResourceLimitsNanoCPUs 0.0 }} + CPU: {{ .ResourceLimitsNanoCPUs }} +{{- end }} +{{- if .ResourceLimitMemory }} + Memory: {{ .ResourceLimitMemory }} +{{- end }}{{ end }}{{ end }} +{{- if .Networks }} +Networks: +{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }} +Endpoint Mode: {{ .EndpointMode }} +{{- if .Ports }} +Ports: +{{- range $port := .Ports }} + PublishedPort = {{ $port.PublishedPort }} + Protocol = {{ $port.Protocol }} + TargetPort = {{ $port.TargetPort }} + PublishMode = {{ $port.PublishMode }} +{{- end }} {{ end -}} +` + +// NewServiceFormat returns a Format for rendering using a Context +func NewServiceFormat(source string) Format { + switch source { + case PrettyFormatKey: + return serviceInspectPrettyTemplate + default: + return Format(strings.TrimPrefix(source, RawFormatKey)) + } +} + +func resolveNetworks(service swarm.Service, getNetwork inspect.GetRefFunc) map[string]string { + networkNames := make(map[string]string) + for _, network := range service.Spec.TaskTemplate.Networks { + if resolved, _, err := getNetwork(network.Target); err == nil { + if resolvedNetwork, ok := resolved.(types.NetworkResource); ok { + networkNames[resolvedNetwork.ID] = resolvedNetwork.Name + } + } + } + return networkNames +} + +// ServiceInspectWrite renders 
the context for a list of services +func ServiceInspectWrite(ctx Context, refs []string, getRef, getNetwork inspect.GetRefFunc) error { + if ctx.Format != serviceInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + serviceI, _, err := getRef(ref) + if err != nil { + return err + } + service, ok := serviceI.(swarm.Service) + if !ok { + return errors.Errorf("got wrong object to inspect") + } + if err := format(&serviceInspectContext{Service: service, networkNames: resolveNetworks(service, getNetwork)}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&serviceInspectContext{}, render) +} + +type serviceInspectContext struct { + swarm.Service + subContext + + // networkNames is a map from network IDs (as found in + // Networks[x].Target) to network names. + networkNames map[string]string +} + +func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) { + return marshalJSON(ctx) +} + +func (ctx *serviceInspectContext) ID() string { + return ctx.Service.ID +} + +func (ctx *serviceInspectContext) Name() string { + return ctx.Service.Spec.Name +} + +func (ctx *serviceInspectContext) Labels() map[string]string { + return ctx.Service.Spec.Labels +} + +func (ctx *serviceInspectContext) Configs() []*swarm.ConfigReference { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Configs +} + +func (ctx *serviceInspectContext) Secrets() []*swarm.SecretReference { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Secrets +} + +func (ctx *serviceInspectContext) IsModeGlobal() bool { + return ctx.Service.Spec.Mode.Global != nil +} + +func (ctx *serviceInspectContext) IsModeReplicated() bool { + return ctx.Service.Spec.Mode.Replicated != nil +} + +func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 { + return ctx.Service.Spec.Mode.Replicated.Replicas +} + +func (ctx *serviceInspectContext) HasUpdateStatus() bool { + return ctx.Service.UpdateStatus != nil && ctx.Service.UpdateStatus.State != "" +} + +func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState { + return ctx.Service.UpdateStatus.State +} + +func (ctx *serviceInspectContext) HasUpdateStatusStarted() bool { + return ctx.Service.UpdateStatus.StartedAt != nil +} + +func (ctx *serviceInspectContext) UpdateStatusStarted() string { + return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.StartedAt)) + " ago" +} + +func (ctx *serviceInspectContext) UpdateIsCompleted() bool { + return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted && ctx.Service.UpdateStatus.CompletedAt != nil +} + +func (ctx *serviceInspectContext) UpdateStatusCompleted() string { + return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.CompletedAt)) + " ago" +} + +func (ctx *serviceInspectContext) UpdateStatusMessage() string { + return ctx.Service.UpdateStatus.Message +} + +func (ctx *serviceInspectContext) TaskPlacementConstraints() []string { + if ctx.Service.Spec.TaskTemplate.Placement != nil { + return ctx.Service.Spec.TaskTemplate.Placement.Constraints + } + return nil +} + +func (ctx *serviceInspectContext) TaskPlacementPreferences() []string { + if ctx.Service.Spec.TaskTemplate.Placement == nil { + return nil + } + var strings []string + for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences { + if pref.Spread != nil { + strings = append(strings, "spread="+pref.Spread.SpreadDescriptor) + } + } + return strings +} + +func (ctx 
*serviceInspectContext) HasUpdateConfig() bool { + return ctx.Service.Spec.UpdateConfig != nil +} + +func (ctx *serviceInspectContext) UpdateParallelism() uint64 { + return ctx.Service.Spec.UpdateConfig.Parallelism +} + +func (ctx *serviceInspectContext) HasUpdateDelay() bool { + return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateDelay() time.Duration { + return ctx.Service.Spec.UpdateConfig.Delay +} + +func (ctx *serviceInspectContext) UpdateOnFailure() string { + return ctx.Service.Spec.UpdateConfig.FailureAction +} + +func (ctx *serviceInspectContext) UpdateOrder() string { + return ctx.Service.Spec.UpdateConfig.Order +} + +func (ctx *serviceInspectContext) HasUpdateMonitor() bool { + return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateMonitor() time.Duration { + return ctx.Service.Spec.UpdateConfig.Monitor +} + +func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 { + return ctx.Service.Spec.UpdateConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) HasRollbackConfig() bool { + return ctx.Service.Spec.RollbackConfig != nil +} + +func (ctx *serviceInspectContext) RollbackParallelism() uint64 { + return ctx.Service.Spec.RollbackConfig.Parallelism +} + +func (ctx *serviceInspectContext) HasRollbackDelay() bool { + return ctx.Service.Spec.RollbackConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) RollbackDelay() time.Duration { + return ctx.Service.Spec.RollbackConfig.Delay +} + +func (ctx *serviceInspectContext) RollbackOnFailure() string { + return ctx.Service.Spec.RollbackConfig.FailureAction +} + +func (ctx *serviceInspectContext) HasRollbackMonitor() bool { + return ctx.Service.Spec.RollbackConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) RollbackMonitor() time.Duration { + return ctx.Service.Spec.RollbackConfig.Monitor +} + +func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 { + return ctx.Service.Spec.RollbackConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) RollbackOrder() string { + return ctx.Service.Spec.RollbackConfig.Order +} + +func (ctx *serviceInspectContext) ContainerImage() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image +} + +func (ctx *serviceInspectContext) ContainerArgs() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args +} + +func (ctx *serviceInspectContext) ContainerEnv() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env +} + +func (ctx *serviceInspectContext) ContainerWorkDir() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir +} + +func (ctx *serviceInspectContext) ContainerUser() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.User +} + +func (ctx *serviceInspectContext) HasContainerInit() bool { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Init != nil +} + +func (ctx *serviceInspectContext) ContainerInit() bool { + return *ctx.Service.Spec.TaskTemplate.ContainerSpec.Init +} + +func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts +} + +func (ctx *serviceInspectContext) HasResources() bool { + return ctx.Service.Spec.TaskTemplate.Resources != nil +} + +func (ctx *serviceInspectContext) HasResourceReservations() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil { + return false + } + return 
ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 { + return float64(0) + } + return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceReservationMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes)) +} + +func (ctx *serviceInspectContext) HasResourceLimits() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 { + return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceLimitMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes)) +} + +func (ctx *serviceInspectContext) Networks() []string { + var out []string + for _, n := range ctx.Service.Spec.TaskTemplate.Networks { + if name, ok := ctx.networkNames[n.Target]; ok { + out = append(out, name) + } else { + out = append(out, n.Target) + } + } + return out +} + +func (ctx *serviceInspectContext) EndpointMode() string { + if ctx.Service.Spec.EndpointSpec == nil { + return "" + } + + return string(ctx.Service.Spec.EndpointSpec.Mode) +} + +func (ctx *serviceInspectContext) Ports() []swarm.PortConfig { + return ctx.Service.Endpoint.Ports +} + +const ( + defaultServiceTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Mode}}\t{{.Replicas}}\t{{.Image}}\t{{.Ports}}" + + serviceIDHeader = "ID" + modeHeader = "MODE" + replicasHeader = "REPLICAS" +) + +// NewServiceListFormat returns a Format for rendering using a service Context +func NewServiceListFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultServiceTableFormat + case RawFormatKey: + if quiet { + return `id: {{.ID}}` + } + return `id: {{.ID}}\nname: {{.Name}}\nmode: {{.Mode}}\nreplicas: {{.Replicas}}\nimage: {{.Image}}\nports: {{.Ports}}\n` + } + return Format(source) +} + +// ServiceListInfo stores the information about mode and replicas to be used by template +type ServiceListInfo struct { + Mode string + Replicas string +} + +// ServiceListWrite writes the context +func ServiceListWrite(ctx Context, services []swarm.Service, info map[string]ServiceListInfo) error { + render := func(format func(subContext subContext) error) error { + for _, service := range services { + serviceCtx := &serviceContext{service: service, mode: info[service.ID].Mode, replicas: info[service.ID].Replicas} + if err := format(serviceCtx); err != nil { + return err + } + } + return nil + } + serviceCtx := serviceContext{} + serviceCtx.header = map[string]string{ + "ID": serviceIDHeader, + "Name": nameHeader, + "Mode": modeHeader, + "Replicas": replicasHeader, + "Image": imageHeader, + "Ports": portsHeader, + } + return ctx.Write(&serviceCtx, render) +} + +type serviceContext 
struct { + HeaderContext + service swarm.Service + mode string + replicas string +} + +func (c *serviceContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *serviceContext) ID() string { + return stringid.TruncateID(c.service.ID) +} + +func (c *serviceContext) Name() string { + return c.service.Spec.Name +} + +func (c *serviceContext) Mode() string { + return c.mode +} + +func (c *serviceContext) Replicas() string { + return c.replicas +} + +func (c *serviceContext) Image() string { + var image string + if c.service.Spec.TaskTemplate.ContainerSpec != nil { + image = c.service.Spec.TaskTemplate.ContainerSpec.Image + } + if ref, err := reference.ParseNormalizedNamed(image); err == nil { + // update image string for display, (strips any digest) + if nt, ok := ref.(reference.NamedTagged); ok { + if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { + image = reference.FamiliarString(namedTagged) + } + } + } + + return image +} + +type portRange struct { + pStart uint32 + pEnd uint32 + tStart uint32 + tEnd uint32 + protocol swarm.PortConfigProtocol +} + +func (pr portRange) String() string { + var ( + pub string + tgt string + ) + + if pr.pEnd > pr.pStart { + pub = fmt.Sprintf("%d-%d", pr.pStart, pr.pEnd) + } else { + pub = fmt.Sprintf("%d", pr.pStart) + } + if pr.tEnd > pr.tStart { + tgt = fmt.Sprintf("%d-%d", pr.tStart, pr.tEnd) + } else { + tgt = fmt.Sprintf("%d", pr.tStart) + } + return fmt.Sprintf("*:%s->%s/%s", pub, tgt, pr.protocol) +} + +// Ports formats published ports on the ingress network for output. +// +// Where possible, ranges are grouped to produce a compact output: +// - multiple ports mapped to a single port (80->80, 81->80); is formatted as *:80-81->80 +// - multiple consecutive ports on both sides; (80->80, 81->81) are formatted as: *:80-81->80-81 +// +// The above should not be grouped together, i.e.: +// - 80->80, 81->81, 82->80 should be presented as : *:80-81->80-81, *:82->80 +// +// TODO improve: +// - combine non-consecutive ports mapped to a single port (80->80, 81->80, 84->80, 86->80, 87->80); to be printed as *:80-81,84,86-87->80 +// - combine tcp and udp mappings if their port-mapping is exactly the same (*:80-81->80-81/tcp+udp instead of *:80-81->80-81/tcp, *:80-81->80-81/udp) +func (c *serviceContext) Ports() string { + if c.service.Endpoint.Ports == nil { + return "" + } + + pr := portRange{} + ports := []string{} + + sort.Sort(byProtocolAndPublishedPort(c.service.Endpoint.Ports)) + + for _, p := range c.service.Endpoint.Ports { + if p.PublishMode == swarm.PortConfigPublishModeIngress { + prIsRange := pr.tEnd != pr.tStart + tOverlaps := p.TargetPort <= pr.tEnd + + // Start a new port-range if: + // - the protocol is different from the current port-range + // - published or target port are not consecutive to the current port-range + // - the current port-range is a _range_, and the target port overlaps with the current range's target-ports + if p.Protocol != pr.protocol || p.PublishedPort-pr.pEnd > 1 || p.TargetPort-pr.tEnd > 1 || prIsRange && tOverlaps { + // start a new port-range, and print the previous port-range (if any) + if pr.pStart > 0 { + ports = append(ports, pr.String()) + } + pr = portRange{ + pStart: p.PublishedPort, + pEnd: p.PublishedPort, + tStart: p.TargetPort, + tEnd: p.TargetPort, + protocol: p.Protocol, + } + continue + } + pr.pEnd = p.PublishedPort + pr.tEnd = p.TargetPort + } + } + if pr.pStart > 0 { + ports = append(ports, pr.String()) + } + return strings.Join(ports, ", ") +} 
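+
+// Editor's note: the example below is an illustrative sketch added for
+// clarity and is not part of the upstream source. It shows how two ingress
+// ports with consecutive published ports and the same target port are
+// collapsed into a single range by Ports(), as described in the comment above:
+//
+//	ports := []swarm.PortConfig{
+//		{Protocol: "tcp", PublishedPort: 80, TargetPort: 8080, PublishMode: "ingress"},
+//		{Protocol: "tcp", PublishedPort: 81, TargetPort: 8080, PublishMode: "ingress"},
+//	}
+//	c := &serviceContext{service: swarm.Service{Endpoint: swarm.Endpoint{Ports: ports}}}
+//	fmt.Println(c.Ports()) // prints: *:80-81->8080/tcp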
+ +type byProtocolAndPublishedPort []swarm.PortConfig + +func (a byProtocolAndPublishedPort) Len() int { return len(a) } +func (a byProtocolAndPublishedPort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byProtocolAndPublishedPort) Less(i, j int) bool { + if a[i].Protocol == a[j].Protocol { + return a[i].PublishedPort < a[j].PublishedPort + } + return a[i].Protocol < a[j].Protocol +} diff --git a/cli/cli/command/formatter/service_test.go b/cli/cli/command/formatter/service_test.go new file mode 100644 index 00000000..243899d1 --- /dev/null +++ b/cli/cli/command/formatter/service_test.go @@ -0,0 +1,359 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestServiceContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewServiceListFormat("table", false)}, + `ID NAME MODE REPLICAS IMAGE PORTS +id_baz baz global 2/4 *:80->8080/tcp +id_bar bar replicated 2/4 *:80->8080/tcp +`, + }, + { + Context{Format: NewServiceListFormat("table", true)}, + `id_baz +id_bar +`, + }, + { + Context{Format: NewServiceListFormat("table {{.Name}}", false)}, + `NAME +baz +bar +`, + }, + { + Context{Format: NewServiceListFormat("table {{.Name}}", true)}, + `NAME +baz +bar +`, + }, + // Raw Format + { + Context{Format: NewServiceListFormat("raw", false)}, + string(golden.Get(t, "service-context-write-raw.golden")), + }, + { + Context{Format: NewServiceListFormat("raw", true)}, + `id: id_baz +id: id_bar +`, + }, + // Custom Format + { + Context{Format: NewServiceListFormat("{{.Name}}", false)}, + `baz +bar +`, + }, + } + + for _, testcase := range cases { + services := []swarm.Service{ + { + ID: "id_baz", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: "baz"}, + }, + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + PublishMode: "ingress", + PublishedPort: 80, + TargetPort: 8080, + Protocol: "tcp", + }, + }, + }, + }, + { + ID: "id_bar", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: "bar"}, + }, + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + PublishMode: "ingress", + PublishedPort: 80, + TargetPort: 8080, + Protocol: "tcp", + }, + }, + }, + }, + } + info := map[string]ServiceListInfo{ + "id_baz": { + Mode: "global", + Replicas: "2/4", + }, + "id_bar": { + Mode: "replicated", + Replicas: "2/4", + }, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ServiceListWrite(testcase.context, services, info) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestServiceContextWriteJSON(t *testing.T) { + services := []swarm.Service{ + { + ID: "id_baz", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: "baz"}, + }, + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + PublishMode: "ingress", + PublishedPort: 80, + TargetPort: 8080, + Protocol: "tcp", + }, + }, + }, + }, + { + ID: "id_bar", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: "bar"}, + }, + Endpoint: 
swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + PublishMode: "ingress", + PublishedPort: 80, + TargetPort: 8080, + Protocol: "tcp", + }, + }, + }, + }, + } + info := map[string]ServiceListInfo{ + "id_baz": { + Mode: "global", + Replicas: "2/4", + }, + "id_bar": { + Mode: "replicated", + Replicas: "2/4", + }, + } + expectedJSONs := []map[string]interface{}{ + {"ID": "id_baz", "Name": "baz", "Mode": "global", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"}, + {"ID": "id_bar", "Name": "bar", "Mode": "replicated", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"}, + } + + out := bytes.NewBufferString("") + err := ServiceListWrite(Context{Format: "{{json .}}", Output: out}, services, info) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg) + } +} +func TestServiceContextWriteJSONField(t *testing.T) { + services := []swarm.Service{ + {ID: "id_baz", Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "baz"}}}, + {ID: "id_bar", Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "bar"}}}, + } + info := map[string]ServiceListInfo{ + "id_baz": { + Mode: "global", + Replicas: "2/4", + }, + "id_bar": { + Mode: "replicated", + Replicas: "2/4", + }, + } + out := bytes.NewBufferString("") + err := ServiceListWrite(Context{Format: "{{json .Name}}", Output: out}, services, info) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(services[i].Spec.Name, s), msg) + } +} + +func TestServiceContext_Ports(t *testing.T) { + c := serviceContext{ + service: swarm.Service{ + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 81, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 80, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 95, + PublishedPort: 95, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 90, + PublishedPort: 90, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 91, + PublishedPort: 91, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 92, + PublishedPort: 92, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 93, + PublishedPort: 93, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 94, + PublishedPort: 94, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 95, + PublishedPort: 95, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 90, + PublishedPort: 90, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 96, + PublishedPort: 96, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 91, + PublishedPort: 91, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 92, + PublishedPort: 92, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 93, + PublishedPort: 93, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 94, + PublishedPort: 94, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 60, + PublishedPort: 60, + PublishMode: 
"ingress", + }, + { + Protocol: "tcp", + TargetPort: 61, + PublishedPort: 61, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 61, + PublishedPort: 62, + PublishMode: "ingress", + }, + { + Protocol: "sctp", + TargetPort: 97, + PublishedPort: 97, + PublishMode: "ingress", + }, + { + Protocol: "sctp", + TargetPort: 98, + PublishedPort: 98, + PublishMode: "ingress", + }, + }, + }, + }, + } + + assert.Check(t, is.Equal("*:97-98->97-98/sctp, *:60-61->60-61/tcp, *:62->61/tcp, *:80-81->80/tcp, *:90-95->90-95/tcp, *:90-96->90-96/udp", c.Ports())) +} diff --git a/cli/cli/command/formatter/stack.go b/cli/cli/command/formatter/stack.go new file mode 100644 index 00000000..965eaf60 --- /dev/null +++ b/cli/cli/command/formatter/stack.go @@ -0,0 +1,77 @@ +package formatter + +import ( + "strconv" +) + +const ( + // KubernetesStackTableFormat is the default Kubernetes stack format + KubernetesStackTableFormat = "table {{.Name}}\t{{.Services}}\t{{.Orchestrator}}\t{{.Namespace}}" + // SwarmStackTableFormat is the default Swarm stack format + SwarmStackTableFormat = "table {{.Name}}\t{{.Services}}\t{{.Orchestrator}}" + + stackServicesHeader = "SERVICES" + stackOrchestrastorHeader = "ORCHESTRATOR" + stackNamespaceHeader = "NAMESPACE" +) + +// Stack contains deployed stack information. +type Stack struct { + // Name is the name of the stack + Name string + // Services is the number of the services + Services int + // Orchestrator is the platform where the stack is deployed + Orchestrator string + // Namespace is the Kubernetes namespace assigned to the stack + Namespace string +} + +// StackWrite writes formatted stacks using the Context +func StackWrite(ctx Context, stacks []*Stack) error { + render := func(format func(subContext subContext) error) error { + for _, stack := range stacks { + if err := format(&stackContext{s: stack}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newStackContext(), render) +} + +type stackContext struct { + HeaderContext + s *Stack +} + +func newStackContext() *stackContext { + stackCtx := stackContext{} + stackCtx.header = map[string]string{ + "Name": nameHeader, + "Services": stackServicesHeader, + "Orchestrator": stackOrchestrastorHeader, + "Namespace": stackNamespaceHeader, + } + return &stackCtx +} + +func (s *stackContext) MarshalJSON() ([]byte, error) { + return marshalJSON(s) +} + +func (s *stackContext) Name() string { + return s.s.Name +} + +func (s *stackContext) Services() string { + return strconv.Itoa(s.s.Services) +} + +func (s *stackContext) Orchestrator() string { + return s.s.Orchestrator +} + +func (s *stackContext) Namespace() string { + return s.s.Namespace +} diff --git a/cli/cli/command/formatter/stack_test.go b/cli/cli/command/formatter/stack_test.go new file mode 100644 index 00000000..44a08406 --- /dev/null +++ b/cli/cli/command/formatter/stack_test.go @@ -0,0 +1,73 @@ +package formatter + +import ( + "bytes" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestStackContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: Format(SwarmStackTableFormat)}, + `NAME SERVICES ORCHESTRATOR +baz 2 orchestrator1 +bar 1 orchestrator2 +`, + }, + // 
Kubernetes table format adds Namespace column + { + Context{Format: Format(KubernetesStackTableFormat)}, + `NAME SERVICES ORCHESTRATOR NAMESPACE +baz 2 orchestrator1 namespace1 +bar 1 orchestrator2 namespace2 +`, + }, + { + Context{Format: Format("table {{.Name}}")}, + `NAME +baz +bar +`, + }, + // Custom Format + { + Context{Format: Format("{{.Name}}")}, + `baz +bar +`, + }, + } + + stacks := []*Stack{ + {Name: "baz", Services: 2, Orchestrator: "orchestrator1", Namespace: "namespace1"}, + {Name: "bar", Services: 1, Orchestrator: "orchestrator2", Namespace: "namespace2"}, + } + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + err := StackWrite(testcase.context, stacks) + if err != nil { + assert.Check(t, is.ErrorContains(err, testcase.expected)) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} diff --git a/cli/cli/command/formatter/stats.go b/cli/cli/command/formatter/stats.go new file mode 100644 index 00000000..0c210c6f --- /dev/null +++ b/cli/cli/command/formatter/stats.go @@ -0,0 +1,224 @@ +package formatter + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" +) + +const ( + winOSType = "windows" + defaultStatsTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" + winDefaultStatsTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" + + containerHeader = "CONTAINER" + cpuPercHeader = "CPU %" + netIOHeader = "NET I/O" + blockIOHeader = "BLOCK I/O" + memPercHeader = "MEM %" // Used only on Linux + winMemUseHeader = "PRIV WORKING SET" // Used only on Windows + memUseHeader = "MEM USAGE / LIMIT" // Used only on Linux + pidsHeader = "PIDS" // Used only on Linux +) + +// StatsEntry represents represents the statistics data collected from a container +type StatsEntry struct { + Container string + Name string + ID string + CPUPercentage float64 + Memory float64 // On Windows this is the private working set + MemoryLimit float64 // Not used on Windows + MemoryPercentage float64 // Not used on Windows + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + PidsCurrent uint64 // Not used on Windows + IsInvalid bool +} + +// ContainerStats represents an entity to store containers statistics synchronously +type ContainerStats struct { + mutex sync.Mutex + StatsEntry + err error +} + +// GetError returns the container statistics error. +// This is used to determine whether the statistics are valid or not +func (cs *ContainerStats) GetError() error { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.err +} + +// SetErrorAndReset zeroes all the container statistics and store the error. 
+// It is used when receiving time out error during statistics collecting to reduce lock overhead +func (cs *ContainerStats) SetErrorAndReset(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.CPUPercentage = 0 + cs.Memory = 0 + cs.MemoryPercentage = 0 + cs.MemoryLimit = 0 + cs.NetworkRx = 0 + cs.NetworkTx = 0 + cs.BlockRead = 0 + cs.BlockWrite = 0 + cs.PidsCurrent = 0 + cs.err = err + cs.IsInvalid = true +} + +// SetError sets container statistics error +func (cs *ContainerStats) SetError(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.err = err + if err != nil { + cs.IsInvalid = true + } +} + +// SetStatistics set the container statistics +func (cs *ContainerStats) SetStatistics(s StatsEntry) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + s.Container = cs.Container + cs.StatsEntry = s +} + +// GetStatistics returns container statistics with other meta data such as the container name +func (cs *ContainerStats) GetStatistics() StatsEntry { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.StatsEntry +} + +// NewStatsFormat returns a format for rendering an CStatsContext +func NewStatsFormat(source, osType string) Format { + if source == TableFormatKey { + if osType == winOSType { + return Format(winDefaultStatsTableFormat) + } + return Format(defaultStatsTableFormat) + } + return Format(source) +} + +// NewContainerStats returns a new ContainerStats entity and sets in it the given name +func NewContainerStats(container string) *ContainerStats { + return &ContainerStats{StatsEntry: StatsEntry{Container: container}} +} + +// ContainerStatsWrite renders the context for a list of containers statistics +func ContainerStatsWrite(ctx Context, containerStats []StatsEntry, osType string, trunc bool) error { + render := func(format func(subContext subContext) error) error { + for _, cstats := range containerStats { + containerStatsCtx := &containerStatsContext{ + s: cstats, + os: osType, + trunc: trunc, + } + if err := format(containerStatsCtx); err != nil { + return err + } + } + return nil + } + memUsage := memUseHeader + if osType == winOSType { + memUsage = winMemUseHeader + } + containerStatsCtx := containerStatsContext{} + containerStatsCtx.header = map[string]string{ + "Container": containerHeader, + "Name": nameHeader, + "ID": containerIDHeader, + "CPUPerc": cpuPercHeader, + "MemUsage": memUsage, + "MemPerc": memPercHeader, + "NetIO": netIOHeader, + "BlockIO": blockIOHeader, + "PIDs": pidsHeader, + } + containerStatsCtx.os = osType + return ctx.Write(&containerStatsCtx, render) +} + +type containerStatsContext struct { + HeaderContext + s StatsEntry + os string + trunc bool +} + +func (c *containerStatsContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *containerStatsContext) Container() string { + return c.s.Container +} + +func (c *containerStatsContext) Name() string { + if len(c.s.Name) > 1 { + return c.s.Name[1:] + } + return "--" +} + +func (c *containerStatsContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.s.ID) + } + return c.s.ID +} + +func (c *containerStatsContext) CPUPerc() string { + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.CPUPercentage) +} + +func (c *containerStatsContext) MemUsage() string { + if c.s.IsInvalid { + return fmt.Sprintf("-- / --") + } + if c.os == winOSType { + return units.BytesSize(c.s.Memory) + } + return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit)) +} + +func (c *containerStatsContext) MemPerc() 
string { + if c.s.IsInvalid || c.os == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage) +} + +func (c *containerStatsContext) NetIO() string { + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3)) +} + +func (c *containerStatsContext) BlockIO() string { + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), units.HumanSizeWithPrecision(c.s.BlockWrite, 3)) +} + +func (c *containerStatsContext) PIDs() string { + if c.s.IsInvalid || c.os == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%d", c.s.PidsCurrent) +} diff --git a/cli/cli/command/formatter/stats_test.go b/cli/cli/command/formatter/stats_test.go new file mode 100644 index 00000000..3325dd9f --- /dev/null +++ b/cli/cli/command/formatter/stats_test.go @@ -0,0 +1,301 @@ +package formatter + +import ( + "bytes" + "testing" + + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestContainerStatsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + + var ctx containerStatsContext + tt := []struct { + stats StatsEntry + osType string + expValue string + expHeader string + call func() string + }{ + {StatsEntry{Container: containerID}, "", containerID, containerHeader, ctx.Container}, + {StatsEntry{CPUPercentage: 5.5}, "", "5.50%", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{CPUPercentage: 5.5, IsInvalid: true}, "", "--", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3}, "", "0.31B / 12.3B", netIOHeader, ctx.NetIO}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3, IsInvalid: true}, "", "--", netIOHeader, ctx.NetIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3}, "", "0.1B / 2.3B", blockIOHeader, ctx.BlockIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3, IsInvalid: true}, "", "--", blockIOHeader, ctx.BlockIO}, + {StatsEntry{MemoryPercentage: 10.2}, "", "10.20%", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2, IsInvalid: true}, "", "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2}, "windows", "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{Memory: 24, MemoryLimit: 30}, "", "24B / 30B", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30, IsInvalid: true}, "", "-- / --", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30}, "windows", "24B", winMemUseHeader, ctx.MemUsage}, + {StatsEntry{PidsCurrent: 10}, "", "10", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10, IsInvalid: true}, "", "--", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10}, "windows", "--", pidsHeader, ctx.PIDs}, + } + + for _, te := range tt { + ctx = containerStatsContext{s: te.stats, os: te.osType} + if v := te.call(); v != te.expValue { + t.Fatalf("Expected %q, got %q", te.expValue, v) + } + } +} + +func TestContainerStatsContextWrite(t *testing.T) { + tt := []struct { + context Context + expected string + }{ + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + { + Context{Format: "table {{.MemUsage}}"}, + `MEM USAGE / LIMIT +20B / 20B +-- / -- +`, + }, + { + Context{Format: "{{.Container}} {{.ID}} {{.Name}}"}, + 
`container1 abcdef foo +container2 -- +`, + }, + { + Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + ID: "abcdef", + Name: "/foo", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + }, + } + var out bytes.Buffer + te.context.Output = &out + err := ContainerStatsWrite(te.context, stats, "linux", false) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Check(t, is.Equal(te.expected, out.String())) + } + } +} + +func TestContainerStatsContextWriteWindows(t *testing.T) { + tt := []struct { + context Context + expected string + }{ + { + Context{Format: "table {{.MemUsage}}"}, + `PRIV WORKING SET +20B +-- / -- +`, + }, + { + Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + { + Context{Format: "{{.Container}} {{.MemPerc}} {{.PIDs}}"}, + `container1 -- -- +container2 -- -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + }, + } + var out bytes.Buffer + te.context.Output = &out + err := ContainerStatsWrite(te.context, stats, "windows", false) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Check(t, is.Equal(te.expected, out.String())) + } + } +} + +func TestContainerStatsContextWriteWithNoStats(t *testing.T) { + var out bytes.Buffer + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Container}}", + Output: &out, + }, + "", + }, + { + Context{ + Format: "table {{.Container}}", + Output: &out, + }, + "CONTAINER\n", + }, + { + Context{ + Format: "table {{.Container}}\t{{.CPUPerc}}", + Output: &out, + }, + "CONTAINER CPU %\n", + }, + } + + for _, context := range contexts { + ContainerStatsWrite(context.context, []StatsEntry{}, "linux", false) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} + +func TestContainerStatsContextWriteWithNoStatsWindows(t *testing.T) { + var out bytes.Buffer + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Container}}", + Output: &out, + }, + "", + }, + { + Context{ + Format: "table {{.Container}}\t{{.MemUsage}}", + Output: &out, + }, + "CONTAINER PRIV WORKING SET\n", + }, + { + Context{ + Format: "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", + Output: &out, + }, + "CONTAINER CPU % PRIV WORKING SET\n", + }, + } + + for _, context := range contexts { + ContainerStatsWrite(context.context, []StatsEntry{}, "windows", false) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} + +func TestContainerStatsContextWriteTrunc(t *testing.T) { + var out bytes.Buffer + + 
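+	// Editor's note (comment added for clarity): each case feeds the same
+	// 64-character container ID through ContainerStatsWrite; with trunc=false
+	// the full ID is expected in the output, and with trunc=true only the
+	// 12-character prefix produced by stringid.TruncateID.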
contexts := []struct { + context Context + trunc bool + expected string + }{ + { + Context{ + Format: "{{.ID}}", + Output: &out, + }, + false, + "b95a83497c9161c9b444e3d70e1a9dfba0c1840d41720e146a95a08ebf938afc\n", + }, + { + Context{ + Format: "{{.ID}}", + Output: &out, + }, + true, + "b95a83497c91\n", + }, + } + + for _, context := range contexts { + ContainerStatsWrite(context.context, []StatsEntry{{ID: "b95a83497c9161c9b444e3d70e1a9dfba0c1840d41720e146a95a08ebf938afc"}}, "linux", context.trunc) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} diff --git a/cli/cli/command/formatter/task.go b/cli/cli/command/formatter/task.go new file mode 100644 index 00000000..6172b320 --- /dev/null +++ b/cli/cli/command/formatter/task.go @@ -0,0 +1,150 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +const ( + defaultTaskTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Image}}\t{{.Node}}\t{{.DesiredState}}\t{{.CurrentState}}\t{{.Error}}\t{{.Ports}}" + + nodeHeader = "NODE" + taskIDHeader = "ID" + desiredStateHeader = "DESIRED STATE" + currentStateHeader = "CURRENT STATE" + errorHeader = "ERROR" + + maxErrLength = 30 +) + +// NewTaskFormat returns a Format for rendering using a task Context +func NewTaskFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultTaskTableFormat + case RawFormatKey: + if quiet { + return `id: {{.ID}}` + } + return `id: {{.ID}}\nname: {{.Name}}\nimage: {{.Image}}\nnode: {{.Node}}\ndesired_state: {{.DesiredState}}\ncurrent_state: {{.CurrentState}}\nerror: {{.Error}}\nports: {{.Ports}}\n` + } + return Format(source) +} + +// TaskWrite writes the context +func TaskWrite(ctx Context, tasks []swarm.Task, names map[string]string, nodes map[string]string) error { + render := func(format func(subContext subContext) error) error { + for _, task := range tasks { + taskCtx := &taskContext{trunc: ctx.Trunc, task: task, name: names[task.ID], node: nodes[task.ID]} + if err := format(taskCtx); err != nil { + return err + } + } + return nil + } + taskCtx := taskContext{} + taskCtx.header = taskHeaderContext{ + "ID": taskIDHeader, + "Name": nameHeader, + "Image": imageHeader, + "Node": nodeHeader, + "DesiredState": desiredStateHeader, + "CurrentState": currentStateHeader, + "Error": errorHeader, + "Ports": portsHeader, + } + return ctx.Write(&taskCtx, render) +} + +type taskHeaderContext map[string]string + +type taskContext struct { + HeaderContext + trunc bool + task swarm.Task + name string + node string +} + +func (c *taskContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *taskContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.task.ID) + } + return c.task.ID +} + +func (c *taskContext) Name() string { + return c.name +} + +func (c *taskContext) Image() string { + image := c.task.Spec.ContainerSpec.Image + if c.trunc { + ref, err := reference.ParseNormalizedNamed(image) + if err == nil { + // update image string for display, (strips any digest) + if nt, ok := ref.(reference.NamedTagged); ok { + if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { + image = reference.FamiliarString(namedTagged) + } + } + } + } + return image +} + +func (c *taskContext) 
Node() string { + return c.node +} + +func (c *taskContext) DesiredState() string { + return command.PrettyPrint(c.task.DesiredState) +} + +func (c *taskContext) CurrentState() string { + return fmt.Sprintf("%s %s ago", + command.PrettyPrint(c.task.Status.State), + strings.ToLower(units.HumanDuration(time.Since(c.task.Status.Timestamp))), + ) +} + +func (c *taskContext) Error() string { + // Trim and quote the error message. + taskErr := c.task.Status.Err + if c.trunc && len(taskErr) > maxErrLength { + taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1]) + } + if len(taskErr) > 0 { + taskErr = fmt.Sprintf("\"%s\"", taskErr) + } + return taskErr +} + +func (c *taskContext) Ports() string { + if len(c.task.Status.PortStatus.Ports) == 0 { + return "" + } + ports := []string{} + for _, pConfig := range c.task.Status.PortStatus.Ports { + ports = append(ports, fmt.Sprintf("*:%d->%d/%s", + pConfig.PublishedPort, + pConfig.TargetPort, + pConfig.Protocol, + )) + } + return strings.Join(ports, ",") +} diff --git a/cli/cli/command/formatter/task_test.go b/cli/cli/command/formatter/task_test.go new file mode 100644 index 00000000..84bdbfeb --- /dev/null +++ b/cli/cli/command/formatter/task_test.go @@ -0,0 +1,106 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestTaskContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + { + Context{Format: NewTaskFormat("table", true)}, + `taskID1 +taskID2 +`, + }, + { + Context{Format: NewTaskFormat("table {{.Name}}\t{{.Node}}\t{{.Ports}}", false)}, + string(golden.Get(t, "task-context-write-table-custom.golden")), + }, + { + Context{Format: NewTaskFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewTaskFormat("raw", true)}, + `id: taskID1 +id: taskID2 +`, + }, + { + Context{Format: NewTaskFormat("{{.Name}} {{.Node}}", false)}, + `foobar_baz foo1 +foobar_bar foo2 +`, + }, + } + + for _, testcase := range cases { + tasks := []swarm.Task{ + {ID: "taskID1"}, + {ID: "taskID2"}, + } + names := map[string]string{ + "taskID1": "foobar_baz", + "taskID2": "foobar_bar", + } + nodes := map[string]string{ + "taskID1": "foo1", + "taskID2": "foo2", + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := TaskWrite(testcase.context, tasks, names, nodes) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestTaskContextWriteJSONField(t *testing.T) { + tasks := []swarm.Task{ + {ID: "taskID1"}, + {ID: "taskID2"}, + } + names := map[string]string{ + "taskID1": "foobar_baz", + "taskID2": "foobar_bar", + } + out := bytes.NewBufferString("") + err := TaskWrite(Context{Format: "{{json .ID}}", Output: out}, tasks, names, map[string]string{}) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal(tasks[i].ID, s)) + } +} diff --git 
a/cli/cli/command/formatter/testdata/container-context-write-special-headers.golden b/cli/cli/command/formatter/testdata/container-context-write-special-headers.golden new file mode 100644 index 00000000..3fe21c8e --- /dev/null +++ b/cli/cli/command/formatter/testdata/container-context-write-special-headers.golden @@ -0,0 +1,3 @@ +CONTAINER ID IMAGE CREATED/STATUS/ PORTS .NAMES STATUS +conta "ubuntu" 24 hours ago//.FOOBAR_BAZ +conta "ubuntu" 24 hours ago//.FOOBAR_BAR diff --git a/cli/cli/command/formatter/testdata/disk-usage-context-write-custom.golden b/cli/cli/command/formatter/testdata/disk-usage-context-write-custom.golden new file mode 100644 index 00000000..6f2d9a9b --- /dev/null +++ b/cli/cli/command/formatter/testdata/disk-usage-context-write-custom.golden @@ -0,0 +1,5 @@ +TYPE ACTIVE +Images 0 +Containers 0 +Local Volumes 0 +Build Cache 0 diff --git a/cli/cli/command/formatter/testdata/disk-usage-raw-format.golden b/cli/cli/command/formatter/testdata/disk-usage-raw-format.golden new file mode 100644 index 00000000..7b9d11eb --- /dev/null +++ b/cli/cli/command/formatter/testdata/disk-usage-raw-format.golden @@ -0,0 +1,24 @@ +type: Images +total: 0 +active: 0 +size: 0B +reclaimable: 0B + +type: Containers +total: 0 +active: 0 +size: 0B +reclaimable: 0B + +type: Local Volumes +total: 0 +active: 0 +size: 0B +reclaimable: 0B + +type: Build Cache +total: 0 +active: 0 +size: 0B +reclaimable: 0B + diff --git a/cli/cli/command/formatter/testdata/search-context-write-stars-table.golden b/cli/cli/command/formatter/testdata/search-context-write-stars-table.golden new file mode 100644 index 00000000..1a66b429 --- /dev/null +++ b/cli/cli/command/formatter/testdata/search-context-write-stars-table.golden @@ -0,0 +1,2 @@ +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +result1 Official build 5000 [OK] diff --git a/cli/cli/command/formatter/testdata/search-context-write-table.golden b/cli/cli/command/formatter/testdata/search-context-write-table.golden new file mode 100644 index 00000000..72784fd0 --- /dev/null +++ b/cli/cli/command/formatter/testdata/search-context-write-table.golden @@ -0,0 +1,3 @@ +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +result1 Official build 5000 [OK] +result2 Not official 5 [OK] diff --git a/cli/cli/command/formatter/testdata/service-context-write-raw.golden b/cli/cli/command/formatter/testdata/service-context-write-raw.golden new file mode 100644 index 00000000..d62b9a24 --- /dev/null +++ b/cli/cli/command/formatter/testdata/service-context-write-raw.golden @@ -0,0 +1,14 @@ +id: id_baz +name: baz +mode: global +replicas: 2/4 +image: +ports: *:80->8080/tcp + +id: id_bar +name: bar +mode: replicated +replicas: 2/4 +image: +ports: *:80->8080/tcp + diff --git a/cli/cli/command/formatter/testdata/task-context-write-table-custom.golden b/cli/cli/command/formatter/testdata/task-context-write-table-custom.golden new file mode 100644 index 00000000..0f931ea9 --- /dev/null +++ b/cli/cli/command/formatter/testdata/task-context-write-table-custom.golden @@ -0,0 +1,3 @@ +NAME NODE PORTS +foobar_baz foo1 +foobar_bar foo2 diff --git a/cli/cli/command/formatter/trust.go b/cli/cli/command/formatter/trust.go new file mode 100644 index 00000000..c16df2e1 --- /dev/null +++ b/cli/cli/command/formatter/trust.go @@ -0,0 +1,150 @@ +package formatter + +import ( + "sort" + "strings" + + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultTrustTagTableFormat = "table {{.SignedTag}}\t{{.Digest}}\t{{.Signers}}" + signedTagNameHeader = "SIGNED TAG" + trustedDigestHeader = "DIGEST" + 
signersHeader = "SIGNERS" + defaultSignerInfoTableFormat = "table {{.Signer}}\t{{.Keys}}" + signerNameHeader = "SIGNER" + keysHeader = "KEYS" +) + +// SignedTagInfo represents all formatted information needed to describe a signed tag: +// Name: name of the signed tag +// Digest: hex encoded digest of the contents +// Signers: list of entities who signed the tag +type SignedTagInfo struct { + Name string + Digest string + Signers []string +} + +// SignerInfo represents all formatted information needed to describe a signer: +// Name: name of the signer role +// Keys: the keys associated with the signer +type SignerInfo struct { + Name string + Keys []string +} + +// NewTrustTagFormat returns a Format for rendering using a trusted tag Context +func NewTrustTagFormat() Format { + return defaultTrustTagTableFormat +} + +// NewSignerInfoFormat returns a Format for rendering a signer role info Context +func NewSignerInfoFormat() Format { + return defaultSignerInfoTableFormat +} + +// TrustTagWrite writes the context +func TrustTagWrite(ctx Context, signedTagInfoList []SignedTagInfo) error { + render := func(format func(subContext subContext) error) error { + for _, signedTag := range signedTagInfoList { + if err := format(&trustTagContext{s: signedTag}); err != nil { + return err + } + } + return nil + } + trustTagCtx := trustTagContext{} + trustTagCtx.header = trustTagHeaderContext{ + "SignedTag": signedTagNameHeader, + "Digest": trustedDigestHeader, + "Signers": signersHeader, + } + return ctx.Write(&trustTagCtx, render) +} + +type trustTagHeaderContext map[string]string + +type trustTagContext struct { + HeaderContext + s SignedTagInfo +} + +// SignedTag returns the name of the signed tag +func (c *trustTagContext) SignedTag() string { + return c.s.Name +} + +// Digest returns the hex encoded digest associated with this signed tag +func (c *trustTagContext) Digest() string { + return c.s.Digest +} + +// Signers returns the sorted list of entities who signed this tag +func (c *trustTagContext) Signers() string { + sort.Strings(c.s.Signers) + return strings.Join(c.s.Signers, ", ") +} + +// SignerInfoWrite writes the context +func SignerInfoWrite(ctx Context, signerInfoList []SignerInfo) error { + render := func(format func(subContext subContext) error) error { + for _, signerInfo := range signerInfoList { + if err := format(&signerInfoContext{ + trunc: ctx.Trunc, + s: signerInfo, + }); err != nil { + return err + } + } + return nil + } + signerInfoCtx := signerInfoContext{} + signerInfoCtx.header = signerInfoHeaderContext{ + "Signer": signerNameHeader, + "Keys": keysHeader, + } + return ctx.Write(&signerInfoCtx, render) +} + +type signerInfoHeaderContext map[string]string + +type signerInfoContext struct { + HeaderContext + trunc bool + s SignerInfo +} + +// Keys returns the sorted list of keys associated with the signer +func (c *signerInfoContext) Keys() string { + sort.Strings(c.s.Keys) + truncatedKeys := []string{} + if c.trunc { + for _, keyID := range c.s.Keys { + truncatedKeys = append(truncatedKeys, stringid.TruncateID(keyID)) + } + return strings.Join(truncatedKeys, ", ") + } + return strings.Join(c.s.Keys, ", ") +} + +// Signer returns the name of the signer +func (c *signerInfoContext) Signer() string { + return c.s.Name +} + +// SignerInfoList helps sort []SignerInfo by signer names +type SignerInfoList []SignerInfo + +func (signerInfoComp SignerInfoList) Len() int { + return len(signerInfoComp) +} + +func (signerInfoComp SignerInfoList) Less(i, j int) bool { + return 
signerInfoComp[i].Name < signerInfoComp[j].Name +} + +func (signerInfoComp SignerInfoList) Swap(i, j int) { + signerInfoComp[i], signerInfoComp[j] = signerInfoComp[j], signerInfoComp[i] +} diff --git a/cli/cli/command/formatter/trust_test.go b/cli/cli/command/formatter/trust_test.go new file mode 100644 index 00000000..7e1d894f --- /dev/null +++ b/cli/cli/command/formatter/trust_test.go @@ -0,0 +1,239 @@ +package formatter + +import ( + "bytes" + "testing" + + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustTag(t *testing.T) { + digest := stringid.GenerateRandomID() + trustedTag := "tag" + + var ctx trustTagContext + + cases := []struct { + trustTagCtx trustTagContext + expValue string + call func() string + }{ + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: nil, + }, + }, + digest, + ctx.Digest, + }, + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: nil, + }, + }, + trustedTag, + ctx.SignedTag, + }, + // Empty signers makes a row with empty string + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: nil, + }, + }, + "", + ctx.Signers, + }, + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: []string{"alice", "bob", "claire"}, + }, + }, + "alice, bob, claire", + ctx.Signers, + }, + // alphabetic signing on Signers + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: []string{"claire", "bob", "alice"}, + }, + }, + "alice, bob, claire", + ctx.Signers, + }, + } + + for _, c := range cases { + ctx = c.trustTagCtx + v := c.call() + if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestTrustTagContextWrite(t *testing.T) { + + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{ + Format: "{{InvalidFunction}}", + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{ + Format: "{{nil}}", + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + Context{ + Format: NewTrustTagFormat(), + }, + `SIGNED TAG DIGEST SIGNERS +tag1 deadbeef alice +tag2 aaaaaaaa alice, bob +tag3 bbbbbbbb +`, + }, + } + + for _, testcase := range cases { + signedTags := []SignedTagInfo{ + {Name: "tag1", Digest: "deadbeef", Signers: []string{"alice"}}, + {Name: "tag2", Digest: "aaaaaaaa", Signers: []string{"alice", "bob"}}, + {Name: "tag3", Digest: "bbbbbbbb", Signers: []string{}}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := TrustTagWrite(testcase.context, signedTags) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +// With no trust data, the TrustTagWrite will print an empty table: +// it's up to the caller to decide whether or not to print this versus an error +func TestTrustTagContextEmptyWrite(t *testing.T) { + + emptyCase := struct { + context Context + expected string + }{ + Context{ + Format: NewTrustTagFormat(), + }, + `SIGNED TAG DIGEST SIGNERS +`, + } + + emptySignedTags := []SignedTagInfo{} + out := bytes.NewBufferString("") + emptyCase.context.Output = out + err := TrustTagWrite(emptyCase.context, emptySignedTags) + assert.NilError(t, err) + assert.Check(t, is.Equal(emptyCase.expected, out.String())) +} + +func TestSignerInfoContextEmptyWrite(t 
*testing.T) { + emptyCase := struct { + context Context + expected string + }{ + Context{ + Format: NewSignerInfoFormat(), + }, + `SIGNER KEYS +`, + } + emptySignerInfo := []SignerInfo{} + out := bytes.NewBufferString("") + emptyCase.context.Output = out + err := SignerInfoWrite(emptyCase.context, emptySignerInfo) + assert.NilError(t, err) + assert.Check(t, is.Equal(emptyCase.expected, out.String())) +} + +func TestSignerInfoContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{ + Format: "{{InvalidFunction}}", + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{ + Format: "{{nil}}", + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + Context{ + Format: NewSignerInfoFormat(), + Trunc: true, + }, + `SIGNER KEYS +alice key11, key12 +bob key21 +eve foobarbazqux, key31, key32 +`, + }, + // No truncation + { + Context{ + Format: NewSignerInfoFormat(), + }, + `SIGNER KEYS +alice key11, key12 +bob key21 +eve foobarbazquxquux, key31, key32 +`, + }, + } + + for _, testcase := range cases { + signerInfo := SignerInfoList{ + {Name: "alice", Keys: []string{"key11", "key12"}}, + {Name: "bob", Keys: []string{"key21"}}, + {Name: "eve", Keys: []string{"key31", "key32", "foobarbazquxquux"}}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := SignerInfoWrite(testcase.context, signerInfo) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} diff --git a/cli/cli/command/formatter/volume.go b/cli/cli/command/formatter/volume.go new file mode 100644 index 00000000..342f2fb9 --- /dev/null +++ b/cli/cli/command/formatter/volume.go @@ -0,0 +1,131 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + +const ( + defaultVolumeQuietFormat = "{{.Name}}" + defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}" + + volumeNameHeader = "VOLUME NAME" + mountpointHeader = "MOUNTPOINT" + linksHeader = "LINKS" + // Status header ? 
+) + +// NewVolumeFormat returns a format for use with a volume Context +func NewVolumeFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultVolumeQuietFormat + } + return defaultVolumeTableFormat + case RawFormatKey: + if quiet { + return `name: {{.Name}}` + } + return `name: {{.Name}}\ndriver: {{.Driver}}\n` + } + return Format(source) +} + +// VolumeWrite writes formatted volumes using the Context +func VolumeWrite(ctx Context, volumes []*types.Volume) error { + render := func(format func(subContext subContext) error) error { + for _, volume := range volumes { + if err := format(&volumeContext{v: *volume}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newVolumeContext(), render) +} + +type volumeHeaderContext map[string]string + +func (c volumeHeaderContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + return h +} + +type volumeContext struct { + HeaderContext + v types.Volume +} + +func newVolumeContext() *volumeContext { + volumeCtx := volumeContext{} + volumeCtx.header = volumeHeaderContext{ + "Name": volumeNameHeader, + "Driver": driverHeader, + "Scope": scopeHeader, + "Mountpoint": mountpointHeader, + "Labels": labelsHeader, + "Links": linksHeader, + "Size": sizeHeader, + } + return &volumeCtx +} + +func (c *volumeContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *volumeContext) Name() string { + return c.v.Name +} + +func (c *volumeContext) Driver() string { + return c.v.Driver +} + +func (c *volumeContext) Scope() string { + return c.v.Scope +} + +func (c *volumeContext) Mountpoint() string { + return c.v.Mountpoint +} + +func (c *volumeContext) Labels() string { + if c.v.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.v.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *volumeContext) Label(name string) string { + if c.v.Labels == nil { + return "" + } + return c.v.Labels[name] +} + +func (c *volumeContext) Links() string { + if c.v.UsageData == nil { + return "N/A" + } + return fmt.Sprintf("%d", c.v.UsageData.RefCount) +} + +func (c *volumeContext) Size() string { + if c.v.UsageData == nil { + return "N/A" + } + return units.HumanSize(float64(c.v.UsageData.Size)) +} diff --git a/cli/cli/command/formatter/volume_test.go b/cli/cli/command/formatter/volume_test.go new file mode 100644 index 00000000..43c6061d --- /dev/null +++ b/cli/cli/command/formatter/volume_test.go @@ -0,0 +1,183 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestVolumeContext(t *testing.T) { + volumeName := stringid.GenerateRandomID() + + var ctx volumeContext + cases := []struct { + volumeCtx volumeContext + expValue string + call func() string + }{ + {volumeContext{ + v: types.Volume{Name: volumeName}, + }, volumeName, ctx.Name}, + {volumeContext{ + v: types.Volume{Driver: "driver_name"}, + }, "driver_name", ctx.Driver}, + {volumeContext{ + v: types.Volume{Scope: "local"}, + }, "local", ctx.Scope}, + {volumeContext{ + v: types.Volume{Mountpoint: "mountpoint"}, + }, "mountpoint", ctx.Mountpoint}, + {volumeContext{ + v: types.Volume{}, + }, "", ctx.Labels}, + {volumeContext{ + v: types.Volume{Labels: 
map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", ctx.Labels}, + } + + for _, c := range cases { + ctx = c.volumeCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestVolumeContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewVolumeFormat("table", false)}, + `DRIVER VOLUME NAME +foo foobar_baz +bar foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table", true)}, + `foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", false)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", true)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewVolumeFormat("raw", false)}, + `name: foobar_baz +driver: foo + +name: foobar_bar +driver: bar + +`, + }, + { + Context{Format: NewVolumeFormat("raw", true)}, + `name: foobar_baz +name: foobar_bar +`, + }, + // Custom Format + { + Context{Format: NewVolumeFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + volumes := []*types.Volume{ + {Name: "foobar_baz", Driver: "foo"}, + {Name: "foobar_bar", Driver: "bar"}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := VolumeWrite(testcase.context, volumes) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestVolumeContextWriteJSON(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"}, + {"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg) + } +} + +func TestVolumeContextWriteJSONField(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .Name}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(volumes[i].Name, s), msg) + } +} diff --git a/cli/cli/command/idresolver/client_test.go b/cli/cli/command/idresolver/client_test.go new 
file mode 100644 index 00000000..c53cfc6a --- /dev/null +++ b/cli/cli/command/idresolver/client_test.go @@ -0,0 +1,29 @@ +package idresolver + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + nodeInspectFunc func(string) (swarm.Node, []byte, error) + serviceInspectFunc func(string) (swarm.Service, []byte, error) +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if cli.nodeInspectFunc != nil { + return cli.nodeInspectFunc(nodeID) + } + return swarm.Node{}, []byte{}, nil +} + +func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if cli.serviceInspectFunc != nil { + return cli.serviceInspectFunc(serviceID) + } + return swarm.Service{}, []byte{}, nil +} diff --git a/cli/cli/command/idresolver/idresolver.go b/cli/cli/command/idresolver/idresolver.go new file mode 100644 index 00000000..3d1f71a0 --- /dev/null +++ b/cli/cli/command/idresolver/idresolver.go @@ -0,0 +1,70 @@ +package idresolver + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/pkg/errors" +) + +// IDResolver provides ID to Name resolution. +type IDResolver struct { + client client.APIClient + noResolve bool + cache map[string]string +} + +// New creates a new IDResolver. +func New(client client.APIClient, noResolve bool) *IDResolver { + return &IDResolver{ + client: client, + noResolve: noResolve, + cache: make(map[string]string), + } +} + +func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { + switch t.(type) { + case swarm.Node: + node, _, err := r.client.NodeInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + if node.Spec.Annotations.Name != "" { + return node.Spec.Annotations.Name, nil + } + if node.Description.Hostname != "" { + return node.Description.Hostname, nil + } + return id, nil + case swarm.Service: + service, _, err := r.client.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{}) + if err != nil { + return id, nil + } + return service.Spec.Annotations.Name, nil + default: + return "", errors.Errorf("unsupported type") + } + +} + +// Resolve will attempt to resolve an ID to a Name by querying the manager. +// Results are stored into a cache. +// If the `-n` flag is used in the command-line, resolution is disabled. +func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { + if r.noResolve { + return id, nil + } + if name, ok := r.cache[id]; ok { + return name, nil + } + name, err := r.get(ctx, t, id) + if err != nil { + return "", err + } + r.cache[id] = name + return name, nil +} diff --git a/cli/cli/command/idresolver/idresolver_test.go b/cli/cli/command/idresolver/idresolver_test.go new file mode 100644 index 00000000..f667b106 --- /dev/null +++ b/cli/cli/command/idresolver/idresolver_test.go @@ -0,0 +1,146 @@ +package idresolver + +import ( + "testing" + + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + // Import builders to get the builder function as package function + "context" + + . 
"github.com/docker/cli/internal/test/builders" + "github.com/pkg/errors" +) + +func TestResolveError(t *testing.T) { + cli := &fakeClient{ + nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") + }, + } + + idResolver := New(cli, false) + _, err := idResolver.Resolve(context.Background(), struct{}{}, "nodeID") + + assert.Error(t, err, "unsupported type") +} + +func TestResolveWithNoResolveOption(t *testing.T) { + resolved := false + cli := &fakeClient{ + nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { + resolved = true + return swarm.Node{}, []byte{}, nil + }, + serviceInspectFunc: func(serviceID string) (swarm.Service, []byte, error) { + resolved = true + return swarm.Service{}, []byte{}, nil + }, + } + + idResolver := New(cli, true) + id, err := idResolver.Resolve(context.Background(), swarm.Node{}, "nodeID") + + assert.NilError(t, err) + assert.Check(t, is.Equal("nodeID", id)) + assert.Check(t, !resolved) +} + +func TestResolveWithCache(t *testing.T) { + inspectCounter := 0 + cli := &fakeClient{ + nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { + inspectCounter++ + return *Node(NodeName("node-foo")), []byte{}, nil + }, + } + + idResolver := New(cli, false) + + ctx := context.Background() + for i := 0; i < 2; i++ { + id, err := idResolver.Resolve(ctx, swarm.Node{}, "nodeID") + assert.NilError(t, err) + assert.Check(t, is.Equal("node-foo", id)) + } + + assert.Check(t, is.Equal(1, inspectCounter)) +} + +func TestResolveNode(t *testing.T) { + testCases := []struct { + nodeID string + nodeInspectFunc func(string) (swarm.Node, []byte, error) + expectedID string + }{ + { + nodeID: "nodeID", + nodeInspectFunc: func(string) (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") + }, + expectedID: "nodeID", + }, + { + nodeID: "nodeID", + nodeInspectFunc: func(string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-foo")), []byte{}, nil + }, + expectedID: "node-foo", + }, + { + nodeID: "nodeID", + nodeInspectFunc: func(string) (swarm.Node, []byte, error) { + return *Node(NodeName(""), Hostname("node-hostname")), []byte{}, nil + }, + expectedID: "node-hostname", + }, + } + + ctx := context.Background() + for _, tc := range testCases { + cli := &fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + } + idResolver := New(cli, false) + id, err := idResolver.Resolve(ctx, swarm.Node{}, tc.nodeID) + + assert.NilError(t, err) + assert.Check(t, is.Equal(tc.expectedID, id)) + } +} + +func TestResolveService(t *testing.T) { + testCases := []struct { + serviceID string + serviceInspectFunc func(string) (swarm.Service, []byte, error) + expectedID string + }{ + { + serviceID: "serviceID", + serviceInspectFunc: func(string) (swarm.Service, []byte, error) { + return swarm.Service{}, []byte{}, errors.Errorf("error inspecting service") + }, + expectedID: "serviceID", + }, + { + serviceID: "serviceID", + serviceInspectFunc: func(string) (swarm.Service, []byte, error) { + return *Service(ServiceName("service-foo")), []byte{}, nil + }, + expectedID: "service-foo", + }, + } + + ctx := context.Background() + for _, tc := range testCases { + cli := &fakeClient{ + serviceInspectFunc: tc.serviceInspectFunc, + } + idResolver := New(cli, false) + id, err := idResolver.Resolve(ctx, swarm.Service{}, tc.serviceID) + + assert.NilError(t, err) + assert.Check(t, is.Equal(tc.expectedID, id)) + } +} diff --git a/cli/cli/command/image/build.go 
b/cli/cli/command/image/build.go new file mode 100644 index 00000000..fc660fad --- /dev/null +++ b/cli/cli/command/image/build.go @@ -0,0 +1,627 @@ +package image + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image/build" + "github.com/docker/cli/opts" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + units "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var errStdinConflict = errors.New("invalid argument: can't use stdin for both build context and dockerfile") + +type buildOptions struct { + context string + dockerfileName string + tags opts.ListOpts + labels opts.ListOpts + buildArgs opts.ListOpts + extraHosts opts.ListOpts + ulimits *opts.UlimitOpt + memory opts.MemBytes + memorySwap opts.MemSwapBytes + shmSize opts.MemBytes + cpuShares int64 + cpuPeriod int64 + cpuQuota int64 + cpuSetCpus string + cpuSetMems string + cgroupParent string + isolation string + quiet bool + noCache bool + console opts.NullableBool + rm bool + forceRm bool + pull bool + cacheFrom []string + compress bool + securityOpt []string + networkMode string + squash bool + target string + imageIDFile string + stream bool + platform string + untrusted bool +} + +// dockerfileFromStdin returns true when the user specified that the Dockerfile +// should be read from stdin instead of a file +func (o buildOptions) dockerfileFromStdin() bool { + return o.dockerfileName == "-" +} + +// contextFromStdin returns true when the user specified that the build context +// should be read from stdin +func (o buildOptions) contextFromStdin() bool { + return o.context == "-" +} + +func newBuildOptions() buildOptions { + ulimits := make(map[string]*units.Ulimit) + return buildOptions{ + tags: opts.NewListOpts(validateTag), + buildArgs: opts.NewListOpts(opts.ValidateEnv), + ulimits: opts.NewUlimitOpt(&ulimits), + labels: opts.NewListOpts(opts.ValidateEnv), + extraHosts: opts.NewListOpts(opts.ValidateExtraHost), + } +} + +// NewBuildCommand creates a new `docker build` command +func NewBuildCommand(dockerCli command.Cli) *cobra.Command { + options := newBuildOptions() + + cmd := &cobra.Command{ + Use: "build [OPTIONS] PATH | URL | -", + Short: "Build an image from a Dockerfile", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.context = args[0] + return runBuild(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") + flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") + flags.Var(options.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flags.VarP(&options.memory, "memory", "m", "Memory limit") + flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable 
unlimited swap") + flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") + flags.Var(&options.labels, "label", "Set metadata for an image") + flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") + flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") + flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") + flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") + flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") + flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") + flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") + flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") + flags.SetAnnotation("network", "version", []string{"1.25"}) + flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") + flags.StringVar(&options.target, "target", "", "Set the target build stage to build.") + flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file") + + command.AddTrustVerificationFlags(flags, &options.untrusted, dockerCli.ContentTrustEnabled()) + command.AddPlatformFlag(flags, &options.platform) + + flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") + flags.SetAnnotation("squash", "experimental", nil) + flags.SetAnnotation("squash", "version", []string{"1.25"}) + + flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context") + flags.SetAnnotation("stream", "experimental", nil) + flags.SetAnnotation("stream", "version", []string{"1.31"}) + + flags.Var(&options.console, "console", "Show console output (with buildkit only) (true, false, auto)") + flags.SetAnnotation("console", "experimental", nil) + flags.SetAnnotation("console", "version", []string{"1.38"}) + return cmd +} + +// lastProgressOutput is the same as progress.Output except +// that it only output with the last update. It is used in +// non terminal scenarios to suppress verbose messages +type lastProgressOutput struct { + output progress.Output +} + +// WriteProgress formats progress information from a ProgressReader. 
+func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { + if !prog.LastUpdate { + return nil + } + + return out.output.WriteProgress(prog) +} + +// nolint: gocyclo +func runBuild(dockerCli command.Cli, options buildOptions) error { + if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" { + enableBuildkit, err := strconv.ParseBool(buildkitEnv) + if err != nil { + return errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value") + } + if enableBuildkit { + return runBuildBuildKit(dockerCli, options) + } + } + + var ( + buildCtx io.ReadCloser + dockerfileCtx io.ReadCloser + err error + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + remote string + ) + + if options.compress && options.stream { + return errors.New("--compress conflicts with --stream options") + } + + if options.dockerfileFromStdin() { + if options.contextFromStdin() { + return errStdinConflict + } + dockerfileCtx = dockerCli.In() + } + + specifiedContext := options.context + progBuff = dockerCli.Out() + buildBuff = dockerCli.Out() + if options.quiet { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + if options.imageIDFile != "" { + // Avoid leaving a stale file if we eventually fail + if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "Removing image ID file") + } + } + + switch { + case options.contextFromStdin(): + // buildCtx is tar archive. if stdin was dockerfile then it is wrapped + buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName) + case isLocalDir(specifiedContext): + contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName) + if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + // Dockerfile is outside of build-context; read the Dockerfile and pass it as dockerfileCtx + dockerfileCtx, err = os.Open(options.dockerfileName) + if err != nil { + return errors.Errorf("unable to open Dockerfile: %v", err) + } + defer dockerfileCtx.Close() + } + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName) + case urlutil.IsURL(specifiedContext): + buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) + default: + return errors.Errorf("unable to prepare context: path %q not found", specifiedContext) + } + + if err != nil { + if options.quiet && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(dockerCli.Err(), progBuff) + } + return errors.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + contextDir = tempDir + } + + // read from a directory into tar archive + if buildCtx == nil && !options.stream { + excludes, err := build.ReadDockerignore(contextDir) + if err != nil { + return err + } + + if err := build.ValidateContextDirectory(contextDir, excludes); err != nil { + return errors.Errorf("error checking context: '%s'.", err) + } + + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin()) + buildCtx, err = archive.TarWithOptions(contextDir, 
&archive.TarOptions{ + ExcludePatterns: excludes, + ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, + }) + if err != nil { + return err + } + } + + // replace Dockerfile if it was added from stdin or a file outside the build-context, and there is archive context + if dockerfileCtx != nil && buildCtx != nil { + buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx) + if err != nil { + return err + } + } + + // if streaming and Dockerfile was not from stdin then read from file + // to the same reader that is usually stdin + if options.stream && dockerfileCtx == nil { + dockerfileCtx, err = os.Open(relDockerfile) + if err != nil { + return errors.Wrapf(err, "failed to open %s", relDockerfile) + } + defer dockerfileCtx.Close() + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var resolvedTags []*resolvedTag + if !options.untrusted { + translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { + return TrustedReference(ctx, dockerCli, ref, nil) + } + // if there is a tar wrapper, the dockerfile needs to be replaced inside it + if buildCtx != nil { + // Wrap the tar archive to replace the Dockerfile entry with the rewritten + // Dockerfile which uses trusted pulls. + buildCtx = replaceDockerfileForContentTrust(ctx, buildCtx, relDockerfile, translator, &resolvedTags) + } else if dockerfileCtx != nil { + // if there was not archive context still do the possible replacements in Dockerfile + newDockerfile, _, err := rewriteDockerfileFromForContentTrust(ctx, dockerfileCtx, translator) + if err != nil { + return err + } + dockerfileCtx = ioutil.NopCloser(bytes.NewBuffer(newDockerfile)) + } + } + + if options.compress { + buildCtx, err = build.Compress(buildCtx) + if err != nil { + return err + } + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewProgressOutput(progBuff) + if !dockerCli.Out().IsTerminal() { + progressOutput = &lastProgressOutput{output: progressOutput} + } + + // if up to this point nothing has set the context then we must have another + // way for sending it(streaming) and set the context to the Dockerfile + if dockerfileCtx != nil && buildCtx == nil { + buildCtx = dockerfileCtx + } + + s, err := trySession(dockerCli, contextDir) + if err != nil { + return err + } + + var body io.Reader + if buildCtx != nil && !options.stream { + body = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") + } + + // add context stream to the session + if options.stream && s != nil { + syncDone := make(chan error) // used to signal first progress reporting completed. 
+ // progress would also send errors but don't need it here as errors + // are handled by session.Run() and ImageBuild() + if err := addDirToSession(s, contextDir, progressOutput, syncDone); err != nil { + return err + } + + buf := newBufferedWriter(syncDone, buildBuff) + defer func() { + select { + case <-buf.flushed: + case <-ctx.Done(): + } + }() + buildBuff = buf + + remote = clientSessionRemote + body = buildCtx + } + + configFile := dockerCli.ConfigFile() + authConfigs, _ := configFile.GetAllCredentials() + buildOptions := imageBuildOptions(dockerCli, options) + buildOptions.Version = types.BuilderV1 + buildOptions.Dockerfile = relDockerfile + buildOptions.AuthConfigs = authConfigs + buildOptions.RemoteContext = remote + + if s != nil { + go func() { + logrus.Debugf("running session: %v", s.ID()) + if err := s.Run(ctx, dockerCli.Client().DialSession); err != nil { + logrus.Error(err) + cancel() // cancel progress context + } + }() + buildOptions.SessionID = s.ID() + } + + response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) + if err != nil { + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s", progBuff) + } + cancel() + return err + } + defer response.Body.Close() + + imageID := "" + aux := func(msg jsonmessage.JSONMessage) { + var result types.BuildResult + if err := json.Unmarshal(*msg.Aux, &result); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err) + } else { + imageID = result.ID + } + } + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) + } + return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + return err + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { + fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+ + "image from Windows against a non-Windows Docker host. All files and "+ + "directories added to build context will have '-rwxr-xr-x' permissions. "+ + "It is recommended to double check and reset permissions for sensitive "+ + "files and directories.") + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if options.quiet { + imageID = fmt.Sprintf("%s", buildBuff) + fmt.Fprintf(dockerCli.Out(), imageID) + } + + if options.imageIDFile != "" { + if imageID == "" { + return errors.Errorf("Server did not provide an image ID. Cannot write %s", options.imageIDFile) + } + if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil { + return err + } + } + if !options.untrusted { + // Since the build was successful, now we must tag any of the resolved + // images from the above Dockerfile rewrite. + for _, resolved := range resolvedTags { + if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { + return err + } + } + } + + return nil +} + +func isLocalDir(c string) bool { + _, err := os.Stat(c) + return err == nil +} + +type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) + +// validateTag checks if the given image name can be resolved. 
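+// Editor's note (illustrative only, not part of the imported source): a hypothetical
+// reference such as "myorg/myimage:latest" parses and is returned unchanged, e.g.
+//
+//	tag, err := validateTag("myorg/myimage:latest") // tag == "myorg/myimage:latest", err == nil
+//
+// whereas any name rejected by reference.ParseNormalizedNamed yields an error instead.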
+func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNormalizedNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// rewriteDockerfileFromForContentTrust rewrites the given Dockerfile by resolving images in +// "FROM " instructions to a digest reference. `translator` is a +// function that takes a repository name and tag reference and returns a +// trusted digest reference. +// This should be called *only* when content trust is enabled +func rewriteDockerfileFromForContentTrust(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { + scanner := bufio.NewScanner(dockerfile) + buf := bytes.NewBuffer(nil) + + // Scan the lines of the Dockerfile, looking for a "FROM" line. + for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != api.NoBaseImageSpecifier { + // Replace the line with a resolved "FROM repo@digest" + var ref reference.Named + ref, err = reference.ParseNormalizedNamed(matches[1]) + if err != nil { + return nil, nil, err + } + ref = reference.TagNameOnly(ref) + if ref, ok := ref.(reference.NamedTagged); ok { + trustedRef, err := translator(ctx, ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef))) + resolvedTags = append(resolvedTags, &resolvedTag{ + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + _, err := fmt.Fprintln(buf, line) + if err != nil { + return nil, nil, err + } + } + + return buf.Bytes(), resolvedTags, scanner.Err() +} + +// replaceDockerfileForContentTrust wraps the given input tar archive stream and +// uses the translator to replace the Dockerfile which uses a trusted reference. +// Returns a new tar archive stream with the replaced Dockerfile. +func replaceDockerfileForContentTrust(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + content := io.Reader(tarReader) + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive. 
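+				// Editor's note (not part of the imported source): the rewritten bytes replace
+				// the original entry, so hdr.Size below is set to the new length; otherwise the
+				// written entry's length would not match its tar header.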
+ var newDockerfile []byte + newDockerfile, *resolvedTags, err = rewriteDockerfileFromForContentTrust(ctx, content, translator) + if err != nil { + pipeWriter.CloseWithError(err) + return + } + hdr.Size = int64(len(newDockerfile)) + content = bytes.NewBuffer(newDockerfile) + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} + +func imageBuildOptions(dockerCli command.Cli, options buildOptions) types.ImageBuildOptions { + configFile := dockerCli.ConfigFile() + return types.ImageBuildOptions{ + Memory: options.memory.Value(), + MemorySwap: options.memorySwap.Value(), + Tags: options.tags.GetAll(), + SuppressOutput: options.quiet, + NoCache: options.noCache, + Remove: options.rm, + ForceRemove: options.forceRm, + PullParent: options.pull, + Isolation: container.Isolation(options.isolation), + CPUSetCPUs: options.cpuSetCpus, + CPUSetMems: options.cpuSetMems, + CPUShares: options.cpuShares, + CPUQuota: options.cpuQuota, + CPUPeriod: options.cpuPeriod, + CgroupParent: options.cgroupParent, + ShmSize: options.shmSize.Value(), + Ulimits: options.ulimits.GetList(), + BuildArgs: configFile.ParseProxyConfig(dockerCli.Client().DaemonHost(), options.buildArgs.GetAll()), + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + CacheFrom: options.cacheFrom, + SecurityOpt: options.securityOpt, + NetworkMode: options.networkMode, + Squash: options.squash, + ExtraHosts: options.extraHosts.GetAll(), + Target: options.target, + Platform: options.platform, + } +} diff --git a/cli/cli/command/image/build/context.go b/cli/cli/command/image/build/context.go new file mode 100644 index 00000000..b7170df8 --- /dev/null +++ b/cli/cli/command/image/build/context.go @@ -0,0 +1,425 @@ +package build + +import ( + "archive/tar" + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/docker/docker/builder/remotecontext/git" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" + // archiveHeaderSize is the number of bytes in an archive header + archiveHeaderSize = 512 +) + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string, excludes []string) error { + contextRoot, err := getContextRoot(srcPath) + if err != nil { + return err + } + return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + if os.IsPermission(err) { + return errors.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return errors.Errorf("file ('%s') not found or excluded by .dockerignore", filePath) + } + return err + } + + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { + 
return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hanging on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return errors.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +// DetectArchiveReader detects whether the input stream is an archive or a +// Dockerfile and returns a buffered version of input, safe to consume in lieu +// of input. If an archive is detected, isArchive is set to true, and to false +// otherwise, in which case it is safe to assume input represents the contents +// of a Dockerfile. +func DetectArchiveReader(input io.ReadCloser) (rc io.ReadCloser, isArchive bool, err error) { + buf := bufio.NewReader(input) + + magic, err := buf.Peek(archiveHeaderSize) + if err != nil && err != io.EOF { + return nil, false, errors.Errorf("failed to peek context header from STDIN: %v", err) + } + + return ioutils.NewReadCloserWrapper(buf, func() error { return input.Close() }), IsArchive(magic), nil +} + +// WriteTempDockerfile writes a Dockerfile stream to a temporary file with a +// name specified by DefaultDockerfileName and returns the path to the +// temporary directory containing the Dockerfile. +func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) { + // err is a named return value, due to the defer call below. + dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-") + if err != nil { + return "", errors.Errorf("unable to create temporary context directory: %v", err) + } + defer func() { + if err != nil { + os.RemoveAll(dockerfileDir) + } + }() + + f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName)) + if err != nil { + return "", err + } + defer f.Close() + if _, err := io.Copy(f, rc); err != nil { + return "", err + } + return dockerfileDir, rc.Close() +} + +// GetContextFromReader will read the contents of the given reader as either a +// Dockerfile or tar archive. Returns a tar archive used as a context and a +// path to the Dockerfile inside the tar. +func GetContextFromReader(rc io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { + rc, isArchive, err := DetectArchiveReader(rc) + if err != nil { + return nil, "", err + } + + if isArchive { + return rc, dockerfileName, nil + } + + // Input should be read as a Dockerfile. + + if dockerfileName == "-" { + return nil, "", errors.New("build context is not an archive") + } + + dockerfileDir, err := WriteTempDockerfile(rc) + if err != nil { + return nil, "", err + } + + tar, err := archive.Tar(dockerfileDir, archive.Uncompressed) + if err != nil { + return nil, "", err + } + + return ioutils.NewReadCloserWrapper(tar, func() error { + err := tar.Close() + os.RemoveAll(dockerfileDir) + return err + }), DefaultDockerfileName, nil +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. 
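+// Editor's note (illustrative only, not part of the imported source): mirroring the
+// cases exercised in context_test.go,
+//
+//	IsArchive([]byte{0x42, 0x5A, 0x68}) // true: bzip2 magic bytes
+//	IsArchive([]byte{0x00, 0x01, 0x02}) // false: not a known compression or tar header
+//	IsArchive(nil)                      // false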
+func IsArchive(header []byte) bool { + compression := archive.DetectCompression(header) + if compression != archive.Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// GetContextFromGitURL uses a Git URL as context for a `docker build`. The +// git repo is cloned into a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a non-nil error on +// success. +func GetContextFromGitURL(gitURL, dockerfileName string) (string, string, error) { + if _, err := exec.LookPath("git"); err != nil { + return "", "", errors.Wrapf(err, "unable to find 'git'") + } + absContextDir, err := git.Clone(gitURL) + if err != nil { + return "", "", errors.Wrapf(err, "unable to 'git clone' to temporary context directory") + } + + absContextDir, err = ResolveAndValidateContextPath(absContextDir) + if err != nil { + return "", "", err + } + relDockerfile, err := getDockerfileRelPath(absContextDir, dockerfileName) + if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + return "", "", errors.Errorf("the Dockerfile (%s) must be within the build context", dockerfileName) + } + + return absContextDir, relDockerfile, err +} + +// GetContextFromURL uses a remote URL as context for a `docker build`. The +// remote resource is downloaded as either a Dockerfile or a tar archive. +// Returns the tar archive used for the context and a path of the +// dockerfile inside the tar. +func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { + response, err := getWithStatusError(remoteURL) + if err != nil { + return nil, "", errors.Errorf("unable to download remote context %s: %v", remoteURL, err) + } + progressOutput := streamformatter.NewProgressOutput(out) + + // Pass the response body through a progress reader. + progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) + + return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) +} + +// getWithStatusError does an http.Get() and returns an error if the +// status code is 4xx or 5xx. +func getWithStatusError(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode < 400 { + return resp, nil + } + msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status) + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, errors.Wrapf(err, msg+": error reading body") + } + return nil, errors.Errorf(msg+": %s", bytes.TrimSpace(body)) +} + +// GetContextFromLocalDir uses the given local directory as context for a +// `docker build`. Returns the absolute path to the local context directory, +// the relative path of the dockerfile in that context directory, and a non-nil +// error on success. +func GetContextFromLocalDir(localDir, dockerfileName string) (string, string, error) { + localDir, err := ResolveAndValidateContextPath(localDir) + if err != nil { + return "", "", err + } + + // When using a local context directory, and the Dockerfile is specified + // with the `-f/--file` option then it is considered relative to the + // current directory and not the context directory. 
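+	// Editor's note (illustrative only, not part of the imported source): with a
+	// hypothetical invocation such as
+	//
+	//	docker build -f build/Dockerfile.dev .
+	//
+	// the Dockerfile path is made absolute against the caller's working directory
+	// here, then converted to a context-relative path by getDockerfileRelPath below.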
+ if dockerfileName != "" && dockerfileName != "-" { + if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { + return "", "", errors.Errorf("unable to get absolute path to Dockerfile: %v", err) + } + } + + relDockerfile, err := getDockerfileRelPath(localDir, dockerfileName) + return localDir, relDockerfile, err +} + +// ResolveAndValidateContextPath uses the given context directory for a `docker build` +// and returns the absolute path to the context directory. +func ResolveAndValidateContextPath(givenContextDir string) (string, error) { + absContextDir, err := filepath.Abs(givenContextDir) + if err != nil { + return "", errors.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) + } + + // The context dir might be a symbolic link, so follow it to the actual + // target directory. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absContextDir) { + absContextDir, err = filepath.EvalSymlinks(absContextDir) + if err != nil { + return "", errors.Errorf("unable to evaluate symlinks in context path: %v", err) + } + } + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", errors.Errorf("unable to stat context directory %q: %v", absContextDir, err) + } + + if !stat.IsDir() { + return "", errors.Errorf("context must be a directory: %s", absContextDir) + } + return absContextDir, err +} + +// getDockerfileRelPath returns the dockerfile path relative to the context +// directory +func getDockerfileRelPath(absContextDir, givenDockerfile string) (string, error) { + var err error + + if givenDockerfile == "-" { + return givenDockerfile, nil + } + + absDockerfile := givenDockerfile + if absDockerfile == "" { + // No -f/--file was specified so use the default relative to the + // context directory. + absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) + + // Just to be nice ;-) look for 'dockerfile' too but only + // use it if we found it, otherwise ignore this check + if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { + altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) + if _, err = os.Lstat(altPath); err == nil { + absDockerfile = altPath + } + } + } + + // If not already an absolute path, the Dockerfile path should be joined to + // the base directory. + if !filepath.IsAbs(absDockerfile) { + absDockerfile = filepath.Join(absContextDir, absDockerfile) + } + + // Evaluate symlinks in the path to the Dockerfile too. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. 
+ if !isUNC(absDockerfile) { + absDockerfile, err = filepath.EvalSymlinks(absDockerfile) + if err != nil { + return "", errors.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + + } + } + + if _, err := os.Lstat(absDockerfile); err != nil { + if os.IsNotExist(err) { + return "", errors.Errorf("Cannot locate Dockerfile: %q", absDockerfile) + } + return "", errors.Errorf("unable to stat Dockerfile: %v", err) + } + + relDockerfile, err := filepath.Rel(absContextDir, absDockerfile) + if err != nil { + return "", errors.Errorf("unable to get relative Dockerfile path: %v", err) + } + + return relDockerfile, nil +} + +// isUNC returns true if the path is UNC (one starting \\). It always returns +// false on Linux. +func isUNC(path string) bool { + return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) +} + +// AddDockerfileToBuildContext from a ReadCloser, returns a new archive and +// the relative path to the dockerfile in the context. +func AddDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCloser) (io.ReadCloser, string, error) { + file, err := ioutil.ReadAll(dockerfileCtx) + dockerfileCtx.Close() + if err != nil { + return nil, "", err + } + now := time.Now() + hdrTmpl := &tar.Header{ + Mode: 0600, + Uid: 0, + Gid: 0, + ModTime: now, + Typeflag: tar.TypeReg, + AccessTime: now, + ChangeTime: now, + } + randomName := ".dockerfile." + stringid.GenerateRandomID()[:20] + + buildCtx = archive.ReplaceFileTarWrapper(buildCtx, map[string]archive.TarModifierFunc{ + // Add the dockerfile with a random filename + randomName: func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + return hdrTmpl, file, nil + }, + // Update .dockerignore to include the random filename + ".dockerignore": func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + if h == nil { + h = hdrTmpl + } + + b := &bytes.Buffer{} + if content != nil { + if _, err := b.ReadFrom(content); err != nil { + return nil, nil, err + } + } else { + b.WriteString(".dockerignore") + } + b.WriteString("\n" + randomName + "\n") + return h, b.Bytes(), nil + }, + }) + return buildCtx, randomName, nil +} + +// Compress the build context for sending to the API +func Compress(buildCtx io.ReadCloser) (io.ReadCloser, error) { + pipeReader, pipeWriter := io.Pipe() + + go func() { + compressWriter, err := archive.CompressStream(pipeWriter, archive.Gzip) + if err != nil { + pipeWriter.CloseWithError(err) + } + defer buildCtx.Close() + + if _, err := pools.Copy(compressWriter, buildCtx); err != nil { + pipeWriter.CloseWithError( + errors.Wrap(err, "failed to compress context")) + compressWriter.Close() + return + } + compressWriter.Close() + pipeWriter.Close() + }() + + return pipeReader, nil +} diff --git a/cli/cli/command/image/build/context_test.go b/cli/cli/command/image/build/context_test.go new file mode 100644 index 00000000..d74add88 --- /dev/null +++ b/cli/cli/command/image/build/context_test.go @@ -0,0 +1,299 @@ +package build + +import ( + "archive/tar" + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/archive" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +const dockerfileContents = "FROM busybox" + +var prepareEmpty = func(t *testing.T) (string, func()) { + return "", func() {} +} + +var prepareNoFiles = func(t *testing.T) (string, func()) { + return createTestTempDir(t, "", "builder-context-test") +} + +var prepareOneFile = func(t *testing.T) (string, func()) { 
+ contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + return contextDir, cleanup +} + +func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) { + contextDir, cleanup := prepare(t) + defer cleanup() + + err := ValidateContextDirectory(contextDir, excludes) + assert.NilError(t, err) +} + +func TestGetContextFromLocalDirNoDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + _, _, err := GetContextFromLocalDir(contextDir, "") + assert.ErrorContains(t, err, "Dockerfile") +} + +func TestGetContextFromLocalDirNotExistingDir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + _, _, err := GetContextFromLocalDir(fakePath, "") + assert.ErrorContains(t, err, "fake") +} + +func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + _, _, err := GetContextFromLocalDir(contextDir, fakePath) + assert.ErrorContains(t, err, "fake") +} + +func TestGetContextFromLocalDirWithNoDirectory(t *testing.T) { + contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test") + defer dirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + assert.NilError(t, err) + + assert.Check(t, is.Equal(contextDir, absContextDir)) + assert.Check(t, is.Equal(DefaultDockerfileName, relDockerfile)) +} + +func TestGetContextFromLocalDirWithDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + assert.NilError(t, err) + + assert.Check(t, is.Equal(contextDir, absContextDir)) + assert.Check(t, is.Equal(DefaultDockerfileName, relDockerfile)) +} + +func TestGetContextFromLocalDirLocalFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName) + assert.NilError(t, err) + + assert.Check(t, is.Equal(contextDir, absContextDir)) + assert.Check(t, 
is.Equal(DefaultDockerfileName, relDockerfile)) +} + +func TestGetContextFromReaderString(t *testing.T) { + tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "") + + if err != nil { + t.Fatalf("Error when executing GetContextFromReader: %s", err) + } + + tarReader := tar.NewReader(tarArchive) + + _, err = tarReader.Next() + + if err != nil { + t.Fatalf("Error when reading tar archive: %s", err) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + assert.NilError(t, tarArchive.Close()) + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromReaderTar(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + assert.NilError(t, err) + + tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName) + assert.NilError(t, err) + + tarReader := tar.NewReader(tarArchive) + + header, err := tarReader.Next() + assert.NilError(t, err) + + if header.Name != DefaultDockerfileName { + t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + assert.NilError(t, tarArchive.Close()) + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestValidateContextDirectoryEmptyContext(t *testing.T) { + // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81. + // The test will ultimately end up calling filepath.Abs(""). On Windows, + // golang will error. On Linux, golang will return /. Due to there being + // drive letters on Windows, this is probably the correct behaviour for + // Windows. + if runtime.GOOS == "windows" { + t.Skip("Invalid test on Windows") + } + testValidateContextDirectory(t, prepareEmpty, []string{}) +} + +func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) { + testValidateContextDirectory(t, prepareNoFiles, []string{}) +} + +func TestValidateContextDirectoryWithOneFile(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{}) +} + +func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName}) +} + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. 
+func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + assert.NilError(t, err) + return path, func() { assert.NilError(t, os.RemoveAll(path)) } +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + assert.NilError(t, err) + return filePath +} + +// chdir changes current working directory to dir. +// It returns a function which changes working directory back to the previous one. +// This function is meant to be executed as a deferred call. +// When an error occurs, it terminates the test. +func chdir(t *testing.T, dir string) func() { + workingDirectory, err := os.Getwd() + assert.NilError(t, err) + assert.NilError(t, os.Chdir(dir)) + return func() { assert.NilError(t, os.Chdir(workingDirectory)) } +} + +func TestIsArchive(t *testing.T) { + var testcases = []struct { + doc string + header []byte + expected bool + }{ + { + doc: "nil is not a valid header", + header: nil, + expected: false, + }, + { + doc: "invalid header bytes", + header: []byte{0x00, 0x01, 0x02}, + expected: false, + }, + { + doc: "header for bzip2 archive", + header: []byte{0x42, 0x5A, 0x68}, + expected: true, + }, + { + doc: "header for 7zip archive is not supported", + header: []byte{0x50, 0x4b, 0x03, 0x04}, + expected: false, + }, + } + for _, testcase := range testcases { + assert.Check(t, is.Equal(testcase.expected, IsArchive(testcase.header)), testcase.doc) + } +} diff --git a/cli/cli/command/image/build/context_unix.go b/cli/cli/command/image/build/context_unix.go new file mode 100644 index 00000000..cb2634f0 --- /dev/null +++ b/cli/cli/command/image/build/context_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package build + +import ( + "path/filepath" +) + +func getContextRoot(srcPath string) (string, error) { + return filepath.Join(srcPath, "."), nil +} diff --git a/cli/cli/command/image/build/context_windows.go b/cli/cli/command/image/build/context_windows.go new file mode 100644 index 00000000..c577cfa7 --- /dev/null +++ b/cli/cli/command/image/build/context_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package build + +import ( + "path/filepath" + + "github.com/docker/docker/pkg/longpath" +) + +func getContextRoot(srcPath string) (string, error) { + cr, err := filepath.Abs(srcPath) + if err != nil { + return "", err + } + return longpath.AddPrefix(cr), nil +} diff --git a/cli/cli/command/image/build/dockerignore.go b/cli/cli/command/image/build/dockerignore.go new file mode 100644 index 00000000..497c3f24 --- /dev/null +++ b/cli/cli/command/image/build/dockerignore.go @@ -0,0 +1,39 @@ +package build + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" +) + +// ReadDockerignore reads the .dockerignore file in the context directory and +// returns the list of paths to exclude +func ReadDockerignore(contextDir string) ([]string, error) { + var excludes []string + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + switch { + case os.IsNotExist(err): + return excludes, nil + case err != nil: + return nil, err + } + defer f.Close() + + return dockerignore.ReadAll(f) +} + +// TrimBuildFilesFromExcludes removes the named Dockerfile and .dockerignore from +// 
the list of excluded files. The daemon will remove them from the final context +// but they must be available in the context when passed to the API. +func TrimBuildFilesFromExcludes(excludes []string, dockerfile string, dockerfileFromStdin bool) []string { + if keep, _ := fileutils.Matches(".dockerignore", excludes); keep { + excludes = append(excludes, "!.dockerignore") + } + if keep, _ := fileutils.Matches(dockerfile, excludes); keep && !dockerfileFromStdin { + excludes = append(excludes, "!"+dockerfile) + } + return excludes +} diff --git a/cli/cli/command/image/build_buildkit.go b/cli/cli/command/image/build_buildkit.go new file mode 100644 index 00000000..4427bb18 --- /dev/null +++ b/cli/cli/command/image/build_buildkit.go @@ -0,0 +1,346 @@ +package image + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containerd/console" + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image/build" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/urlutil" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/session/auth/authprovider" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/util/appcontext" + "github.com/moby/buildkit/util/progress/progressui" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + "golang.org/x/sync/errgroup" +) + +const uploadRequestRemote = "upload-request" + +var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles") + +//nolint: gocyclo +func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error { + ctx := appcontext.Context() + + s, err := trySession(dockerCli, options.context) + if err != nil { + return err + } + if s == nil { + return errors.Errorf("buildkit not supported by daemon") + } + + if options.imageIDFile != "" { + // Avoid leaving a stale file if we eventually fail + if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "removing image ID file") + } + } + + var ( + remote string + body io.Reader + dockerfileName = options.dockerfileName + dockerfileReader io.ReadCloser + dockerfileDir string + contextDir string + ) + + switch { + case options.contextFromStdin(): + if options.dockerfileFromStdin() { + return errStdinConflict + } + rc, isArchive, err := build.DetectArchiveReader(os.Stdin) + if err != nil { + return err + } + if isArchive { + body = rc + remote = uploadRequestRemote + } else { + if options.dockerfileName != "" { + return errDockerfileConflict + } + dockerfileReader = rc + remote = clientSessionRemote + // TODO: make fssync handle empty contextdir + contextDir, _ = ioutil.TempDir("", "empty-dir") + defer os.RemoveAll(contextDir) + } + case isLocalDir(options.context): + contextDir = options.context + if options.dockerfileFromStdin() { + dockerfileReader = os.Stdin + } else if options.dockerfileName != "" { + dockerfileName = filepath.Base(options.dockerfileName) + dockerfileDir = filepath.Dir(options.dockerfileName) + } else { + dockerfileDir = options.context + } + remote = clientSessionRemote + case urlutil.IsGitURL(options.context): + remote = options.context + case urlutil.IsURL(options.context): + remote = options.context + default: + return errors.Errorf("unable to
prepare context: path %q not found", options.context) + } + + if dockerfileReader != nil { + dockerfileName = build.DefaultDockerfileName + dockerfileDir, err = build.WriteTempDockerfile(dockerfileReader) + if err != nil { + return err + } + defer os.RemoveAll(dockerfileDir) + } + + if dockerfileDir != "" { + s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{ + { + Name: "context", + Dir: contextDir, + Map: resetUIDAndGID, + }, + { + Name: "dockerfile", + Dir: dockerfileDir, + }, + })) + } + + s.Allow(authprovider.NewDockerAuthProvider()) + + eg, ctx := errgroup.WithContext(ctx) + + eg.Go(func() error { + return s.Run(context.TODO(), dockerCli.Client().DialSession) + }) + + buildID := stringid.GenerateRandomID() + if body != nil { + eg.Go(func() error { + buildOptions := types.ImageBuildOptions{ + Version: types.BuilderBuildKit, + BuildID: uploadRequestRemote + ":" + buildID, + } + + response, err := dockerCli.Client().ImageBuild(context.Background(), body, buildOptions) + if err != nil { + return err + } + defer response.Body.Close() + return nil + }) + } + + eg.Go(func() error { + defer func() { // make sure the Status ends cleanly on build errors + s.Close() + }() + + buildOptions := imageBuildOptions(dockerCli, options) + buildOptions.Version = types.BuilderBuildKit + buildOptions.Dockerfile = dockerfileName + //buildOptions.AuthConfigs = authConfigs // handled by session + buildOptions.RemoteContext = remote + buildOptions.SessionID = s.ID() + buildOptions.BuildID = buildID + return doBuild(ctx, eg, dockerCli, options, buildOptions) + }) + + return eg.Wait() +} + +//nolint: gocyclo +func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, options buildOptions, buildOptions types.ImageBuildOptions) (finalErr error) { + response, err := dockerCli.Client().ImageBuild(context.Background(), nil, buildOptions) + if err != nil { + return err + } + defer response.Body.Close() + + done := make(chan struct{}) + defer close(done) + eg.Go(func() error { + select { + case <-ctx.Done(): + return dockerCli.Client().BuildCancel(context.TODO(), buildOptions.BuildID) + case <-done: + } + return nil + }) + + t := newTracer() + ssArr := []*client.SolveStatus{} + + displayStatus := func(out *os.File, displayCh chan *client.SolveStatus) { + var c console.Console + // TODO: Handle interactive output in non-interactive environment. 
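+ // A console is attached only when the output stream is a terminal and --console was not explicitly disabled;
+ // with a console, progressui renders the interactive BuildKit display, otherwise it prints plain progress lines.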
+ consoleOpt := options.console.Value() + if cons, err := console.ConsoleFromFile(out); err == nil && (consoleOpt == nil || *consoleOpt) { + c = cons + } + // not using shared context to not disrupt display but let it finish reporting errors + eg.Go(func() error { + return progressui.DisplaySolveStatus(context.TODO(), c, out, displayCh) + }) + } + + if options.quiet { + eg.Go(func() error { + // TODO: make sure t.displayCh closes + for ss := range t.displayCh { + ssArr = append(ssArr, ss) + } + <-done + // TODO: verify that finalErr is indeed set when an error occurs + if finalErr != nil { + displayCh := make(chan *client.SolveStatus) + go func() { + for _, ss := range ssArr { + displayCh <- ss + } + close(displayCh) + }() + displayStatus(os.Stderr, displayCh) + } + return nil + }) + } else { + displayStatus(os.Stdout, t.displayCh) + } + defer close(t.displayCh) + + buf := bytes.NewBuffer(nil) + + imageID := "" + writeAux := func(msg jsonmessage.JSONMessage) { + if msg.ID == "moby.image.id" { + var result types.BuildResult + if err := json.Unmarshal(*msg.Aux, &result); err != nil { + fmt.Fprintf(dockerCli.Err(), "failed to parse aux message: %v", err) + } + imageID = result.ID + return + } + t.write(msg) + } + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buf, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), writeAux) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + // + // TODO: we may want to use Aux messages with ID "moby.image.id" regardless of options.quiet (i.e. don't send HTTP param q=1) + // instead of assuming that output is image ID if options.quiet.
+ if options.quiet { + imageID = buf.String() + fmt.Fprint(dockerCli.Out(), imageID) + } + + if options.imageIDFile != "" { + if imageID == "" { + return errors.Errorf("cannot write %s because server did not provide an image ID", options.imageIDFile) + } + imageID = strings.TrimSpace(imageID) + if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil { + return errors.Wrap(err, "cannot write image ID file") + } + } + return err +} + +func resetUIDAndGID(s *fsutil.Stat) bool { + s.Uid = 0 + s.Gid = 0 + return true +} + +type tracer struct { + displayCh chan *client.SolveStatus +} + +func newTracer() *tracer { + return &tracer{ + displayCh: make(chan *client.SolveStatus), + } +} + +func (t *tracer) write(msg jsonmessage.JSONMessage) { + var resp controlapi.StatusResponse + + if msg.ID != "moby.buildkit.trace" { + return + } + + var dt []byte + // ignoring all messages that are not understood + if err := json.Unmarshal(*msg.Aux, &dt); err != nil { + return + } + if err := (&resp).Unmarshal(dt); err != nil { + return + } + + s := client.SolveStatus{} + for _, v := range resp.Vertexes { + s.Vertexes = append(s.Vertexes, &client.Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + }) + } + for _, v := range resp.Statuses { + s.Statuses = append(s.Statuses, &client.VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Total: v.Total, + Current: v.Current, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for _, v := range resp.Logs { + s.Logs = append(s.Logs, &client.VertexLog{ + Vertex: v.Vertex, + Stream: int(v.Stream), + Data: v.Msg, + Timestamp: v.Timestamp, + }) + } + + t.displayCh <- &s +} diff --git a/cli/cli/command/image/build_session.go b/cli/cli/command/image/build_session.go new file mode 100644 index 00000000..d4cda82f --- /dev/null +++ b/cli/cli/command/image/build_session.go @@ -0,0 +1,158 @@ +package image + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image/build" + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/progress" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/pkg/errors" + "golang.org/x/time/rate" +) + +const clientSessionRemote = "client-session" + +func isSessionSupported(dockerCli command.Cli) bool { + return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31") +} + +func trySession(dockerCli command.Cli, contextDir string) (*session.Session, error) { + var s *session.Session + if isSessionSupported(dockerCli) { + sharedKey, err := getBuildSharedKey(contextDir) + if err != nil { + return nil, errors.Wrap(err, "failed to get build shared key") + } + s, err = session.NewSession(context.Background(), filepath.Base(contextDir), sharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create session") + } + } + return s, nil +} + +func addDirToSession(session *session.Session, contextDir string, progressOutput progress.Output, done chan error) error { + excludes, err := build.ReadDockerignore(contextDir) + if err != nil { + return err + } + + p := &sizeProgress{out: progressOutput, action: "Streaming build context to 
Docker daemon"} + + workdirProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{ + {Dir: contextDir, Excludes: excludes}, + }) + session.Allow(workdirProvider) + + // this will be replaced on parallel build jobs. keep the current + // progressbar for now + if snpc, ok := workdirProvider.(interface { + SetNextProgressCallback(func(int, bool), chan error) + }); ok { + snpc.SetNextProgressCallback(p.update, done) + } + + return nil +} + +type sizeProgress struct { + out progress.Output + action string + limiter *rate.Limiter +} + +func (sp *sizeProgress) update(size int, last bool) { + if sp.limiter == nil { + sp.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1) + } + if last || sp.limiter.Allow() { + sp.out.WriteProgress(progress.Progress{Action: sp.action, Current: int64(size), LastUpdate: last}) + } +} + +type bufferedWriter struct { + done chan error + io.Writer + buf *bytes.Buffer + flushed chan struct{} + mu sync.Mutex +} + +func newBufferedWriter(done chan error, w io.Writer) *bufferedWriter { + bw := &bufferedWriter{done: done, Writer: w, buf: new(bytes.Buffer), flushed: make(chan struct{})} + go func() { + <-done + bw.flushBuffer() + }() + return bw +} + +func (bw *bufferedWriter) Write(dt []byte) (int, error) { + select { + case <-bw.done: + bw.flushBuffer() + return bw.Writer.Write(dt) + default: + return bw.buf.Write(dt) + } +} + +func (bw *bufferedWriter) flushBuffer() { + bw.mu.Lock() + select { + case <-bw.flushed: + default: + bw.Writer.Write(bw.buf.Bytes()) + close(bw.flushed) + } + bw.mu.Unlock() +} + +func (bw *bufferedWriter) String() string { + return fmt.Sprintf("%s", bw.Writer) +} + +func getBuildSharedKey(dir string) (string, error) { + // build session is hash of build dir with node based randomness + s := sha256.Sum256([]byte(fmt.Sprintf("%s:%s", tryNodeIdentifier(), dir))) + return hex.EncodeToString(s[:]), nil +} + +func tryNodeIdentifier() string { + out := cliconfig.Dir() // return config dir as default on permission error + if err := os.MkdirAll(cliconfig.Dir(), 0700); err == nil { + sessionFile := filepath.Join(cliconfig.Dir(), ".buildNodeID") + if _, err := os.Lstat(sessionFile); err != nil { + if os.IsNotExist(err) { // create a new file with stored randomness + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + return out + } + if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil { + return out + } + } + } + + dt, err := ioutil.ReadFile(sessionFile) + if err == nil { + return string(dt) + } + } + return out +} diff --git a/cli/cli/command/image/build_test.go b/cli/cli/command/image/build_test.go new file mode 100644 index 00000000..adcacd49 --- /dev/null +++ b/cli/cli/command/image/build_test.go @@ -0,0 +1,216 @@ +package image + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" + "gotest.tools/fs" + "gotest.tools/skip" +) + +func TestRunBuildDockerfileFromStdinWithCompress(t *testing.T) { + buffer := new(bytes.Buffer) + fakeBuild := newFakeBuild() + fakeImageBuild := func(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + tee := io.TeeReader(context, buffer) + gzipReader, err := gzip.NewReader(tee) + assert.NilError(t, err) + return 
fakeBuild.build(ctx, gzipReader, options) + } + + cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeImageBuild}) + dockerfile := bytes.NewBufferString(` + FROM alpine:3.6 + COPY foo / + `) + cli.SetIn(command.NewInStream(ioutil.NopCloser(dockerfile))) + + dir := fs.NewDir(t, t.Name(), + fs.WithFile("foo", "some content")) + defer dir.Remove() + + options := newBuildOptions() + options.compress = true + options.dockerfileName = "-" + options.context = dir.Path() + options.untrusted = true + assert.NilError(t, runBuild(cli, options)) + + expected := []string{fakeBuild.options.Dockerfile, ".dockerignore", "foo"} + assert.DeepEqual(t, expected, fakeBuild.filenames(t)) + + header := buffer.Bytes()[:10] + assert.Equal(t, archive.Gzip, archive.DetectCompression(header)) +} + +func TestRunBuildResetsUidAndGidInContext(t *testing.T) { + skip.If(t, os.Getuid() != 0, "root is required to chown files") + fakeBuild := newFakeBuild() + cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build}) + + dir := fs.NewDir(t, "test-build-context", + fs.WithFile("foo", "some content", fs.AsUser(65534, 65534)), + fs.WithFile("Dockerfile", ` + FROM alpine:3.6 + COPY foo bar / + `), + ) + defer dir.Remove() + + options := newBuildOptions() + options.context = dir.Path() + options.untrusted = true + assert.NilError(t, runBuild(cli, options)) + + headers := fakeBuild.headers(t) + expected := []*tar.Header{ + {Name: "Dockerfile"}, + {Name: "foo"}, + } + var cmpTarHeaderNameAndOwner = cmp.Comparer(func(x, y tar.Header) bool { + return x.Name == y.Name && x.Uid == y.Uid && x.Gid == y.Gid + }) + assert.DeepEqual(t, expected, headers, cmpTarHeaderNameAndOwner) +} + +func TestRunBuildDockerfileOutsideContext(t *testing.T) { + dir := fs.NewDir(t, t.Name(), + fs.WithFile("data", "data file")) + defer dir.Remove() + + // Dockerfile outside of build-context + df := fs.NewFile(t, t.Name(), + fs.WithContent(` +FROM FOOBAR +COPY data /data + `), + ) + defer df.Remove() + + fakeBuild := newFakeBuild() + cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build}) + + options := newBuildOptions() + options.context = dir.Path() + options.dockerfileName = df.Path() + options.untrusted = true + assert.NilError(t, runBuild(cli, options)) + + expected := []string{fakeBuild.options.Dockerfile, ".dockerignore", "data"} + assert.DeepEqual(t, expected, fakeBuild.filenames(t)) +} + +// TestRunBuildFromGitHubSpecialCase tests that build contexts +// starting with `github.com/` are special-cased, and the build command attempts +// to clone the remote repo. +// TODO: test "context selection" logic directly when runBuild is refactored +// to support testing (ex: docker/cli#294) +func TestRunBuildFromGitHubSpecialCase(t *testing.T) { + cmd := NewBuildCommand(test.NewFakeCli(nil)) + // Clone a small repo that exists so git doesn't prompt for credentials + cmd.SetArgs([]string{"github.com/docker/for-win"}) + cmd.SetOutput(ioutil.Discard) + err := cmd.Execute() + assert.ErrorContains(t, err, "unable to prepare context") + assert.ErrorContains(t, err, "docker-build-git") +} + +// TestRunBuildFromLocalGitHubDir tests that a local directory +// starting with `github.com` takes precedence over the `github.com` special +// case.
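+// The fake client accepts any build request, so a nil error from Execute shows that the
+// local directory was used as the build context rather than an attempted git clone.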
+func TestRunBuildFromLocalGitHubDir(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-build-from-local-dir-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + buildDir := filepath.Join(tmpDir, "github.com", "docker", "no-such-repository") + err = os.MkdirAll(buildDir, 0777) + assert.NilError(t, err) + err = ioutil.WriteFile(filepath.Join(buildDir, "Dockerfile"), []byte("FROM busybox\n"), 0644) + assert.NilError(t, err) + + client := test.NewFakeCli(&fakeClient{}) + cmd := NewBuildCommand(client) + cmd.SetArgs([]string{buildDir}) + cmd.SetOutput(ioutil.Discard) + err = cmd.Execute() + assert.NilError(t, err) +} + +func TestRunBuildWithSymlinkedContext(t *testing.T) { + dockerfile := ` +FROM alpine:3.6 +RUN echo hello world +` + + tmpDir := fs.NewDir(t, t.Name(), + fs.WithDir("context", + fs.WithFile("Dockerfile", dockerfile)), + fs.WithSymlink("context-link", "context")) + defer tmpDir.Remove() + + fakeBuild := newFakeBuild() + cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build}) + options := newBuildOptions() + options.context = tmpDir.Join("context-link") + options.untrusted = true + assert.NilError(t, runBuild(cli, options)) + + assert.DeepEqual(t, fakeBuild.filenames(t), []string{"Dockerfile"}) +} + +type fakeBuild struct { + context *tar.Reader + options types.ImageBuildOptions +} + +func newFakeBuild() *fakeBuild { + return &fakeBuild{} +} + +func (f *fakeBuild) build(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + f.context = tar.NewReader(context) + f.options = options + body := new(bytes.Buffer) + return types.ImageBuildResponse{Body: ioutil.NopCloser(body)}, nil +} + +func (f *fakeBuild) headers(t *testing.T) []*tar.Header { + t.Helper() + headers := []*tar.Header{} + for { + hdr, err := f.context.Next() + switch err { + case io.EOF: + return headers + case nil: + headers = append(headers, hdr) + default: + assert.NilError(t, err) + } + } +} + +func (f *fakeBuild) filenames(t *testing.T) []string { + t.Helper() + names := []string{} + for _, header := range f.headers(t) { + names = append(names, header.Name) + } + sort.Strings(names) + return names +} diff --git a/cli/cli/command/image/client_test.go b/cli/cli/command/image/client_test.go new file mode 100644 index 00000000..50e46f4e --- /dev/null +++ b/cli/cli/command/image/client_test.go @@ -0,0 +1,124 @@ +package image + +import ( + "context" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + imageTagFunc func(string, string) error + imageSaveFunc func(images []string) (io.ReadCloser, error) + imageRemoveFunc func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + imagePushFunc func(ref string, options types.ImagePushOptions) (io.ReadCloser, error) + infoFunc func() (types.Info, error) + imagePullFunc func(ref string, options types.ImagePullOptions) (io.ReadCloser, error) + imagesPruneFunc func(pruneFilter filters.Args) (types.ImagesPruneReport, error) + imageLoadFunc func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) + imageListFunc func(options types.ImageListOptions) ([]types.ImageSummary, error) + imageInspectFunc func(image string) (types.ImageInspect, []byte, error) + imageImportFunc func(source types.ImageImportSource, ref string, options types.ImageImportOptions) 
(io.ReadCloser, error) + imageHistoryFunc func(image string) ([]image.HistoryResponseItem, error) + imageBuildFunc func(context.Context, io.Reader, types.ImageBuildOptions) (types.ImageBuildResponse, error) +} + +func (cli *fakeClient) ImageTag(_ context.Context, image, ref string) error { + if cli.imageTagFunc != nil { + return cli.imageTagFunc(image, ref) + } + return nil +} + +func (cli *fakeClient) ImageSave(_ context.Context, images []string) (io.ReadCloser, error) { + if cli.imageSaveFunc != nil { + return cli.imageSaveFunc(images) + } + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (cli *fakeClient) ImageRemove(_ context.Context, image string, + options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + if cli.imageRemoveFunc != nil { + return cli.imageRemoveFunc(image, options) + } + return []types.ImageDeleteResponseItem{}, nil +} + +func (cli *fakeClient) ImagePush(_ context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + if cli.imagePushFunc != nil { + return cli.imagePushFunc(ref, options) + } + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (cli *fakeClient) Info(_ context.Context) (types.Info, error) { + if cli.infoFunc != nil { + return cli.infoFunc() + } + return types.Info{}, nil +} + +func (cli *fakeClient) ImagePull(_ context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + if cli.imagePullFunc != nil { + cli.imagePullFunc(ref, options) + } + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (cli *fakeClient) ImagesPrune(_ context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) { + if cli.imagesPruneFunc != nil { + return cli.imagesPruneFunc(pruneFilter) + } + return types.ImagesPruneReport{}, nil +} + +func (cli *fakeClient) ImageLoad(_ context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + if cli.imageLoadFunc != nil { + return cli.imageLoadFunc(input, quiet) + } + return types.ImageLoadResponse{}, nil +} + +func (cli *fakeClient) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + if cli.imageListFunc != nil { + return cli.imageListFunc(options) + } + return []types.ImageSummary{{}}, nil +} + +func (cli *fakeClient) ImageInspectWithRaw(_ context.Context, image string) (types.ImageInspect, []byte, error) { + if cli.imageInspectFunc != nil { + return cli.imageInspectFunc(image) + } + return types.ImageInspect{}, nil, nil +} + +func (cli *fakeClient) ImageImport(_ context.Context, source types.ImageImportSource, ref string, + options types.ImageImportOptions) (io.ReadCloser, error) { + if cli.imageImportFunc != nil { + return cli.imageImportFunc(source, ref, options) + } + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (cli *fakeClient) ImageHistory(_ context.Context, img string) ([]image.HistoryResponseItem, error) { + if cli.imageHistoryFunc != nil { + return cli.imageHistoryFunc(img) + } + return []image.HistoryResponseItem{{ID: img, Created: time.Now().Unix()}}, nil +} + +func (cli *fakeClient) ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + if cli.imageBuildFunc != nil { + return cli.imageBuildFunc(ctx, context, options) + } + return types.ImageBuildResponse{Body: ioutil.NopCloser(strings.NewReader(""))}, nil +} diff --git a/cli/cli/command/image/cmd.go b/cli/cli/command/image/cmd.go new file mode 100644 index 00000000..a12bf339 --- /dev/null +++ 
b/cli/cli/command/image/cmd.go @@ -0,0 +1,33 @@ +package image + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewImageCommand returns a cobra command for `image` subcommands +func NewImageCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "image", + Short: "Manage images", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewBuildCommand(dockerCli), + NewHistoryCommand(dockerCli), + NewImportCommand(dockerCli), + NewLoadCommand(dockerCli), + NewPullCommand(dockerCli), + NewPushCommand(dockerCli), + NewSaveCommand(dockerCli), + NewTagCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/image/history.go b/cli/cli/command/image/history.go new file mode 100644 index 00000000..11acc93d --- /dev/null +++ b/cli/cli/command/image/history.go @@ -0,0 +1,64 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type historyOptions struct { + image string + + human bool + quiet bool + noTrunc bool + format string +} + +// NewHistoryCommand creates a new `docker history` command +func NewHistoryCommand(dockerCli command.Cli) *cobra.Command { + var opts historyOptions + + cmd := &cobra.Command{ + Use: "history [OPTIONS] IMAGE", + Short: "Show the history of an image", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + return runHistory(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + + return cmd +} + +func runHistory(dockerCli command.Cli, opts historyOptions) error { + ctx := context.Background() + + history, err := dockerCli.Client().ImageHistory(ctx, opts.image) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + format = formatter.TableFormatKey + } + + historyCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewHistoryFormat(format, opts.quiet, opts.human), + Trunc: !opts.noTrunc, + } + return formatter.HistoryWrite(historyCtx, opts.human, history) +} diff --git a/cli/cli/command/image/history_test.go b/cli/cli/command/image/history_test.go new file mode 100644 index 00000000..ad2beb9a --- /dev/null +++ b/cli/cli/command/image/history_test.go @@ -0,0 +1,105 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/image" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/golden" + "gotest.tools/skip" +) + +func TestNewHistoryCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imageHistoryFunc func(img string) ([]image.HistoryResponseItem, error) + }{ + { + name: "wrong-args", + args: []string{}, + expectedError: "requires exactly 1 argument.", + }, + { + name: "client-error", + args: []string{"image:tag"}, + expectedError: "something went wrong", + imageHistoryFunc: func(img 
string) ([]image.HistoryResponseItem, error) { + return []image.HistoryResponseItem{{}}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cmd := NewHistoryCommand(test.NewFakeCli(&fakeClient{imageHistoryFunc: tc.imageHistoryFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func notUTCTimezone() bool { + now := time.Now() + return now != now.UTC() +} + +func TestNewHistoryCommandSuccess(t *testing.T) { + skip.If(t, notUTCTimezone, "expected output requires UTC timezone") + testCases := []struct { + name string + args []string + imageHistoryFunc func(img string) ([]image.HistoryResponseItem, error) + }{ + { + name: "simple", + args: []string{"image:tag"}, + imageHistoryFunc: func(img string) ([]image.HistoryResponseItem, error) { + return []image.HistoryResponseItem{{ + ID: "1234567890123456789", + Created: time.Now().Unix(), + }}, nil + }, + }, + { + name: "quiet", + args: []string{"--quiet", "image:tag"}, + }, + { + name: "non-human", + args: []string{"--human=false", "image:tag"}, + imageHistoryFunc: func(img string) ([]image.HistoryResponseItem, error) { + return []image.HistoryResponseItem{{ + ID: "abcdef", + Created: time.Date(2017, 1, 1, 12, 0, 3, 0, time.UTC).Unix(), + CreatedBy: "rose", + Comment: "new history item!", + }}, nil + }, + }, + { + name: "quiet-no-trunc", + args: []string{"--quiet", "--no-trunc", "image:tag"}, + imageHistoryFunc: func(img string) ([]image.HistoryResponseItem, error) { + return []image.HistoryResponseItem{{ + ID: "1234567890123456789", + Created: time.Now().Unix(), + }}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageHistoryFunc: tc.imageHistoryFunc}) + cmd := NewHistoryCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + actual := cli.OutBuffer().String() + golden.Assert(t, actual, fmt.Sprintf("history-command-success.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/image/import.go b/cli/cli/command/image/import.go new file mode 100644 index 00000000..cfa6a87b --- /dev/null +++ b/cli/cli/command/image/import.go @@ -0,0 +1,87 @@ +package image + +import ( + "context" + "io" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + dockeropts "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/urlutil" + "github.com/spf13/cobra" +) + +type importOptions struct { + source string + reference string + changes dockeropts.ListOpts + message string +} + +// NewImportCommand creates a new `docker import` command +func NewImportCommand(dockerCli command.Cli) *cobra.Command { + var options importOptions + + cmd := &cobra.Command{ + Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", + Short: "Import the contents from a tarball to create a filesystem image", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.source = args[0] + if len(args) > 1 { + options.reference = args[1] + } + return runImport(dockerCli, options) + }, + } + + flags := cmd.Flags() + + options.changes = dockeropts.NewListOpts(nil) + flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image") + flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image") + + return cmd +} + +func runImport(dockerCli command.Cli, options 
importOptions) error { + var ( + in io.Reader + srcName = options.source + ) + + if options.source == "-" { + in = dockerCli.In() + } else if !urlutil.IsURL(options.source) { + srcName = "-" + file, err := os.Open(options.source) + if err != nil { + return err + } + defer file.Close() + in = file + } + + source := types.ImageImportSource{ + Source: in, + SourceName: srcName, + } + + importOptions := types.ImageImportOptions{ + Message: options.message, + Changes: options.changes.GetAll(), + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ImageImport(context.Background(), source, options.reference, importOptions) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/cli/cli/command/image/import_test.go b/cli/cli/command/image/import_test.go new file mode 100644 index 00000000..9e2fad61 --- /dev/null +++ b/cli/cli/command/image/import_test.go @@ -0,0 +1,97 @@ +package image + +import ( + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNewImportCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imageImportFunc func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + }{ + { + name: "wrong-args", + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + { + name: "import-failed", + args: []string{"testdata/import-command-success.input.txt"}, + expectedError: "something went wrong", + imageImportFunc: func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + return nil, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cmd := NewImportCommand(test.NewFakeCli(&fakeClient{imageImportFunc: tc.imageImportFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewImportCommandInvalidFile(t *testing.T) { + cmd := NewImportCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"testdata/import-command-success.unexistent-file"}) + assert.ErrorContains(t, cmd.Execute(), "testdata/import-command-success.unexistent-file") +} + +func TestNewImportCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imageImportFunc func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + }{ + { + name: "simple", + args: []string{"testdata/import-command-success.input.txt"}, + }, + { + name: "terminal-source", + args: []string{"-"}, + }, + { + name: "double", + args: []string{"-", "image:local"}, + imageImportFunc: func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + assert.Check(t, is.Equal("image:local", ref)) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + { + name: "message", + args: []string{"--message", "test message", "-"}, + imageImportFunc: func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + assert.Check(t, is.Equal("test message", options.Message)) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + { + name: "change", + args: []string{"--change", "ENV 
DEBUG true", "-"}, + imageImportFunc: func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + assert.Check(t, is.Equal("ENV DEBUG true", options.Changes[0])) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + } + for _, tc := range testCases { + cmd := NewImportCommand(test.NewFakeCli(&fakeClient{imageImportFunc: tc.imageImportFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + } +} diff --git a/cli/cli/command/image/inspect.go b/cli/cli/command/image/inspect.go new file mode 100644 index 00000000..2044fcaf --- /dev/null +++ b/cli/cli/command/image/inspect.go @@ -0,0 +1,44 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker image inspect` +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] IMAGE [IMAGE...]", + Short: "Display detailed information on one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ImageInspectWithRaw(ctx, ref) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/cli/cli/command/image/inspect_test.go b/cli/cli/command/image/inspect_test.go new file mode 100644 index 00000000..d881ae0a --- /dev/null +++ b/cli/cli/command/image/inspect_test.go @@ -0,0 +1,88 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNewInspectCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "wrong-args", + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + } + for _, tc := range testCases { + cmd := newInspectCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewInspectCommandSuccess(t *testing.T) { + imageInspectInvocationCount := 0 + testCases := []struct { + name string + args []string + imageCount int + imageInspectFunc func(image string) (types.ImageInspect, []byte, error) + }{ + { + name: "simple", + args: []string{"image"}, + imageCount: 1, + imageInspectFunc: func(image string) (types.ImageInspect, []byte, error) { + imageInspectInvocationCount++ + assert.Check(t, is.Equal("image", image)) + return types.ImageInspect{}, nil, nil + }, + }, + { + name: "format", + imageCount: 1, + args: []string{"--format='{{.ID}}'", "image"}, + imageInspectFunc: func(image string) (types.ImageInspect, []byte, error) { + imageInspectInvocationCount++ + return types.ImageInspect{ID: image}, nil, nil + }, + }, + { + name: 
"simple-many", + args: []string{"image1", "image2"}, + imageCount: 2, + imageInspectFunc: func(image string) (types.ImageInspect, []byte, error) { + imageInspectInvocationCount++ + if imageInspectInvocationCount == 1 { + assert.Check(t, is.Equal("image1", image)) + } else { + assert.Check(t, is.Equal("image2", image)) + } + return types.ImageInspect{}, nil, nil + }, + }, + } + for _, tc := range testCases { + imageInspectInvocationCount = 0 + cli := test.NewFakeCli(&fakeClient{imageInspectFunc: tc.imageInspectFunc}) + cmd := newInspectCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("inspect-command-success.%s.golden", tc.name)) + assert.Check(t, is.Equal(imageInspectInvocationCount, tc.imageCount)) + } +} diff --git a/cli/cli/command/image/list.go b/cli/cli/command/image/list.go new file mode 100644 index 00000000..2dd9786e --- /dev/null +++ b/cli/cli/command/image/list.go @@ -0,0 +1,96 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type imagesOptions struct { + matchName string + + quiet bool + all bool + noTrunc bool + showDigests bool + format string + filter opts.FilterOpt +} + +// NewImagesCommand creates a new `docker images` command +func NewImagesCommand(dockerCli command.Cli) *cobra.Command { + options := imagesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "images [OPTIONS] [REPOSITORY[:TAG]]", + Short: "List images", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + options.matchName = args[0] + } + return runImages(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVarP(&options.all, "all", "a", false, "Show all images (default hides intermediate images)") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVar(&options.showDigests, "digests", false, "Show digests") + flags.StringVar(&options.format, "format", "", "Pretty-print images using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + cmd := *NewImagesCommand(dockerCli) + cmd.Aliases = []string{"images", "list"} + cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" + return &cmd +} + +func runImages(dockerCli command.Cli, options imagesOptions) error { + ctx := context.Background() + + filters := options.filter.Value() + if options.matchName != "" { + filters.Add("reference", options.matchName) + } + + listOptions := types.ImageListOptions{ + All: options.all, + Filters: filters, + } + + images, err := dockerCli.Client().ImageList(ctx, listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().ImagesFormat + } else { + format = formatter.TableFormatKey + } + } + + imageCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewImageFormat(format, options.quiet, options.showDigests), + Trunc: !options.noTrunc, + }, + Digest: options.showDigests, + } 
+ return formatter.ImageWrite(imageCtx, images) +} diff --git a/cli/cli/command/image/list_test.go b/cli/cli/command/image/list_test.go new file mode 100644 index 00000000..81394a79 --- /dev/null +++ b/cli/cli/command/image/list_test.go @@ -0,0 +1,98 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNewImagesCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imageListFunc func(options types.ImageListOptions) ([]types.ImageSummary, error) + }{ + { + name: "wrong-args", + args: []string{"arg1", "arg2"}, + expectedError: "requires at most 1 argument.", + }, + { + name: "failed-list", + expectedError: "something went wrong", + imageListFunc: func(options types.ImageListOptions) ([]types.ImageSummary, error) { + return []types.ImageSummary{{}}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cmd := NewImagesCommand(test.NewFakeCli(&fakeClient{imageListFunc: tc.imageListFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewImagesCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imageFormat string + imageListFunc func(options types.ImageListOptions) ([]types.ImageSummary, error) + }{ + { + name: "simple", + }, + { + name: "format", + imageFormat: "raw", + }, + { + name: "quiet-format", + args: []string{"-q"}, + imageFormat: "table", + }, + { + name: "match-name", + args: []string{"image"}, + imageListFunc: func(options types.ImageListOptions) ([]types.ImageSummary, error) { + assert.Check(t, is.Equal("image", options.Filters.Get("reference")[0])) + return []types.ImageSummary{{}}, nil + }, + }, + { + name: "filters", + args: []string{"--filter", "name=value"}, + imageListFunc: func(options types.ImageListOptions) ([]types.ImageSummary, error) { + assert.Check(t, is.Equal("value", options.Filters.Get("name")[0])) + return []types.ImageSummary{{}}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageListFunc: tc.imageListFunc}) + cli.SetConfigFile(&configfile.ConfigFile{ImagesFormat: tc.imageFormat}) + cmd := NewImagesCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("list-command-success.%s.golden", tc.name)) + } +} + +func TestNewListCommandAlias(t *testing.T) { + cmd := newListCommand(test.NewFakeCli(&fakeClient{})) + assert.Check(t, cmd.HasAlias("images")) + assert.Check(t, cmd.HasAlias("list")) + assert.Check(t, !cmd.HasAlias("other")) +} diff --git a/cli/cli/command/image/load.go b/cli/cli/command/image/load.go new file mode 100644 index 00000000..6809c620 --- /dev/null +++ b/cli/cli/command/image/load.go @@ -0,0 +1,76 @@ +package image + +import ( + "context" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type loadOptions struct { + input string + quiet bool +} + +// NewLoadCommand creates a new `docker load` command +func NewLoadCommand(dockerCli command.Cli) *cobra.Command { 
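+ // The image tarball is read from --input when set, otherwise from STDIN;
+ // runLoad rejects an interactive terminal on STDIN so the command cannot hang waiting for input.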
+ var opts loadOptions + + cmd := &cobra.Command{ + Use: "load [OPTIONS]", + Short: "Load an image from a tar archive or STDIN", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLoad(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") + + return cmd +} + +func runLoad(dockerCli command.Cli, opts loadOptions) error { + + var input io.Reader = dockerCli.In() + if opts.input != "" { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(opts.input) + if err != nil { + return err + } + defer file.Close() + input = file + } + + // To avoid getting stuck, verify that a tar file is given either in + // the input flag or through stdin and if not display an error message and exit. + if opts.input == "" && dockerCli.In().IsTerminal() { + return errors.Errorf("requested load from stdin, but stdin is empty") + } + + if !dockerCli.Out().IsTerminal() { + opts.quiet = true + } + response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) + } + + _, err = io.Copy(dockerCli.Out(), response.Body) + return err +} diff --git a/cli/cli/command/image/load_test.go b/cli/cli/command/image/load_test.go new file mode 100644 index 00000000..5fe4344f --- /dev/null +++ b/cli/cli/command/image/load_test.go @@ -0,0 +1,101 @@ +package image + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestNewLoadCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + isTerminalIn bool + expectedError string + imageLoadFunc func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) + }{ + { + name: "wrong-args", + args: []string{"arg"}, + expectedError: "accepts no arguments.", + }, + { + name: "input-to-terminal", + isTerminalIn: true, + expectedError: "requested load from stdin, but stdin is empty", + }, + { + name: "pull-error", + expectedError: "something went wrong", + imageLoadFunc: func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + return types.ImageLoadResponse{}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageLoadFunc: tc.imageLoadFunc}) + cli.In().SetIsTerminal(tc.isTerminalIn) + cmd := NewLoadCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewLoadCommandInvalidInput(t *testing.T) { + expectedError := "open *" + cmd := NewLoadCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"--input", "*"}) + err := cmd.Execute() + assert.ErrorContains(t, err, expectedError) +} + +func TestNewLoadCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imageLoadFunc func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) + }{ + { + name: "simple", + 
imageLoadFunc: func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + return types.ImageLoadResponse{Body: ioutil.NopCloser(strings.NewReader("Success"))}, nil + }, + }, + { + name: "json", + imageLoadFunc: func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + json := "{\"ID\": \"1\"}" + return types.ImageLoadResponse{ + Body: ioutil.NopCloser(strings.NewReader(json)), + JSON: true, + }, nil + }, + }, + { + name: "input-file", + args: []string{"--input", "testdata/load-command-success.input.txt"}, + imageLoadFunc: func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + return types.ImageLoadResponse{Body: ioutil.NopCloser(strings.NewReader("Success"))}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageLoadFunc: tc.imageLoadFunc}) + cmd := NewLoadCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("load-command-success.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/image/prune.go b/cli/cli/command/image/prune.go new file mode 100644 index 00000000..ada47df3 --- /dev/null +++ b/cli/cli/command/image/prune.go @@ -0,0 +1,94 @@ +package image + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for images +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused images", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const ( + allImageWarning = `WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue?` + danglingWarning = `WARNING! This will remove all dangling images. 
+Are you sure you want to continue?` +) + +func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := options.filter.Value() + pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all)) + pruneFilters = command.PruneFilters(dockerCli, pruneFilters) + + warning := danglingWarning + if options.all { + warning = allImageWarning + } + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return 0, "", nil + } + + report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) + if err != nil { + return 0, "", err + } + + if len(report.ImagesDeleted) > 0 { + output = "Deleted Images:\n" + for _, st := range report.ImagesDeleted { + if st.Untagged != "" { + output += fmt.Sprintln("untagged:", st.Untagged) + } else { + output += fmt.Sprintln("deleted:", st.Deleted) + } + } + spaceReclaimed = report.SpaceReclaimed + } + + return spaceReclaimed, output, nil +} + +// RunPrune calls the Image Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter}) +} diff --git a/cli/cli/command/image/prune_test.go b/cli/cli/command/image/prune_test.go new file mode 100644 index 00000000..ca46bbf6 --- /dev/null +++ b/cli/cli/command/image/prune_test.go @@ -0,0 +1,94 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNewPruneCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imagesPruneFunc func(pruneFilter filters.Args) (types.ImagesPruneReport, error) + }{ + { + name: "wrong-args", + args: []string{"something"}, + expectedError: "accepts no arguments.", + }, + { + name: "prune-error", + args: []string{"--force"}, + expectedError: "something went wrong", + imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) { + return types.ImagesPruneReport{}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cmd := NewPruneCommand(test.NewFakeCli(&fakeClient{ + imagesPruneFunc: tc.imagesPruneFunc, + })) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewPruneCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imagesPruneFunc func(pruneFilter filters.Args) (types.ImagesPruneReport, error) + }{ + { + name: "all", + args: []string{"--all"}, + imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) { + assert.Check(t, is.Equal("false", pruneFilter.Get("dangling")[0])) + return types.ImagesPruneReport{}, nil + }, + }, + { + name: "force-deleted", + args: []string{"--force"}, + imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) { + assert.Check(t, is.Equal("true", pruneFilter.Get("dangling")[0])) + return types.ImagesPruneReport{ + ImagesDeleted: []types.ImageDeleteResponseItem{{Deleted: "image1"}}, + SpaceReclaimed: 1, + }, nil + }, + }, + { + name: "force-untagged", + args: []string{"--force"}, + imagesPruneFunc: func(pruneFilter filters.Args) 
(types.ImagesPruneReport, error) { + assert.Check(t, is.Equal("true", pruneFilter.Get("dangling")[0])) + return types.ImagesPruneReport{ + ImagesDeleted: []types.ImageDeleteResponseItem{{Untagged: "image1"}}, + SpaceReclaimed: 2, + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imagesPruneFunc: tc.imagesPruneFunc}) + cmd := NewPruneCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("prune-command-success.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/image/pull.go b/cli/cli/command/image/pull.go new file mode 100644 index 00000000..9ac382c3 --- /dev/null +++ b/cli/cli/command/image/pull.go @@ -0,0 +1,83 @@ +package image + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// PullOptions defines what and how to pull +type PullOptions struct { + remote string + all bool + platform string + untrusted bool +} + +// NewPullCommand creates a new `docker pull` command +func NewPullCommand(dockerCli command.Cli) *cobra.Command { + var opts PullOptions + + cmd := &cobra.Command{ + Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", + Short: "Pull an image or a repository from a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return RunPull(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") + + command.AddPlatformFlag(flags, &opts.platform) + command.AddTrustVerificationFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + + return cmd +} + +// RunPull performs a pull against the engine based on the specified options +func RunPull(cli command.Cli, opts PullOptions) error { + distributionRef, err := reference.ParseNormalizedNamed(opts.remote) + switch { + case err != nil: + return err + case opts.all && !reference.IsNameOnly(distributionRef): + return errors.New("tag can't be used with --all-tags/-a") + case !opts.all && reference.IsNameOnly(distributionRef): + distributionRef = reference.TagNameOnly(distributionRef) + if tagged, ok := distributionRef.(reference.Tagged); ok { + fmt.Fprintf(cli.Out(), "Using default tag: %s\n", tagged.Tag()) + } + } + + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, AuthResolver(cli), distributionRef.String()) + if err != nil { + return err + } + + // Check if reference has a digest + _, isCanonical := distributionRef.(reference.Canonical) + if !opts.untrusted && !isCanonical { + err = trustedPull(ctx, cli, imgRefAndAuth, opts.platform) + } else { + err = imagePullPrivileged(ctx, cli, imgRefAndAuth, opts.all, opts.platform) + } + if err != nil { + if strings.Contains(err.Error(), "when fetching 'plugin'") { + return errors.New(err.Error() + " - Use `docker plugin install`") + } + return err + } + return nil +} diff --git a/cli/cli/command/image/pull_test.go b/cli/cli/command/image/pull_test.go new file mode 100644 index 00000000..c5ae7560 --- /dev/null +++ b/cli/cli/command/image/pull_test.go @@ -0,0 +1,121 @@ +package image + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + 
"github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNewPullCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "wrong-args", + expectedError: "requires exactly 1 argument.", + args: []string{}, + }, + { + name: "invalid-name", + expectedError: "invalid reference format: repository name must be lowercase", + args: []string{"UPPERCASE_REPO"}, + }, + { + name: "all-tags-with-tag", + expectedError: "tag can't be used with --all-tags/-a", + args: []string{"--all-tags", "image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cmd := NewPullCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewPullCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + expectedTag string + }{ + { + name: "simple", + args: []string{"image:tag"}, + expectedTag: "image:tag", + }, + { + name: "simple-no-tag", + args: []string{"image"}, + expectedTag: "image:latest", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + imagePullFunc: func(ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + assert.Check(t, is.Equal(tc.expectedTag, ref), tc.name) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }) + cmd := NewPullCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("pull-command-success.%s.golden", tc.name)) + } +} + +func TestNewPullCommandWithContentTrustErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + notaryFunc test.NotaryClientFuncType + }{ + { + name: "offline-notary-server", + notaryFunc: notary.GetOfflineNotaryRepository, + expectedError: "client is offline", + args: []string{"image:tag"}, + }, + { + name: "uninitialized-notary-server", + notaryFunc: notary.GetUninitializedNotaryRepository, + expectedError: "remote trust data does not exist", + args: []string{"image:tag"}, + }, + { + name: "empty-notary-server", + notaryFunc: notary.GetEmptyTargetsNotaryRepository, + expectedError: "No valid trust data for tag", + args: []string{"image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + imagePullFunc: func(ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), fmt.Errorf("shouldn't try to pull image") + }, + }, test.EnableContentTrust) + cli.SetNotaryClient(tc.notaryFunc) + cmd := NewPullCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.ErrorContains(t, err, tc.expectedError) + } +} diff --git a/cli/cli/command/image/push.go b/cli/cli/command/image/push.go new file mode 100644 index 00000000..de6c2ec3 --- /dev/null +++ b/cli/cli/command/image/push.go @@ -0,0 +1,70 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type pushOptions struct { + remote string + untrusted bool +} + +// NewPushCommand creates a new `docker push` command +func NewPushCommand(dockerCli command.Cli) *cobra.Command { + var 
opts pushOptions + + cmd := &cobra.Command{ + Use: "push [OPTIONS] NAME[:TAG]", + Short: "Push an image or a repository to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return RunPush(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + command.AddTrustSigningFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + + return cmd +} + +// RunPush performs a push against the engine based on the specified options +func RunPush(dockerCli command.Cli, opts pushOptions) error { + ref, err := reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + ctx := context.Background() + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") + + if !opts.untrusted { + return TrustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) + } + + responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/cli/cli/command/image/push_test.go b/cli/cli/command/image/push_test.go new file mode 100644 index 00000000..75798aaa --- /dev/null +++ b/cli/cli/command/image/push_test.go @@ -0,0 +1,71 @@ +package image + +import ( + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestNewPushCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imagePushFunc func(ref string, options types.ImagePushOptions) (io.ReadCloser, error) + }{ + { + name: "wrong-args", + args: []string{}, + expectedError: "requires exactly 1 argument.", + }, + { + name: "invalid-name", + args: []string{"UPPERCASE_REPO"}, + expectedError: "invalid reference format: repository name must be lowercase", + }, + { + name: "push-failed", + args: []string{"image:repo"}, + expectedError: "Failed to push", + imagePushFunc: func(ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), errors.Errorf("Failed to push") + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imagePushFunc: tc.imagePushFunc}) + cmd := NewPushCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewPushCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + }{ + { + name: "simple", + args: []string{"image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + imagePushFunc: func(ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }) + cmd := NewPushCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + } +} diff --git a/cli/cli/command/image/remove.go b/cli/cli/command/image/remove.go new file mode 100644 index 00000000..a4c72e44 --- /dev/null +++ b/cli/cli/command/image/remove.go @@ -0,0 
+1,86 @@ +package image + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + noPrune bool +} + +// NewRemoveCommand creates a new `docker remove` command +func NewRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rmi [OPTIONS] IMAGE [IMAGE...]", + Short: "Remove one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, opts, args) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") + flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") + + return cmd +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + cmd := *NewRemoveCommand(dockerCli) + cmd.Aliases = []string{"rmi", "remove"} + cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" + return &cmd +} + +func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error { + client := dockerCli.Client() + ctx := context.Background() + + options := types.ImageRemoveOptions{ + Force: opts.force, + PruneChildren: !opts.noPrune, + } + + var errs []string + var fatalErr = false + for _, img := range images { + dels, err := client.ImageRemove(ctx, img, options) + if err != nil { + if !apiclient.IsErrNotFound(err) { + fatalErr = true + } + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) + } + } + } + } + + if len(errs) > 0 { + msg := strings.Join(errs, "\n") + if !opts.force || fatalErr { + return errors.New(msg) + } + fmt.Fprintln(dockerCli.Err(), msg) + } + return nil +} diff --git a/cli/cli/command/image/remove_test.go b/cli/cli/command/image/remove_test.go new file mode 100644 index 00000000..6db2e031 --- /dev/null +++ b/cli/cli/command/image/remove_test.go @@ -0,0 +1,134 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +type notFound struct { + imageID string +} + +func (n notFound) Error() string { + return fmt.Sprintf("Error: No such image: %s", n.imageID) +} + +func (n notFound) NotFound() bool { + return true +} + +func TestNewRemoveCommandAlias(t *testing.T) { + cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{})) + assert.Check(t, cmd.HasAlias("rmi")) + assert.Check(t, cmd.HasAlias("remove")) + assert.Check(t, !cmd.HasAlias("other")) +} + +func TestNewRemoveCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imageRemoveFunc func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + }{ + { + name: "wrong args", + expectedError: "requires at least 1 argument.", + }, + { + name: "ImageRemove fail with force option", + args: []string{"-f", "image1"}, + expectedError: "error removing image", + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, is.Equal("image1", image)) + return 
[]types.ImageDeleteResponseItem{}, errors.Errorf("error removing image") + }, + }, + { + name: "ImageRemove fail", + args: []string{"arg1"}, + expectedError: "error removing image", + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, !options.Force) + assert.Check(t, options.PruneChildren) + return []types.ImageDeleteResponseItem{}, errors.Errorf("error removing image") + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cmd := NewRemoveCommand(test.NewFakeCli(&fakeClient{ + imageRemoveFunc: tc.imageRemoveFunc, + })) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + }) + } +} + +func TestNewRemoveCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imageRemoveFunc func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + expectedStderr string + }{ + { + name: "Image Deleted", + args: []string{"image1"}, + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, is.Equal("image1", image)) + return []types.ImageDeleteResponseItem{{Deleted: image}}, nil + }, + }, + { + name: "Image not found with force option", + args: []string{"-f", "image1"}, + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, is.Equal("image1", image)) + assert.Check(t, is.Equal(true, options.Force)) + return []types.ImageDeleteResponseItem{}, notFound{"image1"} + }, + expectedStderr: "Error: No such image: image1\n", + }, + + { + name: "Image Untagged", + args: []string{"image1"}, + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, is.Equal("image1", image)) + return []types.ImageDeleteResponseItem{{Untagged: image}}, nil + }, + }, + { + name: "Image Deleted and Untagged", + args: []string{"image1", "image2"}, + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + if image == "image1" { + return []types.ImageDeleteResponseItem{{Untagged: image}}, nil + } + return []types.ImageDeleteResponseItem{{Deleted: image}}, nil + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{imageRemoveFunc: tc.imageRemoveFunc}) + cmd := NewRemoveCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(tc.expectedStderr, cli.ErrBuffer().String())) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("remove-command-success.%s.golden", tc.name)) + }) + } +} diff --git a/cli/cli/command/image/save.go b/cli/cli/command/image/save.go new file mode 100644 index 00000000..ef23ca1b --- /dev/null +++ b/cli/cli/command/image/save.go @@ -0,0 +1,73 @@ +package image + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type saveOptions struct { + images []string + output string +} + +// NewSaveCommand creates a new `docker save` command +func NewSaveCommand(dockerCli command.Cli) *cobra.Command { + var opts saveOptions + + cmd := &cobra.Command{ + Use: "save [OPTIONS] IMAGE [IMAGE...]", + Short: "Save one or more images to a tar archive 
(streamed to STDOUT by default)", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.images = args + return RunSave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +// RunSave performs a save against the engine based on the specified options +func RunSave(dockerCli command.Cli, opts saveOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("cowardly refusing to save to a terminal. Use the -o flag or redirect") + } + + if err := validateOutputPath(opts.output); err != nil { + return errors.Wrap(err, "failed to save image") + } + + responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} + +func validateOutputPath(path string) error { + dir := filepath.Dir(path) + if dir != "" && dir != "." { + if _, err := os.Stat(dir); os.IsNotExist(err) { + return errors.Errorf("unable to validate output path: directory %q does not exist", dir) + } + } + return nil +} diff --git a/cli/cli/command/image/save_test.go b/cli/cli/command/image/save_test.go new file mode 100644 index 00000000..d051e8cb --- /dev/null +++ b/cli/cli/command/image/save_test.go @@ -0,0 +1,103 @@ +package image + +import ( + "io" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNewSaveCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + isTerminal bool + expectedError string + imageSaveFunc func(images []string) (io.ReadCloser, error) + }{ + { + name: "wrong args", + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + { + name: "output to terminal", + args: []string{"output", "file", "arg1"}, + isTerminal: true, + expectedError: "cowardly refusing to save to a terminal. 
Use the -o flag or redirect", + }, + { + name: "ImageSave fail", + args: []string{"arg1"}, + isTerminal: false, + expectedError: "error saving image", + imageSaveFunc: func(images []string) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), errors.Errorf("error saving image") + }, + }, + { + name: "output directory does not exist", + args: []string{"-o", "fakedir/out.tar", "arg1"}, + expectedError: "failed to save image: unable to validate output path: directory \"fakedir\" does not exist", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageSaveFunc: tc.imageSaveFunc}) + cli.Out().SetIsTerminal(tc.isTerminal) + cmd := NewSaveCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewSaveCommandSuccess(t *testing.T) { + testCases := []struct { + args []string + isTerminal bool + imageSaveFunc func(images []string) (io.ReadCloser, error) + deferredFunc func() + }{ + { + args: []string{"-o", "save_tmp_file", "arg1"}, + isTerminal: true, + imageSaveFunc: func(images []string) (io.ReadCloser, error) { + assert.Assert(t, is.Len(images, 1)) + assert.Check(t, is.Equal("arg1", images[0])) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + deferredFunc: func() { + os.Remove("save_tmp_file") + }, + }, + { + args: []string{"arg1", "arg2"}, + isTerminal: false, + imageSaveFunc: func(images []string) (io.ReadCloser, error) { + assert.Assert(t, is.Len(images, 2)) + assert.Check(t, is.Equal("arg1", images[0])) + assert.Check(t, is.Equal("arg2", images[1])) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + } + for _, tc := range testCases { + cmd := NewSaveCommand(test.NewFakeCli(&fakeClient{ + imageSaveFunc: func(images []string) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), nil + }, + })) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + if tc.deferredFunc != nil { + tc.deferredFunc() + } + } +} diff --git a/cli/cli/command/image/tag.go b/cli/cli/command/image/tag.go new file mode 100644 index 00000000..39d4caaf --- /dev/null +++ b/cli/cli/command/image/tag.go @@ -0,0 +1,41 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type tagOptions struct { + image string + name string +} + +// NewTagCommand creates a new `docker tag` command +func NewTagCommand(dockerCli command.Cli) *cobra.Command { + var opts tagOptions + + cmd := &cobra.Command{ + Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", + Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + opts.name = args[1] + return runTag(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTag(dockerCli command.Cli, opts tagOptions) error { + ctx := context.Background() + + return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) +} diff --git a/cli/cli/command/image/tag_test.go b/cli/cli/command/image/tag_test.go new file mode 100644 index 00000000..9c43f3fe --- /dev/null +++ b/cli/cli/command/image/tag_test.go @@ -0,0 +1,41 @@ +package image + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCliNewTagCommandErrors(t *testing.T) { + testCases := 
[][]string{ + {}, + {"image1"}, + {"image1", "image2", "image3"}, + } + expectedError := "\"tag\" requires exactly 2 arguments." + for _, args := range testCases { + cmd := NewTagCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), expectedError) + } +} + +func TestCliNewTagCommand(t *testing.T) { + cmd := NewTagCommand( + test.NewFakeCli(&fakeClient{ + imageTagFunc: func(image string, ref string) error { + assert.Check(t, is.Equal("image1", image)) + assert.Check(t, is.Equal("image2", ref)) + return nil + }, + })) + cmd.SetArgs([]string{"image1", "image2"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + value, _ := cmd.Flags().GetBool("interspersed") + assert.Check(t, !value) +} diff --git a/cli/cli/command/image/testdata/history-command-success.non-human.golden b/cli/cli/command/image/testdata/history-command-success.non-human.golden new file mode 100644 index 00000000..4a83a3d8 --- /dev/null +++ b/cli/cli/command/image/testdata/history-command-success.non-human.golden @@ -0,0 +1,2 @@ +IMAGE CREATED AT CREATED BY SIZE COMMENT +abcdef 2017-01-01T12:00:03Z rose 0 new history item! diff --git a/cli/cli/command/image/testdata/history-command-success.quiet-no-trunc.golden b/cli/cli/command/image/testdata/history-command-success.quiet-no-trunc.golden new file mode 100644 index 00000000..65103f63 --- /dev/null +++ b/cli/cli/command/image/testdata/history-command-success.quiet-no-trunc.golden @@ -0,0 +1 @@ +1234567890123456789 diff --git a/cli/cli/command/image/testdata/history-command-success.quiet.golden b/cli/cli/command/image/testdata/history-command-success.quiet.golden new file mode 100644 index 00000000..42c7c82c --- /dev/null +++ b/cli/cli/command/image/testdata/history-command-success.quiet.golden @@ -0,0 +1 @@ +tag diff --git a/cli/cli/command/image/testdata/history-command-success.simple.golden b/cli/cli/command/image/testdata/history-command-success.simple.golden new file mode 100644 index 00000000..8aa59052 --- /dev/null +++ b/cli/cli/command/image/testdata/history-command-success.simple.golden @@ -0,0 +1,2 @@ +IMAGE CREATED CREATED BY SIZE COMMENT +123456789012 Less than a second ago 0B diff --git a/cli/cli/command/image/testdata/import-command-success.input.txt b/cli/cli/command/image/testdata/import-command-success.input.txt new file mode 100644 index 00000000..7ab5949b --- /dev/null +++ b/cli/cli/command/image/testdata/import-command-success.input.txt @@ -0,0 +1 @@ +file input test \ No newline at end of file diff --git a/cli/cli/command/image/testdata/inspect-command-success.format.golden b/cli/cli/command/image/testdata/inspect-command-success.format.golden new file mode 100644 index 00000000..f934996b --- /dev/null +++ b/cli/cli/command/image/testdata/inspect-command-success.format.golden @@ -0,0 +1 @@ +'image' diff --git a/cli/cli/command/image/testdata/inspect-command-success.simple-many.golden b/cli/cli/command/image/testdata/inspect-command-success.simple-many.golden new file mode 100644 index 00000000..f72c96f7 --- /dev/null +++ b/cli/cli/command/image/testdata/inspect-command-success.simple-many.golden @@ -0,0 +1,56 @@ +[ + { + "Id": "", + "RepoTags": null, + "RepoDigests": null, + "Parent": "", + "Comment": "", + "Created": "", + "Container": "", + "ContainerConfig": null, + "DockerVersion": "", + "Author": "", + "Config": null, + "Architecture": "", + "Os": "", + "Size": 0, + "VirtualSize": 0, + "GraphDriver": { + "Data": null, + "Name": "" + }, + "RootFS": { + 
"Type": "" + }, + "Metadata": { + "LastTagTime": "0001-01-01T00:00:00Z" + } + }, + { + "Id": "", + "RepoTags": null, + "RepoDigests": null, + "Parent": "", + "Comment": "", + "Created": "", + "Container": "", + "ContainerConfig": null, + "DockerVersion": "", + "Author": "", + "Config": null, + "Architecture": "", + "Os": "", + "Size": 0, + "VirtualSize": 0, + "GraphDriver": { + "Data": null, + "Name": "" + }, + "RootFS": { + "Type": "" + }, + "Metadata": { + "LastTagTime": "0001-01-01T00:00:00Z" + } + } +] diff --git a/cli/cli/command/image/testdata/inspect-command-success.simple.golden b/cli/cli/command/image/testdata/inspect-command-success.simple.golden new file mode 100644 index 00000000..878463ff --- /dev/null +++ b/cli/cli/command/image/testdata/inspect-command-success.simple.golden @@ -0,0 +1,29 @@ +[ + { + "Id": "", + "RepoTags": null, + "RepoDigests": null, + "Parent": "", + "Comment": "", + "Created": "", + "Container": "", + "ContainerConfig": null, + "DockerVersion": "", + "Author": "", + "Config": null, + "Architecture": "", + "Os": "", + "Size": 0, + "VirtualSize": 0, + "GraphDriver": { + "Data": null, + "Name": "" + }, + "RootFS": { + "Type": "" + }, + "Metadata": { + "LastTagTime": "0001-01-01T00:00:00Z" + } + } +] diff --git a/cli/cli/command/image/testdata/list-command-success.filters.golden b/cli/cli/command/image/testdata/list-command-success.filters.golden new file mode 100644 index 00000000..e3b8109b --- /dev/null +++ b/cli/cli/command/image/testdata/list-command-success.filters.golden @@ -0,0 +1 @@ +REPOSITORY TAG IMAGE ID CREATED SIZE diff --git a/cli/cli/command/image/testdata/list-command-success.format.golden b/cli/cli/command/image/testdata/list-command-success.format.golden new file mode 100644 index 00000000..e69de29b diff --git a/cli/cli/command/image/testdata/list-command-success.match-name.golden b/cli/cli/command/image/testdata/list-command-success.match-name.golden new file mode 100644 index 00000000..e3b8109b --- /dev/null +++ b/cli/cli/command/image/testdata/list-command-success.match-name.golden @@ -0,0 +1 @@ +REPOSITORY TAG IMAGE ID CREATED SIZE diff --git a/cli/cli/command/image/testdata/list-command-success.quiet-format.golden b/cli/cli/command/image/testdata/list-command-success.quiet-format.golden new file mode 100644 index 00000000..e69de29b diff --git a/cli/cli/command/image/testdata/list-command-success.simple.golden b/cli/cli/command/image/testdata/list-command-success.simple.golden new file mode 100644 index 00000000..e3b8109b --- /dev/null +++ b/cli/cli/command/image/testdata/list-command-success.simple.golden @@ -0,0 +1 @@ +REPOSITORY TAG IMAGE ID CREATED SIZE diff --git a/cli/cli/command/image/testdata/load-command-success.input-file.golden b/cli/cli/command/image/testdata/load-command-success.input-file.golden new file mode 100644 index 00000000..51da4200 --- /dev/null +++ b/cli/cli/command/image/testdata/load-command-success.input-file.golden @@ -0,0 +1 @@ +Success \ No newline at end of file diff --git a/cli/cli/command/image/testdata/load-command-success.input.txt b/cli/cli/command/image/testdata/load-command-success.input.txt new file mode 100644 index 00000000..7ab5949b --- /dev/null +++ b/cli/cli/command/image/testdata/load-command-success.input.txt @@ -0,0 +1 @@ +file input test \ No newline at end of file diff --git a/cli/cli/command/image/testdata/load-command-success.json.golden b/cli/cli/command/image/testdata/load-command-success.json.golden new file mode 100644 index 00000000..c17f16ec --- /dev/null +++ 
b/cli/cli/command/image/testdata/load-command-success.json.golden @@ -0,0 +1 @@ +1: diff --git a/cli/cli/command/image/testdata/load-command-success.simple.golden b/cli/cli/command/image/testdata/load-command-success.simple.golden new file mode 100644 index 00000000..51da4200 --- /dev/null +++ b/cli/cli/command/image/testdata/load-command-success.simple.golden @@ -0,0 +1 @@ +Success \ No newline at end of file diff --git a/cli/cli/command/image/testdata/prune-command-success.all.golden b/cli/cli/command/image/testdata/prune-command-success.all.golden new file mode 100644 index 00000000..4d144528 --- /dev/null +++ b/cli/cli/command/image/testdata/prune-command-success.all.golden @@ -0,0 +1,2 @@ +WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue? [y/N] Total reclaimed space: 0B diff --git a/cli/cli/command/image/testdata/prune-command-success.force-deleted.golden b/cli/cli/command/image/testdata/prune-command-success.force-deleted.golden new file mode 100644 index 00000000..1b6efd4a --- /dev/null +++ b/cli/cli/command/image/testdata/prune-command-success.force-deleted.golden @@ -0,0 +1,4 @@ +Deleted Images: +deleted: image1 + +Total reclaimed space: 1B diff --git a/cli/cli/command/image/testdata/prune-command-success.force-untagged.golden b/cli/cli/command/image/testdata/prune-command-success.force-untagged.golden new file mode 100644 index 00000000..725468fe --- /dev/null +++ b/cli/cli/command/image/testdata/prune-command-success.force-untagged.golden @@ -0,0 +1,4 @@ +Deleted Images: +untagged: image1 + +Total reclaimed space: 2B diff --git a/cli/cli/command/image/testdata/pull-command-success.simple-no-tag.golden b/cli/cli/command/image/testdata/pull-command-success.simple-no-tag.golden new file mode 100644 index 00000000..946de409 --- /dev/null +++ b/cli/cli/command/image/testdata/pull-command-success.simple-no-tag.golden @@ -0,0 +1 @@ +Using default tag: latest diff --git a/cli/cli/command/image/testdata/pull-command-success.simple.golden b/cli/cli/command/image/testdata/pull-command-success.simple.golden new file mode 100644 index 00000000..e69de29b diff --git a/cli/cli/command/image/testdata/remove-command-success.Image Deleted and Untagged.golden b/cli/cli/command/image/testdata/remove-command-success.Image Deleted and Untagged.golden new file mode 100644 index 00000000..94db0844 --- /dev/null +++ b/cli/cli/command/image/testdata/remove-command-success.Image Deleted and Untagged.golden @@ -0,0 +1,2 @@ +Untagged: image1 +Deleted: image2 diff --git a/cli/cli/command/image/testdata/remove-command-success.Image Deleted.golden b/cli/cli/command/image/testdata/remove-command-success.Image Deleted.golden new file mode 100644 index 00000000..445df11a --- /dev/null +++ b/cli/cli/command/image/testdata/remove-command-success.Image Deleted.golden @@ -0,0 +1 @@ +Deleted: image1 diff --git a/cli/cli/command/image/testdata/remove-command-success.Image Untagged.golden b/cli/cli/command/image/testdata/remove-command-success.Image Untagged.golden new file mode 100644 index 00000000..ebbb4075 --- /dev/null +++ b/cli/cli/command/image/testdata/remove-command-success.Image Untagged.golden @@ -0,0 +1 @@ +Untagged: image1 diff --git a/cli/cli/command/image/testdata/remove-command-success.Image not found with force option.golden b/cli/cli/command/image/testdata/remove-command-success.Image not found with force option.golden new file mode 100644 index 00000000..e69de29b diff --git a/cli/cli/command/image/trust.go 
b/cli/cli/command/image/trust.go new file mode 100644 index 00000000..230420d8 --- /dev/null +++ b/cli/cli/command/image/trust.go @@ -0,0 +1,352 @@ +package image + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "sort" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +type target struct { + name string + digest digest.Digest + size int64 +} + +// TrustedPush handles content trust pushing of an image +func TrustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) +} + +// PushTrustedReference pushes a canonical reference to the trust server. +// nolint: gocyclo +func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { + // If it is a trusted push we would like to find the target entry which match the + // tag provided in the function and then do an AddTarget later. + target := &client.Target{} + // Count the times of calling for handleTarget, + // if it is called more that once, that should be considered an error in a trusted push. + cnt := 0 + handleTarget := func(msg jsonmessage.JSONMessage) { + cnt++ + if cnt > 1 { + // handleTarget should only be called once. This will be treated as an error. + return + } + + var pushResult types.PushResult + err := json.Unmarshal(*msg.Aux, &pushResult) + if err == nil && pushResult.Tag != "" { + if dgst, err := digest.Parse(pushResult.Digest); err == nil { + h, err := hex.DecodeString(dgst.Hex()) + if err != nil { + target = nil + return + } + target.Name = pushResult.Tag + target.Hashes = data.Hashes{string(dgst.Algorithm()): h} + target.Length = int64(pushResult.Size) + } + } + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + default: + // We want trust signatures to always take an explicit tag, + // otherwise it will act as an untrusted push. 
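+ // Put differently (a rough summary of the cases above): pushing by digest
+ // (NAME@sha256:...) is rejected outright, and pushing a bare NAME with no
+ // tag streams the push output here, prints the notice below and skips
+ // signing entirely; only an explicit NAME:TAG continues past this switch
+ // into the signing path.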
+ if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil { + return err + } + fmt.Fprintln(streams.Err(), "No tag specified, skipping trust metadata push") + return nil + } + + if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil { + return err + } + + if cnt > 1 { + return errors.Errorf("internal error: only one call to handleTarget expected") + } + + if target == nil { + return errors.Errorf("no targets found, please provide a specific tag in order to sign it") + } + + fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata") + + repo, err := trust.GetNotaryRepository(streams.In(), streams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull") + if err != nil { + return errors.Wrap(err, "error establishing connection to trust repository") + } + + // get the latest repository metadata so we can figure out which roles to sign + _, err = repo.ListTargets() + + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + keys := repo.GetCryptoService().ListKeys(data.CanonicalRootRole) + var rootKeyID string + // always select the first root key + if len(keys) > 0 { + sort.Strings(keys) + rootKeyID = keys[0] + } else { + rootPublicKey, err := repo.GetCryptoService().Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return err + } + rootKeyID = rootPublicKey.ID() + } + + // Initialize the notary repository with a remotely managed snapshot key + if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return trust.NotaryError(repoInfo.Name.Name(), err) + } + fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name()) + err = repo.AddTarget(target, data.CanonicalTargetsRole) + case nil: + // already initialized and we have successfully downloaded the latest metadata + err = AddTargetToAllSignableRoles(repo, target) + default: + return trust.NotaryError(repoInfo.Name.Name(), err) + } + + if err == nil { + err = repo.Publish() + } + + if err != nil { + err = errors.Wrapf(err, "failed to sign %s:%s", repoInfo.Name.Name(), tag) + return trust.NotaryError(repoInfo.Name.Name(), err) + } + + fmt.Fprintf(streams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag) + return nil +} + +// AddTargetToAllSignableRoles attempts to add the image target to all the top level delegation roles we can +// (based on whether we have the signing key and whether the role's path allows +// us to). +// If there are no delegation roles, we add to the targets role. +func AddTargetToAllSignableRoles(repo client.Repository, target *client.Target) error { + signableRoles, err := trust.GetSignableRoles(repo, target) + if err != nil { + return err + } + + return repo.AddTarget(target, signableRoles...) 
+} + +// imagePushPrivileged push the image +func imagePushPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref reference.Reference, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + } + + return cli.Client().ImagePush(ctx, reference.FamiliarString(ref), options) +} + +// trustedPull handles content trust pulling of an image +func trustedPull(ctx context.Context, cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth, platform string) error { + refs, err := getTrustedPullTargets(cli, imgRefAndAuth) + if err != nil { + return err + } + + ref := imgRefAndAuth.Reference() + for i, r := range refs { + displayTag := r.name + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), reference.FamiliarName(ref), displayTag, r.digest) + + trustedRef, err := reference.WithDigest(reference.TrimNamed(ref), r.digest) + if err != nil { + return err + } + updatedImgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, AuthResolver(cli), trustedRef.String()) + if err != nil { + return err + } + if err := imagePullPrivileged(ctx, cli, updatedImgRefAndAuth, false, platform); err != nil { + return err + } + + tagged, err := reference.WithTag(reference.TrimNamed(ref), r.name) + if err != nil { + return err + } + + if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { + return err + } + } + return nil +} + +func getTrustedPullTargets(cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth) ([]target, error) { + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly) + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to trust repository") + } + + ref := imgRefAndAuth.Reference() + tagged, isTagged := ref.(reference.NamedTagged) + if !isTagged { + // List all targets + targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(ref.Name(), err) + } + var refs []target + for _, tgt := range targets { + t, err := convertTarget(tgt.Target) + if err != nil { + fmt.Fprintf(cli.Err(), "Skipping target for %q\n", reference.FamiliarName(ref)) + continue + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { + continue + } + refs = append(refs, t) + } + if len(refs) == 0 { + return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name())) + } + return refs, nil + } + + t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(ref.Name(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag())) + } + + logrus.Debugf("retrieving target for %s role", t.Role) + r, err := convertTarget(t.Target) + return []target{r}, err +} + +// imagePullPrivileged pulls the image and displays it to the output +func imagePullPrivileged(ctx context.Context, cli 
command.Cli, imgRefAndAuth trust.ImageRefAndAuth, all bool, platform string) error { + ref := reference.FamiliarString(imgRefAndAuth.Reference()) + + encodedAuth, err := command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig()) + if err != nil { + return err + } + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, imgRefAndAuth.RepoInfo().Index, "pull") + options := types.ImagePullOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + All: all, + Platform: platform, + } + responseBody, err := cli.Client().ImagePull(ctx, ref, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) +} + +// TrustedReference returns the canonical trusted reference for an image reference +func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, rs, AuthResolver(cli), ref.String()) + if err != nil { + return nil, err + } + + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, []string{"pull"}) + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to trust repository") + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(imgRefAndAuth.RepoInfo().Name.Name(), err) + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(imgRefAndAuth.RepoInfo().Name.Name(), client.ErrNoSuchTarget(ref.Tag())) + } + r, err := convertTarget(t.Target) + if err != nil { + return nil, err + + } + return reference.WithDigest(reference.TrimNamed(ref), r.digest) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + name: t.Name, + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +// TagTrusted tags a trusted ref +// nolint: interfacer +func TagTrusted(ctx context.Context, cli command.Cli, trustedRef reference.Canonical, ref reference.NamedTagged) error { + // Use familiar references when interacting with client and output + familiarRef := reference.FamiliarString(ref) + trustedFamiliarRef := reference.FamiliarString(trustedRef) + + fmt.Fprintf(cli.Err(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef) + + return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef) +} + +// AuthResolver returns an auth resolver function from a command.Cli +func AuthResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { + return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { + return command.ResolveAuthConfig(ctx, cli, index) + } +} diff --git a/cli/cli/command/image/trust_test.go b/cli/cli/command/image/trust_test.go new file mode 100644 index 00000000..97585a72 --- /dev/null +++ b/cli/cli/command/image/trust_test.go @@ -0,0 +1,73 @@ +package image + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/cli/cli/trust" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" + 
"github.com/theupdateframework/notary/trustpinning" + "gotest.tools/assert" +) + +func unsetENV() { + os.Unsetenv("DOCKER_CONTENT_TRUST") + os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") +} + +func TestENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + output, err := trust.Server(indexInfo) + expectedStr := "https://notary-test.com:5000" + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} + +func TestHTTPENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + _, err := trust.Server(indexInfo) + if err == nil { + t.Fatal("Expected error with invalid scheme") + } +} + +func TestOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: true} + output, err := trust.Server(indexInfo) + if err != nil || output != trust.NotaryServer { + t.Fatalf("Expected server to be %s, got %s", trust.NotaryServer, output) + } +} + +func TestNonOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: false} + output, err := trust.Server(indexInfo) + expectedStr := "https://" + indexInfo.Name + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} + +func TestAddTargetToAllSignableRolesError(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever("password"), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + target := client.Target{} + err = AddTargetToAllSignableRoles(notaryRepo, &target) + assert.Error(t, err, "client is offline") +} diff --git a/cli/cli/command/in.go b/cli/cli/command/in.go new file mode 100644 index 00000000..54855c6d --- /dev/null +++ b/cli/cli/command/in.go @@ -0,0 +1,56 @@ +package command + +import ( + "errors" + "io" + "os" + "runtime" + + "github.com/docker/docker/pkg/term" +) + +// InStream is an input stream used by the DockerCli to read user input +type InStream struct { + CommonStream + in io.ReadCloser +} + +func (i *InStream) Read(p []byte) (int, error) { + return i.in.Read(p) +} + +// Close implements the Closer interface +func (i *InStream) Close() error { + return i.in.Close() +} + +// SetRawTerminal sets raw mode on the input terminal +func (i *InStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !i.CommonStream.isTerminal { + return nil + } + i.CommonStream.state, err = term.SetRawTerminal(i.CommonStream.fd) + return err +} + +// CheckTty checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. 
+ if ttyMode && attachStdin && !i.isTerminal { + eText := "the input device is not a TTY" + if runtime.GOOS == "windows" { + return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") + } + return errors.New(eText) + } + return nil +} + +// NewInStream returns a new InStream object from a ReadCloser +func NewInStream(in io.ReadCloser) *InStream { + fd, isTerminal := term.GetFdInfo(in) + return &InStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, in: in} +} diff --git a/cli/cli/command/inspect/inspector.go b/cli/cli/command/inspect/inspector.go new file mode 100644 index 00000000..aef31e62 --- /dev/null +++ b/cli/cli/command/inspect/inspector.go @@ -0,0 +1,199 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "io" + "strings" + "text/template" + + "github.com/docker/cli/cli" + "github.com/docker/cli/templates" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Inspector defines an interface to implement to process elements +type Inspector interface { + Inspect(typedElement interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. +func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// NewTemplateInspectorFromString creates a new TemplateInspector from a string +// which is compiled into a template. +func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { + if tmplStr == "" { + return NewIndentedInspector(out), nil + } + + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, errors.Errorf("Template parsing error: %s", err) + } + return NewTemplateInspector(out, tmpl), nil +} + +// GetRefFunc is a function which used by Inspect to fetch an object from a +// reference +type GetRefFunc func(ref string) (interface{}, []byte, error) + +// Inspect fetches objects by reference using GetRefFunc and writes the json +// representation to the output writer. +func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { + inspector, err := NewTemplateInspectorFromString(out, tmplStr) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErrs []string + for _, ref := range references { + element, raw, err := getRef(ref) + if err != nil { + inspectErrs = append(inspectErrs, err.Error()) + continue + } + + if err := inspector.Inspect(element, raw); err != nil { + inspectErrs = append(inspectErrs, err.Error()) + } + } + + if err := inspector.Flush(); err != nil { + logrus.Errorf("%s\n", err) + } + + if len(inspectErrs) != 0 { + return cli.StatusError{ + StatusCode: 1, + Status: strings.Join(inspectErrs, "\n"), + } + } + return nil +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. 
+func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error {
+	buffer := new(bytes.Buffer)
+	if err := i.tmpl.Execute(buffer, typedElement); err != nil {
+		if rawElement == nil {
+			return errors.Errorf("Template parsing error: %v", err)
+		}
+		return i.tryRawInspectFallback(rawElement)
+	}
+	i.buffer.Write(buffer.Bytes())
+	i.buffer.WriteByte('\n')
+	return nil
+}
+
+// tryRawInspectFallback executes the inspect template with a raw interface.
+// This allows docker cli to parse inspect structs injected with Swarm fields.
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error {
+	var raw interface{}
+	buffer := new(bytes.Buffer)
+	rdr := bytes.NewReader(rawElement)
+	dec := json.NewDecoder(rdr)
+	dec.UseNumber()
+
+	if rawErr := dec.Decode(&raw); rawErr != nil {
+		return errors.Errorf("unable to read inspect data: %v", rawErr)
+	}
+
+	tmplMissingKey := i.tmpl.Option("missingkey=error")
+	if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil {
+		return errors.Errorf("Template parsing error: %v", rawErr)
+	}
+
+	i.buffer.Write(buffer.Bytes())
+	i.buffer.WriteByte('\n')
+	return nil
+}
+
+// Flush writes the result of inspecting all elements into the output stream.
+func (i *TemplateInspector) Flush() error {
+	if i.buffer.Len() == 0 {
+		_, err := io.WriteString(i.outputStream, "\n")
+		return err
+	}
+	_, err := io.Copy(i.outputStream, i.buffer)
+	return err
+}
+
+// IndentedInspector uses a buffer to store the indented representation of an element.
+type IndentedInspector struct {
+	outputStream io.Writer
+	elements     []interface{}
+	rawElements  [][]byte
+}
+
+// NewIndentedInspector generates a new IndentedInspector.
+func NewIndentedInspector(outputStream io.Writer) Inspector {
+	return &IndentedInspector{
+		outputStream: outputStream,
+	}
+}
+
+// Inspect writes the raw element with an indented json format.
+func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error {
+	if rawElement != nil {
+		i.rawElements = append(i.rawElements, rawElement)
+	} else {
+		i.elements = append(i.elements, typedElement)
+	}
+	return nil
+}
+
+// Flush writes the result of inspecting all elements into the output stream.
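+// With nothing collected it writes "[]" and a newline; raw elements are joined into a
+// JSON array and re-indented, while typed elements are marshalled with MarshalIndent.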
+func (i *IndentedInspector) Flush() error { + if len(i.elements) == 0 && len(i.rawElements) == 0 { + _, err := io.WriteString(i.outputStream, "[]\n") + return err + } + + var buffer io.Reader + if len(i.rawElements) > 0 { + bytesBuffer := new(bytes.Buffer) + bytesBuffer.WriteString("[") + for idx, r := range i.rawElements { + bytesBuffer.Write(r) + if idx < len(i.rawElements)-1 { + bytesBuffer.WriteString(",") + } + } + bytesBuffer.WriteString("]") + indented := new(bytes.Buffer) + if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { + return err + } + buffer = indented + } else { + b, err := json.MarshalIndent(i.elements, "", " ") + if err != nil { + return err + } + buffer = bytes.NewReader(b) + } + + if _, err := io.Copy(i.outputStream, buffer); err != nil { + return err + } + _, err := io.WriteString(i.outputStream, "\n") + return err +} diff --git a/cli/cli/command/inspect/inspector_test.go b/cli/cli/command/inspect/inspector_test.go new file mode 100644 index 00000000..f4df3684 --- /dev/null +++ b/cli/cli/command/inspect/inspector_test.go @@ -0,0 +1,259 @@ +package inspect + +import ( + "bytes" + "strings" + "testing" + + "github.com/docker/cli/templates" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +type testElement struct { + DNS string `json:"Dns"` +} + +func TestTemplateInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "\n" { + t.Fatalf("Expected `\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorTemplateError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Foo}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + err = i.Inspect(testElement{"0.0.0.0"}, nil) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorRawFallback(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorRawFallbackError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := 
templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n1.1.1.1\n" { + t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got `%s`", b.String()) + } +} + +func TestIndentedInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + }, + { + "Dns": "1.1.1.1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := "[]\n" + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorRawElements(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0", + "Node": "0" + }, + { + "Dns": "1.1.1.1", + "Node": "1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +// moby/moby#32235 +// This test verifies that even if `tryRawInspectFallback` is called the fields containing +// numerical values are displayed correctly. +// For example, `docker inspect --format "{{.Id}} {{.Size}} alpine` and +// `docker inspect --format "{{.ID}} {{.Size}} alpine" will have the same output which is +// sha256:651aa95985aa4a17a38ffcf71f598ec461924ca96865facc2c5782ef2d2be07f 3983636 +func TestTemplateInspectorRawFallbackNumber(t *testing.T) { + // Using typedElem to automatically fall to tryRawInspectFallback. 
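+	// The raw fallback decodes with json.Decoder.UseNumber, so integer sizes such as
+	// 53317 keep their literal form instead of being rendered in scientific notation.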
+ typedElem := struct { + ID string `json:"Id"` + }{"ad3"} + testcases := []struct { + raw []byte + exp string + }{ + {raw: []byte(`{"Id": "ad3", "Size": 53317}`), exp: "53317 ad3\n"}, + {raw: []byte(`{"Id": "ad3", "Size": 53317.102}`), exp: "53317.102 ad3\n"}, + {raw: []byte(`{"Id": "ad3", "Size": 53317.0}`), exp: "53317.0 ad3\n"}, + } + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Size}} {{.Id}}") + assert.NilError(t, err) + + i := NewTemplateInspector(b, tmpl) + for _, tc := range testcases { + err = i.Inspect(typedElem, tc.raw) + assert.NilError(t, err) + + err = i.Flush() + assert.NilError(t, err) + + assert.Check(t, is.Equal(tc.exp, b.String())) + b.Reset() + } +} diff --git a/cli/cli/command/manifest/annotate.go b/cli/cli/command/manifest/annotate.go new file mode 100644 index 00000000..e6c47394 --- /dev/null +++ b/cli/cli/command/manifest/annotate.go @@ -0,0 +1,97 @@ +package manifest + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/store" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type annotateOptions struct { + target string // the target manifest list name (also transaction ID) + image string // the manifest to annotate within the list + variant string // an architecture variant + os string + arch string + osFeatures []string +} + +// NewAnnotateCommand creates a new `docker manifest annotate` command +func newAnnotateCommand(dockerCli command.Cli) *cobra.Command { + var opts annotateOptions + + cmd := &cobra.Command{ + Use: "annotate [OPTIONS] MANIFEST_LIST MANIFEST", + Short: "Add additional information to a local image manifest", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.target = args[0] + opts.image = args[1] + return runManifestAnnotate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVar(&opts.os, "os", "", "Set operating system") + flags.StringVar(&opts.arch, "arch", "", "Set architecture") + flags.StringSliceVar(&opts.osFeatures, "os-features", []string{}, "Set operating system feature") + flags.StringVar(&opts.variant, "variant", "", "Set architecture variant") + + return cmd +} + +func runManifestAnnotate(dockerCli command.Cli, opts annotateOptions) error { + targetRef, err := normalizeReference(opts.target) + if err != nil { + return errors.Wrapf(err, "annotate: error parsing name for manifest list %s", opts.target) + } + imgRef, err := normalizeReference(opts.image) + if err != nil { + return errors.Wrapf(err, "annotate: error parsing name for manifest %s", opts.image) + } + + manifestStore := dockerCli.ManifestStore() + imageManifest, err := manifestStore.Get(targetRef, imgRef) + switch { + case store.IsNotFound(err): + return fmt.Errorf("manifest for image %s does not exist in %s", opts.image, opts.target) + case err != nil: + return err + } + + // Update the mf + if imageManifest.Descriptor.Platform == nil { + imageManifest.Descriptor.Platform = new(ocispec.Platform) + } + if opts.os != "" { + imageManifest.Descriptor.Platform.OS = opts.os + } + if opts.arch != "" { + imageManifest.Descriptor.Platform.Architecture = opts.arch + } + for _, osFeature := range opts.osFeatures { + imageManifest.Descriptor.Platform.OSFeatures = appendIfUnique(imageManifest.Descriptor.Platform.OSFeatures, osFeature) + } + if opts.variant != "" { + imageManifest.Descriptor.Platform.Variant = opts.variant + } + + if 
!isValidOSArch(imageManifest.Descriptor.Platform.OS, imageManifest.Descriptor.Platform.Architecture) { + return errors.Errorf("manifest entry for image has unsupported os/arch combination: %s/%s", opts.os, opts.arch) + } + return manifestStore.Save(targetRef, imgRef, imageManifest) +} + +func appendIfUnique(list []string, str string) []string { + for _, s := range list { + if s == str { + return list + } + } + return append(list, str) +} diff --git a/cli/cli/command/manifest/annotate_test.go b/cli/cli/command/manifest/annotate_test.go new file mode 100644 index 00000000..e5cce8f6 --- /dev/null +++ b/cli/cli/command/manifest/annotate_test.go @@ -0,0 +1,77 @@ +package manifest + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestManifestAnnotateError(t *testing.T) { + testCases := []struct { + args []string + expectedError string + }{ + { + args: []string{"too-few-arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"th!si'sa/fa!ke/li$t/name", "example.com/alpine:3.0"}, + expectedError: "error parsing name for manifest list", + }, + { + args: []string{"example.com/list:v1", "th!si'sa/fa!ke/im@ge/nam32"}, + expectedError: "error parsing name for manifest", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(nil) + cmd := newAnnotateCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestManifestAnnotate(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newAnnotateCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/fake:0.0"}) + cmd.SetOutput(ioutil.Discard) + expectedError := "manifest for image example.com/fake:0.0 does not exist" + assert.ErrorContains(t, cmd.Execute(), expectedError) + + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + cmd.Flags().Set("os", "freebsd") + cmd.Flags().Set("arch", "fake") + cmd.Flags().Set("os-features", "feature1") + cmd.Flags().Set("variant", "v7") + expectedError = "manifest entry for image has unsupported os/arch combination" + assert.ErrorContains(t, cmd.Execute(), expectedError) + + cmd.Flags().Set("arch", "arm") + assert.NilError(t, cmd.Execute()) + + cmd = newInspectCommand(cli) + err = cmd.Flags().Set("verbose", "true") + assert.NilError(t, err) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + assert.NilError(t, cmd.Execute()) + actual := cli.OutBuffer() + expected := golden.Get(t, "inspect-annotate.golden") + assert.Check(t, is.Equal(string(expected), actual.String())) +} diff --git a/cli/cli/command/manifest/client_test.go b/cli/cli/command/manifest/client_test.go new file mode 100644 index 00000000..07967c29 --- /dev/null +++ b/cli/cli/command/manifest/client_test.go @@ -0,0 +1,48 @@ +package manifest + +import ( + "context" + + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/cli/registry/client" + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" +) + +type fakeRegistryClient struct { + getManifestFunc func(ctx context.Context, ref reference.Named) 
(manifesttypes.ImageManifest, error) + getManifestListFunc func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) + mountBlobFunc func(ctx context.Context, source reference.Canonical, target reference.Named) error + putManifestFunc func(ctx context.Context, source reference.Named, mf distribution.Manifest) (digest.Digest, error) +} + +func (c *fakeRegistryClient) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) { + if c.getManifestFunc != nil { + return c.getManifestFunc(ctx, ref) + } + return manifesttypes.ImageManifest{}, nil +} + +func (c *fakeRegistryClient) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) { + if c.getManifestListFunc != nil { + return c.getManifestListFunc(ctx, ref) + } + return nil, nil +} + +func (c *fakeRegistryClient) MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error { + if c.mountBlobFunc != nil { + return c.mountBlobFunc(ctx, source, target) + } + return nil +} + +func (c *fakeRegistryClient) PutManifest(ctx context.Context, ref reference.Named, mf distribution.Manifest) (digest.Digest, error) { + if c.putManifestFunc != nil { + return c.putManifestFunc(ctx, ref, mf) + } + return digest.Digest(""), nil +} + +var _ client.RegistryClient = &fakeRegistryClient{} diff --git a/cli/cli/command/manifest/cmd.go b/cli/cli/command/manifest/cmd.go new file mode 100644 index 00000000..8cc4987a --- /dev/null +++ b/cli/cli/command/manifest/cmd.go @@ -0,0 +1,45 @@ +package manifest + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + + "github.com/spf13/cobra" +) + +// NewManifestCommand returns a cobra command for `manifest` subcommands +func NewManifestCommand(dockerCli command.Cli) *cobra.Command { + // use dockerCli as command.Cli + cmd := &cobra.Command{ + Use: "manifest COMMAND", + Short: "Manage Docker image manifests and manifest lists", + Long: manifestDescription, + Args: cli.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) + }, + Annotations: map[string]string{"experimentalCLI": ""}, + } + cmd.AddCommand( + newCreateListCommand(dockerCli), + newInspectCommand(dockerCli), + newAnnotateCommand(dockerCli), + newPushListCommand(dockerCli), + ) + return cmd +} + +var manifestDescription = ` +The **docker manifest** command has subcommands for managing image manifests and +manifest lists. A manifest list allows you to use one name to refer to the same image +built for multiple architectures. + +To see help for a subcommand, use: + + docker manifest CMD --help + +For full details on using docker manifest lists, see the registry v2 specification. 
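+
+For example, a manifest list can be assembled, annotated and pushed as follows
+(image names are placeholders):
+
+  docker manifest create example.com/app:latest example.com/app:amd64 example.com/app:arm64
+  docker manifest annotate --os linux --arch arm64 example.com/app:latest example.com/app:arm64
+  docker manifest push example.com/app:latest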
+ +` diff --git a/cli/cli/command/manifest/create_list.go b/cli/cli/command/manifest/create_list.go new file mode 100644 index 00000000..f2e54dcf --- /dev/null +++ b/cli/cli/command/manifest/create_list.go @@ -0,0 +1,82 @@ +package manifest + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/store" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOpts struct { + amend bool + insecure bool +} + +func newCreateListCommand(dockerCli command.Cli) *cobra.Command { + opts := createOpts{} + + cmd := &cobra.Command{ + Use: "create MANIFEST_LIST MANIFEST [MANIFEST...]", + Short: "Create a local manifest list for annotating and pushing to a registry", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return createManifestList(dockerCli, args, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.insecure, "insecure", false, "Allow communication with an insecure registry") + flags.BoolVarP(&opts.amend, "amend", "a", false, "Amend an existing manifest list") + return cmd +} + +func createManifestList(dockerCli command.Cli, args []string, opts createOpts) error { + newRef := args[0] + targetRef, err := normalizeReference(newRef) + if err != nil { + return errors.Wrapf(err, "error parsing name for manifest list %s", newRef) + } + + _, err = registry.ParseRepositoryInfo(targetRef) + if err != nil { + return errors.Wrapf(err, "error parsing repository name for manifest list %s", newRef) + } + + manifestStore := dockerCli.ManifestStore() + _, err = manifestStore.GetList(targetRef) + switch { + case store.IsNotFound(err): + // New manifest list + case err != nil: + return err + case !opts.amend: + return errors.Errorf("refusing to amend an existing manifest list with no --amend flag") + } + + ctx := context.Background() + // Now create the local manifest list transaction by looking up the manifest schemas + // for the constituent images: + manifests := args[1:] + for _, manifestRef := range manifests { + namedRef, err := normalizeReference(manifestRef) + if err != nil { + // TODO: wrap error? 
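+			// (for example errors.Wrapf(err, "error parsing name for manifest %s", manifestRef),
+			// mirroring the wording used by `docker manifest annotate`)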
+ return err + } + + manifest, err := getManifest(ctx, dockerCli, targetRef, namedRef, opts.insecure) + if err != nil { + return err + } + if err := manifestStore.Save(targetRef, namedRef, manifest); err != nil { + return err + } + } + fmt.Fprintf(dockerCli.Out(), "Created manifest list %s\n", targetRef.String()) + return nil +} diff --git a/cli/cli/command/manifest/create_test.go b/cli/cli/command/manifest/create_test.go new file mode 100644 index 00000000..fbf0ae7b --- /dev/null +++ b/cli/cli/command/manifest/create_test.go @@ -0,0 +1,116 @@ +package manifest + +import ( + "context" + "io/ioutil" + "testing" + + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/internal/test" + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestManifestCreateErrors(t *testing.T) { + testCases := []struct { + args []string + expectedError string + }{ + { + args: []string{"too-few-arguments"}, + expectedError: "requires at least 2 arguments", + }, + { + args: []string{"th!si'sa/fa!ke/li$t/name", "example.com/alpine:3.0"}, + expectedError: "error parsing name for manifest list", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(nil) + cmd := newCreateListCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +// create a manifest list, then overwrite it, and inspect to see if the old one is still there +func TestManifestCreateAmend(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + namedRef = ref(t, "alpine:3.1") + imageManifest = fullImageManifest(t, namedRef) + err = store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newCreateListCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.1"}) + cmd.Flags().Set("amend", "true") + cmd.SetOutput(ioutil.Discard) + err = cmd.Execute() + assert.NilError(t, err) + + // make a new cli to clear the buffers + cli = test.NewFakeCli(nil) + cli.SetManifestStore(store) + inspectCmd := newInspectCommand(cli) + inspectCmd.SetArgs([]string{"example.com/list:v1"}) + assert.NilError(t, inspectCmd.Execute()) + actual := cli.OutBuffer() + expected := golden.Get(t, "inspect-manifest-list.golden") + assert.Check(t, is.Equal(string(expected), actual.String())) +} + +// attempt to overwrite a saved manifest and get refused +func TestManifestCreateRefuseAmend(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newCreateListCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + cmd.SetOutput(ioutil.Discard) + err = cmd.Execute() + assert.Error(t, err, "refusing to amend an existing manifest list with no --amend flag") +} + +// attempt to make a manifest list without valid images +func TestManifestCreateNoManifest(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := 
test.NewFakeCli(nil) + cli.SetManifestStore(store) + cli.SetRegistryClient(&fakeRegistryClient{ + getManifestFunc: func(_ context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) { + return manifesttypes.ImageManifest{}, errors.Errorf("No such image: %v", ref) + }, + getManifestListFunc: func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) { + return nil, errors.Errorf("No such manifest: %s", ref) + }, + }) + + cmd := newCreateListCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + cmd.SetOutput(ioutil.Discard) + err := cmd.Execute() + assert.Error(t, err, "No such image: example.com/alpine:3.0") +} diff --git a/cli/cli/command/manifest/inspect.go b/cli/cli/command/manifest/inspect.go new file mode 100644 index 00000000..c270ee53 --- /dev/null +++ b/cli/cli/command/manifest/inspect.go @@ -0,0 +1,148 @@ +package manifest + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/types" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + ref string + list string + verbose bool + insecure bool +} + +// NewInspectCommand creates a new `docker manifest inspect` command +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] [MANIFEST_LIST] MANIFEST", + Short: "Display an image manifest, or manifest list", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + switch len(args) { + case 1: + opts.ref = args[0] + case 2: + opts.list = args[0] + opts.ref = args[1] + } + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.insecure, "insecure", false, "Allow communication with an insecure registry") + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Output additional info including layers and platform") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + namedRef, err := normalizeReference(opts.ref) + if err != nil { + return err + } + + // If list reference is provided, display the local manifest in a list + if opts.list != "" { + listRef, err := normalizeReference(opts.list) + if err != nil { + return err + } + + imageManifest, err := dockerCli.ManifestStore().Get(listRef, namedRef) + if err != nil { + return err + } + return printManifest(dockerCli, imageManifest, opts) + } + + // Try a local manifest list first + localManifestList, err := dockerCli.ManifestStore().GetList(namedRef) + if err == nil { + return printManifestList(dockerCli, namedRef, localManifestList, opts) + } + + // Next try a remote manifest + ctx := context.Background() + registryClient := dockerCli.RegistryClient(opts.insecure) + imageManifest, err := registryClient.GetManifest(ctx, namedRef) + if err == nil { + return printManifest(dockerCli, imageManifest, opts) + } + + // Finally try a remote manifest list + manifestList, err := registryClient.GetManifestList(ctx, namedRef) + if err != nil { + return err + } + return printManifestList(dockerCli, namedRef, manifestList, opts) +} + +func printManifest(dockerCli command.Cli, manifest types.ImageManifest, opts inspectOptions) error { + buffer := new(bytes.Buffer) + if !opts.verbose { + _, raw, err := 
manifest.Payload() + if err != nil { + return err + } + if err := json.Indent(buffer, raw, "", "\t"); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), buffer.String()) + return nil + } + jsonBytes, err := json.MarshalIndent(manifest, "", "\t") + if err != nil { + return err + } + dockerCli.Out().Write(append(jsonBytes, '\n')) + return nil +} + +func printManifestList(dockerCli command.Cli, namedRef reference.Named, list []types.ImageManifest, opts inspectOptions) error { + if !opts.verbose { + targetRepo, err := registry.ParseRepositoryInfo(namedRef) + if err != nil { + return err + } + + manifests := []manifestlist.ManifestDescriptor{} + // More than one response. This is a manifest list. + for _, img := range list { + mfd, err := buildManifestDescriptor(targetRepo, img) + if err != nil { + return errors.Wrap(err, "failed to assemble ManifestDescriptor") + } + manifests = append(manifests, mfd) + } + deserializedML, err := manifestlist.FromDescriptors(manifests) + if err != nil { + return err + } + jsonBytes, err := deserializedML.MarshalJSON() + if err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), string(jsonBytes)) + return nil + } + jsonBytes, err := json.MarshalIndent(list, "", "\t") + if err != nil { + return err + } + dockerCli.Out().Write(append(jsonBytes, '\n')) + return nil +} diff --git a/cli/cli/command/manifest/inspect_test.go b/cli/cli/command/manifest/inspect_test.go new file mode 100644 index 00000000..7abe06d2 --- /dev/null +++ b/cli/cli/command/manifest/inspect_test.go @@ -0,0 +1,146 @@ +package manifest + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/docker/cli/cli/manifest/store" + "github.com/docker/cli/cli/manifest/types" + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/internal/test" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func newTempManifestStore(t *testing.T) (store.Store, func()) { + tmpdir, err := ioutil.TempDir("", "test-manifest-storage") + assert.NilError(t, err) + + return store.NewStore(tmpdir), func() { os.RemoveAll(tmpdir) } +} + +func ref(t *testing.T, name string) reference.Named { + named, err := reference.ParseNamed("example.com/" + name) + assert.NilError(t, err) + return named +} + +func fullImageManifest(t *testing.T, ref reference.Named) types.ImageManifest { + man, err := schema2.FromStruct(schema2.Manifest{ + Versioned: schema2.SchemaVersion, + Config: distribution.Descriptor{ + Digest: "sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560", + Size: 1520, + MediaType: schema2.MediaTypeImageConfig, + }, + Layers: []distribution.Descriptor{ + { + MediaType: schema2.MediaTypeLayer, + Size: 1990402, + Digest: "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926", + }, + }, + }) + assert.NilError(t, err) + + // TODO: include image data for verbose inspect + mt, raw, err := man.Payload() + assert.NilError(t, err) + + desc := ocispec.Descriptor{ + Digest: digest.FromBytes(raw), + Size: int64(len(raw)), + MediaType: mt, + Platform: &ocispec.Platform{ + Architecture: "amd64", + OS: "linux", + }, + } + + return types.NewImageManifest(ref, desc, man) +} + +func TestInspectCommandLocalManifestNotFound(t *testing.T) { + store, 
cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + + cmd := newInspectCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + err := cmd.Execute() + assert.Error(t, err, "No such manifest: example.com/alpine:3.0") +} + +func TestInspectCommandNotFound(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + cli.SetRegistryClient(&fakeRegistryClient{ + getManifestFunc: func(_ context.Context, _ reference.Named) (manifesttypes.ImageManifest, error) { + return manifesttypes.ImageManifest{}, errors.New("missing") + }, + getManifestListFunc: func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) { + return nil, errors.Errorf("No such manifest: %s", ref) + }, + }) + + cmd := newInspectCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"example.com/alpine:3.0"}) + err := cmd.Execute() + assert.Error(t, err, "No such manifest: example.com/alpine:3.0") +} + +func TestInspectCommandLocalManifest(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + assert.NilError(t, cmd.Execute()) + actual := cli.OutBuffer() + expected := golden.Get(t, "inspect-manifest.golden") + assert.Check(t, is.Equal(string(expected), actual.String())) +} + +func TestInspectcommandRemoteManifest(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + cli.SetRegistryClient(&fakeRegistryClient{ + getManifestFunc: func(_ context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) { + return fullImageManifest(t, ref), nil + }, + }) + + cmd := newInspectCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"example.com/alpine:3.0"}) + assert.NilError(t, cmd.Execute()) + actual := cli.OutBuffer() + expected := golden.Get(t, "inspect-manifest.golden") + assert.Check(t, is.Equal(string(expected), actual.String())) +} diff --git a/cli/cli/command/manifest/push.go b/cli/cli/command/manifest/push.go new file mode 100644 index 00000000..fa734afa --- /dev/null +++ b/cli/cli/command/manifest/push.go @@ -0,0 +1,281 @@ +package manifest + +import ( + "context" + "encoding/json" + "fmt" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/types" + registryclient "github.com/docker/cli/cli/registry/client" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type pushOpts struct { + insecure bool + purge bool + target string +} + +type mountRequest struct { + ref reference.Named + manifest types.ImageManifest +} + +type manifestBlob struct { + canonical reference.Canonical + os string +} + +type pushRequest struct { + targetRef reference.Named + list *manifestlist.DeserializedManifestList + mountRequests []mountRequest + 
manifestBlobs []manifestBlob + insecure bool +} + +func newPushListCommand(dockerCli command.Cli) *cobra.Command { + opts := pushOpts{} + + cmd := &cobra.Command{ + Use: "push [OPTIONS] MANIFEST_LIST", + Short: "Push a manifest list to a repository", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.target = args[0] + return runPush(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.purge, "purge", "p", false, "Remove the local manifest list after push") + flags.BoolVar(&opts.insecure, "insecure", false, "Allow push to an insecure registry") + return cmd +} + +func runPush(dockerCli command.Cli, opts pushOpts) error { + + targetRef, err := normalizeReference(opts.target) + if err != nil { + return err + } + + manifests, err := dockerCli.ManifestStore().GetList(targetRef) + if err != nil { + return err + } + if len(manifests) == 0 { + return errors.Errorf("%s not found", targetRef) + } + + pushRequest, err := buildPushRequest(manifests, targetRef, opts.insecure) + if err != nil { + return err + } + + ctx := context.Background() + if err := pushList(ctx, dockerCli, pushRequest); err != nil { + return err + } + if opts.purge { + return dockerCli.ManifestStore().Remove(targetRef) + } + return nil +} + +func buildPushRequest(manifests []types.ImageManifest, targetRef reference.Named, insecure bool) (pushRequest, error) { + req := pushRequest{targetRef: targetRef, insecure: insecure} + + var err error + req.list, err = buildManifestList(manifests, targetRef) + if err != nil { + return req, err + } + + targetRepo, err := registry.ParseRepositoryInfo(targetRef) + if err != nil { + return req, err + } + targetRepoName, err := registryclient.RepoNameForReference(targetRepo.Name) + if err != nil { + return req, err + } + + for _, imageManifest := range manifests { + manifestRepoName, err := registryclient.RepoNameForReference(imageManifest.Ref) + if err != nil { + return req, err + } + + repoName, _ := reference.WithName(manifestRepoName) + if repoName.Name() != targetRepoName { + blobs, err := buildBlobRequestList(imageManifest, repoName) + if err != nil { + return req, err + } + req.manifestBlobs = append(req.manifestBlobs, blobs...) 
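+			// Blobs from this foreign repository are recorded so that pushList can
+			// cross-repository mount them into the target repository; the manifest itself
+			// is queued below to be re-pushed by digest under the target name.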
+ + manifestPush, err := buildPutManifestRequest(imageManifest, targetRef) + if err != nil { + return req, err + } + req.mountRequests = append(req.mountRequests, manifestPush) + } + } + return req, nil +} + +func buildManifestList(manifests []types.ImageManifest, targetRef reference.Named) (*manifestlist.DeserializedManifestList, error) { + targetRepoInfo, err := registry.ParseRepositoryInfo(targetRef) + if err != nil { + return nil, err + } + + descriptors := []manifestlist.ManifestDescriptor{} + for _, imageManifest := range manifests { + if imageManifest.Descriptor.Platform == nil || + imageManifest.Descriptor.Platform.Architecture == "" || + imageManifest.Descriptor.Platform.OS == "" { + return nil, errors.Errorf( + "manifest %s must have an OS and Architecture to be pushed to a registry", imageManifest.Ref) + } + descriptor, err := buildManifestDescriptor(targetRepoInfo, imageManifest) + if err != nil { + return nil, err + } + descriptors = append(descriptors, descriptor) + } + + return manifestlist.FromDescriptors(descriptors) +} + +func buildManifestDescriptor(targetRepo *registry.RepositoryInfo, imageManifest types.ImageManifest) (manifestlist.ManifestDescriptor, error) { + repoInfo, err := registry.ParseRepositoryInfo(imageManifest.Ref) + if err != nil { + return manifestlist.ManifestDescriptor{}, err + } + + manifestRepoHostname := reference.Domain(repoInfo.Name) + targetRepoHostname := reference.Domain(targetRepo.Name) + if manifestRepoHostname != targetRepoHostname { + return manifestlist.ManifestDescriptor{}, errors.Errorf("cannot use source images from a different registry than the target image: %s != %s", manifestRepoHostname, targetRepoHostname) + } + + manifest := manifestlist.ManifestDescriptor{ + Descriptor: distribution.Descriptor{ + Digest: imageManifest.Descriptor.Digest, + Size: imageManifest.Descriptor.Size, + MediaType: imageManifest.Descriptor.MediaType, + }, + } + + platform := types.PlatformSpecFromOCI(imageManifest.Descriptor.Platform) + if platform != nil { + manifest.Platform = *platform + } + + if err = manifest.Descriptor.Digest.Validate(); err != nil { + return manifestlist.ManifestDescriptor{}, errors.Wrapf(err, + "digest parse of image %q failed", imageManifest.Ref) + } + + return manifest, nil +} + +func buildBlobRequestList(imageManifest types.ImageManifest, repoName reference.Named) ([]manifestBlob, error) { + var blobReqs []manifestBlob + + for _, blobDigest := range imageManifest.Blobs() { + canonical, err := reference.WithDigest(repoName, blobDigest) + if err != nil { + return nil, err + } + var os string + if imageManifest.Descriptor.Platform != nil { + os = imageManifest.Descriptor.Platform.OS + } + blobReqs = append(blobReqs, manifestBlob{canonical: canonical, os: os}) + } + return blobReqs, nil +} + +// nolint: interfacer +func buildPutManifestRequest(imageManifest types.ImageManifest, targetRef reference.Named) (mountRequest, error) { + refWithoutTag, err := reference.WithName(targetRef.Name()) + if err != nil { + return mountRequest{}, err + } + mountRef, err := reference.WithDigest(refWithoutTag, imageManifest.Descriptor.Digest) + if err != nil { + return mountRequest{}, err + } + + // This indentation has to be added to ensure sha parity with the registry + v2ManifestBytes, err := json.MarshalIndent(imageManifest.SchemaV2Manifest, "", " ") + if err != nil { + return mountRequest{}, err + } + // indent only the DeserializedManifest portion of this, in order to maintain parity with the registry + // and not alter the sha + var v2Manifest 
schema2.DeserializedManifest + if err = v2Manifest.UnmarshalJSON(v2ManifestBytes); err != nil { + return mountRequest{}, err + } + imageManifest.SchemaV2Manifest = &v2Manifest + + return mountRequest{ref: mountRef, manifest: imageManifest}, err +} + +func pushList(ctx context.Context, dockerCli command.Cli, req pushRequest) error { + rclient := dockerCli.RegistryClient(req.insecure) + + if err := mountBlobs(ctx, rclient, req.targetRef, req.manifestBlobs); err != nil { + return err + } + if err := pushReferences(ctx, dockerCli.Out(), rclient, req.mountRequests); err != nil { + return err + } + dgst, err := rclient.PutManifest(ctx, req.targetRef, req.list) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), dgst.String()) + return nil +} + +func pushReferences(ctx context.Context, out io.Writer, client registryclient.RegistryClient, mounts []mountRequest) error { + for _, mount := range mounts { + newDigest, err := client.PutManifest(ctx, mount.ref, mount.manifest) + if err != nil { + return err + } + fmt.Fprintf(out, "Pushed ref %s with digest: %s\n", mount.ref, newDigest) + } + return nil +} + +func mountBlobs(ctx context.Context, client registryclient.RegistryClient, ref reference.Named, blobs []manifestBlob) error { + for _, blob := range blobs { + err := client.MountBlob(ctx, blob.canonical, ref) + switch err.(type) { + case nil: + case registryclient.ErrBlobCreated: + if blob.os != "windows" { + return fmt.Errorf("error mounting %s to %s", blob.canonical, ref) + } + default: + return err + } + } + return nil +} diff --git a/cli/cli/command/manifest/push_test.go b/cli/cli/command/manifest/push_test.go new file mode 100644 index 00000000..3a2e9b8a --- /dev/null +++ b/cli/cli/command/manifest/push_test.go @@ -0,0 +1,69 @@ +package manifest + +import ( + "context" + "io/ioutil" + "testing" + + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/internal/test" + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func newFakeRegistryClient() *fakeRegistryClient { + return &fakeRegistryClient{ + getManifestFunc: func(_ context.Context, _ reference.Named) (manifesttypes.ImageManifest, error) { + return manifesttypes.ImageManifest{}, errors.New("") + }, + getManifestListFunc: func(_ context.Context, _ reference.Named) ([]manifesttypes.ImageManifest, error) { + return nil, errors.Errorf("") + }, + } +} + +func TestManifestPushErrors(t *testing.T) { + testCases := []struct { + args []string + expectedError string + }{ + { + args: []string{"one-arg", "extra-arg"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"th!si'sa/fa!ke/li$t/-name"}, + expectedError: "invalid reference format", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(nil) + cmd := newPushListCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestManifestPush(t *testing.T) { + store, sCleanup := newTempManifestStore(t) + defer sCleanup() + + registry := newFakeRegistryClient() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + cli.SetRegistryClient(registry) + + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newPushListCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1"}) + err = cmd.Execute() + assert.NilError(t, err) +} diff --git 
a/cli/cli/command/manifest/testdata/inspect-annotate.golden b/cli/cli/command/manifest/testdata/inspect-annotate.golden new file mode 100644 index 00000000..4d65b729 --- /dev/null +++ b/cli/cli/command/manifest/testdata/inspect-annotate.golden @@ -0,0 +1,32 @@ +{ + "Ref": "example.com/alpine:3.0", + "Descriptor": { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe", + "size": 528, + "platform": { + "architecture": "arm", + "os": "freebsd", + "os.features": [ + "feature1" + ], + "variant": "v7" + } + }, + "SchemaV2Manifest": { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1520, + "digest": "sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 1990402, + "digest": "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926" + } + ] + } +} diff --git a/cli/cli/command/manifest/testdata/inspect-manifest-list.golden b/cli/cli/command/manifest/testdata/inspect-manifest-list.golden new file mode 100644 index 00000000..a0c2673e --- /dev/null +++ b/cli/cli/command/manifest/testdata/inspect-manifest-list.golden @@ -0,0 +1,24 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 528, + "digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe", + "platform": { + "architecture": "amd64", + "os": "linux" + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 528, + "digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe", + "platform": { + "architecture": "amd64", + "os": "linux" + } + } + ] +} diff --git a/cli/cli/command/manifest/testdata/inspect-manifest.golden b/cli/cli/command/manifest/testdata/inspect-manifest.golden new file mode 100644 index 00000000..7089d9bd --- /dev/null +++ b/cli/cli/command/manifest/testdata/inspect-manifest.golden @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1520, + "digest": "sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 1990402, + "digest": "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926" + } + ] +} diff --git a/cli/cli/command/manifest/util.go b/cli/cli/command/manifest/util.go new file mode 100644 index 00000000..8d4bd303 --- /dev/null +++ b/cli/cli/command/manifest/util.go @@ -0,0 +1,80 @@ +package manifest + +import ( + "context" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/store" + "github.com/docker/cli/cli/manifest/types" + "github.com/docker/distribution/reference" +) + +type osArch struct { + os string + arch string +} + +// Remove any unsupported os/arch combo +// list of valid os/arch values (see "Optional Environment Variables" section +// of https://golang.org/doc/install/source +// Added linux/s390x as we know System z support already exists +var validOSArches = map[osArch]bool{ + {os: "darwin", arch: "386"}: 
true, + {os: "darwin", arch: "amd64"}: true, + {os: "darwin", arch: "arm"}: true, + {os: "darwin", arch: "arm64"}: true, + {os: "dragonfly", arch: "amd64"}: true, + {os: "freebsd", arch: "386"}: true, + {os: "freebsd", arch: "amd64"}: true, + {os: "freebsd", arch: "arm"}: true, + {os: "linux", arch: "386"}: true, + {os: "linux", arch: "amd64"}: true, + {os: "linux", arch: "arm"}: true, + {os: "linux", arch: "arm64"}: true, + {os: "linux", arch: "ppc64le"}: true, + {os: "linux", arch: "mips64"}: true, + {os: "linux", arch: "mips64le"}: true, + {os: "linux", arch: "s390x"}: true, + {os: "netbsd", arch: "386"}: true, + {os: "netbsd", arch: "amd64"}: true, + {os: "netbsd", arch: "arm"}: true, + {os: "openbsd", arch: "386"}: true, + {os: "openbsd", arch: "amd64"}: true, + {os: "openbsd", arch: "arm"}: true, + {os: "plan9", arch: "386"}: true, + {os: "plan9", arch: "amd64"}: true, + {os: "solaris", arch: "amd64"}: true, + {os: "windows", arch: "386"}: true, + {os: "windows", arch: "amd64"}: true, +} + +func isValidOSArch(os string, arch string) bool { + // check for existence of this combo + _, ok := validOSArches[osArch{os, arch}] + return ok +} + +func normalizeReference(ref string) (reference.Named, error) { + namedRef, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, isDigested := namedRef.(reference.Canonical); !isDigested { + return reference.TagNameOnly(namedRef), nil + } + return namedRef, nil +} + +// getManifest from the local store, and fallback to the remote registry if it +// doesn't exist locally +func getManifest(ctx context.Context, dockerCli command.Cli, listRef, namedRef reference.Named, insecure bool) (types.ImageManifest, error) { + data, err := dockerCli.ManifestStore().Get(listRef, namedRef) + switch { + case store.IsNotFound(err): + return dockerCli.RegistryClient(insecure).GetManifest(ctx, namedRef) + case err != nil: + return types.ImageManifest{}, err + default: + return data, nil + } +} diff --git a/cli/cli/command/network/client_test.go b/cli/cli/command/network/client_test.go new file mode 100644 index 00000000..33cec6e5 --- /dev/null +++ b/cli/cli/command/network/client_test.go @@ -0,0 +1,45 @@ +package network + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + networkCreateFunc func(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + networkConnectFunc func(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + networkDisconnectFunc func(ctx context.Context, networkID, container string, force bool) error + networkListFunc func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) +} + +func (c *fakeClient) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + if c.networkCreateFunc != nil { + return c.networkCreateFunc(ctx, name, options) + } + return types.NetworkCreateResponse{}, nil +} + +func (c *fakeClient) NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error { + if c.networkConnectFunc != nil { + return c.networkConnectFunc(ctx, networkID, container, config) + } + return nil +} + +func (c *fakeClient) NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error { + if c.networkDisconnectFunc != nil { + return 
c.networkDisconnectFunc(ctx, networkID, container, force) + } + return nil +} + +func (c *fakeClient) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + if c.networkListFunc != nil { + return c.networkListFunc(ctx, options) + } + return []types.NetworkResource{}, nil +} diff --git a/cli/cli/command/network/cmd.go b/cli/cli/command/network/cmd.go new file mode 100644 index 00000000..48edf1c4 --- /dev/null +++ b/cli/cli/command/network/cmd.go @@ -0,0 +1,28 @@ +package network + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewNetworkCommand returns a cobra command for `network` subcommands +func NewNetworkCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "network", + Short: "Manage networks", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newConnectCommand(dockerCli), + newCreateCommand(dockerCli), + newDisconnectCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/network/connect.go b/cli/cli/command/network/connect.go new file mode 100644 index 00000000..7ff055aa --- /dev/null +++ b/cli/cli/command/network/connect.go @@ -0,0 +1,63 @@ +package network + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/network" + "github.com/spf13/cobra" +) + +type connectOptions struct { + network string + container string + ipaddress string + ipv6address string + links opts.ListOpts + aliases []string + linklocalips []string +} + +func newConnectCommand(dockerCli command.Cli) *cobra.Command { + options := connectOptions{ + links: opts.NewListOpts(opts.ValidateLink), + } + + cmd := &cobra.Command{ + Use: "connect [OPTIONS] NETWORK CONTAINER", + Short: "Connect a container to a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.network = args[0] + options.container = args[1] + return runConnect(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.StringVar(&options.ipaddress, "ip", "", "IPv4 address (e.g., 172.30.100.104)") + flags.StringVar(&options.ipv6address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)") + flags.Var(&options.links, "link", "Add link to another container") + flags.StringSliceVar(&options.aliases, "alias", []string{}, "Add network-scoped alias for the container") + flags.StringSliceVar(&options.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") + + return cmd +} + +func runConnect(dockerCli command.Cli, options connectOptions) error { + client := dockerCli.Client() + + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: options.ipaddress, + IPv6Address: options.ipv6address, + LinkLocalIPs: options.linklocalips, + }, + Links: options.links.GetAll(), + Aliases: options.aliases, + } + + return client.NetworkConnect(context.Background(), options.network, options.container, epConfig) +} diff --git a/cli/cli/command/network/connect_test.go b/cli/cli/command/network/connect_test.go new file mode 100644 index 00000000..2c1d0401 --- /dev/null +++ b/cli/cli/command/network/connect_test.go @@ -0,0 +1,70 @@ +package network + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + 
"github.com/docker/docker/api/types/network" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNetworkConnectErrors(t *testing.T) { + testCases := []struct { + args []string + networkConnectFunc func(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + expectedError string + }{ + { + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"toto", "titi"}, + networkConnectFunc: func(ctx context.Context, networkID, container string, config *network.EndpointSettings) error { + return errors.Errorf("error connecting network") + }, + expectedError: "error connecting network", + }, + } + + for _, tc := range testCases { + cmd := newConnectCommand( + test.NewFakeCli(&fakeClient{ + networkConnectFunc: tc.networkConnectFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + + } +} + +func TestNetworkConnectWithFlags(t *testing.T) { + expectedOpts := []network.IPAMConfig{ + { + Subnet: "192.168.4.0/24", + IPRange: "192.168.4.0/24", + Gateway: "192.168.4.1/24", + AuxAddress: map[string]string{}, + }, + } + cli := test.NewFakeCli(&fakeClient{ + networkConnectFunc: func(ctx context.Context, networkID, container string, config *network.EndpointSettings) error { + assert.Check(t, is.DeepEqual(expectedOpts, config.IPAMConfig), "not expected driver error") + return nil + }, + }) + args := []string{"banana"} + cmd := newCreateCommand(cli) + + cmd.SetArgs(args) + cmd.Flags().Set("driver", "foo") + cmd.Flags().Set("ip-range", "192.168.4.0/24") + cmd.Flags().Set("gateway", "192.168.4.1/24") + cmd.Flags().Set("subnet", "192.168.4.0/24") + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/network/create.go b/cli/cli/command/network/create.go new file mode 100644 index 00000000..a8dda0a2 --- /dev/null +++ b/cli/cli/command/network/create.go @@ -0,0 +1,248 @@ +package network + +import ( + "context" + "fmt" + "net" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + scope string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts + internal bool + ipv6 bool + attachable bool + ingress bool + configOnly bool + configFrom string + + ipamDriver string + ipamSubnet []string + ipamIPRange []string + ipamGateway []string + ipamAux opts.MapOpts + ipamOpt opts.MapOpts +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + options := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(opts.ValidateEnv), + ipamAux: *opts.NewMapOpts(nil, nil), + ipamOpt: *opts.NewMapOpts(nil, nil), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] NETWORK", + Short: "Create a network", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.name = args[0] + return runCreate(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&options.driver, "driver", "d", "bridge", "Driver to manage the Network") + flags.VarP(&options.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&options.labels, "label", "Set metadata on a network") + flags.BoolVar(&options.internal, "internal", false, "Restrict external access to the network") + flags.BoolVar(&options.ipv6, "ipv6", false, 
"Enable IPv6 networking") + flags.BoolVar(&options.attachable, "attachable", false, "Enable manual container attachment") + flags.SetAnnotation("attachable", "version", []string{"1.25"}) + flags.BoolVar(&options.ingress, "ingress", false, "Create swarm routing-mesh network") + flags.SetAnnotation("ingress", "version", []string{"1.29"}) + flags.StringVar(&options.scope, "scope", "", "Control the network's scope") + flags.SetAnnotation("scope", "version", []string{"1.30"}) + flags.BoolVar(&options.configOnly, "config-only", false, "Create a configuration only network") + flags.SetAnnotation("config-only", "version", []string{"1.30"}) + flags.StringVar(&options.configFrom, "config-from", "", "The network from which copying the configuration") + flags.SetAnnotation("config-from", "version", []string{"1.30"}) + + flags.StringVar(&options.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") + flags.StringSliceVar(&options.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") + flags.StringSliceVar(&options.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") + flags.StringSliceVar(&options.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") + + flags.Var(&options.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") + flags.Var(&options.ipamOpt, "ipam-opt", "Set IPAM driver specific options") + + return cmd +} + +func runCreate(dockerCli command.Cli, options createOptions) error { + client := dockerCli.Client() + + ipamCfg, err := consolidateIpam(options.ipamSubnet, options.ipamIPRange, options.ipamGateway, options.ipamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Driver: options.driver, + Options: options.driverOpts.GetAll(), + IPAM: &network.IPAM{ + Driver: options.ipamDriver, + Config: ipamCfg, + Options: options.ipamOpt.GetAll(), + }, + CheckDuplicate: true, + Internal: options.internal, + EnableIPv6: options.ipv6, + Attachable: options.attachable, + Ingress: options.ingress, + Scope: options.scope, + ConfigOnly: options.configOnly, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + } + + if from := options.configFrom; from != "" { + nc.ConfigFrom = &network.ConfigReference{ + Network: from, + } + } + + resp, err := client.NetworkCreate(context.Background(), options.name, nc) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) + return nil +} + +// Consolidates the ipam configuration as a group from different related configurations +// user can configure network with multiple non-overlapping subnets and hence it is +// possible to correlate the various related parameters and consolidate them. +// consolidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into +// structured ipam data. 
+// nolint: gocyclo +func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, errors.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, errors.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, errors.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, errors.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { + return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return false, errors.Wrap(err, "invalid subnet") + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, err + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} diff --git a/cli/cli/command/network/create_test.go b/cli/cli/command/network/create_test.go new file mode 100644 index 00000000..6bfa7b65 --- /dev/null +++ b/cli/cli/command/network/create_test.go @@ -0,0 +1,174 @@ +package network + +import ( + "context" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNetworkCreateErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + networkCreateFunc func(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + expectedError string + }{ + { + expectedError: "exactly 1 argument", + }, + { + args: []string{"toto"}, + 
networkCreateFunc: func(ctx context.Context, name string, createBody types.NetworkCreate) (types.NetworkCreateResponse, error) { + return types.NetworkCreateResponse{}, errors.Errorf("error creating network") + }, + expectedError: "error creating network", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.255.0.0/24", + "gateway": "255.0.255.0/24", + "subnet": "10.1.2.0.30.50", + }, + expectedError: "invalid CIDR address: 10.1.2.0.30.50", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.255.0.0.30/24", + "gateway": "255.0.255.0/24", + "subnet": "255.0.0.0/24", + }, + expectedError: "invalid CIDR address: 255.255.0.0.30/24", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "gateway": "255.0.0.0/24", + }, + expectedError: "every ip-range or gateway must have a corresponding subnet", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.0.0.0/24", + }, + expectedError: "every ip-range or gateway must have a corresponding subnet", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.0.0.0/24", + "gateway": "255.0.0.0/24", + }, + expectedError: "every ip-range or gateway must have a corresponding subnet", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.255.0.0/24", + "gateway": "255.0.255.0/24", + "subnet": "10.1.2.0/23,10.1.3.248/30", + }, + expectedError: "multiple overlapping subnet configuration is not supported", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "192.168.1.0/24,192.168.1.200/24", + "gateway": "192.168.1.1,192.168.1.4", + "subnet": "192.168.2.0/24,192.168.1.250/24", + }, + expectedError: "cannot configure multiple ranges (192.168.1.200/24, 192.168.1.0/24) on the same subnet (192.168.1.250/24)", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.255.200.0/24,255.255.120.0/24", + "gateway": "255.0.255.0/24", + "subnet": "255.255.255.0/24,255.255.0.255/24", + }, + expectedError: "no matching subnet for range 255.255.200.0/24", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "192.168.1.0/24", + "gateway": "192.168.1.1,192.168.1.4", + "subnet": "192.168.2.0/24,192.168.1.250/24", + }, + expectedError: "cannot configure multiple gateways (192.168.1.4, 192.168.1.1) for the same subnet (192.168.1.250/24)", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "192.168.1.0/24", + "gateway": "192.168.4.1,192.168.5.4", + "subnet": "192.168.2.0/24,192.168.1.250/24", + }, + expectedError: "no matching subnet for gateway 192.168.4.1", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "gateway": "255.255.0.0/24", + "subnet": "255.255.0.0/24", + "aux-address": "255.255.0.30/24", + }, + expectedError: "no matching subnet for aux-address", + }, + } + + for _, tc := range testCases { + cmd := newCreateCommand( + test.NewFakeCli(&fakeClient{ + networkCreateFunc: tc.networkCreateFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + assert.NilError(t, cmd.Flags().Set(key, value)) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + + } +} +func TestNetworkCreateWithFlags(t *testing.T) { + expectedDriver := "foo" + expectedOpts := []network.IPAMConfig{ + { + Subnet: "192.168.4.0/24", + IPRange: "192.168.4.0/24", + Gateway: "192.168.4.1/24", + AuxAddress: map[string]string{}, + }, + } + cli := test.NewFakeCli(&fakeClient{ + 
networkCreateFunc: func(ctx context.Context, name string, createBody types.NetworkCreate) (types.NetworkCreateResponse, error) { + assert.Check(t, is.Equal(expectedDriver, createBody.Driver), "not expected driver error") + assert.Check(t, is.DeepEqual(expectedOpts, createBody.IPAM.Config), "not expected driver error") + return types.NetworkCreateResponse{ + ID: name, + }, nil + }, + }) + args := []string{"banana"} + cmd := newCreateCommand(cli) + + cmd.SetArgs(args) + cmd.Flags().Set("driver", "foo") + cmd.Flags().Set("ip-range", "192.168.4.0/24") + cmd.Flags().Set("gateway", "192.168.4.1/24") + cmd.Flags().Set("subnet", "192.168.4.0/24") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("banana", strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/network/disconnect.go b/cli/cli/command/network/disconnect.go new file mode 100644 index 00000000..18bf4c7b --- /dev/null +++ b/cli/cli/command/network/disconnect.go @@ -0,0 +1,41 @@ +package network + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type disconnectOptions struct { + network string + container string + force bool +} + +func newDisconnectCommand(dockerCli command.Cli) *cobra.Command { + opts := disconnectOptions{} + + cmd := &cobra.Command{ + Use: "disconnect [OPTIONS] NETWORK CONTAINER", + Short: "Disconnect a container from a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runDisconnect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") + + return cmd +} + +func runDisconnect(dockerCli command.Cli, opts disconnectOptions) error { + client := dockerCli.Client() + + return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) +} diff --git a/cli/cli/command/network/disconnect_test.go b/cli/cli/command/network/disconnect_test.go new file mode 100644 index 00000000..9a552570 --- /dev/null +++ b/cli/cli/command/network/disconnect_test.go @@ -0,0 +1,41 @@ +package network + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestNetworkDisconnectErrors(t *testing.T) { + testCases := []struct { + args []string + networkDisconnectFunc func(ctx context.Context, networkID, container string, force bool) error + expectedError string + }{ + { + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"toto", "titi"}, + networkDisconnectFunc: func(ctx context.Context, networkID, container string, force bool) error { + return errors.Errorf("error disconnecting network") + }, + expectedError: "error disconnecting network", + }, + } + + for _, tc := range testCases { + cmd := newDisconnectCommand( + test.NewFakeCli(&fakeClient{ + networkDisconnectFunc: tc.networkDisconnectFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} diff --git a/cli/cli/command/network/inspect.go b/cli/cli/command/network/inspect.go new file mode 100644 index 00000000..3d7543d9 --- /dev/null +++ b/cli/cli/command/network/inspect.go @@ -0,0 +1,48 @@ +package network + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + 
"github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string + verbose bool +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NETWORK [NETWORK...]", + Short: "Display detailed information on one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + cmd.Flags().BoolVarP(&opts.verbose, "verbose", "v", false, "Verbose output for diagnostics") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getNetFunc := func(name string) (interface{}, []byte, error) { + return client.NetworkInspectWithRaw(ctx, name, types.NetworkInspectOptions{Verbose: opts.verbose}) + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) +} diff --git a/cli/cli/command/network/list.go b/cli/cli/command/network/list.go new file mode 100644 index 00000000..39191929 --- /dev/null +++ b/cli/cli/command/network/list.go @@ -0,0 +1,75 @@ +package network + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type byNetworkName []types.NetworkResource + +func (r byNetworkName) Len() int { return len(r) } +func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } + +type listOptions struct { + quiet bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display network IDs") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Do not truncate the output") + flags.StringVar(&options.format, "format", "", "Pretty-print networks using a Go template") + flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 
'driver=bridge')") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + listOptions := types.NetworkListOptions{Filters: options.filter.Value()} + networkResources, err := client.NetworkList(context.Background(), listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().NetworksFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byNetworkName(networkResources)) + + networksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewNetworkFormat(format, options.quiet), + Trunc: !options.noTrunc, + } + return formatter.NetworkWrite(networksCtx, networkResources) +} diff --git a/cli/cli/command/network/list_test.go b/cli/cli/command/network/list_test.go new file mode 100644 index 00000000..1106315f --- /dev/null +++ b/cli/cli/command/network/list_test.go @@ -0,0 +1,63 @@ +package network + +import ( + "context" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + . "github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNetworkListErrors(t *testing.T) { + testCases := []struct { + networkListFunc func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + expectedError string + }{ + { + networkListFunc: func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + return []types.NetworkResource{}, errors.Errorf("error creating network") + }, + expectedError: "error creating network", + }, + } + + for _, tc := range testCases { + cmd := newListCommand( + test.NewFakeCli(&fakeClient{ + networkListFunc: tc.networkListFunc, + }), + ) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNetworkListWithFlags(t *testing.T) { + expectedOpts := types.NetworkListOptions{ + Filters: filters.NewArgs(filters.Arg("image.name", "ubuntu")), + } + + cli := test.NewFakeCli(&fakeClient{ + networkListFunc: func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + assert.Check(t, is.DeepEqual(expectedOpts, options, cmp.AllowUnexported(filters.Args{}))) + return []types.NetworkResource{*NetworkResource(NetworkResourceID("123454321"), + NetworkResourceName("network_1"), + NetworkResourceDriver("09.7.01"), + NetworkResourceScope("global"))}, nil + }, + }) + cmd := newListCommand(cli) + + cmd.Flags().Set("filter", "image.name=ubuntu") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, strings.TrimSpace(cli.OutBuffer().String()), "network-list.golden") +} diff --git a/cli/cli/command/network/prune.go b/cli/cli/command/network/prune.go new file mode 100644 index 00000000..b00e5cd2 --- /dev/null +++ b/cli/cli/command/network/prune.go @@ -0,0 +1,76 @@ +package network + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for networks +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := 
pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + return nil + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const warning = `WARNING! This will remove all networks not used by at least one container. +Are you sure you want to continue?` + +func runPrune(dockerCli command.Cli, options pruneOptions) (output string, err error) { + pruneFilters := command.PruneFilters(dockerCli, options.filter.Value()) + + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return "", nil + } + + report, err := dockerCli.Client().NetworksPrune(context.Background(), pruneFilters) + if err != nil { + return "", err + } + + if len(report.NetworksDeleted) > 0 { + output = "Deleted Networks:\n" + for _, id := range report.NetworksDeleted { + output += id + "\n" + } + } + + return output, nil +} + +// RunPrune calls the Network Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) { + output, err := runPrune(dockerCli, pruneOptions{force: true, filter: filter}) + return 0, output, err +} diff --git a/cli/cli/command/network/remove.go b/cli/cli/command/network/remove.go new file mode 100644 index 00000000..66f48197 --- /dev/null +++ b/cli/cli/command/network/remove.go @@ -0,0 +1,53 @@ +package network + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "rm NETWORK [NETWORK...]", + Aliases: []string{"remove"}, + Short: "Remove one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } +} + +const ingressWarning = "WARNING! Before removing the routing-mesh network, " + + "make sure all the nodes in your swarm run the same docker engine version. " + + "Otherwise, removal may not be effective and functionality of newly create " + + "ingress networks will be impaired.\nAre you sure you want to continue?" 
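+
+// Illustrative usage sketch (session abbreviated; "ingress" is the default name
+// of the swarm routing-mesh network, and the exact prompt suffix is rendered by
+// command.PromptForConfirmation):
+//
+//	$ docker network rm ingress
+//	WARNING! Before removing the routing-mesh network, ...
+//	Are you sure you want to continue? [y/N] y
+//	ingress
+//
+// Declining the prompt skips that network; any NetworkRemove error is written
+// to stderr and the command returns a non-zero cli.StatusError, as runRemove
+// below shows.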
+ +func runRemove(dockerCli command.Cli, networks []string) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range networks { + if nw, _, err := client.NetworkInspectWithRaw(ctx, name, types.NetworkInspectOptions{}); err == nil && + nw.Ingress && + !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), ingressWarning) { + continue + } + if err := client.NetworkRemove(ctx, name); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/cli/cli/command/network/testdata/network-list.golden b/cli/cli/command/network/testdata/network-list.golden new file mode 100644 index 00000000..e7765109 --- /dev/null +++ b/cli/cli/command/network/testdata/network-list.golden @@ -0,0 +1,2 @@ +NETWORK ID NAME DRIVER SCOPE +123454321 network_1 09.7.01 global \ No newline at end of file diff --git a/cli/cli/command/node/client_test.go b/cli/cli/command/node/client_test.go new file mode 100644 index 00000000..75a128cd --- /dev/null +++ b/cli/cli/command/node/client_test.go @@ -0,0 +1,69 @@ +package node + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + infoFunc func() (types.Info, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeListFunc func() ([]swarm.Node, error) + nodeRemoveFunc func() error + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + taskInspectFunc func(taskID string) (swarm.Task, []byte, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectFunc != nil { + return cli.nodeInspectFunc() + } + return swarm.Node{}, []byte{}, nil +} + +func (cli *fakeClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + if cli.nodeListFunc != nil { + return cli.nodeListFunc() + } + return []swarm.Node{}, nil +} + +func (cli *fakeClient) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + if cli.nodeRemoveFunc != nil { + return cli.nodeRemoveFunc() + } + return nil +} + +func (cli *fakeClient) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if cli.nodeUpdateFunc != nil { + return cli.nodeUpdateFunc(nodeID, version, node) + } + return nil +} + +func (cli *fakeClient) Info(ctx context.Context) (types.Info, error) { + if cli.infoFunc != nil { + return cli.infoFunc() + } + return types.Info{}, nil +} + +func (cli *fakeClient) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if cli.taskInspectFunc != nil { + return cli.taskInspectFunc(taskID) + } + return swarm.Task{}, []byte{}, nil +} + +func (cli *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + if cli.taskListFunc != nil { + return cli.taskListFunc(options) + } + return []swarm.Task{}, nil +} diff --git a/cli/cli/command/node/cmd.go b/cli/cli/command/node/cmd.go new file mode 100644 index 00000000..f96c9a6b --- /dev/null +++ b/cli/cli/command/node/cmd.go @@ -0,0 +1,60 @@ +package node + +import ( + "context" + "errors" + + "github.com/docker/cli/cli" + 
"github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +// NewNodeCommand returns a cobra command for `node` subcommands +func NewNodeCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "node", + Short: "Manage Swarm nodes", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.24", + "swarm": "", + }, + } + cmd.AddCommand( + newDemoteCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newPromoteCommand(dockerCli), + newRemoveCommand(dockerCli), + newPsCommand(dockerCli), + newUpdateCommand(dockerCli), + ) + return cmd +} + +// Reference returns the reference of a node. The special value "self" for a node +// reference is mapped to the current node, hence the node ID is retrieved using +// the `/info` endpoint. +func Reference(ctx context.Context, client apiclient.APIClient, ref string) (string, error) { + if ref == "self" { + info, err := client.Info(ctx) + if err != nil { + return "", err + } + if info.Swarm.NodeID == "" { + // If there's no node ID in /info, the node probably + // isn't a manager. Call a swarm-specific endpoint to + // get a more specific error message. + _, err = client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return "", err + } + return "", errors.New("node ID not found in /info") + } + return info.Swarm.NodeID, nil + } + return ref, nil +} diff --git a/cli/cli/command/node/demote.go b/cli/cli/command/node/demote.go new file mode 100644 index 00000000..5250dfc0 --- /dev/null +++ b/cli/cli/command/node/demote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" +) + +func newDemoteCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "demote NODE [NODE...]", + Short: "Demote one or more nodes from manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDemote(dockerCli, args) + }, + } +} + +func runDemote(dockerCli command.Cli, nodes []string) error { + demote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleWorker { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a worker.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleWorker + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, demote, success) +} diff --git a/cli/cli/command/node/demote_test.go b/cli/cli/command/node/demote_test.go new file mode 100644 index 00000000..3f18d63d --- /dev/null +++ b/cli/cli/command/node/demote_test.go @@ -0,0 +1,84 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" +) + +func TestNodeDemoteErrors(t *testing.T) { + testCases := []struct { + args []string + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"nodeID"}, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + return errors.Errorf("error updating the node") + }, + expectedError: "error updating the node", + }, + } + for _, tc := range testCases { + cmd := newDemoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + nodeUpdateFunc: tc.nodeUpdateFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeDemoteNoChange(t *testing.T) { + cmd := newDemoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleWorker { + return errors.Errorf("expected role worker, got %s", node.Role) + } + return nil + }, + })) + cmd.SetArgs([]string{"nodeID"}) + assert.NilError(t, cmd.Execute()) +} + +func TestNodeDemoteMultipleNode(t *testing.T) { + cmd := newDemoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleWorker { + return errors.Errorf("expected role worker, got %s", node.Role) + } + return nil + }, + })) + cmd.SetArgs([]string{"nodeID1", "nodeID2"}) + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/node/inspect.go b/cli/cli/command/node/inspect.go new file mode 100644 index 00000000..0dcb5db9 --- /dev/null +++ b/cli/cli/command/node/inspect.go @@ -0,0 +1,72 @@ +package node + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + nodeIds []string + format string + pretty bool +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] self|NODE [NODE...]", + Short: "Display detailed information on one or more nodes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.nodeIds = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(ref string) (interface{}, []byte, error) { + nodeRef, err := Reference(ctx, client, ref) + if err != nil { + return 
nil, nil, err + } + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + return node, nil, err + } + f := opts.format + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + nodeCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewNodeFormat(f, false), + } + + if err := formatter.NodeInspectWrite(nodeCtx, opts.nodeIds, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/cli/cli/command/node/inspect_test.go b/cli/cli/command/node/inspect_test.go new file mode 100644 index 00000000..de343b0f --- /dev/null +++ b/cli/cli/command/node/inspect_test.go @@ -0,0 +1,118 @@ +package node + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestNodeInspectErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + nodeInspectFunc func() (swarm.Node, []byte, error) + infoFunc func() (types.Info, error) + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"self"}, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"self"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + infoFunc: func() (types.Info, error) { + return types.Info{Swarm: swarm.Info{NodeID: "abc"}}, nil + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"self"}, + flags: map[string]string{ + "pretty": "true", + }, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + } + for _, tc := range testCases { + cmd := newInspectCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + infoFunc: tc.infoFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeInspectPretty(t *testing.T) { + testCases := []struct { + name string + nodeInspectFunc func() (swarm.Node, []byte, error) + }{ + { + name: "simple", + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(NodeLabels(map[string]string{ + "lbl1": "value1", + })), []byte{}, nil + }, + }, + { + name: "manager", + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + }, + { + name: "manager-leader", + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return 
*Node(Manager(Leader())), []byte{}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + }) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"nodeID"}) + cmd.Flags().Set("pretty", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("node-inspect-pretty.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/node/list.go b/cli/cli/command/node/list.go new file mode 100644 index 00000000..d35ed0ea --- /dev/null +++ b/cli/cli/command/node/list.go @@ -0,0 +1,85 @@ +package node + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type byHostname []swarm.Node + +func (n byHostname) Len() int { return len(n) } +func (n byHostname) Swap(i, j int) { n[i], n[j] = n[j], n[i] } +func (n byHostname) Less(i, j int) bool { + return sortorder.NaturalLess(n[i].Description.Hostname, n[j].Description.Hostname) +} + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List nodes in the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display IDs") + flags.StringVar(&options.format, "format", "", "Pretty-print nodes using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + nodes, err := client.NodeList( + ctx, + types.NodeListOptions{Filters: options.filter.Value()}) + if err != nil { + return err + } + + info := types.Info{} + if len(nodes) > 0 && !options.quiet { + // only non-empty nodes and not quiet, should we call /info api + info, err = client.Info(ctx) + if err != nil { + return err + } + } + + format := options.format + if len(format) == 0 { + format = formatter.TableFormatKey + if len(dockerCli.ConfigFile().NodesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().NodesFormat + } + } + + nodesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewNodeFormat(format, options.quiet), + } + sort.Sort(byHostname(nodes)) + return formatter.NodeWrite(nodesCtx, nodes, info) +} diff --git a/cli/cli/command/node/list_test.go b/cli/cli/command/node/list_test.go new file mode 100644 index 00000000..5dc11c96 --- /dev/null +++ b/cli/cli/command/node/list_test.go @@ -0,0 +1,141 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" +) + +func TestNodeListErrorOnAPIFailure(t *testing.T) { + testCases := []struct { + nodeListFunc func() ([]swarm.Node, error) + infoFunc func() (types.Info, error) + expectedError string + }{ + { + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{}, errors.Errorf("error listing nodes") + }, + expectedError: "error listing nodes", + }, + { + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + { + ID: "nodeID", + }, + }, nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: tc.nodeListFunc, + infoFunc: tc.infoFunc, + }) + cmd := newListCommand(cli) + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeList(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + *Node(NodeID("nodeID1"), Hostname("node-2-foo"), Manager(Leader()), EngineVersion(".")), + *Node(NodeID("nodeID2"), Hostname("node-10-foo"), Manager(), EngineVersion("18.03.0-ce")), + *Node(NodeID("nodeID3"), Hostname("node-1-foo")), + }, nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID1", + }, + }, nil + }, + }) + + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "node-list-sort.golden") +} + +func TestNodeListQuietShouldOnlyPrintIDs(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + *Node(NodeID("nodeID1")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("quiet", "true") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(cli.OutBuffer().String(), "nodeID1\n")) +} + +func TestNodeListDefaultFormatFromConfig(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + *Node(NodeID("nodeID1"), Hostname("nodeHostname1"), Manager(Leader())), + *Node(NodeID("nodeID2"), Hostname("nodeHostname2"), Manager()), + *Node(NodeID("nodeID3"), Hostname("nodeHostname3")), + }, nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID1", + }, + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + NodesFormat: "{{.ID}}: {{.Hostname}} {{.Status}}/{{.ManagerStatus}}", + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "node-list-format-from-config.golden") +} + +func TestNodeListFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + *Node(NodeID("nodeID1"), Hostname("nodeHostname1"), Manager(Leader())), + *Node(NodeID("nodeID2"), Hostname("nodeHostname2"), Manager()), + }, nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID1", + }, + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + NodesFormat: "{{.ID}}: {{.Hostname}} {{.Status}}/{{.ManagerStatus}}", + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{.Hostname}}: {{.ManagerStatus}}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "node-list-format-flag.golden") +} diff --git 
a/cli/cli/command/node/opts.go b/cli/cli/command/node/opts.go new file mode 100644 index 00000000..e30e5de9 --- /dev/null +++ b/cli/cli/command/node/opts.go @@ -0,0 +1,23 @@ +package node + +import ( + "github.com/docker/cli/opts" +) + +type nodeOptions struct { + annotations + role string + availability string +} + +type annotations struct { + labels opts.ListOpts +} + +func newNodeOptions() *nodeOptions { + return &nodeOptions{ + annotations: annotations{ + labels: opts.NewListOpts(nil), + }, + } +} diff --git a/cli/cli/command/node/promote.go b/cli/cli/command/node/promote.go new file mode 100644 index 00000000..4612cc13 --- /dev/null +++ b/cli/cli/command/node/promote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" +) + +func newPromoteCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "promote NODE [NODE...]", + Short: "Promote one or more nodes to manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPromote(dockerCli, args) + }, + } +} + +func runPromote(dockerCli command.Cli, nodes []string) error { + promote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleManager { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a manager.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleManager + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, promote, success) +} diff --git a/cli/cli/command/node/promote_test.go b/cli/cli/command/node/promote_test.go new file mode 100644 index 00000000..c6b53423 --- /dev/null +++ b/cli/cli/command/node/promote_test.go @@ -0,0 +1,84 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" +) + +func TestNodePromoteErrors(t *testing.T) { + testCases := []struct { + args []string + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"nodeID"}, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + return errors.Errorf("error updating the node") + }, + expectedError: "error updating the node", + }, + } + for _, tc := range testCases { + cmd := newPromoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + nodeUpdateFunc: tc.nodeUpdateFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodePromoteNoChange(t *testing.T) { + cmd := newPromoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleManager { + return errors.Errorf("expected role manager, got %s", node.Role) + } + return nil + }, + })) + cmd.SetArgs([]string{"nodeID"}) + assert.NilError(t, cmd.Execute()) +} + +func TestNodePromoteMultipleNode(t *testing.T) { + cmd := newPromoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleManager { + return errors.Errorf("expected role manager, got %s", node.Role) + } + return nil + }, + })) + cmd.SetArgs([]string{"nodeID1", "nodeID2"}) + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/node/ps.go b/cli/cli/command/node/ps.go new file mode 100644 index 00000000..2450e6af --- /dev/null +++ b/cli/cli/command/node/ps.go @@ -0,0 +1,104 @@ +package node + +import ( + "context" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/cli/command/task" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type psOptions struct { + nodeIDs []string + noResolve bool + noTrunc bool + quiet bool + format string + filter opts.FilterOpt +} + +func newPsCommand(dockerCli command.Cli) *cobra.Command { + options := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] [NODE...]", + Short: "List tasks running on one or more nodes, defaults to current node", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + options.nodeIDs = []string{"self"} + + if len(args) != 0 { + options.nodeIDs = args + } + + return runPs(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&options.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&options.filter, "filter", "f", "Filter output 
based on conditions provided") + flags.StringVar(&options.format, "format", "", "Pretty-print tasks using a Go template") + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display task IDs") + + return cmd +} + +func runPs(dockerCli command.Cli, options psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var ( + errs []string + tasks []swarm.Task + ) + + for _, nodeID := range options.nodeIDs { + nodeRef, err := Reference(ctx, client, nodeID) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + filter := options.filter.Value() + filter.Add("node", node.ID) + + nodeTasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + tasks = append(tasks, nodeTasks...) + } + + format := options.format + if len(format) == 0 { + format = task.DefaultFormat(dockerCli.ConfigFile(), options.quiet) + } + + if len(errs) == 0 || len(tasks) != 0 { + if err := task.Print(ctx, dockerCli, tasks, idresolver.New(client, options.noResolve), !options.noTrunc, options.quiet, format); err != nil { + errs = append(errs, err.Error()) + } + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/cli/cli/command/node/ps_test.go b/cli/cli/command/node/ps_test.go new file mode 100644 index 00000000..ae5ed616 --- /dev/null +++ b/cli/cli/command/node/ps_test.go @@ -0,0 +1,128 @@ +package node + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestNodePsErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + infoFunc func() (types.Info, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + taskInspectFunc func(taskID string) (swarm.Task, []byte, error) + expectedError string + }{ + { + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"nodeID"}, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{}, errors.Errorf("error returning the task list") + }, + expectedError: "error returning the task list", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + infoFunc: tc.infoFunc, + nodeInspectFunc: tc.nodeInspectFunc, + taskInspectFunc: tc.taskInspectFunc, + taskListFunc: tc.taskListFunc, + }) + cmd := newPsCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodePs(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + infoFunc func() (types.Info, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + taskInspectFunc func(taskID string) (swarm.Task, []byte, error) + }{ + { + name: "simple", + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{ + *Task(WithStatus(Timestamp(time.Now().Add(-2*time.Hour)), PortStatus([]swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 80, + Protocol: "tcp", + }, + }))), + }, nil + }, + }, + { + name: "with-errors", + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{ + *Task(TaskID("taskID1"), TaskServiceID("failure"), + WithStatus(Timestamp(time.Now().Add(-2*time.Hour)), StatusErr("a task error"))), + *Task(TaskID("taskID2"), TaskServiceID("failure"), + WithStatus(Timestamp(time.Now().Add(-3*time.Hour)), StatusErr("a task error"))), + *Task(TaskID("taskID3"), TaskServiceID("failure"), + WithStatus(Timestamp(time.Now().Add(-4*time.Hour)), StatusErr("a task error"))), + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + infoFunc: tc.infoFunc, + nodeInspectFunc: tc.nodeInspectFunc, + taskInspectFunc: tc.taskInspectFunc, + taskListFunc: tc.taskListFunc, + }) + cmd := newPsCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("node-ps.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/node/remove.go b/cli/cli/command/node/remove.go new file mode 100644 index 
00000000..65e3cdc3 --- /dev/null +++ b/cli/cli/command/node/remove.go @@ -0,0 +1,56 @@ +package node + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + opts := removeOptions{} + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] NODE [NODE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more nodes from the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force remove a node from the swarm") + return cmd +} + +func runRemove(dockerCli command.Cli, args []string, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, nodeID := range args { + err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID) + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/cli/cli/command/node/remove_test.go b/cli/cli/command/node/remove_test.go new file mode 100644 index 00000000..8ae01c7b --- /dev/null +++ b/cli/cli/command/node/remove_test.go @@ -0,0 +1,44 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestNodeRemoveErrors(t *testing.T) { + testCases := []struct { + args []string + nodeRemoveFunc func() error + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"nodeID"}, + nodeRemoveFunc: func() error { + return errors.Errorf("error removing the node") + }, + expectedError: "error removing the node", + }, + } + for _, tc := range testCases { + cmd := newRemoveCommand( + test.NewFakeCli(&fakeClient{ + nodeRemoveFunc: tc.nodeRemoveFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeRemoveMultiple(t *testing.T) { + cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetArgs([]string{"nodeID1", "nodeID2"}) + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden b/cli/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden new file mode 100644 index 00000000..5cd95c5b --- /dev/null +++ b/cli/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden @@ -0,0 +1,24 @@ +ID: nodeID +Name: defaultNodeName +Hostname: defaultNodeHostname +Joined at: 2009-11-10 23:00:00 +0000 utc +Status: + State: Ready + Availability: Active + Address: 127.0.0.1 +Manager Status: + Address: 127.0.0.1 + Raft Status: Reachable + Leader: Yes +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 0 + Memory: 20MiB +Plugins: + Network: bridge, overlay + Volume: local +Engine Version: 1.13.0 +Engine Labels: + - engine=label diff --git a/cli/cli/command/node/testdata/node-inspect-pretty.manager.golden b/cli/cli/command/node/testdata/node-inspect-pretty.manager.golden new file mode 100644 index 00000000..a6371829 --- /dev/null +++ 
b/cli/cli/command/node/testdata/node-inspect-pretty.manager.golden @@ -0,0 +1,24 @@ +ID: nodeID +Name: defaultNodeName +Hostname: defaultNodeHostname +Joined at: 2009-11-10 23:00:00 +0000 utc +Status: + State: Ready + Availability: Active + Address: 127.0.0.1 +Manager Status: + Address: 127.0.0.1 + Raft Status: Reachable + Leader: No +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 0 + Memory: 20MiB +Plugins: + Network: bridge, overlay + Volume: local +Engine Version: 1.13.0 +Engine Labels: + - engine=label diff --git a/cli/cli/command/node/testdata/node-inspect-pretty.simple.golden b/cli/cli/command/node/testdata/node-inspect-pretty.simple.golden new file mode 100644 index 00000000..8aaf9089 --- /dev/null +++ b/cli/cli/command/node/testdata/node-inspect-pretty.simple.golden @@ -0,0 +1,22 @@ +ID: nodeID +Name: defaultNodeName +Labels: + - lbl1=value1 +Hostname: defaultNodeHostname +Joined at: 2009-11-10 23:00:00 +0000 utc +Status: + State: Ready + Availability: Active + Address: 127.0.0.1 +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 0 + Memory: 20MiB +Plugins: + Network: bridge, overlay + Volume: local +Engine Version: 1.13.0 +Engine Labels: + - engine=label diff --git a/cli/cli/command/node/testdata/node-list-format-flag.golden b/cli/cli/command/node/testdata/node-list-format-flag.golden new file mode 100644 index 00000000..c898df13 --- /dev/null +++ b/cli/cli/command/node/testdata/node-list-format-flag.golden @@ -0,0 +1,2 @@ +nodeHostname1: Leader +nodeHostname2: Reachable diff --git a/cli/cli/command/node/testdata/node-list-format-from-config.golden b/cli/cli/command/node/testdata/node-list-format-from-config.golden new file mode 100644 index 00000000..91beb4a2 --- /dev/null +++ b/cli/cli/command/node/testdata/node-list-format-from-config.golden @@ -0,0 +1,3 @@ +nodeID1: nodeHostname1 Ready/Leader +nodeID2: nodeHostname2 Ready/Reachable +nodeID3: nodeHostname3 Ready/ diff --git a/cli/cli/command/node/testdata/node-list-sort.golden b/cli/cli/command/node/testdata/node-list-sort.golden new file mode 100644 index 00000000..ffc09c92 --- /dev/null +++ b/cli/cli/command/node/testdata/node-list-sort.golden @@ -0,0 +1,4 @@ +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION +nodeID3 node-1-foo Ready Active 1.13.0 +nodeID1 * node-2-foo Ready Active Leader . 
+nodeID2 node-10-foo Ready Active Reachable 18.03.0-ce diff --git a/cli/cli/command/node/testdata/node-ps.simple.golden b/cli/cli/command/node/testdata/node-ps.simple.golden new file mode 100644 index 00000000..b1818b96 --- /dev/null +++ b/cli/cli/command/node/testdata/node-ps.simple.golden @@ -0,0 +1,2 @@ +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +taskID rl02d5gwz6chzu7il5fhtb8be.1 myimage:mytag defaultNodeName Ready Ready 2 hours ago *:80->80/tcp diff --git a/cli/cli/command/node/testdata/node-ps.with-errors.golden b/cli/cli/command/node/testdata/node-ps.with-errors.golden new file mode 100644 index 00000000..99e34931 --- /dev/null +++ b/cli/cli/command/node/testdata/node-ps.with-errors.golden @@ -0,0 +1,4 @@ +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +taskID1 failure.1 myimage:mytag defaultNodeName Ready Ready 2 hours ago "a task error" +taskID2 \_ failure.1 myimage:mytag defaultNodeName Ready Ready 3 hours ago "a task error" +taskID3 \_ failure.1 myimage:mytag defaultNodeName Ready Ready 4 hours ago "a task error" diff --git a/cli/cli/command/node/update.go b/cli/cli/command/node/update.go new file mode 100644 index 00000000..dbae49c6 --- /dev/null +++ b/cli/cli/command/node/update.go @@ -0,0 +1,120 @@ +package node + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +var ( + errNoRoleChange = errors.New("role was already set to the requested value") +) + +func newUpdateCommand(dockerCli command.Cli) *cobra.Command { + options := newNodeOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] NODE", + Short: "Update a node", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), args[0]) + }, + } + + flags := cmd.Flags() + flags.StringVar(&options.role, flagRole, "", `Role of the node ("worker"|"manager")`) + flags.StringVar(&options.availability, flagAvailability, "", `Availability of the node ("active"|"pause"|"drain")`) + flags.Var(&options.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)") + labelKeys := opts.NewListOpts(nil) + flags.Var(&labelKeys, flagLabelRemove, "Remove a node label if exists") + return cmd +} + +func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, nodeID string) error { + success := func(_ string) { + fmt.Fprintln(dockerCli.Out(), nodeID) + } + return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) +} + +func updateNodes(dockerCli command.Cli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error { + client := dockerCli.Client() + ctx := context.Background() + + for _, nodeID := range nodes { + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + err = mergeNode(&node) + if err != nil { + if err == errNoRoleChange { + continue + } + return err + } + err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) + if err != nil { + return err + } + success(nodeID) + } + return nil +} + +func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error { + return func(node *swarm.Node) error { + spec := &node.Spec + + if flags.Changed(flagRole) { + str, err := flags.GetString(flagRole) + if err != nil { + return err + } + spec.Role = swarm.NodeRole(str) + } + if flags.Changed(flagAvailability) { + str, err := 
flags.GetString(flagAvailability) + if err != nil { + return err + } + spec.Availability = swarm.NodeAvailability(str) + } + if spec.Annotations.Labels == nil { + spec.Annotations.Labels = make(map[string]string) + } + if flags.Changed(flagLabelAdd) { + labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for k, v := range opts.ConvertKVStringsToMap(labels) { + spec.Annotations.Labels[k] = v + } + } + if flags.Changed(flagLabelRemove) { + keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, k := range keys { + // if a key doesn't exist, fail the command explicitly + if _, exists := spec.Annotations.Labels[k]; !exists { + return errors.Errorf("key %s doesn't exist in node's labels", k) + } + delete(spec.Annotations.Labels, k) + } + } + return nil + } +} + +const ( + flagRole = "role" + flagAvailability = "availability" + flagLabelAdd = "label-add" + flagLabelRemove = "label-rm" +) diff --git a/cli/cli/command/node/update_test.go b/cli/cli/command/node/update_test.go new file mode 100644 index 00000000..8b6ae807 --- /dev/null +++ b/cli/cli/command/node/update_test.go @@ -0,0 +1,169 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" +) + +func TestNodeUpdateErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + expectedError string + }{ + { + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"node1", "node2"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"nodeID"}, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + return errors.Errorf("error updating the node") + }, + expectedError: "error updating the node", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(NodeLabels(map[string]string{ + "key": "value", + })), []byte{}, nil + }, + flags: map[string]string{ + "label-rm": "notpresent", + }, + expectedError: "key notpresent doesn't exist in node's labels", + }, + } + for _, tc := range testCases { + cmd := newUpdateCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + nodeUpdateFunc: tc.nodeUpdateFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeUpdate(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + }{ + { + args: []string{"nodeID"}, + flags: map[string]string{ + "role": "manager", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != 
swarm.NodeRoleManager { + return errors.Errorf("expected role manager, got %s", node.Role) + } + return nil + }, + }, + { + args: []string{"nodeID"}, + flags: map[string]string{ + "availability": "drain", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Availability != swarm.NodeAvailabilityDrain { + return errors.Errorf("expected drain availability, got %s", node.Availability) + } + return nil + }, + }, + { + args: []string{"nodeID"}, + flags: map[string]string{ + "label-add": "lbl", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if _, present := node.Annotations.Labels["lbl"]; !present { + return errors.Errorf("expected 'lbl' label, got %v", node.Annotations.Labels) + } + return nil + }, + }, + { + args: []string{"nodeID"}, + flags: map[string]string{ + "label-add": "key=value", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if value, present := node.Annotations.Labels["key"]; !present || value != "value" { + return errors.Errorf("expected 'key' label to be 'value', got %v", node.Annotations.Labels) + } + return nil + }, + }, + { + args: []string{"nodeID"}, + flags: map[string]string{ + "label-rm": "key", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(NodeLabels(map[string]string{ + "key": "value", + })), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if len(node.Annotations.Labels) > 0 { + return errors.Errorf("expected no labels, got %v", node.Annotations.Labels) + } + return nil + }, + }, + } + for _, tc := range testCases { + cmd := newUpdateCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + nodeUpdateFunc: tc.nodeUpdateFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + } +} diff --git a/cli/cli/command/orchestrator.go b/cli/cli/command/orchestrator.go new file mode 100644 index 00000000..5f3e4462 --- /dev/null +++ b/cli/cli/command/orchestrator.go @@ -0,0 +1,77 @@ +package command + +import ( + "fmt" + "io" + "os" +) + +// Orchestrator type acts as an enum describing supported orchestrators. +type Orchestrator string + +const ( + // OrchestratorKubernetes orchestrator + OrchestratorKubernetes = Orchestrator("kubernetes") + // OrchestratorSwarm orchestrator + OrchestratorSwarm = Orchestrator("swarm") + // OrchestratorAll orchestrator + OrchestratorAll = Orchestrator("all") + orchestratorUnset = Orchestrator("unset") + + defaultOrchestrator = OrchestratorSwarm + envVarDockerStackOrchestrator = "DOCKER_STACK_ORCHESTRATOR" + envVarDockerOrchestrator = "DOCKER_ORCHESTRATOR" +) + +// HasKubernetes returns true if defined orchestrator has Kubernetes capabilities. +func (o Orchestrator) HasKubernetes() bool { + return o == OrchestratorKubernetes || o == OrchestratorAll +} + +// HasSwarm returns true if defined orchestrator has Swarm capabilities. +func (o Orchestrator) HasSwarm() bool { + return o == OrchestratorSwarm || o == OrchestratorAll +} + +// HasAll returns true if defined orchestrator has both Swarm and Kubernetes capabilities. 
+func (o Orchestrator) HasAll() bool { + return o == OrchestratorAll +} + +func normalize(value string) (Orchestrator, error) { + switch value { + case "kubernetes": + return OrchestratorKubernetes, nil + case "swarm": + return OrchestratorSwarm, nil + case "": + return orchestratorUnset, nil + case "all": + return OrchestratorAll, nil + default: + return defaultOrchestrator, fmt.Errorf("specified orchestrator %q is invalid, please use either kubernetes, swarm or all", value) + } +} + +// GetStackOrchestrator checks DOCKER_STACK_ORCHESTRATOR environment variable and configuration file +// orchestrator value and returns user defined Orchestrator. +func GetStackOrchestrator(flagValue, value string, stderr io.Writer) (Orchestrator, error) { + // Check flag + if o, err := normalize(flagValue); o != orchestratorUnset { + return o, err + } + // Check environment variable + env := os.Getenv(envVarDockerStackOrchestrator) + if env == "" && os.Getenv(envVarDockerOrchestrator) != "" { + fmt.Fprintf(stderr, "WARNING: experimental environment variable %s is set. Please use %s instead\n", envVarDockerOrchestrator, envVarDockerStackOrchestrator) + } + if o, err := normalize(env); o != orchestratorUnset { + return o, err + } + // Check specified orchestrator + if o, err := normalize(value); o != orchestratorUnset { + return o, err + } + // Nothing set, use default orchestrator + return defaultOrchestrator, nil +} diff --git a/cli/cli/command/orchestrator_test.go b/cli/cli/command/orchestrator_test.go new file mode 100644 index 00000000..322e8a91 --- /dev/null +++ b/cli/cli/command/orchestrator_test.go @@ -0,0 +1,118 @@ +package command + +import ( + "io/ioutil" + "os" + "testing" + + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/flags" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/env" + "gotest.tools/fs" +) + +func TestOrchestratorSwitch(t *testing.T) { + defaultVersion := "v0.00" + + var testcases = []struct { + doc string + configfile string + envOrchestrator string + flagOrchestrator string + expectedOrchestrator string + expectedKubernetes bool + expectedSwarm bool + }{ + { + doc: "default", + configfile: `{ + }`, + expectedOrchestrator: "swarm", + expectedKubernetes: false, + expectedSwarm: true, + }, + { + doc: "kubernetesConfigFile", + configfile: `{ + "stackOrchestrator": "kubernetes" + }`, + expectedOrchestrator: "kubernetes", + expectedKubernetes: true, + expectedSwarm: false, + }, + { + doc: "kubernetesEnv", + configfile: `{ + }`, + envOrchestrator: "kubernetes", + expectedOrchestrator: "kubernetes", + expectedKubernetes: true, + expectedSwarm: false, + }, + { + doc: "kubernetesFlag", + configfile: `{ + }`, + flagOrchestrator: "kubernetes", + expectedOrchestrator: "kubernetes", + expectedKubernetes: true, + expectedSwarm: false, + }, + { + doc: "allOrchestratorFlag", + configfile: `{ + }`, + flagOrchestrator: "all", + expectedOrchestrator: "all", + expectedKubernetes: true, + expectedSwarm: true, + }, + { + doc: "envOverridesConfigFile", + configfile: `{ + "stackOrchestrator": "kubernetes" + }`, + envOrchestrator: "swarm", + expectedOrchestrator: "swarm", + expectedKubernetes: false, + expectedSwarm: true, + }, + { + doc: "flagOverridesEnv", + configfile: `{ + }`, + envOrchestrator: "kubernetes", + flagOrchestrator: "swarm", + expectedOrchestrator: "swarm", + expectedKubernetes: false, + expectedSwarm: true, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + dir := fs.NewDir(t, testcase.doc, 
fs.WithFile("config.json", testcase.configfile)) + defer dir.Remove() + apiclient := &fakeClient{ + version: defaultVersion, + } + if testcase.envOrchestrator != "" { + defer env.Patch(t, "DOCKER_STACK_ORCHESTRATOR", testcase.envOrchestrator)() + } + + cli := &DockerCli{client: apiclient, err: os.Stderr} + cliconfig.SetDir(dir.Path()) + options := flags.NewClientOptions() + err := cli.Initialize(options) + assert.NilError(t, err) + + orchestrator, err := GetStackOrchestrator(testcase.flagOrchestrator, cli.ConfigFile().StackOrchestrator, ioutil.Discard) + assert.NilError(t, err) + assert.Check(t, is.Equal(testcase.expectedKubernetes, orchestrator.HasKubernetes())) + assert.Check(t, is.Equal(testcase.expectedSwarm, orchestrator.HasSwarm())) + assert.Check(t, is.Equal(testcase.expectedOrchestrator, string(orchestrator))) + }) + } +} diff --git a/cli/cli/command/out.go b/cli/cli/command/out.go new file mode 100644 index 00000000..89cc5d3a --- /dev/null +++ b/cli/cli/command/out.go @@ -0,0 +1,50 @@ +package command + +import ( + "io" + "os" + + "github.com/docker/docker/pkg/term" + "github.com/sirupsen/logrus" +) + +// OutStream is an output stream used by the DockerCli to write normal program +// output. +type OutStream struct { + CommonStream + out io.Writer +} + +func (o *OutStream) Write(p []byte) (int, error) { + return o.out.Write(p) +} + +// SetRawTerminal sets raw mode on the input terminal +func (o *OutStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !o.CommonStream.isTerminal { + return nil + } + o.CommonStream.state, err = term.SetRawTerminalOutput(o.CommonStream.fd) + return err +} + +// GetTtySize returns the height and width in characters of the tty +func (o *OutStream) GetTtySize() (uint, uint) { + if !o.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(o.fd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return uint(ws.Height), uint(ws.Width) +} + +// NewOutStream returns a new OutStream object from a Writer +func NewOutStream(out io.Writer) *OutStream { + fd, isTerminal := term.GetFdInfo(out) + return &OutStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, out: out} +} diff --git a/cli/cli/command/plugin/client_test.go b/cli/cli/command/plugin/client_test.go new file mode 100644 index 00000000..07b4a06e --- /dev/null +++ b/cli/cli/command/plugin/client_test.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + pluginCreateFunc func(createContext io.Reader, createOptions types.PluginCreateOptions) error + pluginDisableFunc func(name string, disableOptions types.PluginDisableOptions) error + pluginEnableFunc func(name string, options types.PluginEnableOptions) error + pluginRemoveFunc func(name string, options types.PluginRemoveOptions) error +} + +func (c *fakeClient) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + if c.pluginCreateFunc != nil { + return c.pluginCreateFunc(createContext, createOptions) + } + return nil +} + +func (c *fakeClient) PluginEnable(ctx context.Context, name string, enableOptions types.PluginEnableOptions) error { + if c.pluginEnableFunc != nil { + return c.pluginEnableFunc(name, enableOptions) + } + return nil +} + +func (c *fakeClient) PluginDisable(context context.Context, name string, disableOptions types.PluginDisableOptions) error { + if 
c.pluginDisableFunc != nil { + return c.pluginDisableFunc(name, disableOptions) + } + return nil +} + +func (c *fakeClient) PluginRemove(context context.Context, name string, removeOptions types.PluginRemoveOptions) error { + if c.pluginRemoveFunc != nil { + return c.pluginRemoveFunc(name, removeOptions) + } + return nil +} diff --git a/cli/cli/command/plugin/cmd.go b/cli/cli/command/plugin/cmd.go new file mode 100644 index 00000000..2e79ab1d --- /dev/null +++ b/cli/cli/command/plugin/cmd.go @@ -0,0 +1,32 @@ +package plugin + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewPluginCommand returns a cobra command for `plugin` subcommands +func NewPluginCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "plugin", + Short: "Manage plugins", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{"version": "1.25"}, + } + + cmd.AddCommand( + newDisableCommand(dockerCli), + newEnableCommand(dockerCli), + newInspectCommand(dockerCli), + newInstallCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newSetCommand(dockerCli), + newPushCommand(dockerCli), + newCreateCommand(dockerCli), + newUpgradeCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/plugin/create.go b/cli/cli/command/plugin/create.go new file mode 100644 index 00000000..d6550eda --- /dev/null +++ b/cli/cli/command/plugin/create.go @@ -0,0 +1,128 @@ +package plugin + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +// validateTag checks if the given repoName can be resolved. +func validateTag(rawRepo string) error { + _, err := reference.ParseNormalizedNamed(rawRepo) + + return err +} + +// validateConfig ensures that a valid config.json is available in the given path +func validateConfig(path string) error { + dt, err := os.Open(filepath.Join(path, "config.json")) + if err != nil { + return err + } + + m := types.PluginConfig{} + err = json.NewDecoder(dt).Decode(&m) + dt.Close() + + return err +} + +// validateContextDir validates the given dir and returns abs path on success. +func validateContextDir(contextDir string) (string, error) { + absContextDir, err := filepath.Abs(contextDir) + if err != nil { + return "", err + } + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", err + } + + if !stat.IsDir() { + return "", errors.Errorf("context must be a directory") + } + + return absContextDir, nil +} + +type pluginCreateOptions struct { + repoName string + context string + compress bool +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + options := pluginCreateOptions{} + + cmd := &cobra.Command{ + Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR", + Short: "Create a plugin from a rootfs and configuration. 
Plugin data directory must contain config.json and rootfs directory.", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.repoName = args[0] + options.context = args[1] + return runCreate(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&options.compress, "compress", false, "Compress the context using gzip") + + return cmd +} + +func runCreate(dockerCli command.Cli, options pluginCreateOptions) error { + var ( + createCtx io.ReadCloser + err error + ) + + if err := validateTag(options.repoName); err != nil { + return err + } + + absContextDir, err := validateContextDir(options.context) + if err != nil { + return err + } + + if err := validateConfig(options.context); err != nil { + return err + } + + compression := archive.Uncompressed + if options.compress { + logrus.Debugf("compression enabled") + compression = archive.Gzip + } + + createCtx, err = archive.TarWithOptions(absContextDir, &archive.TarOptions{ + Compression: compression, + }) + + if err != nil { + return err + } + + ctx := context.Background() + + createOptions := types.PluginCreateOptions{RepoName: options.repoName} + if err = dockerCli.Client().PluginCreate(ctx, createCtx, createOptions); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), options.repoName) + return nil +} diff --git a/cli/cli/command/plugin/create_test.go b/cli/cli/command/plugin/create_test.go new file mode 100644 index 00000000..bef002c0 --- /dev/null +++ b/cli/cli/command/plugin/create_test.go @@ -0,0 +1,123 @@ +package plugin + +import ( + "fmt" + "io" + "io/ioutil" + "runtime" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +func TestCreateErrors(t *testing.T) { + noSuchFile := "no such file or directory" + if runtime.GOOS == "windows" { + noSuchFile = "The system cannot find the file specified." + } + testCases := []struct { + args []string + expectedError string + }{ + { + args: []string{}, + expectedError: "requires at least 2 arguments", + }, + { + args: []string{"INVALID_TAG", "context-dir"}, + expectedError: "invalid", + }, + { + args: []string{"plugin-foo", "nonexistent_context_dir"}, + expectedError: noSuchFile, + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cmd := newCreateCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestCreateErrorOnFileAsContextDir(t *testing.T) { + tmpFile := fs.NewFile(t, "file-as-context-dir") + defer tmpFile.Remove() + + cli := test.NewFakeCli(&fakeClient{}) + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpFile.Path()}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "context must be a directory") +} + +func TestCreateErrorOnContextDirWithoutConfig(t *testing.T) { + tmpDir := fs.NewDir(t, "plugin-create-test") + defer tmpDir.Remove() + + cli := test.NewFakeCli(&fakeClient{}) + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpDir.Path()}) + cmd.SetOutput(ioutil.Discard) + + expectedErr := "config.json: no such file or directory" + if runtime.GOOS == "windows" { + expectedErr = "config.json: The system cannot find the file specified." 
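+		// (Windows reports the missing config.json with its own wording, hence the platform-specific expected message above.)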
+ } + assert.ErrorContains(t, cmd.Execute(), expectedErr) +} + +func TestCreateErrorOnInvalidConfig(t *testing.T) { + tmpDir := fs.NewDir(t, "plugin-create-test", + fs.WithDir("rootfs"), + fs.WithFile("config.json", "invalid-config-contents")) + defer tmpDir.Remove() + + cli := test.NewFakeCli(&fakeClient{}) + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpDir.Path()}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "invalid") +} + +func TestCreateErrorFromDaemon(t *testing.T) { + tmpDir := fs.NewDir(t, "plugin-create-test", + fs.WithDir("rootfs"), + fs.WithFile("config.json", `{ "Name": "plugin-foo" }`)) + defer tmpDir.Remove() + + cli := test.NewFakeCli(&fakeClient{ + pluginCreateFunc: func(createContext io.Reader, createOptions types.PluginCreateOptions) error { + return fmt.Errorf("Error creating plugin") + }, + }) + + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpDir.Path()}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "Error creating plugin") +} + +func TestCreatePlugin(t *testing.T) { + tmpDir := fs.NewDir(t, "plugin-create-test", + fs.WithDir("rootfs"), + fs.WithFile("config.json", `{ "Name": "plugin-foo" }`)) + defer tmpDir.Remove() + + cli := test.NewFakeCli(&fakeClient{ + pluginCreateFunc: func(createContext io.Reader, createOptions types.PluginCreateOptions) error { + return nil + }, + }) + + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpDir.Path()}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} diff --git a/cli/cli/command/plugin/disable.go b/cli/cli/command/plugin/disable.go new file mode 100644 index 00000000..014d86b2 --- /dev/null +++ b/cli/cli/command/plugin/disable.go @@ -0,0 +1,36 @@ +package plugin + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +func newDisableCommand(dockerCli command.Cli) *cobra.Command { + var force bool + + cmd := &cobra.Command{ + Use: "disable [OPTIONS] PLUGIN", + Short: "Disable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDisable(dockerCli, args[0], force) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Force the disable of an active plugin") + return cmd +} + +func runDisable(dockerCli command.Cli, name string, force bool) error { + if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/cli/cli/command/plugin/disable_test.go b/cli/cli/command/plugin/disable_test.go new file mode 100644 index 00000000..c9292965 --- /dev/null +++ b/cli/cli/command/plugin/disable_test.go @@ -0,0 +1,58 @@ +package plugin + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPluginDisableErrors(t *testing.T) { + testCases := []struct { + args []string + expectedError string + pluginDisableFunc func(name string, disableOptions types.PluginDisableOptions) error + }{ + { + args: []string{}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"plugin-foo"}, 
+ expectedError: "Error disabling plugin", + pluginDisableFunc: func(name string, disableOptions types.PluginDisableOptions) error { + return fmt.Errorf("Error disabling plugin") + }, + }, + } + + for _, tc := range testCases { + cmd := newDisableCommand( + test.NewFakeCli(&fakeClient{ + pluginDisableFunc: tc.pluginDisableFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestPluginDisable(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + pluginDisableFunc: func(name string, disableOptions types.PluginDisableOptions) error { + return nil + }, + }) + cmd := newDisableCommand(cli) + cmd.SetArgs([]string{"plugin-foo"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} diff --git a/cli/cli/command/plugin/enable.go b/cli/cli/command/plugin/enable.go new file mode 100644 index 00000000..19df1e7b --- /dev/null +++ b/cli/cli/command/plugin/enable.go @@ -0,0 +1,48 @@ +package plugin + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type enableOpts struct { + timeout int + name string +} + +func newEnableCommand(dockerCli command.Cli) *cobra.Command { + var opts enableOpts + + cmd := &cobra.Command{ + Use: "enable [OPTIONS] PLUGIN", + Short: "Enable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runEnable(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVar(&opts.timeout, "timeout", 30, "HTTP client timeout (in seconds)") + return cmd +} + +func runEnable(dockerCli command.Cli, opts *enableOpts) error { + name := opts.name + if opts.timeout < 0 { + return errors.Errorf("negative timeout %d is invalid", opts.timeout) + } + + if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/cli/cli/command/plugin/enable_test.go b/cli/cli/command/plugin/enable_test.go new file mode 100644 index 00000000..933ff5de --- /dev/null +++ b/cli/cli/command/plugin/enable_test.go @@ -0,0 +1,70 @@ +package plugin + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPluginEnableErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + pluginEnableFunc func(name string, options types.PluginEnableOptions) error + expectedError string + }{ + { + args: []string{}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"too-many", "arguments"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"plugin-foo"}, + pluginEnableFunc: func(name string, options types.PluginEnableOptions) error { + return fmt.Errorf("failed to enable plugin") + }, + expectedError: "failed to enable plugin", + }, + { + args: []string{"plugin-foo"}, + flags: map[string]string{ + "timeout": "-1", + }, + expectedError: "negative timeout -1 is invalid", + }, + } + + for _, tc := range testCases { + cmd := newEnableCommand( + test.NewFakeCli(&fakeClient{ + pluginEnableFunc: tc.pluginEnableFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + 
cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestPluginEnable(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + pluginEnableFunc: func(name string, options types.PluginEnableOptions) error { + return nil + }, + }) + + cmd := newEnableCommand(cli) + cmd.SetArgs([]string{"plugin-foo"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} diff --git a/cli/cli/command/plugin/inspect.go b/cli/cli/command/plugin/inspect.go new file mode 100644 index 00000000..9ce49eb9 --- /dev/null +++ b/cli/cli/command/plugin/inspect.go @@ -0,0 +1,43 @@ +package plugin + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + pluginNames []string + format string +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Display detailed information on one or more plugins", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.pluginNames = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + getRef := func(ref string) (interface{}, []byte, error) { + return client.PluginInspectWithRaw(ctx, ref) + } + + return inspect.Inspect(dockerCli.Out(), opts.pluginNames, opts.format, getRef) +} diff --git a/cli/cli/command/plugin/install.go b/cli/cli/command/plugin/install.go new file mode 100644 index 00000000..44e007f6 --- /dev/null +++ b/cli/cli/command/plugin/install.go @@ -0,0 +1,174 @@ +package plugin + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type pluginOptions struct { + remote string + localName string + grantPerms bool + disable bool + args []string + skipRemoteCheck bool + untrusted bool +} + +func loadPullFlags(dockerCli command.Cli, opts *pluginOptions, flags *pflag.FlagSet) { + flags.BoolVar(&opts.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin") + command.AddTrustVerificationFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) +} + +func newInstallCommand(dockerCli command.Cli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "install [OPTIONS] PLUGIN [KEY=VALUE...]", + Short: "Install a plugin", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.remote = args[0] + if len(args) > 1 { + options.args = args[1:] + } + return runInstall(dockerCli, options) + }, + } + + flags := cmd.Flags() + loadPullFlags(dockerCli, &options, flags) + flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install") + flags.StringVar(&options.localName, "alias", "", "Local name for plugin") + return cmd +} + +type 
pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (*registry.RepositoryInfo, error) { + repoInfo, err := s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return repoInfo, err +} + +func newRegistryService() (registry.Service, error) { + svc, err := registry.NewService(registry.ServiceOptions{V2Only: true}) + if err != nil { + return nil, err + } + return pluginRegistryService{Service: svc}, nil +} + +func buildPullConfig(ctx context.Context, dockerCli command.Cli, opts pluginOptions, cmdName string) (types.PluginInstallOptions, error) { + // Names with both tag and digest will be treated by the daemon + // as a pull by digest with a local name for the tag + // (if no local name is provided). + ref, err := reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return types.PluginInstallOptions{}, err + } + + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return types.PluginInstallOptions{}, err + } + + remote := ref.String() + + _, isCanonical := ref.(reference.Canonical) + if !opts.untrusted && !isCanonical { + ref = reference.TagNameOnly(ref) + nt, ok := ref.(reference.NamedTagged) + if !ok { + return types.PluginInstallOptions{}, errors.Errorf("invalid name: %s", ref.String()) + } + + ctx := context.Background() + svc, err := newRegistryService() + if err != nil { + return types.PluginInstallOptions{}, err + } + trusted, err := image.TrustedReference(ctx, dockerCli, nt, svc) + if err != nil { + return types.PluginInstallOptions{}, err + } + remote = reference.FamiliarString(trusted) + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return types.PluginInstallOptions{}, err + } + registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, cmdName) + + options := types.PluginInstallOptions{ + RegistryAuth: encodedAuth, + RemoteRef: remote, + Disabled: opts.disable, + AcceptAllPermissions: opts.grantPerms, + AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.remote), + PrivilegeFunc: registryAuthFunc, + Args: opts.args, + } + return options, nil +} + +func runInstall(dockerCli command.Cli, opts pluginOptions) error { + var localName string + if opts.localName != "" { + aref, err := reference.ParseNormalizedNamed(opts.localName) + if err != nil { + return err + } + if _, ok := aref.(reference.Canonical); ok { + return errors.Errorf("invalid name: %s", opts.localName) + } + localName = reference.FamiliarString(reference.TagNameOnly(aref)) + } + + ctx := context.Background() + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin install") + if err != nil { + return err + } + responseBody, err := dockerCli.Client().PluginInstall(ctx, localName, options) + if err != nil { + if strings.Contains(err.Error(), "(image) when fetching") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.remote) // todo: return proper values from the API for this result + return nil +} + +func acceptPrivileges(dockerCli command.Cli, name string) func(privileges types.PluginPrivileges) (bool, error) { + return func(privileges types.PluginPrivileges) (bool, error) { + 
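+		// Show the privileges the plugin asks for and let the user approve or reject the install interactively.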
fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name) + for _, privilege := range privileges { + fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value) + } + return command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), "Do you grant the above permissions?"), nil + } +} diff --git a/cli/cli/command/plugin/list.go b/cli/cli/command/plugin/list.go new file mode 100644 index 00000000..efbb0ffe --- /dev/null +++ b/cli/cli/command/plugin/list.go @@ -0,0 +1,64 @@ +package plugin + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/spf13/cobra" +) + +type listOptions struct { + quiet bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Short: "List plugins", + Aliases: []string{"list"}, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display plugin IDs") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.StringVar(&options.format, "format", "", "Pretty-print plugins using a Go template") + flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 'enabled=true')") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + plugins, err := dockerCli.Client().PluginList(context.Background(), options.filter.Value()) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().PluginsFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().PluginsFormat + } else { + format = formatter.TableFormatKey + } + } + + pluginsCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewPluginFormat(format, options.quiet), + Trunc: !options.noTrunc, + } + return formatter.PluginWrite(pluginsCtx, plugins) +} diff --git a/cli/cli/command/plugin/push.go b/cli/cli/command/plugin/push.go new file mode 100644 index 00000000..7df5a89d --- /dev/null +++ b/cli/cli/command/plugin/push.go @@ -0,0 +1,76 @@ +package plugin + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type pushOptions struct { + name string + untrusted bool +} + +func newPushCommand(dockerCli command.Cli) *cobra.Command { + var opts pushOptions + cmd := &cobra.Command{ + Use: "push [OPTIONS] PLUGIN[:TAG]", + Short: "Push a plugin to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runPush(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + command.AddTrustSigningFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + + return cmd +} + +func runPush(dockerCli command.Cli, opts pushOptions) error { + named, err := reference.ParseNormalizedNamed(opts.name) + if err != nil { + return err + } + if _, ok := named.(reference.Canonical); ok { + return errors.Errorf("invalid name: %s", opts.name) + } + + named = 
reference.TagNameOnly(named) + + ctx := context.Background() + + repoInfo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return err + } + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + responseBody, err := dockerCli.Client().PluginPush(ctx, reference.FamiliarString(named), encodedAuth) + if err != nil { + return err + } + defer responseBody.Close() + + if !opts.untrusted { + repoInfo.Class = "plugin" + return image.PushTrustedReference(dockerCli, repoInfo, named, authConfig, responseBody) + } + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/cli/cli/command/plugin/remove.go b/cli/cli/command/plugin/remove.go new file mode 100644 index 00000000..a2092bd7 --- /dev/null +++ b/cli/cli/command/plugin/remove.go @@ -0,0 +1,54 @@ +package plugin + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type rmOptions struct { + force bool + + plugins []string +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Remove one or more plugins", + Aliases: []string{"remove"}, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.plugins = args + return runRemove(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of an active plugin") + return cmd +} + +func runRemove(dockerCli command.Cli, opts *rmOptions) error { + ctx := context.Background() + + var errs cli.Errors + for _, name := range opts.plugins { + if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil { + errs = append(errs, err) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + // Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value. 
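+	// (A nil cli.Errors slice stored in the error interface compares non-nil, so the command would report failure even when every plugin was removed.)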
+ if errs != nil { + return errs + } + return nil +} diff --git a/cli/cli/command/plugin/remove_test.go b/cli/cli/command/plugin/remove_test.go new file mode 100644 index 00000000..4cfec433 --- /dev/null +++ b/cli/cli/command/plugin/remove_test.go @@ -0,0 +1,71 @@ +package plugin + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestRemoveErrors(t *testing.T) { + + testCases := []struct { + args []string + pluginRemoveFunc func(name string, options types.PluginRemoveOptions) error + expectedError string + }{ + { + args: []string{}, + expectedError: "requires at least 1 argument", + }, + { + args: []string{"plugin-foo"}, + pluginRemoveFunc: func(name string, options types.PluginRemoveOptions) error { + return fmt.Errorf("Error removing plugin") + }, + expectedError: "Error removing plugin", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + pluginRemoveFunc: tc.pluginRemoveFunc, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestRemove(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + pluginRemoveFunc: func(name string, options types.PluginRemoveOptions) error { + return nil + }, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs([]string{"plugin-foo"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} + +func TestRemoveWithForceOption(t *testing.T) { + force := false + cli := test.NewFakeCli(&fakeClient{ + pluginRemoveFunc: func(name string, options types.PluginRemoveOptions) error { + force = options.Force + return nil + }, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs([]string{"plugin-foo"}) + cmd.Flags().Set("force", "true") + assert.NilError(t, cmd.Execute()) + assert.Check(t, force) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} diff --git a/cli/cli/command/plugin/set.go b/cli/cli/command/plugin/set.go new file mode 100644 index 00000000..724fdebf --- /dev/null +++ b/cli/cli/command/plugin/set.go @@ -0,0 +1,22 @@ +package plugin + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +func newSetCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "set PLUGIN KEY=VALUE [KEY=VALUE...]", + Short: "Change settings for a plugin", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:]) + }, + } + + return cmd +} diff --git a/cli/cli/command/plugin/upgrade.go b/cli/cli/command/plugin/upgrade.go new file mode 100644 index 00000000..f5afb509 --- /dev/null +++ b/cli/cli/command/plugin/upgrade.go @@ -0,0 +1,90 @@ +package plugin + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +func newUpgradeCommand(dockerCli command.Cli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "upgrade [OPTIONS] PLUGIN [REMOTE]", + Short: "Upgrade an existing plugin", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.localName = args[0] + if 
len(args) == 2 { + options.remote = args[1] + } + return runUpgrade(dockerCli, options) + }, + Annotations: map[string]string{"version": "1.26"}, + } + + flags := cmd.Flags() + loadPullFlags(dockerCli, &options, flags) + flags.BoolVar(&options.skipRemoteCheck, "skip-remote-check", false, "Do not check if specified remote plugin matches existing plugin image") + return cmd +} + +func runUpgrade(dockerCli command.Cli, opts pluginOptions) error { + ctx := context.Background() + p, _, err := dockerCli.Client().PluginInspectWithRaw(ctx, opts.localName) + if err != nil { + return errors.Errorf("error reading plugin data: %v", err) + } + + if p.Enabled { + return errors.Errorf("the plugin must be disabled before upgrading") + } + + opts.localName = p.Name + if opts.remote == "" { + opts.remote = p.PluginReference + } + remote, err := reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return errors.Wrap(err, "error parsing remote upgrade image reference") + } + remote = reference.TagNameOnly(remote) + + old, err := reference.ParseNormalizedNamed(p.PluginReference) + if err != nil { + return errors.Wrap(err, "error parsing current image reference") + } + old = reference.TagNameOnly(old) + + fmt.Fprintf(dockerCli.Out(), "Upgrading plugin %s from %s to %s\n", p.Name, reference.FamiliarString(old), reference.FamiliarString(remote)) + if !opts.skipRemoteCheck && remote.String() != old.String() { + if !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), "Plugin images do not match, are you sure?") { + return errors.New("canceling upgrade request") + } + } + + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin upgrade") + if err != nil { + return err + } + + responseBody, err := dockerCli.Client().PluginUpgrade(ctx, opts.localName, options) + if err != nil { + if strings.Contains(err.Error(), "target is image") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Upgraded plugin %s to %s\n", opts.localName, opts.remote) // todo: return proper values from the API for this result + return nil +} diff --git a/cli/cli/command/registry.go b/cli/cli/command/registry.go new file mode 100644 index 00000000..084d2b60 --- /dev/null +++ b/cli/cli/command/registry.go @@ -0,0 +1,199 @@ +package command + +import ( + "bufio" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +// ElectAuthServer returns the default registry to use (by asking the daemon) +func ElectAuthServer(ctx context.Context, cli Cli) string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.Client().Info(ctx); err != nil { + fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). 
Using system default: %s\n", err, serverAddress) + } else if info.IndexServerAddress == "" { + fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info +// for the given command. +func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) + authConfig, err := GetDefaultAuthConfig(cli, true, indexServer, isDefaultRegistry) + if err != nil { + fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err) + } + err = ConfigureAuth(cli, "", "", authConfig, isDefaultRegistry) + if err != nil { + return "", err + } + return EncodeAuthToBase64(*authConfig) + } +} + +// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = ElectAuthServer(ctx, cli) + } + + a, _ := cli.ConfigFile().GetAuthConfig(configKey) + return a +} + +// GetDefaultAuthConfig gets the default auth config given a serverAddress +// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it +func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (*types.AuthConfig, error) { + if !isDefaultRegistry { + serverAddress = registry.ConvertToHostname(serverAddress) + } + var authconfig types.AuthConfig + var err error + if checkCredStore { + authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress) + } else { + authconfig = types.AuthConfig{} + } + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + return &authconfig, err +} + +// ConfigureAuth handles prompting of user's username and password if needed +func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *types.AuthConfig, isDefaultRegistry bool) error { + // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.SetIn(NewInStream(os.Stdin)) + } + + // Some links documenting this: + // - https://code.google.com/archive/p/mintty/issues/56 + // - https://github.com/docker/docker/issues/15272 + // - https://mintty.github.io/ (compatibility) + // Linux will hit this if you attempt `cat | docker login`, and Windows + // will hit this if you attempt docker login from mintty where stdin + // is a pipe, not a character based console. 
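+	// Scripts should pass credentials non-interactively (for example with --password-stdin) instead of piping them into this prompt.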
+ if flPassword == "" && !cli.In().IsTerminal() { + return errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a default registry (docker hub), then display the following message. + fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + promptWithDefault(cli.Out(), "Username", authconfig.Username) + flUser = readInput(cli.In(), cli.Out()) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + if flUser == "" { + return errors.Errorf("Error: Non-null Username Required") + } + if flPassword == "" { + oldState, err := term.SaveState(cli.In().FD()) + if err != nil { + return err + } + fmt.Fprintf(cli.Out(), "Password: ") + term.DisableEcho(cli.In().FD(), oldState) + + flPassword = readInput(cli.In(), cli.Out()) + fmt.Fprint(cli.Out(), "\n") + + term.RestoreTerminal(cli.In().FD(), oldState) + if flPassword == "" { + return errors.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + + return nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +func promptWithDefault(out io.Writer, prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(out, "%s: ", prompt) + } else { + fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) + } +} + +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image +func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) { + // Retrieve encoded auth token from the image reference + authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) + if err != nil { + return "", err + } + encodedAuth, err := EncodeAuthToBase64(authConfig) + if err != nil { + return "", err + } + return encodedAuth, nil +} + +// resolveAuthConfigFromImage retrieves that AuthConfig using the image string +func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) { + registryRef, err := reference.ParseNormalizedNamed(image) + if err != nil { + return types.AuthConfig{}, err + } + repoInfo, err := registry.ParseRepositoryInfo(registryRef) + if err != nil { + return types.AuthConfig{}, err + } + return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil +} diff --git a/cli/cli/command/registry/login.go b/cli/cli/command/registry/login.go new file mode 100644 index 00000000..7d5328d1 --- /dev/null +++ b/cli/cli/command/registry/login.go @@ -0,0 +1,169 @@ +package registry + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/client" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +const unencryptedWarning = `WARNING! Your password will be stored unencrypted in %s. +Configure a credential helper to remove this warning. 
See +https://docs.docker.com/engine/reference/commandline/login/#credentials-store +` + +type loginOptions struct { + serverAddress string + user string + password string + passwordStdin bool +} + +// NewLoginCommand creates a new `docker login` command +func NewLoginCommand(dockerCli command.Cli) *cobra.Command { + var opts loginOptions + + cmd := &cobra.Command{ + Use: "login [OPTIONS] [SERVER]", + Short: "Log in to a Docker registry", + Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.serverAddress = args[0] + } + return runLogin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.user, "username", "u", "", "Username") + flags.StringVarP(&opts.password, "password", "p", "", "Password") + flags.BoolVarP(&opts.passwordStdin, "password-stdin", "", false, "Take the password from stdin") + + return cmd +} + +// displayUnencryptedWarning warns the user when using an insecure credential storage. +// After a deprecation period, user will get prompted if stdin and stderr are a terminal. +// Otherwise, we'll assume they want it (sadly), because people may have been scripting +// insecure logins and we don't want to break them. Maybe they'll see the warning in their +// logs and fix things. +func displayUnencryptedWarning(dockerCli command.Streams, filename string) error { + _, err := fmt.Fprintln(dockerCli.Err(), fmt.Sprintf(unencryptedWarning, filename)) + + return err +} + +type isFileStore interface { + IsFileStore() bool + GetFilename() string +} + +func verifyloginOptions(dockerCli command.Cli, opts *loginOptions) error { + if opts.password != "" { + fmt.Fprintln(dockerCli.Err(), "WARNING! Using --password via the CLI is insecure. 
Use --password-stdin.") + if opts.passwordStdin { + return errors.New("--password and --password-stdin are mutually exclusive") + } + } + + if opts.passwordStdin { + if opts.user == "" { + return errors.New("Must provide --username with --password-stdin") + } + + contents, err := ioutil.ReadAll(dockerCli.In()) + if err != nil { + return err + } + + opts.password = strings.TrimSuffix(string(contents), "\n") + opts.password = strings.TrimSuffix(opts.password, "\r") + } + return nil +} + +func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocyclo + ctx := context.Background() + clnt := dockerCli.Client() + if err := verifyloginOptions(dockerCli, &opts); err != nil { + return err + } + var ( + serverAddress string + authServer = command.ElectAuthServer(ctx, dockerCli) + ) + if opts.serverAddress != "" && opts.serverAddress != registry.DefaultNamespace { + serverAddress = opts.serverAddress + } else { + serverAddress = authServer + } + + var err error + var authConfig *types.AuthConfig + var response registrytypes.AuthenticateOKBody + isDefaultRegistry := serverAddress == authServer + authConfig, err = command.GetDefaultAuthConfig(dockerCli, opts.user == "" && opts.password == "", serverAddress, isDefaultRegistry) + if err == nil && authConfig.Username != "" && authConfig.Password != "" { + response, err = loginWithCredStoreCreds(ctx, dockerCli, authConfig) + } + if err != nil || authConfig.Username == "" || authConfig.Password == "" { + err = command.ConfigureAuth(dockerCli, opts.user, opts.password, authConfig, isDefaultRegistry) + if err != nil { + return err + } + + response, err = clnt.RegistryLogin(ctx, *authConfig) + if err != nil { + return err + } + } + if response.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = response.IdentityToken + } + + creds := dockerCli.ConfigFile().GetCredentialsStore(serverAddress) + + store, isDefault := creds.(isFileStore) + if isDefault { + err = displayUnencryptedWarning(dockerCli, store.GetFilename()) + if err != nil { + return err + } + } + + if err := creds.Store(*authConfig); err != nil { + return errors.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(dockerCli.Out(), response.Status) + } + return nil +} + +func loginWithCredStoreCreds(ctx context.Context, dockerCli command.Cli, authConfig *types.AuthConfig) (registrytypes.AuthenticateOKBody, error) { + fmt.Fprintf(dockerCli.Out(), "Authenticating with existing credentials...\n") + cliClient := dockerCli.Client() + response, err := cliClient.RegistryLogin(ctx, *authConfig) + if err != nil { + if client.IsErrUnauthorized(err) { + fmt.Fprintf(dockerCli.Err(), "Stored credentials invalid or expired\n") + } else { + fmt.Fprintf(dockerCli.Err(), "Login did not succeed, error: %s\n", err) + } + } + return response, err +} diff --git a/cli/cli/command/registry/login_test.go b/cli/cli/command/registry/login_test.go new file mode 100644 index 00000000..7e774941 --- /dev/null +++ b/cli/cli/command/registry/login_test.go @@ -0,0 +1,157 @@ +package registry + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/client" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +const userErr = "userunknownError" +const testAuthErrMsg = "UNKNOWN_ERR" + +var testAuthErrors = map[string]error{ + userErr: fmt.Errorf(testAuthErrMsg), 
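+	// The entry above makes fakeClient.RegistryLogin (below) return
+	// testAuthErrMsg whenever the userErr username is used, so the tests can
+	// exercise the "Login did not succeed" failure path without a real registry.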
+} + +var expiredPassword = "I_M_EXPIRED" + +type fakeClient struct { + client.Client +} + +// nolint: unparam +func (c fakeClient) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registrytypes.AuthenticateOKBody, error) { + if auth.Password == expiredPassword { + return registrytypes.AuthenticateOKBody{}, fmt.Errorf("Invalid Username or Password") + } + err := testAuthErrors[auth.Username] + return registrytypes.AuthenticateOKBody{}, err +} + +func TestLoginWithCredStoreCreds(t *testing.T) { + testCases := []struct { + inputAuthConfig types.AuthConfig + expectedMsg string + expectedErr string + }{ + { + inputAuthConfig: types.AuthConfig{}, + expectedMsg: "Authenticating with existing credentials...\n", + }, + { + inputAuthConfig: types.AuthConfig{ + Username: userErr, + }, + expectedMsg: "Authenticating with existing credentials...\n", + expectedErr: fmt.Sprintf("Login did not succeed, error: %s\n", testAuthErrMsg), + }, + // can't easily test the 401 case because client.IsErrUnauthorized(err) involving + // creating an error of a private type + } + ctx := context.Background() + for _, tc := range testCases { + cli := (*test.FakeCli)(test.NewFakeCli(&fakeClient{})) + errBuf := new(bytes.Buffer) + cli.SetErr(errBuf) + loginWithCredStoreCreds(ctx, cli, &tc.inputAuthConfig) + outputString := cli.OutBuffer().String() + assert.Check(t, is.Equal(tc.expectedMsg, outputString)) + errorString := errBuf.String() + assert.Check(t, is.Equal(tc.expectedErr, errorString)) + } +} + +func TestRunLogin(t *testing.T) { + const storedServerAddress = "reg1" + const validUsername = "u1" + const validPassword = "p1" + const validPassword2 = "p2" + + validAuthConfig := types.AuthConfig{ + ServerAddress: storedServerAddress, + Username: validUsername, + Password: validPassword, + } + expiredAuthConfig := types.AuthConfig{ + ServerAddress: storedServerAddress, + Username: validUsername, + Password: expiredPassword, + } + testCases := []struct { + inputLoginOption loginOptions + inputStoredCred *types.AuthConfig + expectedErr string + expectedSavedCred types.AuthConfig + }{ + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + }, + inputStoredCred: &validAuthConfig, + expectedErr: "", + expectedSavedCred: validAuthConfig, + }, + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + }, + inputStoredCred: &expiredAuthConfig, + expectedErr: "Error: Cannot perform an interactive login from a non TTY device", + }, + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + user: validUsername, + password: validPassword2, + }, + inputStoredCred: &validAuthConfig, + expectedErr: "", + expectedSavedCred: types.AuthConfig{ + ServerAddress: storedServerAddress, + Username: validUsername, + Password: validPassword2, + }, + }, + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + user: userErr, + password: validPassword, + }, + inputStoredCred: &validAuthConfig, + expectedErr: testAuthErrMsg, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + tmpFile := fs.NewFile(t, "test-run-login") + defer tmpFile.Remove() + cli := test.NewFakeCli(&fakeClient{}) + configfile := cli.ConfigFile() + configfile.Filename = tmpFile.Path() + + if tc.inputStoredCred != nil { + cred := *tc.inputStoredCred + configfile.GetCredentialsStore(cred.ServerAddress).Store(cred) + } + loginErr := runLogin(cli, tc.inputLoginOption) + if tc.expectedErr != "" { + assert.Error(t, loginErr, tc.expectedErr) + return + } + 
assert.NilError(t, loginErr) + savedCred, credStoreErr := configfile.GetCredentialsStore(tc.inputStoredCred.ServerAddress).Get(tc.inputStoredCred.ServerAddress) + assert.Check(t, credStoreErr) + assert.DeepEqual(t, tc.expectedSavedCred, savedCred) + }) + } +} diff --git a/cli/cli/command/registry/logout.go b/cli/cli/command/registry/logout.go new file mode 100644 index 00000000..ac84139f --- /dev/null +++ b/cli/cli/command/registry/logout.go @@ -0,0 +1,76 @@ +package registry + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewLogoutCommand creates a new `docker logout` command +func NewLogoutCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "logout [SERVER]", + Short: "Log out from a Docker registry", + Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var serverAddress string + if len(args) > 0 { + serverAddress = args[0] + } + return runLogout(dockerCli, serverAddress) + }, + } + + return cmd +} + +func runLogout(dockerCli command.Cli, serverAddress string) error { + ctx := context.Background() + var isDefaultRegistry bool + + if serverAddress == "" { + serverAddress = command.ElectAuthServer(ctx, dockerCli) + isDefaultRegistry = true + } + + var ( + loggedIn bool + regsToLogout []string + hostnameAddress = serverAddress + regsToTry = []string{serverAddress} + ) + if !isDefaultRegistry { + hostnameAddress = registry.ConvertToHostname(serverAddress) + // the tries below are kept for backward compatibility where a user could have + // saved the registry in one of the following format. 
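+		// For example (host name purely illustrative): a registry given as
+		// "https://registry.example.com" may have been stored by older clients
+		// under "registry.example.com", "http://registry.example.com" or
+		// "https://registry.example.com", so all of those keys are tried in
+		// addition to the address exactly as supplied.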
+ regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress) + } + + // check if we're logged in based on the records in the config file + // which means it couldn't have user/pass cause they may be in the creds store + for _, s := range regsToTry { + if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok { + loggedIn = true + regsToLogout = append(regsToLogout, s) + } + } + + if !loggedIn { + fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress) + return nil + } + + fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress) + for _, r := range regsToLogout { + if err := dockerCli.ConfigFile().GetCredentialsStore(r).Erase(r); err != nil { + fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) + } + } + + return nil +} diff --git a/cli/cli/command/registry/search.go b/cli/cli/command/registry/search.go new file mode 100644 index 00000000..1af6fcd5 --- /dev/null +++ b/cli/cli/command/registry/search.go @@ -0,0 +1,104 @@ +package registry + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type searchOptions struct { + format string + term string + noTrunc bool + limit int + filter opts.FilterOpt + + // Deprecated + stars uint + automated bool +} + +// NewSearchCommand creates a new `docker search` command +func NewSearchCommand(dockerCli command.Cli) *cobra.Command { + options := searchOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "search [OPTIONS] TERM", + Short: "Search the Docker Hub for images", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.term = args[0] + return runSearch(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + flags.IntVar(&options.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") + flags.StringVar(&options.format, "format", "", "Pretty-print search using a Go template") + + flags.BoolVar(&options.automated, "automated", false, "Only show automated builds") + flags.UintVarP(&options.stars, "stars", "s", 0, "Only displays with at least x stars") + + flags.MarkDeprecated("automated", "use --filter=is-automated=true instead") + flags.MarkDeprecated("stars", "use --filter=stars=3 instead") + + return cmd +} + +func runSearch(dockerCli command.Cli, options searchOptions) error { + indexInfo, err := registry.ParseSearchIndexInfo(options.term) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, indexInfo) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, indexInfo, "search") + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + searchOptions := types.ImageSearchOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + Filters: options.filter.Value(), + Limit: options.limit, + } + + clnt := dockerCli.Client() + + unorderedResults, err := clnt.ImageSearch(ctx, options.term, searchOptions) + if err != nil { + return err + } + + results := 
searchResultsByStars(unorderedResults) + sort.Sort(results) + searchCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewSearchFormat(options.format), + Trunc: !options.noTrunc, + } + return formatter.SearchWrite(searchCtx, results, options.automated, int(options.stars)) +} + +// searchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/cli/cli/command/registry_test.go b/cli/cli/command/registry_test.go new file mode 100644 index 00000000..8c9f5835 --- /dev/null +++ b/cli/cli/command/registry_test.go @@ -0,0 +1,147 @@ +package command_test + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + + // Prevents a circular import with "github.com/docker/cli/internal/test" + + . "github.com/docker/cli/cli/command" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + infoFunc func() (types.Info, error) +} + +var testAuthConfigs = []types.AuthConfig{ + { + ServerAddress: "https://index.docker.io/v1/", + Username: "u0", + Password: "p0", + }, + { + ServerAddress: "server1.io", + Username: "u1", + Password: "p1", + }, +} + +func (cli *fakeClient) Info(_ context.Context) (types.Info, error) { + if cli.infoFunc != nil { + return cli.infoFunc() + } + return types.Info{}, nil +} + +func TestElectAuthServer(t *testing.T) { + testCases := []struct { + expectedAuthServer string + expectedWarning string + infoFunc func() (types.Info, error) + }{ + { + expectedAuthServer: "https://index.docker.io/v1/", + expectedWarning: "", + infoFunc: func() (types.Info, error) { + return types.Info{IndexServerAddress: "https://index.docker.io/v1/"}, nil + }, + }, + { + expectedAuthServer: "https://index.docker.io/v1/", + expectedWarning: "Empty registry endpoint from daemon", + infoFunc: func() (types.Info, error) { + return types.Info{IndexServerAddress: ""}, nil + }, + }, + { + expectedAuthServer: "https://foo.bar", + expectedWarning: "", + infoFunc: func() (types.Info, error) { + return types.Info{IndexServerAddress: "https://foo.bar"}, nil + }, + }, + { + expectedAuthServer: "https://index.docker.io/v1/", + expectedWarning: "failed to get default registry endpoint from daemon", + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error getting info") + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{infoFunc: tc.infoFunc}) + server := ElectAuthServer(context.Background(), cli) + assert.Check(t, is.Equal(tc.expectedAuthServer, server)) + actual := cli.ErrBuffer().String() + if tc.expectedWarning == "" { + assert.Check(t, is.Len(actual, 0)) + } else { + assert.Check(t, is.Contains(actual, tc.expectedWarning)) + } + } +} + +func TestGetDefaultAuthConfig(t *testing.T) { + testCases := []struct { + checkCredStore bool + inputServerAddress string + expectedErr string + expectedAuthConfig types.AuthConfig + }{ + { + checkCredStore: false, + inputServerAddress: "", + expectedErr: "", + expectedAuthConfig: types.AuthConfig{ + ServerAddress: "", + Username: "", + Password: "", + }, + }, + { + checkCredStore: true, + inputServerAddress: 
testAuthConfigs[0].ServerAddress, + expectedErr: "", + expectedAuthConfig: testAuthConfigs[0], + }, + { + checkCredStore: true, + inputServerAddress: testAuthConfigs[1].ServerAddress, + expectedErr: "", + expectedAuthConfig: testAuthConfigs[1], + }, + { + checkCredStore: true, + inputServerAddress: fmt.Sprintf("https://%s", testAuthConfigs[1].ServerAddress), + expectedErr: "", + expectedAuthConfig: testAuthConfigs[1], + }, + } + cli := test.NewFakeCli(&fakeClient{}) + errBuf := new(bytes.Buffer) + cli.SetErr(errBuf) + for _, authconfig := range testAuthConfigs { + cli.ConfigFile().GetCredentialsStore(authconfig.ServerAddress).Store(authconfig) + } + for _, tc := range testCases { + serverAddress := tc.inputServerAddress + authconfig, err := GetDefaultAuthConfig(cli, tc.checkCredStore, serverAddress, serverAddress == "https://index.docker.io/v1/") + if tc.expectedErr != "" { + assert.Check(t, err != nil) + assert.Check(t, is.Equal(tc.expectedErr, err.Error())) + } else { + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, *authconfig)) + } + } +} diff --git a/cli/cli/command/secret/client_test.go b/cli/cli/command/secret/client_test.go new file mode 100644 index 00000000..ea672fa4 --- /dev/null +++ b/cli/cli/command/secret/client_test.go @@ -0,0 +1,45 @@ +package secret + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + secretCreateFunc func(swarm.SecretSpec) (types.SecretCreateResponse, error) + secretInspectFunc func(string) (swarm.Secret, []byte, error) + secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error) + secretRemoveFunc func(string) error +} + +func (c *fakeClient) SecretCreate(ctx context.Context, spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if c.secretCreateFunc != nil { + return c.secretCreateFunc(spec) + } + return types.SecretCreateResponse{}, nil +} + +func (c *fakeClient) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if c.secretInspectFunc != nil { + return c.secretInspectFunc(id) + } + return swarm.Secret{}, nil, nil +} + +func (c *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if c.secretListFunc != nil { + return c.secretListFunc(options) + } + return []swarm.Secret{}, nil +} + +func (c *fakeClient) SecretRemove(ctx context.Context, name string) error { + if c.secretRemoveFunc != nil { + return c.secretRemoveFunc(name) + } + return nil +} diff --git a/cli/cli/command/secret/cmd.go b/cli/cli/command/secret/cmd.go new file mode 100644 index 00000000..a29d2def --- /dev/null +++ b/cli/cli/command/secret/cmd.go @@ -0,0 +1,29 @@ +package secret + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewSecretCommand returns a cobra command for `secret` subcommands +func NewSecretCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "secret", + Short: "Manage Docker secrets", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.25", + "swarm": "", + }, + } + cmd.AddCommand( + newSecretListCommand(dockerCli), + newSecretCreateCommand(dockerCli), + newSecretInspectCommand(dockerCli), + newSecretRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/secret/create.go b/cli/cli/command/secret/create.go new file mode 
100644 index 00000000..1739fefa --- /dev/null +++ b/cli/cli/command/secret/create.go @@ -0,0 +1,109 @@ +package secret + +import ( + "context" + "fmt" + "io" + "io/ioutil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + templateDriver string + file string + labels opts.ListOpts +} + +func newSecretCreateCommand(dockerCli command.Cli) *cobra.Command { + options := createOptions{ + labels: opts.NewListOpts(opts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] SECRET [file|-]", + Short: "Create a secret from a file or STDIN as content", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.name = args[0] + if len(args) == 2 { + options.file = args[1] + } + return runSecretCreate(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.VarP(&options.labels, "label", "l", "Secret labels") + flags.StringVarP(&options.driver, "driver", "d", "", "Secret driver") + flags.SetAnnotation("driver", "version", []string{"1.31"}) + flags.StringVar(&options.templateDriver, "template-driver", "", "Template driver") + flags.SetAnnotation("driver", "version", []string{"1.37"}) + + return cmd +} + +func runSecretCreate(dockerCli command.Cli, options createOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if options.driver != "" && options.file != "" { + return errors.Errorf("When using secret driver secret data must be empty") + } + + secretData, err := readSecretData(dockerCli.In(), options.file) + if err != nil { + return errors.Errorf("Error reading content from %q: %v", options.file, err) + } + spec := swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: options.name, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + }, + Data: secretData, + } + if options.driver != "" { + spec.Driver = &swarm.Driver{ + Name: options.driver, + } + } + if options.templateDriver != "" { + spec.Templating = &swarm.Driver{ + Name: options.templateDriver, + } + } + r, err := client.SecretCreate(ctx, spec) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), r.ID) + return nil +} + +func readSecretData(in io.ReadCloser, file string) ([]byte, error) { + // Read secret value from external driver + if file == "" { + return nil, nil + } + if file != "-" { + var err error + in, err = system.OpenSequential(file) + if err != nil { + return nil, err + } + defer in.Close() + } + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, err + } + return data, nil +} diff --git a/cli/cli/command/secret/create_test.go b/cli/cli/command/secret/create_test.go new file mode 100644 index 00000000..eb9c0898 --- /dev/null +++ b/cli/cli/command/secret/create_test.go @@ -0,0 +1,169 @@ +package secret + +import ( + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +const secretDataFile = "secret-create-with-name.golden" + +func TestSecretCreateErrors(t *testing.T) { + testCases := []struct { + args []string + secretCreateFunc func(swarm.SecretSpec) (types.SecretCreateResponse, error) + expectedError string + }{ + {args: 
[]string{"too", "many", "arguments"}, + expectedError: "requires at least 1 and at most 2 arguments", + }, + {args: []string{"create", "--driver", "driver", "-"}, + expectedError: "secret data must be empty", + }, + { + args: []string{"name", filepath.Join("testdata", secretDataFile)}, + secretCreateFunc: func(secretSpec swarm.SecretSpec) (types.SecretCreateResponse, error) { + return types.SecretCreateResponse{}, errors.Errorf("error creating secret") + }, + expectedError: "error creating secret", + }, + } + for _, tc := range testCases { + cmd := newSecretCreateCommand( + test.NewFakeCli(&fakeClient{ + secretCreateFunc: tc.secretCreateFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSecretCreateWithName(t *testing.T) { + name := "foo" + data, err := ioutil.ReadFile(filepath.Join("testdata", secretDataFile)) + assert.NilError(t, err) + + expected := swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: make(map[string]string), + }, + Data: data, + } + + cli := test.NewFakeCli(&fakeClient{ + secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if !reflect.DeepEqual(spec, expected) { + return types.SecretCreateResponse{}, errors.Errorf("expected %+v, got %+v", expected, spec) + } + return types.SecretCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newSecretCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", secretDataFile)}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestSecretCreateWithDriver(t *testing.T) { + expectedDriver := &swarm.Driver{ + Name: "secret-driver", + } + name := "foo" + + cli := test.NewFakeCli(&fakeClient{ + secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if spec.Name != name { + return types.SecretCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + if spec.Driver.Name != expectedDriver.Name { + return types.SecretCreateResponse{}, errors.Errorf("expected driver %v, got %v", expectedDriver, spec.Labels) + } + + return types.SecretCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newSecretCreateCommand(cli) + cmd.SetArgs([]string{name}) + cmd.Flags().Set("driver", expectedDriver.Name) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestSecretCreateWithTemplatingDriver(t *testing.T) { + expectedDriver := &swarm.Driver{ + Name: "template-driver", + } + name := "foo" + + cli := test.NewFakeCli(&fakeClient{ + secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if spec.Name != name { + return types.SecretCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + if spec.Templating.Name != expectedDriver.Name { + return types.SecretCreateResponse{}, errors.Errorf("expected driver %v, got %v", expectedDriver, spec.Labels) + } + + return types.SecretCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newSecretCreateCommand(cli) + cmd.SetArgs([]string{name}) + cmd.Flags().Set("template-driver", expectedDriver.Name) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestSecretCreateWithLabels(t *testing.T) { + expectedLabels := map[string]string{ + "lbl1": 
"Label-foo", + "lbl2": "Label-bar", + } + name := "foo" + + cli := test.NewFakeCli(&fakeClient{ + secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if spec.Name != name { + return types.SecretCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + if !reflect.DeepEqual(spec.Labels, expectedLabels) { + return types.SecretCreateResponse{}, errors.Errorf("expected labels %v, got %v", expectedLabels, spec.Labels) + } + + return types.SecretCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newSecretCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", secretDataFile)}) + cmd.Flags().Set("label", "lbl1=Label-foo") + cmd.Flags().Set("label", "lbl2=Label-bar") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/secret/inspect.go b/cli/cli/command/secret/inspect.go new file mode 100644 index 00000000..1afcb521 --- /dev/null +++ b/cli/cli/command/secret/inspect.go @@ -0,0 +1,65 @@ +package secret + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + names []string + format string + pretty bool +} + +func newSecretInspectCommand(dockerCli command.Cli) *cobra.Command { + opts := inspectOptions{} + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SECRET [SECRET...]", + Short: "Display detailed information on one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runSecretInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + cmd.Flags().BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format") + return cmd +} + +func runSecretInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(id string) (interface{}, []byte, error) { + return client.SecretInspectWithRaw(ctx, id) + } + f := opts.format + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + secretCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewSecretFormat(f, false), + } + + if err := formatter.SecretInspectWrite(secretCtx, opts.names, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/cli/cli/command/secret/inspect_test.go b/cli/cli/command/secret/inspect_test.go new file mode 100644 index 00000000..67addaea --- /dev/null +++ b/cli/cli/command/secret/inspect_test.go @@ -0,0 +1,173 @@ +package secret + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSecretInspectErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + secretInspectFunc func(secretID string) (swarm.Secret, []byte, error) + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"foo"}, + secretInspectFunc: func(secretID string) (swarm.Secret, []byte, error) { + return swarm.Secret{}, nil, errors.Errorf("error while inspecting the secret") + }, + expectedError: "error while inspecting the secret", + }, + { + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template parsing error", + }, + { + args: []string{"foo", "bar"}, + secretInspectFunc: func(secretID string) (swarm.Secret, []byte, error) { + if secretID == "foo" { + return *Secret(SecretName("foo")), nil, nil + } + return swarm.Secret{}, nil, errors.Errorf("error while inspecting the secret") + }, + expectedError: "error while inspecting the secret", + }, + } + for _, tc := range testCases { + cmd := newSecretInspectCommand( + test.NewFakeCli(&fakeClient{ + secretInspectFunc: tc.secretInspectFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSecretInspectWithoutFormat(t *testing.T) { + testCases := []struct { + name string + args []string + secretInspectFunc func(secretID string) (swarm.Secret, []byte, error) + }{ + { + name: "single-secret", + args: []string{"foo"}, + secretInspectFunc: func(name string) (swarm.Secret, []byte, error) { + if name != "foo" { + return swarm.Secret{}, nil, errors.Errorf("Invalid name, expected %s, got %s", "foo", name) + } + return *Secret(SecretID("ID-foo"), SecretName("foo")), nil, nil + }, + }, + { + name: "multiple-secrets-with-labels", + args: []string{"foo", "bar"}, + secretInspectFunc: func(name string) (swarm.Secret, []byte, error) { + return *Secret(SecretID("ID-"+name), SecretName(name), SecretLabels(map[string]string{ + "label1": "label-foo", + })), nil, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + secretInspectFunc: tc.secretInspectFunc, + }) + cmd := newSecretInspectCommand(cli) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("secret-inspect-without-format.%s.golden", tc.name)) + } +} + +func TestSecretInspectWithFormat(t *testing.T) { + secretInspectFunc := func(name string) (swarm.Secret, []byte, error) { + return *Secret(SecretName("foo"), SecretLabels(map[string]string{ + "label1": "label-foo", + })), nil, nil + } + testCases := []struct { + name string + format string + args []string + secretInspectFunc func(name string) (swarm.Secret, []byte, error) + }{ + { + name: "simple-template", + format: "{{.Spec.Name}}", + args: []string{"foo"}, + secretInspectFunc: secretInspectFunc, + }, + { + name: "json-template", + format: "{{json .Spec.Labels}}", + args: []string{"foo"}, + secretInspectFunc: secretInspectFunc, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + secretInspectFunc: tc.secretInspectFunc, + }) + cmd := newSecretInspectCommand(cli) + cmd.SetArgs(tc.args) + cmd.Flags().Set("format", tc.format) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), 
fmt.Sprintf("secret-inspect-with-format.%s.golden", tc.name)) + } +} + +func TestSecretInspectPretty(t *testing.T) { + testCases := []struct { + name string + secretInspectFunc func(string) (swarm.Secret, []byte, error) + }{ + { + name: "simple", + secretInspectFunc: func(id string) (swarm.Secret, []byte, error) { + return *Secret( + SecretLabels(map[string]string{ + "lbl1": "value1", + }), + SecretID("secretID"), + SecretName("secretName"), + SecretDriver("driver"), + SecretCreatedAt(time.Time{}), + SecretUpdatedAt(time.Time{}), + ), []byte{}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + secretInspectFunc: tc.secretInspectFunc, + }) + cmd := newSecretInspectCommand(cli) + cmd.SetArgs([]string{"secretID"}) + cmd.Flags().Set("pretty", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("secret-inspect-pretty.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/secret/ls.go b/cli/cli/command/secret/ls.go new file mode 100644 index 00000000..a778137e --- /dev/null +++ b/cli/cli/command/secret/ls.go @@ -0,0 +1,76 @@ +package secret + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type bySecretName []swarm.Secret + +func (r bySecretName) Len() int { return len(r) } +func (r bySecretName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r bySecretName) Less(i, j int) bool { + return sortorder.NaturalLess(r[i].Spec.Name, r[j].Spec.Name) +} + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newSecretListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List secrets", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runSecretList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display IDs") + flags.StringVarP(&options.format, "format", "", "", "Pretty-print secrets using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runSecretList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + secrets, err := client.SecretList(ctx, types.SecretListOptions{Filters: options.filter.Value()}) + if err != nil { + return err + } + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().SecretFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().SecretFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(bySecretName(secrets)) + + secretCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewSecretFormat(format, options.quiet), + } + return formatter.SecretWrite(secretCtx, secrets) +} diff --git a/cli/cli/command/secret/ls_test.go b/cli/cli/command/secret/ls_test.go new file mode 100644 index 00000000..e1417115 --- /dev/null +++ b/cli/cli/command/secret/ls_test.go @@ -0,0 +1,160 @@ +package secret + +import ( + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestSecretListErrors(t *testing.T) { + testCases := []struct { + args []string + secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error) + expectedError string + }{ + { + args: []string{"foo"}, + expectedError: "accepts no argument", + }, + { + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{}, errors.Errorf("error listing secrets") + }, + expectedError: "error listing secrets", + }, + } + for _, tc := range testCases { + cmd := newSecretListCommand( + test.NewFakeCli(&fakeClient{ + secretListFunc: tc.secretListFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSecretList(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{ + *Secret(SecretID("ID-1-foo"), + SecretName("1-foo"), + SecretVersion(swarm.Version{Index: 10}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Secret(SecretID("ID-10-foo"), + SecretName("10-foo"), + SecretVersion(swarm.Version{Index: 11}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + SecretDriver("driver"), + ), + *Secret(SecretID("ID-2-foo"), + SecretName("2-foo"), + SecretVersion(swarm.Version{Index: 11}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + SecretDriver("driver"), + ), + }, nil + }, + }) + cmd := newSecretListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-sort.golden") +} + +func TestSecretListWithQuietOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{ + *Secret(SecretID("ID-foo"), SecretName("foo")), + *Secret(SecretID("ID-bar"), SecretName("bar"), SecretLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cmd := newSecretListCommand(cli) + cmd.Flags().Set("quiet", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-with-quiet-option.golden") +} + +func TestSecretListWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{ + *Secret(SecretID("ID-foo"), SecretName("foo")), + *Secret(SecretID("ID-bar"), SecretName("bar"), SecretLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + SecretFormat: "{{ .Name }} {{ .Labels }}", + }) + cmd := newSecretListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-with-config-format.golden") +} + +func TestSecretListWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{ + *Secret(SecretID("ID-foo"), SecretName("foo")), + *Secret(SecretID("ID-bar"), SecretName("bar"), 
SecretLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cmd := newSecretListCommand(cli) + cmd.Flags().Set("format", "{{ .Name }} {{ .Labels }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-with-format.golden") +} + +func TestSecretListWithFilter(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + assert.Check(t, is.Equal("foo", options.Filters.Get("name")[0]), "foo") + assert.Check(t, is.Equal("lbl1=Label-bar", options.Filters.Get("label")[0])) + return []swarm.Secret{ + *Secret(SecretID("ID-foo"), + SecretName("foo"), + SecretVersion(swarm.Version{Index: 10}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Secret(SecretID("ID-bar"), + SecretName("bar"), + SecretVersion(swarm.Version{Index: 11}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + }, nil + }, + }) + cmd := newSecretListCommand(cli) + cmd.Flags().Set("filter", "name=foo") + cmd.Flags().Set("filter", "label=lbl1=Label-bar") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-with-filter.golden") +} diff --git a/cli/cli/command/secret/remove.go b/cli/cli/command/secret/remove.go new file mode 100644 index 00000000..bdf47b77 --- /dev/null +++ b/cli/cli/command/secret/remove.go @@ -0,0 +1,53 @@ +package secret + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + names []string +} + +func newSecretRemoveCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "rm SECRET [SECRET...]", + Aliases: []string{"remove"}, + Short: "Remove one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts := removeOptions{ + names: args, + } + return runSecretRemove(dockerCli, opts) + }, + } +} + +func runSecretRemove(dockerCli command.Cli, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, name := range opts.names { + if err := client.SecretRemove(ctx, name); err != nil { + errs = append(errs, err.Error()) + continue + } + + fmt.Fprintln(dockerCli.Out(), name) + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/cli/cli/command/secret/remove_test.go b/cli/cli/command/secret/remove_test.go new file mode 100644 index 00000000..d2fc8ad0 --- /dev/null +++ b/cli/cli/command/secret/remove_test.go @@ -0,0 +1,79 @@ +package secret + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSecretRemoveErrors(t *testing.T) { + testCases := []struct { + args []string + secretRemoveFunc func(string) error + expectedError string + }{ + { + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + { + args: []string{"foo"}, + secretRemoveFunc: func(name string) error { + return errors.Errorf("error removing secret") + }, + expectedError: "error removing secret", + }, + } + for _, tc := range testCases { + cmd := newSecretRemoveCommand( + test.NewFakeCli(&fakeClient{ + secretRemoveFunc: tc.secretRemoveFunc, + }), + ) + cmd.SetArgs(tc.args) + 
cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSecretRemoveWithName(t *testing.T) { + names := []string{"foo", "bar"} + var removedSecrets []string + cli := test.NewFakeCli(&fakeClient{ + secretRemoveFunc: func(name string) error { + removedSecrets = append(removedSecrets, name) + return nil + }, + }) + cmd := newSecretRemoveCommand(cli) + cmd.SetArgs(names) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(names, strings.Split(strings.TrimSpace(cli.OutBuffer().String()), "\n"))) + assert.Check(t, is.DeepEqual(names, removedSecrets)) +} + +func TestSecretRemoveContinueAfterError(t *testing.T) { + names := []string{"foo", "bar"} + var removedSecrets []string + + cli := test.NewFakeCli(&fakeClient{ + secretRemoveFunc: func(name string) error { + removedSecrets = append(removedSecrets, name) + if name == "foo" { + return errors.Errorf("error removing secret: %s", name) + } + return nil + }, + }) + + cmd := newSecretRemoveCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(names) + assert.Error(t, cmd.Execute(), "error removing secret: foo") + assert.Check(t, is.DeepEqual(names, removedSecrets)) +} diff --git a/cli/cli/command/secret/testdata/secret-create-with-name.golden b/cli/cli/command/secret/testdata/secret-create-with-name.golden new file mode 100644 index 00000000..788642a9 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-create-with-name.golden @@ -0,0 +1 @@ +secret_foo_bar diff --git a/cli/cli/command/secret/testdata/secret-inspect-pretty.simple.golden b/cli/cli/command/secret/testdata/secret-inspect-pretty.simple.golden new file mode 100644 index 00000000..37234eff --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-pretty.simple.golden @@ -0,0 +1,7 @@ +ID: secretID +Name: secretName +Labels: + - lbl1=value1 +Driver: driver +Created at: 0001-01-01 00:00:00 +0000 utc +Updated at: 0001-01-01 00:00:00 +0000 utc diff --git a/cli/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden b/cli/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden new file mode 100644 index 00000000..aab678f8 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden @@ -0,0 +1 @@ +{"label1":"label-foo"} diff --git a/cli/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden b/cli/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden new file mode 100644 index 00000000..257cc564 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden @@ -0,0 +1 @@ +foo diff --git a/cli/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden b/cli/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden new file mode 100644 index 00000000..b01a400c --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden @@ -0,0 +1,26 @@ +[ + { + "ID": "ID-foo", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "foo", + "Labels": { + "label1": "label-foo" + } + } + }, + { + "ID": "ID-bar", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "bar", + "Labels": { + "label1": "label-foo" + } + } + } +] diff --git a/cli/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden 
b/cli/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden new file mode 100644 index 00000000..c4f41c10 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden @@ -0,0 +1,12 @@ +[ + { + "ID": "ID-foo", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "foo", + "Labels": null + } + } +] diff --git a/cli/cli/command/secret/testdata/secret-list-sort.golden b/cli/cli/command/secret/testdata/secret-list-sort.golden new file mode 100644 index 00000000..805d26f3 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-sort.golden @@ -0,0 +1,4 @@ +ID NAME DRIVER CREATED UPDATED +ID-1-foo 1-foo 2 hours ago About an hour ago +ID-2-foo 2-foo driver 2 hours ago About an hour ago +ID-10-foo 10-foo driver 2 hours ago About an hour ago diff --git a/cli/cli/command/secret/testdata/secret-list-with-config-format.golden b/cli/cli/command/secret/testdata/secret-list-with-config-format.golden new file mode 100644 index 00000000..a64bb595 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-with-config-format.golden @@ -0,0 +1,2 @@ +bar label=label-bar +foo diff --git a/cli/cli/command/secret/testdata/secret-list-with-filter.golden b/cli/cli/command/secret/testdata/secret-list-with-filter.golden new file mode 100644 index 00000000..388d2874 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-with-filter.golden @@ -0,0 +1,3 @@ +ID NAME DRIVER CREATED UPDATED +ID-bar bar 2 hours ago About an hour ago +ID-foo foo 2 hours ago About an hour ago diff --git a/cli/cli/command/secret/testdata/secret-list-with-format.golden b/cli/cli/command/secret/testdata/secret-list-with-format.golden new file mode 100644 index 00000000..a64bb595 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-with-format.golden @@ -0,0 +1,2 @@ +bar label=label-bar +foo diff --git a/cli/cli/command/secret/testdata/secret-list-with-quiet-option.golden b/cli/cli/command/secret/testdata/secret-list-with-quiet-option.golden new file mode 100644 index 00000000..145fc38d --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-with-quiet-option.golden @@ -0,0 +1,2 @@ +ID-bar +ID-foo diff --git a/cli/cli/command/service/client_test.go b/cli/cli/command/service/client_test.go new file mode 100644 index 00000000..8d0d592c --- /dev/null +++ b/cli/cli/command/service/client_test.go @@ -0,0 +1,77 @@ +package service + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" +) + +type fakeClient struct { + client.Client + serviceInspectWithRawFunc func(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + serviceUpdateFunc func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + serviceListFunc func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) + taskListFunc func(context.Context, types.TaskListOptions) ([]swarm.Task, error) + infoFunc func(ctx context.Context) (types.Info, error) + networkInspectFunc func(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) +} + +func (f *fakeClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + return nil, nil +} + +func (f *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + if f.taskListFunc != nil { + return f.taskListFunc(ctx, options) + } + return nil, nil +} + +func (f *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if f.serviceInspectWithRawFunc != nil { + return f.serviceInspectWithRawFunc(ctx, serviceID, options) + } + + return *Service(ServiceID(serviceID)), []byte{}, nil +} + +func (f *fakeClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + if f.serviceListFunc != nil { + return f.serviceListFunc(ctx, options) + } + + return nil, nil +} + +func (f *fakeClient) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + if f.serviceUpdateFunc != nil { + return f.serviceUpdateFunc(ctx, serviceID, version, service, options) + } + + return types.ServiceUpdateResponse{}, nil +} + +func (f *fakeClient) Info(ctx context.Context) (types.Info, error) { + if f.infoFunc == nil { + return types.Info{}, nil + } + return f.infoFunc(ctx) +} + +func (f *fakeClient) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + if f.networkInspectFunc != nil { + return f.networkInspectFunc(ctx, networkID, options) + } + return types.NetworkResource{}, nil +} + +func newService(id string, name string) swarm.Service { + return swarm.Service{ + ID: id, + Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: name}}, + } +} diff --git a/cli/cli/command/service/cmd.go b/cli/cli/command/service/cmd.go new file mode 100644 index 00000000..98af9852 --- /dev/null +++ b/cli/cli/command/service/cmd.go @@ -0,0 +1,34 @@ +package service + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewServiceCommand returns a cobra command for `service` subcommands +func NewServiceCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "service", + Short: "Manage services", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.24", + "swarm": "", + }, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newPsCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newScaleCommand(dockerCli), + newUpdateCommand(dockerCli), + 
newLogsCommand(dockerCli), + newRollbackCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/service/create.go b/cli/cli/command/service/create.go new file mode 100644 index 00000000..ec74eb43 --- /dev/null +++ b/cli/cli/command/service/create.go @@ -0,0 +1,137 @@ +package service + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + cliopts "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + opts := newServiceOptions() + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new service", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + if len(args) > 1 { + opts.args = args[1:] + } + return runCreate(dockerCli, cmd.Flags(), opts) + }, + } + flags := cmd.Flags() + flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)") + flags.StringVar(&opts.name, flagName, "", "Service name") + + addServiceFlags(flags, opts, buildServiceDefaultFlagMapping()) + + flags.VarP(&opts.labels, flagLabel, "l", "Service labels") + flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels") + flags.VarP(&opts.env, flagEnv, "e", "Set environment variables") + flags.Var(&opts.envFile, flagEnvFile, "Read in a file of environment variables") + flags.Var(&opts.mounts, flagMount, "Attach a filesystem mount to the service") + flags.Var(&opts.constraints, flagConstraint, "Placement constraints") + flags.Var(&opts.placementPrefs, flagPlacementPref, "Add a placement preference") + flags.SetAnnotation(flagPlacementPref, "version", []string{"1.28"}) + flags.Var(&opts.networks, flagNetwork, "Network attachments") + flags.Var(&opts.secrets, flagSecret, "Specify secrets to expose to the service") + flags.SetAnnotation(flagSecret, "version", []string{"1.25"}) + flags.Var(&opts.configs, flagConfig, "Specify configurations to expose to the service") + flags.SetAnnotation(flagConfig, "version", []string{"1.30"}) + flags.VarP(&opts.endpoint.publishPorts, flagPublish, "p", "Publish a port as a node port") + flags.Var(&opts.groups, flagGroup, "Set one or more supplementary user groups for the container") + flags.SetAnnotation(flagGroup, "version", []string{"1.25"}) + flags.Var(&opts.dns, flagDNS, "Set custom DNS servers") + flags.SetAnnotation(flagDNS, "version", []string{"1.25"}) + flags.Var(&opts.dnsOption, flagDNSOption, "Set DNS options") + flags.SetAnnotation(flagDNSOption, "version", []string{"1.25"}) + flags.Var(&opts.dnsSearch, flagDNSSearch, "Set custom DNS search domains") + flags.SetAnnotation(flagDNSSearch, "version", []string{"1.25"}) + flags.Var(&opts.hosts, flagHost, "Set one or more custom host-to-IP mappings (host:ip)") + flags.SetAnnotation(flagHost, "version", []string{"1.25"}) + flags.BoolVar(&opts.init, flagInit, false, "Use an init inside each service container to forward signals and reap processes") + flags.SetAnnotation(flagInit, "version", []string{"1.37"}) + + flags.Var(cliopts.NewListOptsRef(&opts.resources.resGenericResources, ValidateSingleGenericResource), "generic-resource", "User defined resources") + flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) + + flags.SetInterspersed(false) + return cmd +} + +func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, opts *serviceOptions) error { + 
apiClient := dockerCli.Client() + createOpts := types.ServiceCreateOptions{} + + ctx := context.Background() + + service, err := opts.ToService(ctx, apiClient, flags) + if err != nil { + return err + } + + specifiedSecrets := opts.secrets.Value() + if len(specifiedSecrets) > 0 { + // parse and validate secrets + secrets, err := ParseSecrets(apiClient, specifiedSecrets) + if err != nil { + return err + } + service.TaskTemplate.ContainerSpec.Secrets = secrets + } + + specifiedConfigs := opts.configs.Value() + if len(specifiedConfigs) > 0 { + // parse and validate configs + configs, err := ParseConfigs(apiClient, specifiedConfigs) + if err != nil { + return err + } + service.TaskTemplate.ContainerSpec.Configs = configs + } + + if err := resolveServiceImageDigestContentTrust(dockerCli, &service); err != nil { + return err + } + + // only send auth if flag was set + if opts.registryAuth { + // Retrieve encoded auth token from the image reference + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, opts.image) + if err != nil { + return err + } + createOpts.EncodedRegistryAuth = encodedAuth + } + + // query registry if flag disabling it was not set + if !opts.noResolveImage && versions.GreaterThanOrEqualTo(apiClient.ClientVersion(), "1.30") { + createOpts.QueryRegistry = true + } + + response, err := apiClient.ServiceCreate(ctx, service, createOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) + + if opts.detach || versions.LessThan(apiClient.ClientVersion(), "1.29") { + return nil + } + + return waitOnService(ctx, dockerCli, response.ID, opts.quiet) +} diff --git a/cli/cli/command/service/generic_resource_opts.go b/cli/cli/command/service/generic_resource_opts.go new file mode 100644 index 00000000..66385888 --- /dev/null +++ b/cli/cli/command/service/generic_resource_opts.go @@ -0,0 +1,105 @@ +package service + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" +) + +// GenericResource is a concept that a user can use to advertise user-defined +// resources on a node and thus better place services based on these resources. +// E.g: NVIDIA GPUs, Intel FPGAs, ... +// See https://github.com/docker/swarmkit/blob/master/design/generic_resources.md + +// ValidateSingleGenericResource validates that a single entry in the +// generic resource list is valid. +// i.e 'GPU=UID1' is valid however 'GPU:UID1' or 'UID1' isn't +func ValidateSingleGenericResource(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("invalid generic-resource format `%s` expected `name=value`", val) + } + + return val, nil +} + +// ParseGenericResources parses an array of Generic resourceResources +// Requesting Named Generic Resources for a service is not supported this +// is filtered here. 
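// Editorial example (illustrative, not part of the imported source): under these rules
//
//	ParseGenericResources([]string{"GPU=2", "FPGA=1"})
//
// yields two DiscreteGenericResource entries, whereas a named value such as "GPU=UID1" passes
// ValidateSingleGenericResource but is rejected below, because named generic resources cannot
// be requested for a service.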
+func ParseGenericResources(value []string) ([]swarm.GenericResource, error) { + if len(value) == 0 { + return nil, nil + } + + resources, err := genericresource.Parse(value) + if err != nil { + return nil, errors.Wrapf(err, "invalid generic resource specification") + } + + swarmResources := genericResourcesFromGRPC(resources) + for _, res := range swarmResources { + if res.NamedResourceSpec != nil { + return nil, fmt.Errorf("invalid generic-resource request `%s=%s`, Named Generic Resources is not supported for service create or update", res.NamedResourceSpec.Kind, res.NamedResourceSpec.Value) + } + } + + return swarmResources, nil +} + +// genericResourcesFromGRPC converts a GRPC GenericResource to a GenericResource +func genericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []swarm.GenericResource { + var generic []swarm.GenericResource + for _, res := range genericRes { + var current swarm.GenericResource + + switch r := res.Resource.(type) { + case *swarmapi.GenericResource_DiscreteResourceSpec: + current.DiscreteResourceSpec = &swarm.DiscreteGenericResource{ + Kind: r.DiscreteResourceSpec.Kind, + Value: r.DiscreteResourceSpec.Value, + } + case *swarmapi.GenericResource_NamedResourceSpec: + current.NamedResourceSpec = &swarm.NamedGenericResource{ + Kind: r.NamedResourceSpec.Kind, + Value: r.NamedResourceSpec.Value, + } + } + + generic = append(generic, current) + } + + return generic +} + +func buildGenericResourceMap(genericRes []swarm.GenericResource) (map[string]swarm.GenericResource, error) { + m := make(map[string]swarm.GenericResource) + + for _, res := range genericRes { + if res.DiscreteResourceSpec == nil { + return nil, fmt.Errorf("invalid generic-resource `%+v` for service task", res) + } + + _, ok := m[res.DiscreteResourceSpec.Kind] + if ok { + return nil, fmt.Errorf("duplicate generic-resource `%+v` for service task", res.DiscreteResourceSpec.Kind) + } + + m[res.DiscreteResourceSpec.Kind] = res + } + + return m, nil +} + +func buildGenericResourceList(genericRes map[string]swarm.GenericResource) []swarm.GenericResource { + var l []swarm.GenericResource + + for _, res := range genericRes { + l = append(l, res) + } + + return l +} diff --git a/cli/cli/command/service/generic_resource_opts_test.go b/cli/cli/command/service/generic_resource_opts_test.go new file mode 100644 index 00000000..c750f1dc --- /dev/null +++ b/cli/cli/command/service/generic_resource_opts_test.go @@ -0,0 +1,23 @@ +package service + +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestValidateSingleGenericResource(t *testing.T) { + incorrect := []string{"foo", "fooo-bar"} + correct := []string{"foo=bar", "bar=1", "foo=barbar"} + + for _, v := range incorrect { + _, err := ValidateSingleGenericResource(v) + assert.Check(t, is.ErrorContains(err, "")) + } + + for _, v := range correct { + _, err := ValidateSingleGenericResource(v) + assert.NilError(t, err) + } +} diff --git a/cli/cli/command/service/helpers.go b/cli/cli/command/service/helpers.go new file mode 100644 index 00000000..eb508e85 --- /dev/null +++ b/cli/cli/command/service/helpers.go @@ -0,0 +1,33 @@ +package service + +import ( + "context" + "io" + "io/ioutil" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/service/progress" + "github.com/docker/docker/pkg/jsonmessage" +) + +// waitOnService waits for the service to converge. It outputs a progress bar, +// if appropriate based on the CLI flags. 
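// Editorial note (illustrative, not part of the imported source): callers run this right after
// a create or update, e.g.
//
//	if err := waitOnService(ctx, dockerCli, response.ID, opts.quiet); err != nil { ... }
//
// When quiet is set the progress stream is still drained (and discarded) so that
// progress.ServiceProgress never blocks on the pipe writer.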
+func waitOnService(ctx context.Context, dockerCli command.Cli, serviceID string, quiet bool) error { + errChan := make(chan error, 1) + pipeReader, pipeWriter := io.Pipe() + + go func() { + errChan <- progress.ServiceProgress(ctx, dockerCli.Client(), serviceID, pipeWriter) + }() + + if quiet { + go io.Copy(ioutil.Discard, pipeReader) + return <-errChan + } + + err := jsonmessage.DisplayJSONMessagesToStream(pipeReader, dockerCli.Out(), nil) + if err == nil { + err = <-errChan + } + return err +} diff --git a/cli/cli/command/service/inspect.go b/cli/cli/command/service/inspect.go new file mode 100644 index 00000000..7f988fae --- /dev/null +++ b/cli/cli/command/service/inspect.go @@ -0,0 +1,93 @@ +package service + +import ( + "context" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + refs []string + format string + pretty bool +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SERVICE [SERVICE...]", + Short: "Display detailed information on one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + + if opts.pretty && len(opts.format) > 0 { + return errors.Errorf("--format is incompatible with human friendly format") + } + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(ref string) (interface{}, []byte, error) { + // Service inspect shows defaults values in empty fields. 
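// Editorial note (an assumption based on the option name, not stated in this patch):
// InsertDefaults asks the daemon to fill zero-valued spec fields with the cluster defaults,
// so the inspect output shows a complete spec rather than omitting unset fields.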
+ service, _, err := client.ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true}) + if err == nil || !apiclient.IsErrNotFound(err) { + return service, nil, err + } + return nil, nil, errors.Errorf("Error: no such service: %s", ref) + } + + getNetwork := func(ref string) (interface{}, []byte, error) { + network, _, err := client.NetworkInspectWithRaw(ctx, ref, types.NetworkInspectOptions{Scope: "swarm"}) + if err == nil || !apiclient.IsErrNotFound(err) { + return network, nil, err + } + return nil, nil, errors.Errorf("Error: no such network: %s", ref) + } + + f := opts.format + if len(f) == 0 { + f = "raw" + if len(dockerCli.ConfigFile().ServiceInspectFormat) > 0 { + f = dockerCli.ConfigFile().ServiceInspectFormat + } + } + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return errors.Errorf("Cannot supply extra formatting options to the pretty template") + } + + serviceCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewServiceFormat(f), + } + + if err := formatter.ServiceInspectWrite(serviceCtx, opts.refs, getRef, getNetwork); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/cli/cli/command/service/inspect_test.go b/cli/cli/command/service/inspect_test.go new file mode 100644 index 00000000..c27a9a88 --- /dev/null +++ b/cli/cli/command/service/inspect_test.go @@ -0,0 +1,160 @@ +package service + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string { + b := new(bytes.Buffer) + + endpointSpec := &swarm.EndpointSpec{ + Mode: "vip", + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + }, + }, + } + + two := uint64(2) + + s := swarm.Service{ + ID: "de179gar9d0o7ltdybungplod", + Meta: swarm.Meta{ + Version: swarm.Version{Index: 315}, + CreatedAt: now, + UpdatedAt: now, + }, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "my_service", + Labels: map[string]string{"com.label": "foo"}, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "foo/bar@sha256:this_is_a_test", + Configs: []*swarm.ConfigReference{ + { + ConfigID: "mtc3i44r1awdoziy2iceg73z8", + ConfigName: "configtest.conf", + File: &swarm.ConfigReferenceFileTarget{ + Name: "/configtest.conf", + }, + }, + }, + Secrets: []*swarm.SecretReference{ + { + SecretID: "3hv39ehbbb4hdozo7spod9ftn", + SecretName: "secrettest.conf", + File: &swarm.SecretReferenceFileTarget{ + Name: "/secrettest.conf", + }, + }, + }, + }, + Networks: []swarm.NetworkAttachmentConfig{ + { + Target: "5vpyomhb6ievnk0i0o60gcnei", + Aliases: []string{"web"}, + }, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &two, + }, + }, + EndpointSpec: endpointSpec, + }, + Endpoint: swarm.Endpoint{ + Spec: *endpointSpec, + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + PublishedPort: 30000, + }, + }, + VirtualIPs: []swarm.EndpointVirtualIP{ + { + NetworkID: "6o4107cj2jx9tihgb0jyts6pj", + Addr: "10.255.0.4/16", + }, + }, + }, + UpdateStatus: &swarm.UpdateStatus{ + StartedAt: &now, + 
CompletedAt: &now, + }, + } + + ctx := formatter.Context{ + Output: b, + Format: format, + } + + err := formatter.ServiceInspectWrite(ctx, []string{"de179gar9d0o7ltdybungplod"}, + func(ref string) (interface{}, []byte, error) { + return s, nil, nil + }, + func(ref string) (interface{}, []byte, error) { + return types.NetworkResource{ + ID: "5vpyomhb6ievnk0i0o60gcnei", + Name: "mynetwork", + }, nil, nil + }, + ) + if err != nil { + t.Fatal(err) + } + return b.String() +} + +func TestPrettyPrintWithNoUpdateConfig(t *testing.T) { + s := formatServiceInspect(t, formatter.NewServiceFormat("pretty"), time.Now()) + if strings.Contains(s, "UpdateStatus") { + t.Fatal("Pretty print failed before parsing UpdateStatus") + } + if !strings.Contains(s, "mynetwork") { + t.Fatal("network name not found in inspect output") + } +} + +func TestJSONFormatWithNoUpdateConfig(t *testing.T) { + now := time.Now() + // s1: [{"ID":..}] + // s2: {"ID":..} + s1 := formatServiceInspect(t, formatter.NewServiceFormat(""), now) + s2 := formatServiceInspect(t, formatter.NewServiceFormat("{{json .}}"), now) + var m1Wrap []map[string]interface{} + if err := json.Unmarshal([]byte(s1), &m1Wrap); err != nil { + t.Fatal(err) + } + if len(m1Wrap) != 1 { + t.Fatalf("strange s1=%s", s1) + } + m1 := m1Wrap[0] + var m2 map[string]interface{} + if err := json.Unmarshal([]byte(s2), &m2); err != nil { + t.Fatal(err) + } + assert.Check(t, is.DeepEqual(m1, m2)) +} + +func TestPrettyPrintWithConfigsAndSecrets(t *testing.T) { + s := formatServiceInspect(t, formatter.NewServiceFormat("pretty"), time.Now()) + + assert.Check(t, is.Contains(s, "Configs:"), "Pretty print missing configs") + assert.Check(t, is.Contains(s, "Secrets:"), "Pretty print missing secrets") +} diff --git a/cli/cli/command/service/list.go b/cli/cli/command/service/list.go new file mode 100644 index 00000000..db4b0376 --- /dev/null +++ b/cli/cli/command/service/list.go @@ -0,0 +1,139 @@ +package service + +import ( + "context" + "fmt" + "sort" + + "vbom.ml/util/sortorder" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" +) + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List services", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display IDs") + flags.StringVar(&options.format, "format", "", "Pretty-print services using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +type byName []swarm.Service + +func (n byName) Len() int { return len(n) } +func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] } +func (n byName) Less(i, j int) bool { return sortorder.NaturalLess(n[i].Spec.Name, n[j].Spec.Name) } + +func runList(dockerCli command.Cli, options listOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + serviceFilters := options.filter.Value() + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: 
serviceFilters}) + if err != nil { + return err + } + + sort.Sort(byName(services)) + info := map[string]formatter.ServiceListInfo{} + if len(services) > 0 && !options.quiet { + // only non-empty services and not quiet, should we call TaskList and NodeList api + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + info = GetServicesStatus(services, nodes, tasks) + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().ServicesFormat + } else { + format = formatter.TableFormatKey + } + } + + servicesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewServiceListFormat(format, options.quiet), + } + return formatter.ServiceListWrite(servicesCtx, services, info) +} + +// GetServicesStatus returns a map of mode and replicas +func GetServicesStatus(services []swarm.Service, nodes []swarm.Node, tasks []swarm.Task) map[string]formatter.ServiceListInfo { + running := map[string]int{} + tasksNoShutdown := map[string]int{} + + activeNodes := make(map[string]struct{}) + for _, n := range nodes { + if n.Status.State != swarm.NodeStateDown { + activeNodes[n.ID] = struct{}{} + } + } + + for _, task := range tasks { + if task.DesiredState != swarm.TaskStateShutdown { + tasksNoShutdown[task.ServiceID]++ + } + + if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == swarm.TaskStateRunning { + running[task.ServiceID]++ + } + } + + info := map[string]formatter.ServiceListInfo{} + for _, service := range services { + info[service.ID] = formatter.ServiceListInfo{} + if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + info[service.ID] = formatter.ServiceListInfo{ + Mode: "replicated", + Replicas: fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas), + } + } else if service.Spec.Mode.Global != nil { + info[service.ID] = formatter.ServiceListInfo{ + Mode: "global", + Replicas: fmt.Sprintf("%d/%d", running[service.ID], tasksNoShutdown[service.ID]), + } + } + } + return info +} diff --git a/cli/cli/command/service/list_test.go b/cli/cli/command/service/list_test.go new file mode 100644 index 00000000..e52e7e03 --- /dev/null +++ b/cli/cli/command/service/list_test.go @@ -0,0 +1,28 @@ +package service + +import ( + "context" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestServiceListOrder(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + newService("a57dbe8", "service-1-foo"), + newService("a57dbdd", "service-10-foo"), + newService("aaaaaaa", "service-2-foo"), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{.Name}}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "service-list-sort.golden") +} diff --git a/cli/cli/command/service/logs.go b/cli/cli/command/service/logs.go new file mode 100644 index 00000000..107c9d21 --- /dev/null +++ b/cli/cli/command/service/logs.go @@ 
-0,0 +1,349 @@ +package service + +import ( + "bytes" + "context" + "fmt" + "io" + "sort" + "strconv" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/service/logs" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type logsOptions struct { + noResolve bool + noTrunc bool + noTaskIDs bool + follow bool + since string + timestamps bool + tail string + details bool + raw bool + + target string +} + +func newLogsCommand(dockerCli command.Cli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] SERVICE|TASK", + Short: "Fetch the logs of a service or task", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.target = args[0] + return runLogs(dockerCli, &opts) + }, + Annotations: map[string]string{"version": "1.29"}, + } + + flags := cmd.Flags() + // options specific to service logs + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names in output") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.raw, "raw", false, "Do not neatly format logs") + flags.SetAnnotation("raw", "version", []string{"1.30"}) + flags.BoolVar(&opts.noTaskIDs, "no-task-ids", false, "Do not include task IDs in output") + // options identical to container logs + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 
42m for 42 minutes)") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.SetAnnotation("details", "version", []string{"1.30"}) + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli command.Cli, opts *logsOptions) error { + ctx := context.Background() + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + // get the details if we request it OR if we're not doing raw mode + // (we need them for the context to pretty print) + Details: opts.details || !opts.raw, + } + + cli := dockerCli.Client() + + var ( + maxLength = 1 + responseBody io.ReadCloser + tty bool + // logfunc is used to delay the call to logs so that we can do some + // processing before we actually get the logs + logfunc func(context.Context, string, types.ContainerLogsOptions) (io.ReadCloser, error) + ) + + service, _, err := cli.ServiceInspectWithRaw(ctx, opts.target, types.ServiceInspectOptions{}) + if err != nil { + // if it's any error other than service not found, it's Real + if !client.IsErrNotFound(err) { + return err + } + task, _, err := cli.TaskInspectWithRaw(ctx, opts.target) + if err != nil { + if client.IsErrNotFound(err) { + // if the task isn't found, rewrite the error to be clear + // that we looked for services AND tasks and found none + err = fmt.Errorf("no such task or service: %v", opts.target) + } + return err + } + + tty = task.Spec.ContainerSpec.TTY + maxLength = getMaxLength(task.Slot) + + // use the TaskLogs api function + logfunc = cli.TaskLogs + } else { + // use ServiceLogs api function + logfunc = cli.ServiceLogs + tty = service.Spec.TaskTemplate.ContainerSpec.TTY + if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + // if replicas are initialized, figure out if we need to pad them + replicas := *service.Spec.Mode.Replicated.Replicas + maxLength = getMaxLength(int(replicas)) + } + } + + // we can't prettify tty logs. tell the user that this is the case. + // this is why we assign the logs function to a variable and delay calling + // it. we want to check this before we make the call and checking twice in + // each branch is even sloppier than this CLI disaster already is + if tty && !opts.raw { + return errors.New("tty service logs only supported with --raw") + } + + // now get the logs + responseBody, err = logfunc(ctx, opts.target, options) + if err != nil { + return err + } + defer responseBody.Close() + + // tty logs get straight copied. they're not muxed with stdcopy + if tty { + _, err = io.Copy(dockerCli.Out(), responseBody) + return err + } + + // otherwise, logs are multiplexed. if we're doing pretty printing, also + // create a task formatter. 
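// Editorial sketch (illustrative, not part of the imported source): the wiring below amounts to
//
//	stdout := &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: dockerCli.Out()}
//	stderr := &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: dockerCli.Err()}
//	stdcopy.StdCopy(stdout, stderr, responseBody)
//
// stdcopy demultiplexes the stream back into stdout/stderr frames, and each frame (one log
// line) passes through logWriter.Write, which prepends the "task@node | " context.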
+ var stdout, stderr io.Writer + stdout = dockerCli.Out() + stderr = dockerCli.Err() + if !opts.raw { + taskFormatter := newTaskFormatter(cli, opts, maxLength) + + stdout = &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: stdout} + stderr = &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: stderr} + } + + _, err = stdcopy.StdCopy(stdout, stderr, responseBody) + return err +} + +// getMaxLength gets the maximum length of the number in base 10 +func getMaxLength(i int) int { + return len(strconv.Itoa(i)) +} + +type taskFormatter struct { + client client.APIClient + opts *logsOptions + padding int + + r *idresolver.IDResolver + // cache saves a pre-cooked logContext formatted string based on a + // logcontext object, so we don't have to resolve names every time + cache map[logContext]string +} + +func newTaskFormatter(client client.APIClient, opts *logsOptions, padding int) *taskFormatter { + return &taskFormatter{ + client: client, + opts: opts, + padding: padding, + r: idresolver.New(client, opts.noResolve), + cache: make(map[logContext]string), + } +} + +func (f *taskFormatter) format(ctx context.Context, logCtx logContext) (string, error) { + if cached, ok := f.cache[logCtx]; ok { + return cached, nil + } + + nodeName, err := f.r.Resolve(ctx, swarm.Node{}, logCtx.nodeID) + if err != nil { + return "", err + } + + serviceName, err := f.r.Resolve(ctx, swarm.Service{}, logCtx.serviceID) + if err != nil { + return "", err + } + + task, _, err := f.client.TaskInspectWithRaw(ctx, logCtx.taskID) + if err != nil { + return "", err + } + + taskName := fmt.Sprintf("%s.%d", serviceName, task.Slot) + if !f.opts.noTaskIDs { + if f.opts.noTrunc { + taskName += fmt.Sprintf(".%s", task.ID) + } else { + taskName += fmt.Sprintf(".%s", stringid.TruncateID(task.ID)) + } + } + + paddingCount := f.padding - getMaxLength(task.Slot) + padding := "" + if paddingCount > 0 { + padding = strings.Repeat(" ", paddingCount) + } + formatted := taskName + "@" + nodeName + padding + f.cache[logCtx] = formatted + return formatted, nil +} + +type logWriter struct { + ctx context.Context + opts *logsOptions + f *taskFormatter + w io.Writer +} + +func (lw *logWriter) Write(buf []byte) (int, error) { + // this works but ONLY because stdcopy calls write a whole line at a time. + // if this ends up horribly broken or panics, check to see if stdcopy has + // reneged on that assumption. (@god forgive me) + // also this only works because the logs format is, like, barely parsable. + // if something changes in the logs format, this is gonna break + + // there should always be at least 2 parts: details and message. if there + // is no timestamp, details will be first (index 0) when we split on + // spaces. if there is a timestamp, details will be 2nd (`index 1) + detailsIndex := 0 + numParts := 2 + if lw.opts.timestamps { + detailsIndex++ + numParts++ + } + + // break up the log line into parts. 
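// Editorial example (illustrative; the exact payload comes from the daemon, not this patch):
// with timestamps enabled a demultiplexed line looks roughly like
//
//	2018-06-01T12:00:00.000000000Z com.docker.swarm.node.id=abc,com.docker.swarm.service.id=def,com.docker.swarm.task.id=ghi hello world
//
// i.e. "<timestamp> <details> <message>"; without --timestamps the details field comes first,
// which is what the detailsIndex/numParts bookkeeping above accounts for.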
+ parts := bytes.SplitN(buf, []byte(" "), numParts) + if len(parts) != numParts { + return 0, errors.Errorf("invalid context in log message: %v", string(buf)) + } + // parse the details out + details, err := logs.ParseLogDetails(string(parts[detailsIndex])) + if err != nil { + return 0, err + } + // and then create a context from the details + // this removes the context-specific details from the details map, so we + // can more easily print the details later + logCtx, err := lw.parseContext(details) + if err != nil { + return 0, err + } + + output := []byte{} + // if we included timestamps, add them to the front + if lw.opts.timestamps { + output = append(output, parts[0]...) + output = append(output, ' ') + } + // add the context, nice and formatted + formatted, err := lw.f.format(lw.ctx, logCtx) + if err != nil { + return 0, err + } + output = append(output, []byte(formatted+" | ")...) + // if the user asked for details, add them to be log message + if lw.opts.details { + // ugh i hate this it's basically a dupe of api/server/httputils/write_log_stream.go:stringAttrs() + // ok but we're gonna do it a bit different + + // there are optimizations that can be made here. for starters, i'd + // suggest caching the details keys. then, we can maybe draw maps and + // slices from a pool to avoid alloc overhead on them. idk if it's + // worth the time yet. + + // first we need a slice + d := make([]string, 0, len(details)) + // then let's add all the pairs + for k := range details { + d = append(d, k+"="+details[k]) + } + // then sort em + sort.Strings(d) + // then join and append + output = append(output, []byte(strings.Join(d, ","))...) + output = append(output, ' ') + } + + // add the log message itself, finally + output = append(output, parts[detailsIndex+1]...) 
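// Editorial note (illustrative, not part of the imported source): at this point output reads
// roughly like
//
//	2018-06-01T12:00:00.000000000Z web.1.abc123def456@node-1     | key=value hello world
//
// with the timestamp and the key=value details present only when the corresponding flags were set.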
+ + _, err = lw.w.Write(output) + if err != nil { + return 0, err + } + + return len(buf), nil +} + +// parseContext returns a log context and REMOVES the context from the details map +func (lw *logWriter) parseContext(details map[string]string) (logContext, error) { + nodeID, ok := details["com.docker.swarm.node.id"] + if !ok { + return logContext{}, errors.Errorf("missing node id in details: %v", details) + } + delete(details, "com.docker.swarm.node.id") + + serviceID, ok := details["com.docker.swarm.service.id"] + if !ok { + return logContext{}, errors.Errorf("missing service id in details: %v", details) + } + delete(details, "com.docker.swarm.service.id") + + taskID, ok := details["com.docker.swarm.task.id"] + if !ok { + return logContext{}, errors.Errorf("missing task id in details: %s", details) + } + delete(details, "com.docker.swarm.task.id") + + return logContext{ + nodeID: nodeID, + serviceID: serviceID, + taskID: taskID, + }, nil +} + +type logContext struct { + nodeID string + serviceID string + taskID string +} diff --git a/cli/cli/command/service/opts.go b/cli/cli/command/service/opts.go new file mode 100644 index 00000000..6a9591ab --- /dev/null +++ b/cli/cli/command/service/opts.go @@ -0,0 +1,909 @@ +package service + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/defaults" + shlex "github.com/flynn-archive/go-shlex" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +type int64Value interface { + Value() int64 +} + +// Uint64Opt represents a uint64. +type Uint64Opt struct { + value *uint64 +} + +// Set a new value on the option +func (i *Uint64Opt) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + i.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (i *Uint64Opt) Type() string { + return "uint" +} + +// String returns a string repr of this option +func (i *Uint64Opt) String() string { + if i.value != nil { + return fmt.Sprintf("%v", *i.value) + } + return "" +} + +// Value returns the uint64 +func (i *Uint64Opt) Value() *uint64 { + return i.value +} + +type floatValue float32 + +func (f *floatValue) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = floatValue(v) + return err +} + +func (f *floatValue) Type() string { + return "float" +} + +func (f *floatValue) String() string { + return strconv.FormatFloat(float64(*f), 'g', -1, 32) +} + +func (f *floatValue) Value() float32 { + return float32(*f) +} + +// placementPrefOpts holds a list of placement preferences. +type placementPrefOpts struct { + prefs []swarm.PlacementPreference + strings []string +} + +func (opts *placementPrefOpts) String() string { + if len(opts.strings) == 0 { + return "" + } + return fmt.Sprintf("%v", opts.strings) +} + +// Set validates the input value and adds it to the internal slices. +// Note: in the future strategies other than "spread", may be supported, +// as well as additional comma-separated options. 
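// Editorial example (illustrative, not part of the imported source): the accepted form is
// "<strategy>=<descriptor>", and "spread" is currently the only strategy, e.g.
//
//	--placement-pref 'spread=node.labels.datacenter'
//
// which Set turns into swarm.PlacementPreference{Spread: &swarm.SpreadOver{SpreadDescriptor: "node.labels.datacenter"}}.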
+func (opts *placementPrefOpts) Set(value string) error { + fields := strings.Split(value, "=") + if len(fields) != 2 { + return errors.New(`placement preference must be of the format "="`) + } + if fields[0] != "spread" { + return errors.Errorf("unsupported placement preference %s (only spread is supported)", fields[0]) + } + + opts.prefs = append(opts.prefs, swarm.PlacementPreference{ + Spread: &swarm.SpreadOver{ + SpreadDescriptor: fields[1], + }, + }) + opts.strings = append(opts.strings, value) + return nil +} + +// Type returns a string name for this Option type +func (opts *placementPrefOpts) Type() string { + return "pref" +} + +// ShlexOpt is a flag Value which parses a string as a list of shell words +type ShlexOpt []string + +// Set the value +func (s *ShlexOpt) Set(value string) error { + valueSlice, err := shlex.Split(value) + *s = ShlexOpt(valueSlice) + return err +} + +// Type returns the tyep of the value +func (s *ShlexOpt) Type() string { + return "command" +} + +func (s *ShlexOpt) String() string { + if len(*s) == 0 { + return "" + } + return fmt.Sprint(*s) +} + +// Value returns the value as a string slice +func (s *ShlexOpt) Value() []string { + return []string(*s) +} + +type updateOptions struct { + parallelism uint64 + delay time.Duration + monitor time.Duration + onFailure string + maxFailureRatio floatValue + order string +} + +func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.UpdateConfig { + defaultFailureAction := strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaultUpdateConfig.FailureAction)]) + defaultMonitor, _ := gogotypes.DurationFromProto(defaultUpdateConfig.Monitor) + return &swarm.UpdateConfig{ + Parallelism: defaultUpdateConfig.Parallelism, + Delay: defaultUpdateConfig.Delay, + Monitor: defaultMonitor, + FailureAction: defaultFailureAction, + MaxFailureRatio: defaultUpdateConfig.MaxFailureRatio, + Order: defaultOrder(defaultUpdateConfig.Order), + } +} + +func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig { + if !anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) { + return nil + } + + updateConfig := updateConfigFromDefaults(defaults.Service.Update) + + if flags.Changed(flagUpdateParallelism) { + updateConfig.Parallelism = opts.parallelism + } + if flags.Changed(flagUpdateDelay) { + updateConfig.Delay = opts.delay + } + if flags.Changed(flagUpdateMonitor) { + updateConfig.Monitor = opts.monitor + } + if flags.Changed(flagUpdateFailureAction) { + updateConfig.FailureAction = opts.onFailure + } + if flags.Changed(flagUpdateMaxFailureRatio) { + updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value() + } + if flags.Changed(flagUpdateOrder) { + updateConfig.Order = opts.order + } + + return updateConfig +} + +func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig { + if !anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio) { + return nil + } + + updateConfig := updateConfigFromDefaults(defaults.Service.Rollback) + + if flags.Changed(flagRollbackParallelism) { + updateConfig.Parallelism = opts.parallelism + } + if flags.Changed(flagRollbackDelay) { + updateConfig.Delay = opts.delay + } + if flags.Changed(flagRollbackMonitor) { + updateConfig.Monitor = opts.monitor + } + if flags.Changed(flagRollbackFailureAction) { + updateConfig.FailureAction = opts.onFailure + } + if 
flags.Changed(flagRollbackMaxFailureRatio) { + updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value() + } + if flags.Changed(flagRollbackOrder) { + updateConfig.Order = opts.order + } + + return updateConfig +} + +type resourceOptions struct { + limitCPU opts.NanoCPUs + limitMemBytes opts.MemBytes + resCPU opts.NanoCPUs + resMemBytes opts.MemBytes + resGenericResources []string +} + +func (r *resourceOptions) ToResourceRequirements() (*swarm.ResourceRequirements, error) { + generic, err := ParseGenericResources(r.resGenericResources) + if err != nil { + return nil, err + } + + return &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: r.limitCPU.Value(), + MemoryBytes: r.limitMemBytes.Value(), + }, + Reservations: &swarm.Resources{ + NanoCPUs: r.resCPU.Value(), + MemoryBytes: r.resMemBytes.Value(), + GenericResources: generic, + }, + }, nil +} + +type restartPolicyOptions struct { + condition string + delay opts.DurationOpt + maxAttempts Uint64Opt + window opts.DurationOpt +} + +func defaultRestartPolicy() *swarm.RestartPolicy { + defaultMaxAttempts := defaults.Service.Task.Restart.MaxAttempts + rp := &swarm.RestartPolicy{ + MaxAttempts: &defaultMaxAttempts, + } + + if defaults.Service.Task.Restart.Delay != nil { + defaultRestartDelay, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) + rp.Delay = &defaultRestartDelay + } + if defaults.Service.Task.Restart.Window != nil { + defaultRestartWindow, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Window) + rp.Window = &defaultRestartWindow + } + rp.Condition = defaultRestartCondition() + + return rp +} + +func defaultRestartCondition() swarm.RestartPolicyCondition { + switch defaults.Service.Task.Restart.Condition { + case api.RestartOnNone: + return "none" + case api.RestartOnFailure: + return "on-failure" + case api.RestartOnAny: + return "any" + default: + return "" + } +} + +func defaultOrder(order api.UpdateConfig_UpdateOrder) string { + switch order { + case api.UpdateConfig_STOP_FIRST: + return "stop-first" + case api.UpdateConfig_START_FIRST: + return "start-first" + default: + return "" + } +} + +func (r *restartPolicyOptions) ToRestartPolicy(flags *pflag.FlagSet) *swarm.RestartPolicy { + if !anyChanged(flags, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow, flagRestartCondition) { + return nil + } + + restartPolicy := defaultRestartPolicy() + + if flags.Changed(flagRestartDelay) { + restartPolicy.Delay = r.delay.Value() + } + if flags.Changed(flagRestartCondition) { + restartPolicy.Condition = swarm.RestartPolicyCondition(r.condition) + } + if flags.Changed(flagRestartMaxAttempts) { + restartPolicy.MaxAttempts = r.maxAttempts.Value() + } + if flags.Changed(flagRestartWindow) { + restartPolicy.Window = r.window.Value() + } + + return restartPolicy +} + +type credentialSpecOpt struct { + value *swarm.CredentialSpec + source string +} + +func (c *credentialSpecOpt) Set(value string) error { + c.source = value + c.value = &swarm.CredentialSpec{} + switch { + case strings.HasPrefix(value, "file://"): + c.value.File = strings.TrimPrefix(value, "file://") + case strings.HasPrefix(value, "registry://"): + c.value.Registry = strings.TrimPrefix(value, "registry://") + default: + return errors.New("Invalid credential spec - value must be prefixed file:// or registry:// followed by a value") + } + + return nil +} + +func (c *credentialSpecOpt) Type() string { + return "credential-spec" +} + +func (c *credentialSpecOpt) String() string { + return c.source +} + +func (c 
*credentialSpecOpt) Value() *swarm.CredentialSpec { + return c.value +} + +func resolveNetworkID(ctx context.Context, apiClient client.NetworkAPIClient, networkIDOrName string) (string, error) { + nw, err := apiClient.NetworkInspect(ctx, networkIDOrName, types.NetworkInspectOptions{Scope: "swarm"}) + return nw.ID, err +} + +func convertNetworks(networks opts.NetworkOpt) []swarm.NetworkAttachmentConfig { + var netAttach []swarm.NetworkAttachmentConfig + for _, net := range networks.Value() { + netAttach = append(netAttach, swarm.NetworkAttachmentConfig{ + Target: net.Target, + Aliases: net.Aliases, + DriverOpts: net.DriverOpts, + }) + } + return netAttach +} + +type endpointOptions struct { + mode string + publishPorts opts.PortOpt +} + +func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { + return &swarm.EndpointSpec{ + Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), + Ports: e.publishPorts.Value(), + } +} + +type logDriverOptions struct { + name string + opts opts.ListOpts +} + +func newLogDriverOptions() logDriverOptions { + return logDriverOptions{opts: opts.NewListOpts(opts.ValidateEnv)} +} + +func (ldo *logDriverOptions) toLogDriver() *swarm.Driver { + if ldo.name == "" { + return nil + } + + // set the log driver only if specified. + return &swarm.Driver{ + Name: ldo.name, + Options: opts.ConvertKVStringsToMap(ldo.opts.GetAll()), + } +} + +type healthCheckOptions struct { + cmd string + interval opts.PositiveDurationOpt + timeout opts.PositiveDurationOpt + retries int + startPeriod opts.PositiveDurationOpt + noHealthcheck bool +} + +func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) { + var healthConfig *container.HealthConfig + haveHealthSettings := opts.cmd != "" || + opts.interval.Value() != nil || + opts.timeout.Value() != nil || + opts.retries != 0 + if opts.noHealthcheck { + if haveHealthSettings { + return nil, errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + healthConfig = &container.HealthConfig{Test: []string{"NONE"}} + } else if haveHealthSettings { + var test []string + if opts.cmd != "" { + test = []string{"CMD-SHELL", opts.cmd} + } + var interval, timeout, startPeriod time.Duration + if ptr := opts.interval.Value(); ptr != nil { + interval = *ptr + } + if ptr := opts.timeout.Value(); ptr != nil { + timeout = *ptr + } + if ptr := opts.startPeriod.Value(); ptr != nil { + startPeriod = *ptr + } + healthConfig = &container.HealthConfig{ + Test: test, + Interval: interval, + Timeout: timeout, + Retries: opts.retries, + StartPeriod: startPeriod, + } + } + return healthConfig, nil +} + +// convertExtraHostsToSwarmHosts converts an array of extra hosts in cli +// : +// into a swarmkit host format: +// IP_address canonical_hostname [aliases...] 
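// Editorial example (illustrative, not part of the imported source): an extra host passed on
// the CLI as "db.example.com:10.0.0.2" comes back from this function as
// "10.0.0.2 db.example.com", i.e. hosts-file order with the IP address first.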
+// This assumes input value (:) has already been validated +func convertExtraHostsToSwarmHosts(extraHosts []string) []string { + hosts := []string{} + for _, extraHost := range extraHosts { + parts := strings.SplitN(extraHost, ":", 2) + hosts = append(hosts, fmt.Sprintf("%s %s", parts[1], parts[0])) + } + return hosts +} + +type serviceOptions struct { + detach bool + quiet bool + + name string + labels opts.ListOpts + containerLabels opts.ListOpts + image string + entrypoint ShlexOpt + args []string + hostname string + env opts.ListOpts + envFile opts.ListOpts + workdir string + user string + groups opts.ListOpts + credentialSpec credentialSpecOpt + init bool + stopSignal string + tty bool + readOnly bool + mounts opts.MountOpt + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOption opts.ListOpts + hosts opts.ListOpts + + resources resourceOptions + stopGrace opts.DurationOpt + + replicas Uint64Opt + mode string + + restartPolicy restartPolicyOptions + constraints opts.ListOpts + placementPrefs placementPrefOpts + update updateOptions + rollback updateOptions + networks opts.NetworkOpt + endpoint endpointOptions + + registryAuth bool + noResolveImage bool + + logDriver logDriverOptions + + healthcheck healthCheckOptions + secrets opts.SecretOpt + configs opts.ConfigOpt + + isolation string +} + +func newServiceOptions() *serviceOptions { + return &serviceOptions{ + labels: opts.NewListOpts(opts.ValidateEnv), + constraints: opts.NewListOpts(nil), + containerLabels: opts.NewListOpts(opts.ValidateEnv), + env: opts.NewListOpts(opts.ValidateEnv), + envFile: opts.NewListOpts(nil), + groups: opts.NewListOpts(nil), + logDriver: newLogDriverOptions(), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOption: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + hosts: opts.NewListOpts(opts.ValidateExtraHost), + } +} + +func (options *serviceOptions) ToServiceMode() (swarm.ServiceMode, error) { + serviceMode := swarm.ServiceMode{} + switch options.mode { + case "global": + if options.replicas.Value() != nil { + return serviceMode, errors.Errorf("replicas can only be used with replicated mode") + } + + serviceMode.Global = &swarm.GlobalService{} + case "replicated": + serviceMode.Replicated = &swarm.ReplicatedService{ + Replicas: options.replicas.Value(), + } + default: + return serviceMode, errors.Errorf("Unknown mode: %s, only replicated and global supported", options.mode) + } + return serviceMode, nil +} + +func (options *serviceOptions) ToStopGracePeriod(flags *pflag.FlagSet) *time.Duration { + if flags.Changed(flagStopGracePeriod) { + return options.stopGrace.Value() + } + return nil +} + +func (options *serviceOptions) ToService(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet) (swarm.ServiceSpec, error) { + var service swarm.ServiceSpec + + envVariables, err := opts.ReadKVEnvStrings(options.envFile.GetAll(), options.env.GetAll()) + if err != nil { + return service, err + } + + currentEnv := make([]string, 0, len(envVariables)) + for _, env := range envVariables { // need to process each var, in order + k := strings.SplitN(env, "=", 2)[0] + for i, current := range currentEnv { // remove duplicates + if current == env { + continue // no update required, may hide this behind flag to preserve order of envVariables + } + if strings.HasPrefix(current, k+"=") { + currentEnv = append(currentEnv[:i], currentEnv[i+1:]...) 
+ } + } + currentEnv = append(currentEnv, env) + } + + healthConfig, err := options.healthcheck.toHealthConfig() + if err != nil { + return service, err + } + + serviceMode, err := options.ToServiceMode() + if err != nil { + return service, err + } + + networks := convertNetworks(options.networks) + for i, net := range networks { + nwID, err := resolveNetworkID(ctx, apiClient, net.Target) + if err != nil { + return service, err + } + networks[i].Target = nwID + } + sort.Sort(byNetworkTarget(networks)) + + resources, err := options.resources.ToResourceRequirements() + if err != nil { + return service, err + } + + service = swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: options.name, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: options.image, + Args: options.args, + Command: options.entrypoint.Value(), + Env: currentEnv, + Hostname: options.hostname, + Labels: opts.ConvertKVStringsToMap(options.containerLabels.GetAll()), + Dir: options.workdir, + User: options.user, + Groups: options.groups.GetAll(), + StopSignal: options.stopSignal, + TTY: options.tty, + ReadOnly: options.readOnly, + Mounts: options.mounts.Value(), + Init: &options.init, + DNSConfig: &swarm.DNSConfig{ + Nameservers: options.dns.GetAll(), + Search: options.dnsSearch.GetAll(), + Options: options.dnsOption.GetAll(), + }, + Hosts: convertExtraHostsToSwarmHosts(options.hosts.GetAll()), + StopGracePeriod: options.ToStopGracePeriod(flags), + Healthcheck: healthConfig, + Isolation: container.Isolation(options.isolation), + }, + Networks: networks, + Resources: resources, + RestartPolicy: options.restartPolicy.ToRestartPolicy(flags), + Placement: &swarm.Placement{ + Constraints: options.constraints.GetAll(), + Preferences: options.placementPrefs.prefs, + }, + LogDriver: options.logDriver.toLogDriver(), + }, + Mode: serviceMode, + UpdateConfig: options.update.updateConfig(flags), + RollbackConfig: options.rollback.rollbackConfig(flags), + EndpointSpec: options.endpoint.ToEndpointSpec(), + } + + if options.credentialSpec.Value() != nil { + service.TaskTemplate.ContainerSpec.Privileges = &swarm.Privileges{ + CredentialSpec: options.credentialSpec.Value(), + } + } + + return service, nil +} + +type flagDefaults map[string]interface{} + +func (fd flagDefaults) getUint64(flagName string) uint64 { + if val, ok := fd[flagName].(uint64); ok { + return val + } + return 0 +} + +func (fd flagDefaults) getString(flagName string) string { + if val, ok := fd[flagName].(string); ok { + return val + } + return "" +} + +func buildServiceDefaultFlagMapping() flagDefaults { + defaultFlagValues := make(map[string]interface{}) + + defaultFlagValues[flagStopGracePeriod], _ = gogotypes.DurationFromProto(defaults.Service.Task.GetContainer().StopGracePeriod) + defaultFlagValues[flagRestartCondition] = `"` + defaultRestartCondition() + `"` + defaultFlagValues[flagRestartDelay], _ = gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) + + if defaults.Service.Task.Restart.MaxAttempts != 0 { + defaultFlagValues[flagRestartMaxAttempts] = defaults.Service.Task.Restart.MaxAttempts + } + + defaultRestartWindow, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Window) + if defaultRestartWindow != 0 { + defaultFlagValues[flagRestartWindow] = defaultRestartWindow + } + + defaultFlagValues[flagUpdateParallelism] = defaults.Service.Update.Parallelism + defaultFlagValues[flagUpdateDelay] = defaults.Service.Update.Delay + 
defaultFlagValues[flagUpdateMonitor], _ = gogotypes.DurationFromProto(defaults.Service.Update.Monitor) + defaultFlagValues[flagUpdateFailureAction] = `"` + strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaults.Service.Update.FailureAction)]) + `"` + defaultFlagValues[flagUpdateMaxFailureRatio] = defaults.Service.Update.MaxFailureRatio + defaultFlagValues[flagUpdateOrder] = `"` + defaultOrder(defaults.Service.Update.Order) + `"` + + defaultFlagValues[flagRollbackParallelism] = defaults.Service.Rollback.Parallelism + defaultFlagValues[flagRollbackDelay] = defaults.Service.Rollback.Delay + defaultFlagValues[flagRollbackMonitor], _ = gogotypes.DurationFromProto(defaults.Service.Rollback.Monitor) + defaultFlagValues[flagRollbackFailureAction] = `"` + strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaults.Service.Rollback.FailureAction)]) + `"` + defaultFlagValues[flagRollbackMaxFailureRatio] = defaults.Service.Rollback.MaxFailureRatio + defaultFlagValues[flagRollbackOrder] = `"` + defaultOrder(defaults.Service.Rollback.Order) + `"` + + defaultFlagValues[flagEndpointMode] = "vip" + + return defaultFlagValues +} + +func addDetachFlag(flags *pflag.FlagSet, detach *bool) { + flags.BoolVarP(detach, flagDetach, "d", false, "Exit immediately instead of waiting for the service to converge") + flags.SetAnnotation(flagDetach, "version", []string{"1.29"}) +} + +// addServiceFlags adds all flags that are common to both `create` and `update`. +// Any flags that are not common are added separately in the individual command +func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValues flagDefaults) { + flagDesc := func(flagName string, desc string) string { + if defaultValue, ok := defaultFlagValues[flagName]; ok { + return fmt.Sprintf("%s (default %v)", desc, defaultValue) + } + return desc + } + + addDetachFlag(flags, &opts.detach) + flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Suppress progress output") + + flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container") + flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: <name|uid>[:<group|gid>])") + flags.Var(&opts.credentialSpec, flagCredentialSpec, "Credential spec for managed service account (Windows only)") + flags.SetAnnotation(flagCredentialSpec, "version", []string{"1.29"}) + flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname") + flags.SetAnnotation(flagHostname, "version", []string{"1.25"}) + flags.Var(&opts.entrypoint, flagEntrypoint, "Overwrite the default ENTRYPOINT of the image") + + flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") + flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") + flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") + flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") + + flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)")) + flags.Var(&opts.replicas, flagReplicas, "Number of tasks") + + flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none"|"on-failure"|"any")`)) + flags.Var(&opts.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)")) + flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, flagDesc(flagRestartMaxAttempts, "Maximum number of restarts before giving up")) +
flags.Var(&opts.restartPolicy.window, flagRestartWindow, flagDesc(flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)")) + + flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, defaultFlagValues.getUint64(flagUpdateParallelism), "Maximum number of tasks updated simultaneously (0 to update all at once)") + flags.DurationVar(&opts.update.delay, flagUpdateDelay, 0, flagDesc(flagUpdateDelay, "Delay between updates (ns|us|ms|s|m|h)")) + flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, 0, flagDesc(flagUpdateMonitor, "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)")) + flags.SetAnnotation(flagUpdateMonitor, "version", []string{"1.25"}) + flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "", flagDesc(flagUpdateFailureAction, `Action on update failure ("pause"|"continue"|"rollback")`)) + flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, flagDesc(flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update")) + flags.SetAnnotation(flagUpdateMaxFailureRatio, "version", []string{"1.25"}) + flags.StringVar(&opts.update.order, flagUpdateOrder, "", flagDesc(flagUpdateOrder, `Update order ("start-first"|"stop-first")`)) + flags.SetAnnotation(flagUpdateOrder, "version", []string{"1.29"}) + + flags.Uint64Var(&opts.rollback.parallelism, flagRollbackParallelism, defaultFlagValues.getUint64(flagRollbackParallelism), + "Maximum number of tasks rolled back simultaneously (0 to roll back all at once)") + flags.SetAnnotation(flagRollbackParallelism, "version", []string{"1.28"}) + flags.DurationVar(&opts.rollback.delay, flagRollbackDelay, 0, flagDesc(flagRollbackDelay, "Delay between task rollbacks (ns|us|ms|s|m|h)")) + flags.SetAnnotation(flagRollbackDelay, "version", []string{"1.28"}) + flags.DurationVar(&opts.rollback.monitor, flagRollbackMonitor, 0, flagDesc(flagRollbackMonitor, "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)")) + flags.SetAnnotation(flagRollbackMonitor, "version", []string{"1.28"}) + flags.StringVar(&opts.rollback.onFailure, flagRollbackFailureAction, "", flagDesc(flagRollbackFailureAction, `Action on rollback failure ("pause"|"continue")`)) + flags.SetAnnotation(flagRollbackFailureAction, "version", []string{"1.28"}) + flags.Var(&opts.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, flagDesc(flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback")) + flags.SetAnnotation(flagRollbackMaxFailureRatio, "version", []string{"1.28"}) + flags.StringVar(&opts.rollback.order, flagRollbackOrder, "", flagDesc(flagRollbackOrder, `Rollback order ("start-first"|"stop-first")`)) + flags.SetAnnotation(flagRollbackOrder, "version", []string{"1.29"}) + + flags.StringVar(&opts.endpoint.mode, flagEndpointMode, defaultFlagValues.getString(flagEndpointMode), "Endpoint mode (vip or dnsrr)") + + flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents") + flags.BoolVar(&opts.noResolveImage, flagNoResolveImage, false, "Do not query the registry to resolve image digest and supported platforms") + flags.SetAnnotation(flagNoResolveImage, "version", []string{"1.30"}) + + flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service") + flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options") + + flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health") + flags.SetAnnotation(flagHealthCmd, "version", []string{"1.25"}) + 
flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ms|s|m|h)") + flags.SetAnnotation(flagHealthInterval, "version", []string{"1.25"}) + flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ms|s|m|h)") + flags.SetAnnotation(flagHealthTimeout, "version", []string{"1.25"}) + flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy") + flags.SetAnnotation(flagHealthRetries, "version", []string{"1.25"}) + flags.Var(&opts.healthcheck.startPeriod, flagHealthStartPeriod, "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)") + flags.SetAnnotation(flagHealthStartPeriod, "version", []string{"1.29"}) + flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK") + flags.SetAnnotation(flagNoHealthcheck, "version", []string{"1.25"}) + + flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY") + flags.SetAnnotation(flagTTY, "version", []string{"1.25"}) + + flags.BoolVar(&opts.readOnly, flagReadOnly, false, "Mount the container's root filesystem as read only") + flags.SetAnnotation(flagReadOnly, "version", []string{"1.28"}) + + flags.StringVar(&opts.stopSignal, flagStopSignal, "", "Signal to stop the container") + flags.SetAnnotation(flagStopSignal, "version", []string{"1.28"}) + flags.StringVar(&opts.isolation, flagIsolation, "", "Service container isolation mode") + flags.SetAnnotation(flagIsolation, "version", []string{"1.35"}) +} + +const ( + flagCredentialSpec = "credential-spec" + flagPlacementPref = "placement-pref" + flagPlacementPrefAdd = "placement-pref-add" + flagPlacementPrefRemove = "placement-pref-rm" + flagConstraint = "constraint" + flagConstraintRemove = "constraint-rm" + flagConstraintAdd = "constraint-add" + flagContainerLabel = "container-label" + flagContainerLabelRemove = "container-label-rm" + flagContainerLabelAdd = "container-label-add" + flagDetach = "detach" + flagDNS = "dns" + flagDNSRemove = "dns-rm" + flagDNSAdd = "dns-add" + flagDNSOption = "dns-option" + flagDNSOptionRemove = "dns-option-rm" + flagDNSOptionAdd = "dns-option-add" + flagDNSSearch = "dns-search" + flagDNSSearchRemove = "dns-search-rm" + flagDNSSearchAdd = "dns-search-add" + flagEndpointMode = "endpoint-mode" + flagEntrypoint = "entrypoint" + flagEnv = "env" + flagEnvFile = "env-file" + flagEnvRemove = "env-rm" + flagEnvAdd = "env-add" + flagGenericResourcesRemove = "generic-resource-rm" + flagGenericResourcesAdd = "generic-resource-add" + flagGroup = "group" + flagGroupAdd = "group-add" + flagGroupRemove = "group-rm" + flagHost = "host" + flagHostAdd = "host-add" + flagHostRemove = "host-rm" + flagHostname = "hostname" + flagLabel = "label" + flagLabelRemove = "label-rm" + flagLabelAdd = "label-add" + flagLimitCPU = "limit-cpu" + flagLimitMemory = "limit-memory" + flagMode = "mode" + flagMount = "mount" + flagMountRemove = "mount-rm" + flagMountAdd = "mount-add" + flagName = "name" + flagNetwork = "network" + flagNetworkAdd = "network-add" + flagNetworkRemove = "network-rm" + flagPublish = "publish" + flagPublishRemove = "publish-rm" + flagPublishAdd = "publish-add" + flagQuiet = "quiet" + flagReadOnly = "read-only" + flagReplicas = "replicas" + flagReserveCPU = "reserve-cpu" + flagReserveMemory = "reserve-memory" + flagRestartCondition = "restart-condition" + flagRestartDelay = "restart-delay" + flagRestartMaxAttempts = "restart-max-attempts" + 
flagRestartWindow = "restart-window" + flagRollback = "rollback" + flagRollbackDelay = "rollback-delay" + flagRollbackFailureAction = "rollback-failure-action" + flagRollbackMaxFailureRatio = "rollback-max-failure-ratio" + flagRollbackMonitor = "rollback-monitor" + flagRollbackOrder = "rollback-order" + flagRollbackParallelism = "rollback-parallelism" + flagInit = "init" + flagStopGracePeriod = "stop-grace-period" + flagStopSignal = "stop-signal" + flagTTY = "tty" + flagUpdateDelay = "update-delay" + flagUpdateFailureAction = "update-failure-action" + flagUpdateMaxFailureRatio = "update-max-failure-ratio" + flagUpdateMonitor = "update-monitor" + flagUpdateOrder = "update-order" + flagUpdateParallelism = "update-parallelism" + flagUser = "user" + flagWorkdir = "workdir" + flagRegistryAuth = "with-registry-auth" + flagNoResolveImage = "no-resolve-image" + flagLogDriver = "log-driver" + flagLogOpt = "log-opt" + flagHealthCmd = "health-cmd" + flagHealthInterval = "health-interval" + flagHealthRetries = "health-retries" + flagHealthTimeout = "health-timeout" + flagHealthStartPeriod = "health-start-period" + flagNoHealthcheck = "no-healthcheck" + flagSecret = "secret" + flagSecretAdd = "secret-add" + flagSecretRemove = "secret-rm" + flagConfig = "config" + flagConfigAdd = "config-add" + flagConfigRemove = "config-rm" + flagIsolation = "isolation" +) diff --git a/cli/cli/command/service/opts_test.go b/cli/cli/command/service/opts_test.go new file mode 100644 index 00000000..344893f5 --- /dev/null +++ b/cli/cli/command/service/opts_test.go @@ -0,0 +1,226 @@ +package service + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestMemBytesString(t *testing.T) { + var mem opts.MemBytes = 1048576 + assert.Check(t, is.Equal("1MiB", mem.String())) +} + +func TestMemBytesSetAndValue(t *testing.T) { + var mem opts.MemBytes + assert.NilError(t, mem.Set("5kb")) + assert.Check(t, is.Equal(int64(5120), mem.Value())) +} + +func TestNanoCPUsString(t *testing.T) { + var cpus opts.NanoCPUs = 6100000000 + assert.Check(t, is.Equal("6.100", cpus.String())) +} + +func TestNanoCPUsSetAndValue(t *testing.T) { + var cpus opts.NanoCPUs + assert.NilError(t, cpus.Set("0.35")) + assert.Check(t, is.Equal(int64(350000000), cpus.Value())) +} + +func TestUint64OptString(t *testing.T) { + value := uint64(2345678) + opt := Uint64Opt{value: &value} + assert.Check(t, is.Equal("2345678", opt.String())) + + opt = Uint64Opt{} + assert.Check(t, is.Equal("", opt.String())) +} + +func TestUint64OptSetAndValue(t *testing.T) { + var opt Uint64Opt + assert.NilError(t, opt.Set("14445")) + assert.Check(t, is.Equal(uint64(14445), *opt.Value())) +} + +func TestHealthCheckOptionsToHealthConfig(t *testing.T) { + dur := time.Second + opt := healthCheckOptions{ + cmd: "curl", + interval: opts.PositiveDurationOpt{DurationOpt: *opts.NewDurationOpt(&dur)}, + timeout: opts.PositiveDurationOpt{DurationOpt: *opts.NewDurationOpt(&dur)}, + startPeriod: opts.PositiveDurationOpt{DurationOpt: *opts.NewDurationOpt(&dur)}, + retries: 10, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(&container.HealthConfig{ + Test: []string{"CMD-SHELL", "curl"}, + Interval: time.Second, + Timeout: time.Second, + StartPeriod: time.Second, + Retries: 10, + }, config)) +} + +func 
TestHealthCheckOptionsToHealthConfigNoHealthcheck(t *testing.T) { + opt := healthCheckOptions{ + noHealthcheck: true, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(&container.HealthConfig{ + Test: []string{"NONE"}, + }, config)) +} + +func TestHealthCheckOptionsToHealthConfigConflict(t *testing.T) { + opt := healthCheckOptions{ + cmd: "curl", + noHealthcheck: true, + } + _, err := opt.toHealthConfig() + assert.Error(t, err, "--no-healthcheck conflicts with --health-* options") +} + +func TestResourceOptionsToResourceRequirements(t *testing.T) { + incorrectOptions := []resourceOptions{ + { + resGenericResources: []string{"foo=bar", "foo=1"}, + }, + { + resGenericResources: []string{"foo=bar", "foo=baz"}, + }, + { + resGenericResources: []string{"foo=bar"}, + }, + { + resGenericResources: []string{"foo=1", "foo=2"}, + }, + } + + for _, opt := range incorrectOptions { + _, err := opt.ToResourceRequirements() + assert.Check(t, is.ErrorContains(err, "")) + } + + correctOptions := []resourceOptions{ + { + resGenericResources: []string{"foo=1"}, + }, + { + resGenericResources: []string{"foo=1", "bar=2"}, + }, + } + + for _, opt := range correctOptions { + r, err := opt.ToResourceRequirements() + assert.NilError(t, err) + assert.Check(t, is.Len(r.Reservations.GenericResources, len(opt.resGenericResources))) + } + +} + +func TestToServiceNetwork(t *testing.T) { + nws := []types.NetworkResource{ + {Name: "aaa-network", ID: "id555"}, + {Name: "mmm-network", ID: "id999"}, + {Name: "zzz-network", ID: "id111"}, + } + + client := &fakeClient{ + networkInspectFunc: func(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + for _, network := range nws { + if network.ID == networkID || network.Name == networkID { + return network, nil + } + } + return types.NetworkResource{}, fmt.Errorf("network not found: %s", networkID) + }, + } + + nwo := opts.NetworkOpt{} + nwo.Set("zzz-network") + nwo.Set("mmm-network") + nwo.Set("aaa-network") + + o := newServiceOptions() + o.mode = "replicated" + o.networks = nwo + + ctx := context.Background() + flags := newCreateCommand(nil).Flags() + service, err := o.ToService(ctx, client, flags) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id111"}, {Target: "id555"}, {Target: "id999"}}, service.TaskTemplate.Networks)) +} + +func TestToServiceUpdateRollback(t *testing.T) { + expected := swarm.ServiceSpec{ + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: 23, + Delay: 34 * time.Second, + Monitor: 54321 * time.Nanosecond, + FailureAction: "pause", + MaxFailureRatio: 0.6, + Order: "stop-first", + }, + RollbackConfig: &swarm.UpdateConfig{ + Parallelism: 12, + Delay: 23 * time.Second, + Monitor: 12345 * time.Nanosecond, + FailureAction: "continue", + MaxFailureRatio: 0.5, + Order: "start-first", + }, + } + + // Note: in test-situation, the flags are only used to detect if an option + // was set; the actual value itself is read from the serviceOptions below. 
+ flags := newCreateCommand(nil).Flags() + flags.Set("update-parallelism", "23") + flags.Set("update-delay", "34s") + flags.Set("update-monitor", "54321ns") + flags.Set("update-failure-action", "pause") + flags.Set("update-max-failure-ratio", "0.6") + flags.Set("update-order", "stop-first") + + flags.Set("rollback-parallelism", "12") + flags.Set("rollback-delay", "23s") + flags.Set("rollback-monitor", "12345ns") + flags.Set("rollback-failure-action", "continue") + flags.Set("rollback-max-failure-ratio", "0.5") + flags.Set("rollback-order", "start-first") + + o := newServiceOptions() + o.mode = "replicated" + o.update = updateOptions{ + parallelism: 23, + delay: 34 * time.Second, + monitor: 54321 * time.Nanosecond, + onFailure: "pause", + maxFailureRatio: 0.6, + order: "stop-first", + } + o.rollback = updateOptions{ + parallelism: 12, + delay: 23 * time.Second, + monitor: 12345 * time.Nanosecond, + onFailure: "continue", + maxFailureRatio: 0.5, + order: "start-first", + } + + service, err := o.ToService(context.Background(), &fakeClient{}, flags) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(service.UpdateConfig, expected.UpdateConfig)) + assert.Check(t, is.DeepEqual(service.RollbackConfig, expected.RollbackConfig)) +} diff --git a/cli/cli/command/service/parse.go b/cli/cli/command/service/parse.go new file mode 100644 index 00000000..25410707 --- /dev/null +++ b/cli/cli/command/service/parse.go @@ -0,0 +1,118 @@ +package service + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/pkg/errors" +) + +// ParseSecrets retrieves the secrets with the requested names and fills +// secret IDs into the secret references. 
+func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) { + if len(requestedSecrets) == 0 { + return []*swarmtypes.SecretReference{}, nil + } + + secretRefs := make(map[string]*swarmtypes.SecretReference) + ctx := context.Background() + + for _, secret := range requestedSecrets { + if _, exists := secretRefs[secret.File.Name]; exists { + return nil, errors.Errorf("duplicate secret target for %s not allowed", secret.SecretName) + } + secretRef := new(swarmtypes.SecretReference) + *secretRef = *secret + secretRefs[secret.File.Name] = secretRef + } + + args := filters.NewArgs() + for _, s := range secretRefs { + args.Add("name", s.SecretName) + } + + secrets, err := client.SecretList(ctx, types.SecretListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundSecrets := make(map[string]string) + for _, secret := range secrets { + foundSecrets[secret.Spec.Annotations.Name] = secret.ID + } + + addedSecrets := []*swarmtypes.SecretReference{} + + for _, ref := range secretRefs { + id, ok := foundSecrets[ref.SecretName] + if !ok { + return nil, errors.Errorf("secret not found: %s", ref.SecretName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.SecretID = id + addedSecrets = append(addedSecrets, ref) + } + + return addedSecrets, nil +} + +// ParseConfigs retrieves the configs from the requested names and converts +// them to config references to use with the spec +func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) { + if len(requestedConfigs) == 0 { + return []*swarmtypes.ConfigReference{}, nil + } + + configRefs := make(map[string]*swarmtypes.ConfigReference) + ctx := context.Background() + + for _, config := range requestedConfigs { + if _, exists := configRefs[config.File.Name]; exists { + return nil, errors.Errorf("duplicate config target for %s not allowed", config.ConfigName) + } + + configRef := new(swarmtypes.ConfigReference) + *configRef = *config + configRefs[config.File.Name] = configRef + } + + args := filters.NewArgs() + for _, s := range configRefs { + args.Add("name", s.ConfigName) + } + + configs, err := client.ConfigList(ctx, types.ConfigListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundConfigs := make(map[string]string) + for _, config := range configs { + foundConfigs[config.Spec.Annotations.Name] = config.ID + } + + addedConfigs := []*swarmtypes.ConfigReference{} + + for _, ref := range configRefs { + id, ok := foundConfigs[ref.ConfigName] + if !ok { + return nil, errors.Errorf("config not found: %s", ref.ConfigName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.ConfigID = id + addedConfigs = append(addedConfigs, ref) + } + + return addedConfigs, nil +} diff --git a/cli/cli/command/service/progress/progress.go b/cli/cli/command/service/progress/progress.go new file mode 100644 index 00000000..4b9cdd73 --- /dev/null +++ b/cli/cli/command/service/progress/progress.go @@ -0,0 +1,504 @@ +package progress + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/progress" + 
"github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" +) + +var ( + numberedStates = map[swarm.TaskState]int64{ + swarm.TaskStateNew: 1, + swarm.TaskStateAllocated: 2, + swarm.TaskStatePending: 3, + swarm.TaskStateAssigned: 4, + swarm.TaskStateAccepted: 5, + swarm.TaskStatePreparing: 6, + swarm.TaskStateReady: 7, + swarm.TaskStateStarting: 8, + swarm.TaskStateRunning: 9, + + // The following states are not actually shown in progress + // output, but are used internally for ordering. + swarm.TaskStateComplete: 10, + swarm.TaskStateShutdown: 11, + swarm.TaskStateFailed: 12, + swarm.TaskStateRejected: 13, + } + + longestState int +) + +const ( + maxProgress = 9 + maxProgressBars = 20 +) + +type progressUpdater interface { + update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) +} + +func init() { + for state := range numberedStates { + if !terminalState(state) && len(state) > longestState { + longestState = len(state) + } + } +} + +func terminalState(state swarm.TaskState) bool { + return numberedStates[state] > numberedStates[swarm.TaskStateRunning] +} + +func stateToProgress(state swarm.TaskState, rollback bool) int64 { + if !rollback { + return numberedStates[state] + } + return numberedStates[swarm.TaskStateRunning] - numberedStates[state] +} + +// ServiceProgress outputs progress information for convergence of a service. +// nolint: gocyclo +func ServiceProgress(ctx context.Context, client client.APIClient, serviceID string, progressWriter io.WriteCloser) error { + defer progressWriter.Close() + + progressOut := streamformatter.NewJSONProgressOutput(progressWriter, false) + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, os.Interrupt) + defer signal.Stop(sigint) + + taskFilter := filters.NewArgs() + taskFilter.Add("service", serviceID) + taskFilter.Add("_up-to-date", "true") + + getUpToDateTasks := func() ([]swarm.Task, error) { + return client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + } + + var ( + updater progressUpdater + converged bool + convergedAt time.Time + monitor = 5 * time.Second + rollback bool + ) + + for { + service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) + if err != nil { + return err + } + + if service.Spec.UpdateConfig != nil && service.Spec.UpdateConfig.Monitor != 0 { + monitor = service.Spec.UpdateConfig.Monitor + } + + if updater == nil { + updater, err = initializeUpdater(service, progressOut) + if err != nil { + return err + } + } + + if service.UpdateStatus != nil { + switch service.UpdateStatus.State { + case swarm.UpdateStateUpdating: + rollback = false + case swarm.UpdateStateCompleted: + if !converged { + return nil + } + case swarm.UpdateStatePaused: + return fmt.Errorf("service update paused: %s", service.UpdateStatus.Message) + case swarm.UpdateStateRollbackStarted: + if !rollback && service.UpdateStatus.Message != "" { + progressOut.WriteProgress(progress.Progress{ + ID: "rollback", + Action: service.UpdateStatus.Message, + }) + } + rollback = true + case swarm.UpdateStateRollbackPaused: + return fmt.Errorf("service rollback paused: %s", service.UpdateStatus.Message) + case swarm.UpdateStateRollbackCompleted: + if !converged { + return fmt.Errorf("service rolled back: %s", service.UpdateStatus.Message) + } + } + } + if converged && time.Since(convergedAt) >= monitor { + progressOut.WriteProgress(progress.Progress{ + ID: "verify", + Action: "Service converged", + }) + + return nil + } + + 
tasks, err := getUpToDateTasks() + if err != nil { + return err + } + + activeNodes, err := getActiveNodes(ctx, client) + if err != nil { + return err + } + + converged, err = updater.update(service, tasks, activeNodes, rollback) + if err != nil { + return err + } + if converged { + if convergedAt.IsZero() { + convergedAt = time.Now() + } + wait := monitor - time.Since(convergedAt) + if wait >= 0 { + progressOut.WriteProgress(progress.Progress{ + // Ideally this would have no ID, but + // the progress rendering code behaves + // poorly on an "action" with no ID. It + // returns the cursor to the beginning + // of the line, so the first character + // may be difficult to read. Then the + // output is overwritten by the shell + // prompt when the command finishes. + ID: "verify", + Action: fmt.Sprintf("Waiting %d seconds to verify that tasks are stable...", wait/time.Second+1), + }) + } + } else { + if !convergedAt.IsZero() { + progressOut.WriteProgress(progress.Progress{ + ID: "verify", + Action: "Detected task failure", + }) + } + convergedAt = time.Time{} + } + + select { + case <-time.After(200 * time.Millisecond): + case <-sigint: + if !converged { + progress.Message(progressOut, "", "Operation continuing in background.") + progress.Messagef(progressOut, "", "Use `docker service ps %s` to check progress.", serviceID) + } + return nil + } + } +} + +func getActiveNodes(ctx context.Context, client client.APIClient) (map[string]struct{}, error) { + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return nil, err + } + + activeNodes := make(map[string]struct{}) + for _, n := range nodes { + if n.Status.State != swarm.NodeStateDown { + activeNodes[n.ID] = struct{}{} + } + } + return activeNodes, nil +} + +func initializeUpdater(service swarm.Service, progressOut progress.Output) (progressUpdater, error) { + if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + return &replicatedProgressUpdater{ + progressOut: progressOut, + }, nil + } + if service.Spec.Mode.Global != nil { + return &globalProgressUpdater{ + progressOut: progressOut, + }, nil + } + return nil, errors.New("unrecognized service mode") +} + +func writeOverallProgress(progressOut progress.Output, numerator, denominator int, rollback bool) { + if rollback { + progressOut.WriteProgress(progress.Progress{ + ID: "overall progress", + Action: fmt.Sprintf("rolling back update: %d out of %d tasks", numerator, denominator), + }) + return + } + progressOut.WriteProgress(progress.Progress{ + ID: "overall progress", + Action: fmt.Sprintf("%d out of %d tasks", numerator, denominator), + }) +} + +func truncError(errMsg string) string { + // Remove newlines from the error, which corrupt the output. + errMsg = strings.Replace(errMsg, "\n", " ", -1) + + // Limit the length to 75 characters, so that even on narrow terminals + // this will not overflow to the next line. 
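+ // The cut is made on bytes rather than runes, so a multi-byte UTF-8 sequence
+ // at the boundary may be split; that is tolerable here because the result is
+ // only used for a single line of progress output.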
+ if len(errMsg) > 75 { + errMsg = errMsg[:74] + "…" + } + return errMsg +} + +type replicatedProgressUpdater struct { + progressOut progress.Output + + // used for mapping slots to a contiguous space + // this also causes progress bars to appear in order + slotMap map[int]int + + initialized bool + done bool +} + +func (u *replicatedProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) { + if service.Spec.Mode.Replicated == nil || service.Spec.Mode.Replicated.Replicas == nil { + return false, errors.New("no replica count") + } + replicas := *service.Spec.Mode.Replicated.Replicas + + if !u.initialized { + u.slotMap = make(map[int]int) + + // Draw progress bars in order + writeOverallProgress(u.progressOut, 0, int(replicas), rollback) + + if replicas <= maxProgressBars { + for i := uint64(1); i <= replicas; i++ { + progress.Update(u.progressOut, fmt.Sprintf("%d/%d", i, replicas), " ") + } + } + u.initialized = true + } + + tasksBySlot := u.tasksBySlot(tasks, activeNodes) + + // If we had reached a converged state, check if we are still converged. + if u.done { + for _, task := range tasksBySlot { + if task.Status.State != swarm.TaskStateRunning { + u.done = false + break + } + } + } + + running := uint64(0) + + for _, task := range tasksBySlot { + mappedSlot := u.slotMap[task.Slot] + if mappedSlot == 0 { + mappedSlot = len(u.slotMap) + 1 + u.slotMap[task.Slot] = mappedSlot + } + + if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning { + running++ + } + + u.writeTaskProgress(task, mappedSlot, replicas, rollback) + } + + if !u.done { + writeOverallProgress(u.progressOut, int(running), int(replicas), rollback) + + if running == replicas { + u.done = true + } + } + + return running == replicas, nil +} + +func (u *replicatedProgressUpdater) tasksBySlot(tasks []swarm.Task, activeNodes map[string]struct{}) map[int]swarm.Task { + // If there are multiple tasks with the same slot number, favor the one + // with the *lowest* desired state. This can happen in restart + // scenarios. + tasksBySlot := make(map[int]swarm.Task) + for _, task := range tasks { + if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 { + continue + } + if existingTask, ok := tasksBySlot[task.Slot]; ok { + if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] { + continue + } + // If the desired states match, observed state breaks + // ties. This can happen with the "start first" service + // update mode. 
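+ // Keeping the entry with the lower observed state makes the per-slot
+ // progress bar follow the replacement task that is still converging rather
+ // than the old task that is already running.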
+ if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] && + numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] { + continue + } + } + if task.NodeID != "" { + if _, nodeActive := activeNodes[task.NodeID]; !nodeActive { + continue + } + } + tasksBySlot[task.Slot] = task + } + + return tasksBySlot +} + +func (u *replicatedProgressUpdater) writeTaskProgress(task swarm.Task, mappedSlot int, replicas uint64, rollback bool) { + if u.done || replicas > maxProgressBars || uint64(mappedSlot) > replicas { + return + } + + if task.Status.Err != "" { + u.progressOut.WriteProgress(progress.Progress{ + ID: fmt.Sprintf("%d/%d", mappedSlot, replicas), + Action: truncError(task.Status.Err), + }) + return + } + + if !terminalState(task.DesiredState) && !terminalState(task.Status.State) { + u.progressOut.WriteProgress(progress.Progress{ + ID: fmt.Sprintf("%d/%d", mappedSlot, replicas), + Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State), + Current: stateToProgress(task.Status.State, rollback), + Total: maxProgress, + HideCounts: true, + }) + } +} + +type globalProgressUpdater struct { + progressOut progress.Output + + initialized bool + done bool +} + +func (u *globalProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) { + tasksByNode := u.tasksByNode(tasks) + + // We don't have perfect knowledge of how many nodes meet the + // constraints for this service. But the orchestrator creates tasks + // for all eligible nodes at the same time, so we should see all those + // nodes represented among the up-to-date tasks. + nodeCount := len(tasksByNode) + + if !u.initialized { + if nodeCount == 0 { + // Two possibilities: either the orchestrator hasn't created + // the tasks yet, or the service doesn't meet constraints for + // any node. Either way, we wait. + u.progressOut.WriteProgress(progress.Progress{ + ID: "overall progress", + Action: "waiting for new tasks", + }) + return false, nil + } + + writeOverallProgress(u.progressOut, 0, nodeCount, rollback) + u.initialized = true + } + + // If we had reached a converged state, check if we are still converged. + if u.done { + for _, task := range tasksByNode { + if task.Status.State != swarm.TaskStateRunning { + u.done = false + break + } + } + } + + running := 0 + + for _, task := range tasksByNode { + if _, nodeActive := activeNodes[task.NodeID]; nodeActive { + if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning { + running++ + } + + u.writeTaskProgress(task, nodeCount, rollback) + } + } + + if !u.done { + writeOverallProgress(u.progressOut, running, nodeCount, rollback) + + if running == nodeCount { + u.done = true + } + } + + return running == nodeCount, nil +} + +func (u *globalProgressUpdater) tasksByNode(tasks []swarm.Task) map[string]swarm.Task { + // If there are multiple tasks with the same node ID, favor the one + // with the *lowest* desired state. This can happen in restart + // scenarios. + tasksByNode := make(map[string]swarm.Task) + for _, task := range tasks { + if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 { + continue + } + if existingTask, ok := tasksByNode[task.NodeID]; ok { + if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] { + continue + } + + // If the desired states match, observed state breaks + // ties. This can happen with the "start first" service + // update mode. 
+ if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] && + numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] { + continue + } + + } + tasksByNode[task.NodeID] = task + } + + return tasksByNode +} + +func (u *globalProgressUpdater) writeTaskProgress(task swarm.Task, nodeCount int, rollback bool) { + if u.done || nodeCount > maxProgressBars { + return + } + + if task.Status.Err != "" { + u.progressOut.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(task.NodeID), + Action: truncError(task.Status.Err), + }) + return + } + + if !terminalState(task.DesiredState) && !terminalState(task.Status.State) { + u.progressOut.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(task.NodeID), + Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State), + Current: stateToProgress(task.Status.State, rollback), + Total: maxProgress, + HideCounts: true, + }) + } +} diff --git a/cli/cli/command/service/progress/progress_test.go b/cli/cli/command/service/progress/progress_test.go new file mode 100644 index 00000000..2a386d64 --- /dev/null +++ b/cli/cli/command/service/progress/progress_test.go @@ -0,0 +1,375 @@ +package progress + +import ( + "fmt" + "strconv" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/progress" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +type mockProgress struct { + p []progress.Progress +} + +func (mp *mockProgress) WriteProgress(p progress.Progress) error { + mp.p = append(mp.p, p) + return nil +} + +func (mp *mockProgress) clear() { + mp.p = nil +} + +type updaterTester struct { + t *testing.T + updater progressUpdater + p *mockProgress + service swarm.Service + activeNodes map[string]struct{} + rollback bool +} + +func (u updaterTester) testUpdater(tasks []swarm.Task, expectedConvergence bool, expectedProgress []progress.Progress) { + u.p.clear() + + converged, err := u.updater.update(u.service, tasks, u.activeNodes, u.rollback) + assert.Check(u.t, err) + assert.Check(u.t, is.Equal(expectedConvergence, converged)) + assert.Check(u.t, is.DeepEqual(expectedProgress, u.p.p)) +} + +func TestReplicatedProgressUpdaterOneReplica(t *testing.T) { + replicas := uint64(1) + + service := swarm.Service{ + Spec: swarm.ServiceSpec{ + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &replicas, + }, + }, + }, + } + + p := &mockProgress{} + updaterTester := updaterTester{ + t: t, + updater: &replicatedProgressUpdater{ + progressOut: p, + }, + p: p, + activeNodes: map[string]struct{}{"a": {}, "b": {}}, + service: service, + } + + tasks := []swarm.Task{} + + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "0 out of 1 tasks"}, + {ID: "1/1", Action: " "}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // Task with DesiredState beyond Running is ignored + tasks = append(tasks, + swarm.Task{ID: "1", + NodeID: "a", + DesiredState: swarm.TaskStateShutdown, + Status: swarm.TaskStatus{State: swarm.TaskStateNew}, + }) + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // Task with valid DesiredState and State updates progress bar + tasks[0].DesiredState = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "1/1", Action: "new ", Current: 1, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // If the task exposes an 
error, we should show that instead of the + // progress bar. + tasks[0].Status.Err = "something is wrong" + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "1/1", Action: "something is wrong"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // When the task reaches running, update should return true + tasks[0].Status.Err = "" + tasks[0].Status.State = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, true, + []progress.Progress{ + {ID: "1/1", Action: "running ", Current: 9, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "1 out of 1 tasks"}, + }) + + // If the task fails, update should return false again + tasks[0].Status.Err = "task failed" + tasks[0].Status.State = swarm.TaskStateFailed + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "1/1", Action: "task failed"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // If the task is restarted, progress output should be shown for the + // replacement task, not the old task. + tasks[0].DesiredState = swarm.TaskStateShutdown + tasks = append(tasks, + swarm.Task{ID: "2", + NodeID: "b", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStateRunning}, + }) + updaterTester.testUpdater(tasks, true, + []progress.Progress{ + {ID: "1/1", Action: "running ", Current: 9, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "1 out of 1 tasks"}, + }) + + // Add a new task while the current one is still running, to simulate + // "start-then-stop" updates. + tasks = append(tasks, + swarm.Task{ID: "3", + NodeID: "b", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStatePreparing}, + }) + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "1/1", Action: "preparing", Current: 6, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) +} + +func TestReplicatedProgressUpdaterManyReplicas(t *testing.T) { + replicas := uint64(50) + + service := swarm.Service{ + Spec: swarm.ServiceSpec{ + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &replicas, + }, + }, + }, + } + + p := &mockProgress{} + updaterTester := updaterTester{ + t: t, + updater: &replicatedProgressUpdater{ + progressOut: p, + }, + p: p, + activeNodes: map[string]struct{}{"a": {}, "b": {}}, + service: service, + } + + tasks := []swarm.Task{} + + // No per-task progress bars because there are too many replicas + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", replicas)}, + {ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", replicas)}, + }) + + for i := 0; i != int(replicas); i++ { + tasks = append(tasks, + swarm.Task{ + ID: strconv.Itoa(i), + Slot: i + 1, + NodeID: "a", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStateNew}, + }) + + if i%2 == 1 { + tasks[i].NodeID = "b" + } + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i, replicas)}, + }) + + tasks[i].Status.State = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, uint64(i) == replicas-1, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i+1, replicas)}, + }) + } +} + +func TestGlobalProgressUpdaterOneNode(t *testing.T) { + service := swarm.Service{ + Spec: swarm.ServiceSpec{ + Mode: swarm.ServiceMode{ + Global: 
&swarm.GlobalService{}, + }, + }, + } + + p := &mockProgress{} + updaterTester := updaterTester{ + t: t, + updater: &globalProgressUpdater{ + progressOut: p, + }, + p: p, + activeNodes: map[string]struct{}{"a": {}, "b": {}}, + service: service, + } + + tasks := []swarm.Task{} + + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "waiting for new tasks"}, + }) + + // Task with DesiredState beyond Running is ignored + tasks = append(tasks, + swarm.Task{ID: "1", + NodeID: "a", + DesiredState: swarm.TaskStateShutdown, + Status: swarm.TaskStatus{State: swarm.TaskStateNew}, + }) + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "0 out of 1 tasks"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // Task with valid DesiredState and State updates progress bar + tasks[0].DesiredState = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "a", Action: "new ", Current: 1, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // If the task exposes an error, we should show that instead of the + // progress bar. + tasks[0].Status.Err = "something is wrong" + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "a", Action: "something is wrong"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // When the task reaches running, update should return true + tasks[0].Status.Err = "" + tasks[0].Status.State = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, true, + []progress.Progress{ + {ID: "a", Action: "running ", Current: 9, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "1 out of 1 tasks"}, + }) + + // If the task fails, update should return false again + tasks[0].Status.Err = "task failed" + tasks[0].Status.State = swarm.TaskStateFailed + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "a", Action: "task failed"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // If the task is restarted, progress output should be shown for the + // replacement task, not the old task. + tasks[0].DesiredState = swarm.TaskStateShutdown + tasks = append(tasks, + swarm.Task{ID: "2", + NodeID: "a", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStateRunning}, + }) + updaterTester.testUpdater(tasks, true, + []progress.Progress{ + {ID: "a", Action: "running ", Current: 9, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "1 out of 1 tasks"}, + }) + + // Add a new task while the current one is still running, to simulate + // "start-then-stop" updates. 
+ tasks = append(tasks, + swarm.Task{ID: "3", + NodeID: "a", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStatePreparing}, + }) + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "a", Action: "preparing", Current: 6, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) +} + +func TestGlobalProgressUpdaterManyNodes(t *testing.T) { + nodes := 50 + + service := swarm.Service{ + Spec: swarm.ServiceSpec{ + Mode: swarm.ServiceMode{ + Global: &swarm.GlobalService{}, + }, + }, + } + + p := &mockProgress{} + updaterTester := updaterTester{ + t: t, + updater: &globalProgressUpdater{ + progressOut: p, + }, + p: p, + activeNodes: map[string]struct{}{}, + service: service, + } + + for i := 0; i != nodes; i++ { + updaterTester.activeNodes[strconv.Itoa(i)] = struct{}{} + } + + tasks := []swarm.Task{} + + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "waiting for new tasks"}, + }) + + for i := 0; i != nodes; i++ { + tasks = append(tasks, + swarm.Task{ + ID: "task" + strconv.Itoa(i), + NodeID: strconv.Itoa(i), + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStateNew}, + }) + } + + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", nodes)}, + {ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", nodes)}, + }) + + for i := 0; i != nodes; i++ { + tasks[i].Status.State = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, i == nodes-1, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i+1, nodes)}, + }) + } +} diff --git a/cli/cli/command/service/ps.go b/cli/cli/command/service/ps.go new file mode 100644 index 00000000..0220f7e0 --- /dev/null +++ b/cli/cli/command/service/ps.go @@ -0,0 +1,155 @@ +package service + +import ( + "context" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/cli/command/node" + "github.com/docker/cli/cli/command/task" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type psOptions struct { + services []string + quiet bool + noResolve bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newPsCommand(dockerCli command.Cli) *cobra.Command { + options := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] SERVICE [SERVICE...]", + Short: "List the tasks of one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.services = args + return runPS(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display task IDs") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&options.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.StringVar(&options.format, "format", "", "Pretty-print tasks using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPS(dockerCli command.Cli, options psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + filter, notfound, err := 
createFilter(ctx, client, options) + if err != nil { + return err + } + if err := updateNodeFilter(ctx, client, filter); err != nil { + return err + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + format = task.DefaultFormat(dockerCli.ConfigFile(), options.quiet) + } + if options.quiet { + options.noTrunc = true + } + if err := task.Print(ctx, dockerCli, tasks, idresolver.New(client, options.noResolve), !options.noTrunc, options.quiet, format); err != nil { + return err + } + if len(notfound) != 0 { + return errors.New(strings.Join(notfound, "\n")) + } + return nil +} + +func createFilter(ctx context.Context, client client.APIClient, options psOptions) (filters.Args, []string, error) { + filter := options.filter.Value() + + serviceIDFilter := filters.NewArgs() + serviceNameFilter := filters.NewArgs() + for _, service := range options.services { + serviceIDFilter.Add("id", service) + serviceNameFilter.Add("name", service) + } + serviceByIDList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceIDFilter}) + if err != nil { + return filter, nil, err + } + serviceByNameList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceNameFilter}) + if err != nil { + return filter, nil, err + } + + var notfound []string + serviceCount := 0 +loop: + // Match services by 1. Full ID, 2. Full name, 3. ID prefix. An error is returned if the ID-prefix match is ambiguous + for _, service := range options.services { + for _, s := range serviceByIDList { + if s.ID == service { + filter.Add("service", s.ID) + serviceCount++ + continue loop + } + } + for _, s := range serviceByNameList { + if s.Spec.Annotations.Name == service { + filter.Add("service", s.ID) + serviceCount++ + continue loop + } + } + found := false + for _, s := range serviceByIDList { + if strings.HasPrefix(s.ID, service) { + if found { + return filter, nil, errors.New("multiple services found with provided prefix: " + service) + } + filter.Add("service", s.ID) + serviceCount++ + found = true + } + } + if !found { + notfound = append(notfound, "no such service: "+service) + } + } + if serviceCount == 0 { + return filter, nil, errors.New(strings.Join(notfound, "\n")) + } + return filter, notfound, err +} + +func updateNodeFilter(ctx context.Context, client client.APIClient, filter filters.Args) error { + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + nodeReference, err := node.Reference(ctx, client, nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", nodeReference) + } + } + return nil +} diff --git a/cli/cli/command/service/ps_test.go b/cli/cli/command/service/ps_test.go new file mode 100644 index 00000000..6459cfcc --- /dev/null +++ b/cli/cli/command/service/ps_test.go @@ -0,0 +1,135 @@ +package service + +import ( + "context" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCreateFilter(t *testing.T) { + client := &fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + {ID: "idmatch"}, + {ID: "idprefixmatch"}, + 
newService("cccccccc", "namematch"), + newService("01010101", "notfoundprefix"), + }, nil + }, + } + + filter := opts.NewFilterOpt() + assert.NilError(t, filter.Set("node=somenode")) + options := psOptions{ + services: []string{"idmatch", "idprefix", "namematch", "notfound"}, + filter: filter, + } + + actual, notfound, err := createFilter(context.Background(), client, options) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(notfound, []string{"no such service: notfound"})) + + expected := filters.NewArgs( + filters.Arg("service", "idmatch"), + filters.Arg("service", "idprefixmatch"), + filters.Arg("service", "cccccccc"), + filters.Arg("node", "somenode"), + ) + assert.DeepEqual(t, expected, actual, cmpFilters) +} + +func TestCreateFilterWithAmbiguousIDPrefixError(t *testing.T) { + client := &fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + {ID: "aaaone"}, + {ID: "aaatwo"}, + }, nil + }, + } + options := psOptions{ + services: []string{"aaa"}, + filter: opts.NewFilterOpt(), + } + _, _, err := createFilter(context.Background(), client, options) + assert.Error(t, err, "multiple services found with provided prefix: aaa") +} + +func TestCreateFilterNoneFound(t *testing.T) { + client := &fakeClient{} + options := psOptions{ + services: []string{"foo", "notfound"}, + filter: opts.NewFilterOpt(), + } + _, _, err := createFilter(context.Background(), client, options) + assert.Error(t, err, "no such service: foo\nno such service: notfound") +} + +func TestRunPSWarnsOnNotFound(t *testing.T) { + client := &fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + {ID: "foo"}, + }, nil + }, + } + + cli := test.NewFakeCli(client) + options := psOptions{ + services: []string{"foo", "bar"}, + filter: opts.NewFilterOpt(), + format: "{{.ID}}", + } + err := runPS(cli, options) + assert.Error(t, err, "no such service: bar") +} + +func TestRunPSQuiet(t *testing.T) { + client := &fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{{ID: "foo"}}, nil + }, + taskListFunc: func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{{ID: "sxabyp0obqokwekpun4rjo0b3"}}, nil + }, + } + + cli := test.NewFakeCli(client) + err := runPS(cli, psOptions{services: []string{"foo"}, quiet: true, filter: opts.NewFilterOpt()}) + assert.NilError(t, err) + assert.Check(t, is.Equal("sxabyp0obqokwekpun4rjo0b3\n", cli.OutBuffer().String())) +} + +func TestUpdateNodeFilter(t *testing.T) { + selfNodeID := "foofoo" + filter := filters.NewArgs( + filters.Arg("node", "one"), + filters.Arg("node", "two"), + filters.Arg("node", "self"), + ) + + client := &fakeClient{ + infoFunc: func(_ context.Context) (types.Info, error) { + return types.Info{Swarm: swarm.Info{NodeID: selfNodeID}}, nil + }, + } + + updateNodeFilter(context.Background(), client, filter) + + expected := filters.NewArgs( + filters.Arg("node", "one"), + filters.Arg("node", "two"), + filters.Arg("node", selfNodeID), + ) + assert.DeepEqual(t, expected, filter, cmpFilters) +} + +var cmpFilters = cmp.AllowUnexported(filters.Args{}) diff --git a/cli/cli/command/service/remove.go b/cli/cli/command/service/remove.go new file mode 100644 index 00000000..ee810b03 --- /dev/null +++ b/cli/cli/command/service/remove.go @@ -0,0 +1,48 @@ +package service + +import ( + 
"context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + + cmd := &cobra.Command{ + Use: "rm SERVICE [SERVICE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } + cmd.Flags() + + return cmd +} + +func runRemove(dockerCli command.Cli, sids []string) error { + client := dockerCli.Client() + + ctx := context.Background() + + var errs []string + for _, sid := range sids { + err := client.ServiceRemove(ctx, sid) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", sid) + } + if len(errs) > 0 { + return errors.Errorf(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/service/rollback.go b/cli/cli/command/service/rollback.go new file mode 100644 index 00000000..2196815c --- /dev/null +++ b/cli/cli/command/service/rollback.go @@ -0,0 +1,64 @@ +package service + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/spf13/cobra" +) + +func newRollbackCommand(dockerCli command.Cli) *cobra.Command { + options := newServiceOptions() + + cmd := &cobra.Command{ + Use: "rollback [OPTIONS] SERVICE", + Short: "Revert changes to a service's configuration", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRollback(dockerCli, options, args[0]) + }, + Annotations: map[string]string{"version": "1.31"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, flagQuiet, "q", false, "Suppress progress output") + addDetachFlag(flags, &options.detach) + + return cmd +} + +func runRollback(dockerCli command.Cli, options *serviceOptions, serviceID string) error { + apiClient := dockerCli.Client() + ctx := context.Background() + + service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) + if err != nil { + return err + } + + spec := &service.Spec + updateOpts := types.ServiceUpdateOptions{ + Rollback: "previous", + } + + response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) + + if options.detach || versions.LessThan(apiClient.ClientVersion(), "1.29") { + return nil + } + + return waitOnService(ctx, dockerCli, serviceID, options.quiet) +} diff --git a/cli/cli/command/service/rollback_test.go b/cli/cli/command/service/rollback_test.go new file mode 100644 index 00000000..e61d1c20 --- /dev/null +++ b/cli/cli/command/service/rollback_test.go @@ -0,0 +1,104 @@ +package service + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestRollback(t *testing.T) { + testCases := []struct { + name string + args []string + serviceUpdateFunc func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) 
(types.ServiceUpdateResponse, error) + expectedDockerCliErr string + }{ + { + name: "rollback-service", + args: []string{"service-id"}, + }, + { + name: "rollback-service-with-warnings", + args: []string{"service-id"}, + serviceUpdateFunc: func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + response := types.ServiceUpdateResponse{} + + response.Warnings = []string{ + "- warning 1", + "- warning 2", + } + + return response, nil + }, + expectedDockerCliErr: "- warning 1\n- warning 2", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + serviceUpdateFunc: tc.serviceUpdateFunc, + }) + cmd := newRollbackCommand(cli) + cmd.SetArgs(tc.args) + cmd.Flags().Set("quiet", "true") + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(strings.TrimSpace(cli.ErrBuffer().String()), tc.expectedDockerCliErr)) + } +} + +func TestRollbackWithErrors(t *testing.T) { + testCases := []struct { + name string + args []string + serviceInspectWithRawFunc func(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + serviceUpdateFunc func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"service-id-1", "service-id-2"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "service-does-not-exists", + args: []string{"service-id"}, + serviceInspectWithRawFunc: func(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return swarm.Service{}, []byte{}, fmt.Errorf("no such services: %s", serviceID) + }, + expectedError: "no such services: service-id", + }, + { + name: "service-update-failed", + args: []string{"service-id"}, + serviceUpdateFunc: func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + return types.ServiceUpdateResponse{}, fmt.Errorf("no such services: %s", serviceID) + }, + expectedError: "no such services: service-id", + }, + } + + for _, tc := range testCases { + cmd := newRollbackCommand( + test.NewFakeCli(&fakeClient{ + serviceInspectWithRawFunc: tc.serviceInspectWithRawFunc, + serviceUpdateFunc: tc.serviceUpdateFunc, + })) + cmd.SetArgs(tc.args) + cmd.Flags().Set("quiet", "true") + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} diff --git a/cli/cli/command/service/scale.go b/cli/cli/command/service/scale.go new file mode 100644 index 00000000..5b656a7f --- /dev/null +++ b/cli/cli/command/service/scale.go @@ -0,0 +1,122 @@ +package service + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type scaleOptions struct { + detach bool +} + +func newScaleCommand(dockerCli command.Cli) *cobra.Command { + options := &scaleOptions{} + + cmd := &cobra.Command{ + Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", + Short: "Scale one or multiple replicated 
services", + Args: scaleArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runScale(dockerCli, options, args) + }, + } + + flags := cmd.Flags() + addDetachFlag(flags, &options.detach) + return cmd +} + +func scaleArgs(cmd *cobra.Command, args []string) error { + if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { + return err + } + for _, arg := range args { + if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 { + return errors.Errorf( + "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", + arg, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } + } + return nil +} + +func runScale(dockerCli command.Cli, options *scaleOptions, args []string) error { + var errs []string + var serviceIDs []string + ctx := context.Background() + + for _, arg := range args { + parts := strings.SplitN(arg, "=", 2) + serviceID, scaleStr := parts[0], parts[1] + + // validate input arg scale number + scale, err := strconv.ParseUint(scaleStr, 10, 64) + if err != nil { + errs = append(errs, fmt.Sprintf("%s: invalid replicas value %s: %v", serviceID, scaleStr, err)) + continue + } + + if err := runServiceScale(ctx, dockerCli, serviceID, scale); err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", serviceID, err)) + } else { + serviceIDs = append(serviceIDs, serviceID) + } + + } + + if len(serviceIDs) > 0 { + if !options.detach && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.29") { + for _, serviceID := range serviceIDs { + if err := waitOnService(ctx, dockerCli, serviceID, false); err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", serviceID, err)) + } + } + } + } + + if len(errs) == 0 { + return nil + } + return errors.Errorf(strings.Join(errs, "\n")) +} + +func runServiceScale(ctx context.Context, dockerCli command.Cli, serviceID string, scale uint64) error { + client := dockerCli.Client() + + service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) + if err != nil { + return err + } + + serviceMode := &service.Spec.Mode + if serviceMode.Replicated == nil { + return errors.Errorf("scale can only be used with replicated mode") + } + + serviceMode.Replicated.Replicas = &scale + + response, err := client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s scaled to %d\n", serviceID, scale) + return nil +} diff --git a/cli/cli/command/service/testdata/service-list-sort.golden b/cli/cli/command/service/testdata/service-list-sort.golden new file mode 100644 index 00000000..3b0cb214 --- /dev/null +++ b/cli/cli/command/service/testdata/service-list-sort.golden @@ -0,0 +1,3 @@ +service-1-foo +service-2-foo +service-10-foo diff --git a/cli/cli/command/service/trust.go b/cli/cli/command/service/trust.go new file mode 100644 index 00000000..b7453ccb --- /dev/null +++ b/cli/cli/command/service/trust.go @@ -0,0 +1,87 @@ +package service + +import ( + "context" + "encoding/hex" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/tuf/data" +) + +func resolveServiceImageDigestContentTrust(dockerCli command.Cli, service 
*swarm.ServiceSpec) error { + if !dockerCli.ContentTrustEnabled() { + // When not using content trust, digest resolution happens later when + // contacting the registry to retrieve image information. + return nil + } + + ref, err := reference.ParseAnyReference(service.TaskTemplate.ContainerSpec.Image) + if err != nil { + return errors.Wrapf(err, "invalid reference %s", service.TaskTemplate.ContainerSpec.Image) + } + + // If reference does not have digest (is not canonical nor image id) + if _, ok := ref.(reference.Digested); !ok { + namedRef, ok := ref.(reference.Named) + if !ok { + return errors.New("failed to resolve image digest using content trust: reference is not named") + } + namedRef = reference.TagNameOnly(namedRef) + taggedRef, ok := namedRef.(reference.NamedTagged) + if !ok { + return errors.New("failed to resolve image digest using content trust: reference is not tagged") + } + + resolvedImage, err := trustedResolveDigest(context.Background(), dockerCli, taggedRef) + if err != nil { + return errors.Wrap(err, "failed to resolve image digest using content trust") + } + resolvedFamiliar := reference.FamiliarString(resolvedImage) + logrus.Debugf("resolved image tag to %s using content trust", resolvedFamiliar) + service.TaskTemplate.ContainerSpec.Image = resolvedFamiliar + } + + return nil +} + +func trustedResolveDigest(ctx context.Context, cli command.Cli, ref reference.NamedTagged) (reference.Canonical, error) { + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return nil, err + } + + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli.In(), cli.Out(), command.UserAgent(), repoInfo, &authConfig, "pull") + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to trust repository") + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.Name.Name(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", reference.FamiliarString(ref))) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + h, ok := t.Hashes["sha256"] + if !ok { + return nil, errors.New("no valid hash, expecting sha256") + } + + dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h)) + + // Allow returning canonical reference with tag + return reference.WithDigest(ref, dgst) +} diff --git a/cli/cli/command/service/update.go b/cli/cli/command/service/update.go new file mode 100644 index 00000000..3e380db2 --- /dev/null +++ b/cli/cli/command/service/update.go @@ -0,0 +1,1205 @@ +package service + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/swarmkit/api/defaults" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli command.Cli) *cobra.Command { + options 
:= newServiceOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] SERVICE", + Short: "Update a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), options, args[0]) + }, + } + + flags := cmd.Flags() + flags.String("image", "", "Service image tag") + flags.Var(&ShlexOpt{}, "args", "Service command args") + flags.Bool(flagRollback, false, "Rollback to previous specification") + flags.SetAnnotation(flagRollback, "version", []string{"1.25"}) + flags.Bool("force", false, "Force update even if no changes require it") + flags.SetAnnotation("force", "version", []string{"1.25"}) + addServiceFlags(flags, options, nil) + + flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") + flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container") + flags.SetAnnotation(flagGroupRemove, "version", []string{"1.25"}) + flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") + flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") + flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") + // flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port") + flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port") + flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") + flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server") + flags.SetAnnotation(flagDNSRemove, "version", []string{"1.25"}) + flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option") + flags.SetAnnotation(flagDNSOptionRemove, "version", []string{"1.25"}) + flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain") + flags.SetAnnotation(flagDNSSearchRemove, "version", []string{"1.25"}) + flags.Var(newListOptsVar(), flagHostRemove, "Remove a custom host-to-IP mapping (host:ip)") + flags.SetAnnotation(flagHostRemove, "version", []string{"1.25"}) + flags.Var(&options.labels, flagLabelAdd, "Add or update a service label") + flags.Var(&options.containerLabels, flagContainerLabelAdd, "Add or update a container label") + flags.Var(&options.env, flagEnvAdd, "Add or update an environment variable") + flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret") + flags.SetAnnotation(flagSecretRemove, "version", []string{"1.25"}) + flags.Var(&options.secrets, flagSecretAdd, "Add or update a secret on a service") + flags.SetAnnotation(flagSecretAdd, "version", []string{"1.25"}) + + flags.Var(newListOptsVar(), flagConfigRemove, "Remove a configuration file") + flags.SetAnnotation(flagConfigRemove, "version", []string{"1.30"}) + flags.Var(&options.configs, flagConfigAdd, "Add or update a config file on a service") + flags.SetAnnotation(flagConfigAdd, "version", []string{"1.30"}) + + flags.Var(&options.mounts, flagMountAdd, "Add or update a mount on a service") + flags.Var(&options.constraints, flagConstraintAdd, "Add or update a placement constraint") + flags.Var(&options.placementPrefs, flagPlacementPrefAdd, "Add a placement preference") + flags.SetAnnotation(flagPlacementPrefAdd, "version", []string{"1.28"}) + flags.Var(&placementPrefOpts{}, flagPlacementPrefRemove, "Remove a placement preference") + flags.SetAnnotation(flagPlacementPrefRemove, "version", []string{"1.28"}) + flags.Var(&options.networks, flagNetworkAdd, "Add a network") + 
flags.SetAnnotation(flagNetworkAdd, "version", []string{"1.29"}) + flags.Var(newListOptsVar(), flagNetworkRemove, "Remove a network") + flags.SetAnnotation(flagNetworkRemove, "version", []string{"1.29"}) + flags.Var(&options.endpoint.publishPorts, flagPublishAdd, "Add or update a published port") + flags.Var(&options.groups, flagGroupAdd, "Add an additional supplementary user group to the container") + flags.SetAnnotation(flagGroupAdd, "version", []string{"1.25"}) + flags.Var(&options.dns, flagDNSAdd, "Add or update a custom DNS server") + flags.SetAnnotation(flagDNSAdd, "version", []string{"1.25"}) + flags.Var(&options.dnsOption, flagDNSOptionAdd, "Add or update a DNS option") + flags.SetAnnotation(flagDNSOptionAdd, "version", []string{"1.25"}) + flags.Var(&options.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain") + flags.SetAnnotation(flagDNSSearchAdd, "version", []string{"1.25"}) + flags.Var(&options.hosts, flagHostAdd, "Add a custom host-to-IP mapping (host:ip)") + flags.SetAnnotation(flagHostAdd, "version", []string{"1.25"}) + flags.BoolVar(&options.init, flagInit, false, "Use an init inside each service container to forward signals and reap processes") + flags.SetAnnotation(flagInit, "version", []string{"1.37"}) + + // Add needs parsing, Remove only needs the key + flags.Var(newListOptsVar(), flagGenericResourcesRemove, "Remove a Generic resource") + flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) + flags.Var(newListOptsVarWithValidator(ValidateSingleGenericResource), flagGenericResourcesAdd, "Add a Generic resource") + flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) + + return cmd +} + +func newListOptsVar() *opts.ListOpts { + return opts.NewListOptsRef(&[]string{}, nil) +} + +func newListOptsVarWithValidator(validator opts.ValidatorFctType) *opts.ListOpts { + return opts.NewListOptsRef(&[]string{}, validator) +} + +// nolint: gocyclo +func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, options *serviceOptions, serviceID string) error { + apiClient := dockerCli.Client() + ctx := context.Background() + + service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) + if err != nil { + return err + } + + rollback, err := flags.GetBool(flagRollback) + if err != nil { + return err + } + + // There are two ways to do user-requested rollback. The old way is + // client-side, but with a sufficiently recent daemon we prefer + // server-side, because it will honor the rollback parameters. + var ( + clientSideRollback bool + serverSideRollback bool + ) + + spec := &service.Spec + if rollback { + // Rollback can't be combined with other flags. 
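+		// Detect this by walking all registered flags and noting any flag,
+		// other than --rollback, --detach, or --quiet, that was explicitly set.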
+ otherFlagsPassed := false + flags.VisitAll(func(f *pflag.Flag) { + if f.Name == flagRollback || f.Name == flagDetach || f.Name == flagQuiet { + return + } + if flags.Changed(f.Name) { + otherFlagsPassed = true + } + }) + if otherFlagsPassed { + return errors.New("other flags may not be combined with --rollback") + } + + if versions.LessThan(apiClient.ClientVersion(), "1.28") { + clientSideRollback = true + spec = service.PreviousSpec + if spec == nil { + return errors.Errorf("service does not have a previous specification to roll back to") + } + } else { + serverSideRollback = true + } + } + + updateOpts := types.ServiceUpdateOptions{} + if serverSideRollback { + updateOpts.Rollback = "previous" + } + + err = updateService(ctx, apiClient, flags, spec) + if err != nil { + return err + } + + if flags.Changed("image") { + if err := resolveServiceImageDigestContentTrust(dockerCli, spec); err != nil { + return err + } + if !options.noResolveImage && versions.GreaterThanOrEqualTo(apiClient.ClientVersion(), "1.30") { + updateOpts.QueryRegistry = true + } + } + + updatedSecrets, err := getUpdatedSecrets(apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets) + if err != nil { + return err + } + + spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets + + updatedConfigs, err := getUpdatedConfigs(apiClient, flags, spec.TaskTemplate.ContainerSpec.Configs) + if err != nil { + return err + } + + spec.TaskTemplate.ContainerSpec.Configs = updatedConfigs + + // only send auth if flag was set + sendAuth, err := flags.GetBool(flagRegistryAuth) + if err != nil { + return err + } + if sendAuth { + // Retrieve encoded auth token from the image reference + // This would be the old image if it didn't change in this update + image := spec.TaskTemplate.ContainerSpec.Image + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + updateOpts.EncodedRegistryAuth = encodedAuth + } else if clientSideRollback { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec + } else { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec + } + + response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) + + if options.detach || versions.LessThan(apiClient.ClientVersion(), "1.29") { + return nil + } + + return waitOnService(ctx, dockerCli, serviceID, options.quiet) +} + +// nolint: gocyclo +func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { + updateBoolPtr := func(flag string, field **bool) { + if flags.Changed(flag) { + b, _ := flags.GetBool(flag) + *field = &b + } + } + updateString := func(flag string, field *string) { + if flags.Changed(flag) { + *field, _ = flags.GetString(flag) + } + } + + updateInt64Value := func(flag string, field *int64) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(int64Value).Value() + } + } + + updateFloatValue := func(flag string, field *float32) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(*floatValue).Value() + } + } + + updateDuration := func(flag string, field *time.Duration) { + if flags.Changed(flag) { + *field, _ = flags.GetDuration(flag) + } + } + + updateDurationOpt := func(flag string, field **time.Duration) { + if flags.Changed(flag) { + val := 
*flags.Lookup(flag).Value.(*opts.DurationOpt).Value() + *field = &val + } + } + + updateUint64 := func(flag string, field *uint64) { + if flags.Changed(flag) { + *field, _ = flags.GetUint64(flag) + } + } + + updateUint64Opt := func(flag string, field **uint64) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() + *field = &val + } + } + + updateIsolation := func(flag string, field *container.Isolation) error { + if flags.Changed(flag) { + val, _ := flags.GetString(flag) + *field = container.Isolation(val) + } + return nil + } + + cspec := spec.TaskTemplate.ContainerSpec + task := &spec.TaskTemplate + + taskResources := func() *swarm.ResourceRequirements { + if task.Resources == nil { + task.Resources = &swarm.ResourceRequirements{} + } + return task.Resources + } + + updateLabels(flags, &spec.Labels) + updateContainerLabels(flags, &cspec.Labels) + updateString("image", &cspec.Image) + updateStringToSlice(flags, "args", &cspec.Args) + updateStringToSlice(flags, flagEntrypoint, &cspec.Command) + updateEnvironment(flags, &cspec.Env) + updateString(flagWorkdir, &cspec.Dir) + updateString(flagUser, &cspec.User) + updateString(flagHostname, &cspec.Hostname) + updateBoolPtr(flagInit, &cspec.Init) + if err := updateIsolation(flagIsolation, &cspec.Isolation); err != nil { + return err + } + if err := updateMounts(flags, &cspec.Mounts); err != nil { + return err + } + + if anyChanged(flags, flagLimitCPU, flagLimitMemory) { + taskResources().Limits = spec.TaskTemplate.Resources.Limits + updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) + updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) + } + + if anyChanged(flags, flagReserveCPU, flagReserveMemory) { + taskResources().Reservations = spec.TaskTemplate.Resources.Reservations + updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) + updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) + } + + if err := addGenericResources(flags, task); err != nil { + return err + } + + if err := removeGenericResources(flags, task); err != nil { + return err + } + + updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) + + if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { + if task.RestartPolicy == nil { + task.RestartPolicy = defaultRestartPolicy() + } + if flags.Changed(flagRestartCondition) { + value, _ := flags.GetString(flagRestartCondition) + task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) + } + updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) + updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) + updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) + } + + if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { + if task.Placement == nil { + task.Placement = &swarm.Placement{} + } + updatePlacementConstraints(flags, task.Placement) + } + + if anyChanged(flags, flagPlacementPrefAdd, flagPlacementPrefRemove) { + if task.Placement == nil { + task.Placement = &swarm.Placement{} + } + updatePlacementPreferences(flags, task.Placement) + } + + if anyChanged(flags, flagNetworkAdd, flagNetworkRemove) { + if err := updateNetworks(ctx, apiClient, flags, spec); err != nil { + return err + } + } + + if err := updateReplicas(flags, &spec.Mode); err != nil { + return err + } + + if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) { + if 
spec.UpdateConfig == nil { + spec.UpdateConfig = updateConfigFromDefaults(defaults.Service.Update) + } + updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) + updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) + updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor) + updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) + updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio) + updateString(flagUpdateOrder, &spec.UpdateConfig.Order) + } + + if anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) { + if spec.RollbackConfig == nil { + spec.RollbackConfig = updateConfigFromDefaults(defaults.Service.Rollback) + } + updateUint64(flagRollbackParallelism, &spec.RollbackConfig.Parallelism) + updateDuration(flagRollbackDelay, &spec.RollbackConfig.Delay) + updateDuration(flagRollbackMonitor, &spec.RollbackConfig.Monitor) + updateString(flagRollbackFailureAction, &spec.RollbackConfig.FailureAction) + updateFloatValue(flagRollbackMaxFailureRatio, &spec.RollbackConfig.MaxFailureRatio) + updateString(flagRollbackOrder, &spec.RollbackConfig.Order) + } + + if flags.Changed(flagEndpointMode) { + value, _ := flags.GetString(flagEndpointMode) + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + spec.EndpointSpec.Mode = swarm.ResolutionMode(value) + } + + if anyChanged(flags, flagGroupAdd, flagGroupRemove) { + if err := updateGroups(flags, &cspec.Groups); err != nil { + return err + } + } + + if anyChanged(flags, flagPublishAdd, flagPublishRemove) { + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { + return err + } + } + + if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, flagDNSSearchRemove) { + if cspec.DNSConfig == nil { + cspec.DNSConfig = &swarm.DNSConfig{} + } + if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil { + return err + } + } + + if anyChanged(flags, flagHostAdd, flagHostRemove) { + if err := updateHosts(flags, &cspec.Hosts); err != nil { + return err + } + } + + if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { + return err + } + + force, err := flags.GetBool("force") + if err != nil { + return err + } + + if force { + spec.TaskTemplate.ForceUpdate++ + } + + if err := updateHealthcheck(flags, cspec); err != nil { + return err + } + + if flags.Changed(flagTTY) { + tty, err := flags.GetBool(flagTTY) + if err != nil { + return err + } + cspec.TTY = tty + } + + if flags.Changed(flagReadOnly) { + readOnly, err := flags.GetBool(flagReadOnly) + if err != nil { + return err + } + cspec.ReadOnly = readOnly + } + + updateString(flagStopSignal, &cspec.StopSignal) + + return nil +} + +func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) { + if !flags.Changed(flag) { + return + } + + *field = flags.Lookup(flag).Value.(*ShlexOpt).Value() +} + +func anyChanged(flags *pflag.FlagSet, fields ...string) bool { + for _, flag := range fields { + if flags.Changed(flag) { + return true + } + } + return false +} + +func addGenericResources(flags *pflag.FlagSet, spec *swarm.TaskSpec) error { + if !flags.Changed(flagGenericResourcesAdd) { + return nil + } + + if spec.Resources == nil { + spec.Resources = &swarm.ResourceRequirements{} + } + + if spec.Resources.Reservations == nil { + spec.Resources.Reservations 
= &swarm.Resources{} + } + + values := flags.Lookup(flagGenericResourcesAdd).Value.(*opts.ListOpts).GetAll() + generic, err := ParseGenericResources(values) + if err != nil { + return err + } + + m, err := buildGenericResourceMap(spec.Resources.Reservations.GenericResources) + if err != nil { + return err + } + + for _, toAddRes := range generic { + m[toAddRes.DiscreteResourceSpec.Kind] = toAddRes + } + + spec.Resources.Reservations.GenericResources = buildGenericResourceList(m) + + return nil +} + +func removeGenericResources(flags *pflag.FlagSet, spec *swarm.TaskSpec) error { + // Can only be Discrete Resources + if !flags.Changed(flagGenericResourcesRemove) { + return nil + } + + if spec.Resources == nil { + spec.Resources = &swarm.ResourceRequirements{} + } + + if spec.Resources.Reservations == nil { + spec.Resources.Reservations = &swarm.Resources{} + } + + values := flags.Lookup(flagGenericResourcesRemove).Value.(*opts.ListOpts).GetAll() + + m, err := buildGenericResourceMap(spec.Resources.Reservations.GenericResources) + if err != nil { + return err + } + + for _, toRemoveRes := range values { + if _, ok := m[toRemoveRes]; !ok { + return fmt.Errorf("could not find generic-resource `%s` to remove it", toRemoveRes) + } + + delete(m, toRemoveRes) + } + + spec.Resources.Reservations.GenericResources = buildGenericResourceList(m) + return nil +} + +func updatePlacementConstraints(flags *pflag.FlagSet, placement *swarm.Placement) { + if flags.Changed(flagConstraintAdd) { + values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll() + placement.Constraints = append(placement.Constraints, values...) + } + toRemove := buildToRemoveSet(flags, flagConstraintRemove) + + newConstraints := []string{} + for _, constraint := range placement.Constraints { + if _, exists := toRemove[constraint]; !exists { + newConstraints = append(newConstraints, constraint) + } + } + // Sort so that result is predictable. + sort.Strings(newConstraints) + + placement.Constraints = newConstraints +} + +func updatePlacementPreferences(flags *pflag.FlagSet, placement *swarm.Placement) { + var newPrefs []swarm.PlacementPreference + + if flags.Changed(flagPlacementPrefRemove) { + for _, existing := range placement.Preferences { + removed := false + for _, removal := range flags.Lookup(flagPlacementPrefRemove).Value.(*placementPrefOpts).prefs { + if removal.Spread != nil && existing.Spread != nil && removal.Spread.SpreadDescriptor == existing.Spread.SpreadDescriptor { + removed = true + break + } + } + if !removed { + newPrefs = append(newPrefs, existing) + } + } + } else { + newPrefs = placement.Preferences + } + + if flags.Changed(flagPlacementPrefAdd) { + newPrefs = append(newPrefs, + flags.Lookup(flagPlacementPrefAdd).Value.(*placementPrefOpts).prefs...) 
+ } + + placement.Preferences = newPrefs +} + +func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagContainerLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range opts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagContainerLabelRemove) { + toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range opts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagLabelRemove) { + toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateEnvironment(flags *pflag.FlagSet, field *[]string) { + if flags.Changed(flagEnvAdd) { + envSet := map[string]string{} + for _, v := range *field { + envSet[envKey(v)] = v + } + + value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) + for _, v := range value.GetAll() { + envSet[envKey(v)] = v + } + + *field = []string{} + for _, v := range envSet { + *field = append(*field, v) + } + } + + toRemove := buildToRemoveSet(flags, flagEnvRemove) + *field = removeItems(*field, toRemove, envKey) +} + +func getUpdatedSecrets(apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) { + newSecrets := []*swarm.SecretReference{} + + toRemove := buildToRemoveSet(flags, flagSecretRemove) + for _, secret := range secrets { + if _, exists := toRemove[secret.SecretName]; !exists { + newSecrets = append(newSecrets, secret) + } + } + + if flags.Changed(flagSecretAdd) { + values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value() + + addSecrets, err := ParseSecrets(apiClient, values) + if err != nil { + return nil, err + } + newSecrets = append(newSecrets, addSecrets...) + } + + return newSecrets, nil +} + +func getUpdatedConfigs(apiClient client.ConfigAPIClient, flags *pflag.FlagSet, configs []*swarm.ConfigReference) ([]*swarm.ConfigReference, error) { + newConfigs := []*swarm.ConfigReference{} + + toRemove := buildToRemoveSet(flags, flagConfigRemove) + for _, config := range configs { + if _, exists := toRemove[config.ConfigName]; !exists { + newConfigs = append(newConfigs, config) + } + } + + if flags.Changed(flagConfigAdd) { + values := flags.Lookup(flagConfigAdd).Value.(*opts.ConfigOpt).Value() + + addConfigs, err := ParseConfigs(apiClient, values) + if err != nil { + return nil, err + } + newConfigs = append(newConfigs, addConfigs...) 
+ } + + return newConfigs, nil +} + +func envKey(value string) string { + kv := strings.SplitN(value, "=", 2) + return kv[0] +} + +func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { + var empty struct{} + toRemove := make(map[string]struct{}) + + if !flags.Changed(flag) { + return toRemove + } + + toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() + for _, key := range toRemoveSlice { + toRemove[key] = empty + } + return toRemove +} + +func removeItems( + seq []string, + toRemove map[string]struct{}, + keyFunc func(string) string, +) []string { + newSeq := []string{} + for _, item := range seq { + if _, exists := toRemove[keyFunc(item)]; !exists { + newSeq = append(newSeq, item) + } + } + return newSeq +} + +type byMountSource []mounttypes.Mount + +func (m byMountSource) Len() int { return len(m) } +func (m byMountSource) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMountSource) Less(i, j int) bool { + a, b := m[i], m[j] + + if a.Source == b.Source { + return a.Target < b.Target + } + + return a.Source < b.Source +} + +func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error { + mountsByTarget := map[string]mounttypes.Mount{} + + if flags.Changed(flagMountAdd) { + values := flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value() + for _, mount := range values { + if _, ok := mountsByTarget[mount.Target]; ok { + return errors.Errorf("duplicate mount target") + } + mountsByTarget[mount.Target] = mount + } + } + + // Add old list of mount points minus updated one. + for _, mount := range *mounts { + if _, ok := mountsByTarget[mount.Target]; !ok { + mountsByTarget[mount.Target] = mount + } + } + + newMounts := []mounttypes.Mount{} + + toRemove := buildToRemoveSet(flags, flagMountRemove) + + for _, mount := range mountsByTarget { + if _, exists := toRemove[mount.Target]; !exists { + newMounts = append(newMounts, mount) + } + } + sort.Sort(byMountSource(newMounts)) + *mounts = newMounts + return nil +} + +func updateGroups(flags *pflag.FlagSet, groups *[]string) error { + if flags.Changed(flagGroupAdd) { + values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll() + *groups = append(*groups, values...) + } + toRemove := buildToRemoveSet(flags, flagGroupRemove) + + newGroups := []string{} + for _, group := range *groups { + if _, exists := toRemove[group]; !exists { + newGroups = append(newGroups, group) + } + } + // Sort so that result is predictable. + sort.Strings(newGroups) + + *groups = newGroups + return nil +} + +func removeDuplicates(entries []string) []string { + hit := map[string]bool{} + newEntries := []string{} + for _, v := range entries { + if !hit[v] { + newEntries = append(newEntries, v) + hit[v] = true + } + } + return newEntries +} + +func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error { + newConfig := &swarm.DNSConfig{} + + nameservers := (*config).Nameservers + if flags.Changed(flagDNSAdd) { + values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll() + nameservers = append(nameservers, values...) + } + nameservers = removeDuplicates(nameservers) + toRemove := buildToRemoveSet(flags, flagDNSRemove) + for _, nameserver := range nameservers { + if _, exists := toRemove[nameserver]; !exists { + newConfig.Nameservers = append(newConfig.Nameservers, nameserver) + + } + } + // Sort so that result is predictable. 
+	sort.Strings(newConfig.Nameservers)
+
+	search := (*config).Search
+	if flags.Changed(flagDNSSearchAdd) {
+		values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll()
+		search = append(search, values...)
+	}
+	search = removeDuplicates(search)
+	toRemove = buildToRemoveSet(flags, flagDNSSearchRemove)
+	for _, entry := range search {
+		if _, exists := toRemove[entry]; !exists {
+			newConfig.Search = append(newConfig.Search, entry)
+		}
+	}
+	// Sort so that result is predictable.
+	sort.Strings(newConfig.Search)
+
+	options := (*config).Options
+	if flags.Changed(flagDNSOptionAdd) {
+		values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll()
+		options = append(options, values...)
+	}
+	options = removeDuplicates(options)
+	toRemove = buildToRemoveSet(flags, flagDNSOptionRemove)
+	for _, option := range options {
+		if _, exists := toRemove[option]; !exists {
+			newConfig.Options = append(newConfig.Options, option)
+		}
+	}
+	// Sort so that result is predictable.
+	sort.Strings(newConfig.Options)
+
+	*config = newConfig
+	return nil
+}
+
+type byPortConfig []swarm.PortConfig
+
+func (r byPortConfig) Len() int { return len(r) }
+func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+func (r byPortConfig) Less(i, j int) bool {
+	// We convert PortConfig into `port/protocol`, e.g., `80/tcp`.
+	// In updatePorts we already filter out duplicates with a map, so there are
+	// no duplicate entries here.
+	return portConfigToString(&r[i]) < portConfigToString(&r[j])
+}
+
+func portConfigToString(portConfig *swarm.PortConfig) string {
+	protocol := portConfig.Protocol
+	mode := portConfig.PublishMode
+	return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode)
+}
+
+func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error {
+	// The key of the map is `port/protocol`, e.g., `80/tcp`
+	portSet := map[string]swarm.PortConfig{}
+
+	// Build the current list of portConfig
+	for _, entry := range *portConfig {
+		if _, ok := portSet[portConfigToString(&entry)]; !ok {
+			portSet[portConfigToString(&entry)] = entry
+		}
+	}
+
+	newPorts := []swarm.PortConfig{}
+
+	// Clean current ports
+	toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value()
+portLoop:
+	for _, port := range portSet {
+		for _, pConfig := range toRemove {
+			if equalProtocol(port.Protocol, pConfig.Protocol) &&
+				port.TargetPort == pConfig.TargetPort &&
+				equalPublishMode(port.PublishMode, pConfig.PublishMode) {
+				continue portLoop
+			}
+		}
+
+		newPorts = append(newPorts, port)
+	}
+
+	// Check to see if there are any conflicts in the flags.
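+	// Ports being added that exactly match an existing entry (same published
+	// and target port, protocol, and publish mode) are skipped rather than
+	// added a second time.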
+	if flags.Changed(flagPublishAdd) {
+		ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value()
+
+		for _, port := range ports {
+			if _, ok := portSet[portConfigToString(&port)]; ok {
+				continue
+			}
+			//portSet[portConfigToString(&port)] = port
+			newPorts = append(newPorts, port)
+		}
+	}
+
+	// Sort the PortConfig to avoid unnecessary updates
+	sort.Sort(byPortConfig(newPorts))
+	*portConfig = newPorts
+	return nil
+}
+
+func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool {
+	return prot1 == prot2 ||
+		(prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) ||
+		(prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP)
+}
+
+func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool {
+	return mode1 == mode2 ||
+		(mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) ||
+		(mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress)
+}
+
+func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error {
+	if !flags.Changed(flagReplicas) {
+		return nil
+	}
+
+	if serviceMode == nil || serviceMode.Replicated == nil {
+		return errors.Errorf("replicas can only be used with replicated mode")
+	}
+	serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value()
+	return nil
+}
+
+type hostMapping struct {
+	IPAddr string
+	Host   string
+}
+
+// updateHosts performs a diff between existing host entries, entries to be
+// removed, and entries to be added. Host entries preserve the order in which they
+// were added, as the specification mentions that in case multiple entries for a
+// host exist, the first entry should be used (by default).
+//
+// Note that, even though unsupported by the CLI, the service specs format
+// allows entries with both a _canonical_ hostname, and one or more aliases
+// in an entry (IP-address canonical_hostname [alias ...])
+//
+// Entries can be removed by either a specific `<host-name>:<ip-address>` mapping,
+// or by `<host-name>` alone:
+//
+// - If both IP-address and host-name are provided, the hostname is removed only
+//   from entries that match the given IP-address.
+// - If only a host-name is provided, the hostname is removed from any entry it
+//   is part of (either as canonical host-name, or as alias).
+// - If, after removing the host-name from an entry, no host-names remain in
+//   the entry, the entry itself is removed.
+// +// For example, the list of host-entries before processing could look like this: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host1 host2 host4", +// "127.0.0.1 host1 host4", +// "127.0.0.3 host1", +// "127.0.0.1 host1", +// } +// +// Removing `host1` removes every occurrence: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host2 host4", +// "127.0.0.1 host4", +// } +// +// Removing `host1:127.0.0.1` on the other hand, only remove the host if the +// IP-address matches: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host1 host2 host4", +// "127.0.0.1 host4", +// "127.0.0.3 host1", +// } +func updateHosts(flags *pflag.FlagSet, hosts *[]string) error { + var toRemove []hostMapping + if flags.Changed(flagHostRemove) { + extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() + for _, entry := range extraHostsToRemove { + v := strings.SplitN(entry, ":", 2) + if len(v) > 1 { + toRemove = append(toRemove, hostMapping{IPAddr: v[1], Host: v[0]}) + } else { + toRemove = append(toRemove, hostMapping{Host: v[0]}) + } + } + } + + var newHosts []string + for _, entry := range *hosts { + // Since this is in SwarmKit format, we need to find the key, which is canonical_hostname of: + // IP_address canonical_hostname [aliases...] + parts := strings.Fields(entry) + if len(parts) == 0 { + continue + } + ip := parts[0] + hostNames := parts[1:] + for _, rm := range toRemove { + if rm.IPAddr != "" && rm.IPAddr != ip { + continue + } + for i, h := range hostNames { + if h == rm.Host { + hostNames = append(hostNames[:i], hostNames[i+1:]...) + } + } + } + if len(hostNames) > 0 { + newHosts = append(newHosts, fmt.Sprintf("%s %s", ip, strings.Join(hostNames, " "))) + } + } + + // Append new hosts (in SwarmKit format) + if flags.Changed(flagHostAdd) { + values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) + newHosts = append(newHosts, values...) + } + *hosts = removeDuplicates(newHosts) + return nil +} + +// updateLogDriver updates the log driver only if the log driver flag is set. +// All options will be replaced with those provided on the command line. 
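+// If --log-driver is set to an empty string, the existing log driver
+// configuration is left unchanged.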
+func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { + if !flags.Changed(flagLogDriver) { + return nil + } + + name, err := flags.GetString(flagLogDriver) + if err != nil { + return err + } + + if name == "" { + return nil + } + + taskTemplate.LogDriver = &swarm.Driver{ + Name: name, + Options: opts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), + } + + return nil +} + +func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error { + if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) { + return nil + } + if containerSpec.Healthcheck == nil { + containerSpec.Healthcheck = &container.HealthConfig{} + } + noHealthcheck, err := flags.GetBool(flagNoHealthcheck) + if err != nil { + return err + } + if noHealthcheck { + if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) { + containerSpec.Healthcheck = &container.HealthConfig{ + Test: []string{"NONE"}, + } + return nil + } + return errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" { + containerSpec.Healthcheck.Test = nil + } + if flags.Changed(flagHealthInterval) { + val := *flags.Lookup(flagHealthInterval).Value.(*opts.PositiveDurationOpt).Value() + containerSpec.Healthcheck.Interval = val + } + if flags.Changed(flagHealthTimeout) { + val := *flags.Lookup(flagHealthTimeout).Value.(*opts.PositiveDurationOpt).Value() + containerSpec.Healthcheck.Timeout = val + } + if flags.Changed(flagHealthStartPeriod) { + val := *flags.Lookup(flagHealthStartPeriod).Value.(*opts.PositiveDurationOpt).Value() + containerSpec.Healthcheck.StartPeriod = val + } + if flags.Changed(flagHealthRetries) { + containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries) + } + if flags.Changed(flagHealthCmd) { + cmd, _ := flags.GetString(flagHealthCmd) + if cmd != "" { + containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd} + } else { + containerSpec.Healthcheck.Test = nil + } + } + return nil +} + +type byNetworkTarget []swarm.NetworkAttachmentConfig + +func (m byNetworkTarget) Len() int { return len(m) } +func (m byNetworkTarget) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byNetworkTarget) Less(i, j int) bool { + return m[i].Target < m[j].Target +} + +func updateNetworks(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { + // spec.TaskTemplate.Networks takes precedence over the deprecated + // spec.Networks field. If spec.Network is in use, we'll migrate those + // values to spec.TaskTemplate.Networks. 
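+	// The merged, sorted result is written back to spec.TaskTemplate.Networks
+	// at the end of this function.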
+ specNetworks := spec.TaskTemplate.Networks + if len(specNetworks) == 0 { + specNetworks = spec.Networks + } + spec.Networks = nil + + toRemove := buildToRemoveSet(flags, flagNetworkRemove) + idsToRemove := make(map[string]struct{}) + for networkIDOrName := range toRemove { + network, err := apiClient.NetworkInspect(ctx, networkIDOrName, types.NetworkInspectOptions{Scope: "swarm"}) + if err != nil { + return err + } + idsToRemove[network.ID] = struct{}{} + } + + existingNetworks := make(map[string]struct{}) + var newNetworks []swarm.NetworkAttachmentConfig + for _, network := range specNetworks { + if _, exists := idsToRemove[network.Target]; exists { + continue + } + + newNetworks = append(newNetworks, network) + existingNetworks[network.Target] = struct{}{} + } + + if flags.Changed(flagNetworkAdd) { + values := flags.Lookup(flagNetworkAdd).Value.(*opts.NetworkOpt) + networks := convertNetworks(*values) + for _, network := range networks { + nwID, err := resolveNetworkID(ctx, apiClient, network.Target) + if err != nil { + return err + } + if _, exists := existingNetworks[nwID]; exists { + return errors.Errorf("service is already attached to network %s", network.Target) + } + network.Target = nwID + newNetworks = append(newNetworks, network) + existingNetworks[network.Target] = struct{}{} + } + } + + sort.Sort(byNetworkTarget(newNetworks)) + + spec.TaskTemplate.Networks = newNetworks + return nil +} diff --git a/cli/cli/command/service/update_test.go b/cli/cli/command/service/update_test.go new file mode 100644 index 00000000..847b6ab1 --- /dev/null +++ b/cli/cli/command/service/update_test.go @@ -0,0 +1,778 @@ +package service + +import ( + "context" + "fmt" + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestUpdateServiceArgs(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("args", "the \"new args\"") + + spec := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + cspec := spec.TaskTemplate.ContainerSpec + cspec.Args = []string{"old", "args"} + + updateService(nil, nil, flags, spec) + assert.Check(t, is.DeepEqual([]string{"the", "new args"}, cspec.Args)) +} + +func TestUpdateLabels(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-add", "toadd=newlabel") + flags.Set("label-rm", "toremove") + + labels := map[string]string{ + "toremove": "thelabeltoremove", + "tokeep": "value", + } + + updateLabels(flags, &labels) + assert.Check(t, is.Len(labels, 2)) + assert.Check(t, is.Equal("value", labels["tokeep"])) + assert.Check(t, is.Equal("newlabel", labels["toadd"])) +} + +func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-rm", "dne") + + labels := map[string]string{"foo": "theoldlabel"} + updateLabels(flags, &labels) + assert.Check(t, is.Len(labels, 1)) +} + +func TestUpdatePlacementConstraints(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("constraint-add", "node=toadd") + flags.Set("constraint-rm", "node!=toremove") + + placement := &swarm.Placement{ + Constraints: []string{"node!=toremove", "container=tokeep"}, + } + + updatePlacementConstraints(flags, placement) + assert.Assert(t, is.Len(placement.Constraints, 2)) + assert.Check(t, 
is.Equal("container=tokeep", placement.Constraints[0])) + assert.Check(t, is.Equal("node=toadd", placement.Constraints[1])) +} + +func TestUpdatePlacementPrefs(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("placement-pref-add", "spread=node.labels.dc") + flags.Set("placement-pref-rm", "spread=node.labels.rack") + + placement := &swarm.Placement{ + Preferences: []swarm.PlacementPreference{ + { + Spread: &swarm.SpreadOver{ + SpreadDescriptor: "node.labels.rack", + }, + }, + { + Spread: &swarm.SpreadOver{ + SpreadDescriptor: "node.labels.row", + }, + }, + }, + } + + updatePlacementPreferences(flags, placement) + assert.Assert(t, is.Len(placement.Preferences, 2)) + assert.Check(t, is.Equal("node.labels.row", placement.Preferences[0].Spread.SpreadDescriptor)) + assert.Check(t, is.Equal("node.labels.dc", placement.Preferences[1].Spread.SpreadDescriptor)) +} + +func TestUpdateEnvironment(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "toadd=newenv") + flags.Set("env-rm", "toremove") + + envs := []string{"toremove=theenvtoremove", "tokeep=value"} + + updateEnvironment(flags, &envs) + assert.Assert(t, is.Len(envs, 2)) + // Order has been removed in updateEnvironment (map) + sort.Strings(envs) + assert.Check(t, is.Equal("toadd=newenv", envs[0])) + assert.Check(t, is.Equal("tokeep=value", envs[1])) +} + +func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "foo=newenv") + flags.Set("env-add", "foo=dupe") + flags.Set("env-rm", "foo") + + envs := []string{"foo=value"} + + updateEnvironment(flags, &envs) + assert.Check(t, is.Len(envs, 0)) +} + +func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) { + // Test case for #25404 + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "A=b") + + envs := []string{"A=c"} + + updateEnvironment(flags, &envs) + assert.Assert(t, is.Len(envs, 1)) + assert.Check(t, is.Equal("A=b", envs[0])) +} + +func TestUpdateGroups(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("group-add", "wheel") + flags.Set("group-add", "docker") + flags.Set("group-rm", "root") + flags.Set("group-add", "foo") + flags.Set("group-rm", "docker") + + groups := []string{"bar", "root"} + + updateGroups(flags, &groups) + assert.Assert(t, is.Len(groups, 3)) + assert.Check(t, is.Equal("bar", groups[0])) + assert.Check(t, is.Equal("foo", groups[1])) + assert.Check(t, is.Equal("wheel", groups[2])) +} + +func TestUpdateDNSConfig(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + + // IPv4, with duplicates + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "2.2.2.2") + flags.Set("dns-rm", "3.3.3.3") + flags.Set("dns-rm", "2.2.2.2") + // IPv6 + flags.Set("dns-add", "2001:db8:abc8::1") + // Invalid dns record + assert.ErrorContains(t, flags.Set("dns-add", "x.y.z.w"), "x.y.z.w is not an ip address") + + // domains with duplicates + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.org") + flags.Set("dns-search-rm", "example.org") + // Invalid dns search domain + assert.ErrorContains(t, flags.Set("dns-search-add", "example$com"), "example$com is not a valid domain") + + flags.Set("dns-option-add", "ndots:9") + flags.Set("dns-option-rm", "timeout:3") + + config := &swarm.DNSConfig{ + Nameservers: []string{"3.3.3.3", "5.5.5.5"}, + Search: []string{"localdomain"}, + Options: []string{"timeout:3"}, + } + + updateDNSConfig(flags, 
&config) + + assert.Assert(t, is.Len(config.Nameservers, 3)) + assert.Check(t, is.Equal("1.1.1.1", config.Nameservers[0])) + assert.Check(t, is.Equal("2001:db8:abc8::1", config.Nameservers[1])) + assert.Check(t, is.Equal("5.5.5.5", config.Nameservers[2])) + + assert.Assert(t, is.Len(config.Search, 2)) + assert.Check(t, is.Equal("example.com", config.Search[0])) + assert.Check(t, is.Equal("localdomain", config.Search[1])) + + assert.Assert(t, is.Len(config.Options, 1)) + assert.Check(t, is.Equal(config.Options[0], "ndots:9")) +} + +func TestUpdateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol2,target=/toadd") + flags.Set("mount-rm", "/toremove") + + mounts := []mounttypes.Mount{ + {Target: "/toremove", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/tokeep", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Assert(t, is.Len(mounts, 2)) + assert.Check(t, is.Equal("/toadd", mounts[0].Target)) + assert.Check(t, is.Equal("/tokeep", mounts[1].Target)) +} + +func TestUpdateMountsWithDuplicateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol4,target=/toadd") + + mounts := []mounttypes.Mount{ + {Target: "/tokeep1", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/toadd", Source: "vol2", Type: mounttypes.TypeBind}, + {Target: "/tokeep2", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Assert(t, is.Len(mounts, 3)) + assert.Check(t, is.Equal("/tokeep1", mounts[0].Target)) + assert.Check(t, is.Equal("/tokeep2", mounts[1].Target)) + assert.Check(t, is.Equal("/toadd", mounts[2].Target)) +} + +func TestUpdatePorts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "1000:1000") + flags.Set("publish-rm", "333/udp") + + portConfigs := []swarm.PortConfig{ + {TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP}, + {TargetPort: 555}, + } + + err := updatePorts(flags, &portConfigs) + assert.NilError(t, err) + assert.Assert(t, is.Len(portConfigs, 2)) + // Do a sort to have the order (might have changed by map) + targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)} + sort.Ints(targetPorts) + assert.Check(t, is.Equal(555, targetPorts[0])) + assert.Check(t, is.Equal(1000, targetPorts[1])) +} + +func TestUpdatePortsDuplicate(t *testing.T) { + // Test case for #25375 + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "80:80") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 80, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.NilError(t, err) + assert.Assert(t, is.Len(portConfigs, 1)) + assert.Check(t, is.Equal(uint32(80), portConfigs[0].TargetPort)) +} + +func TestUpdateHealthcheckTable(t *testing.T) { + type test struct { + flags [][2]string + initial *container.HealthConfig + expected *container.HealthConfig + err string + } + testCases := []test{ + { + flags: [][2]string{{"no-healthcheck", "true"}}, + initial: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"NONE"}}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}}, + }, + { + flags: 
[][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + }, + { + flags: [][2]string{{"health-interval", "1m"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Interval: time.Minute}, + }, + { + flags: [][2]string{{"health-cmd", ""}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "0"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + }, + { + flags: [][2]string{{"health-start-period", "1m"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, StartPeriod: time.Minute}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-interval", "10m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-timeout", "1m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + } + for i, c := range testCases { + flags := newUpdateCommand(nil).Flags() + for _, flag := range c.flags { + flags.Set(flag[0], flag[1]) + } + cspec := &swarm.ContainerSpec{ + Healthcheck: c.initial, + } + err := updateHealthcheck(flags, cspec) + if c.err != "" { + assert.Error(t, err, c.err) + } else { + assert.NilError(t, err) + if !reflect.DeepEqual(cspec.Healthcheck, c.expected) { + t.Errorf("incorrect result for test %d, expected health config:\n\t%#v\ngot:\n\t%#v", i, c.expected, cspec.Healthcheck) + } + } + } +} + +func TestUpdateHosts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "example.net:2.2.2.2") + flags.Set("host-add", "ipv6.net:2001:db8:abc8::1") + // remove with ipv6 should work + flags.Set("host-rm", "example.net:2001:db8:abc8::1") + // just hostname should work as well + flags.Set("host-rm", "example.net") + // bad format error + assert.ErrorContains(t, flags.Set("host-add", "$example.com$"), `bad format for add-host: "$example.com$"`) + + hosts := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2001:db8:abc8::1 example.net"} + expected := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2.2.2.2 example.net", "2001:db8:abc8::1 ipv6.net"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, hosts)) +} + +func TestUpdateHostsPreservesOrder(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "foobar:127.0.0.2") + flags.Set("host-add", "foobar:127.0.0.1") + flags.Set("host-add", "foobar:127.0.0.3") + + hosts := []string{} + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"127.0.0.2 foobar", "127.0.0.1 foobar", "127.0.0.3 foobar"}, hosts)) +} + +func TestUpdateHostsReplaceEntry(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "foobar:127.0.0.4") + flags.Set("host-rm", 
"foobar:127.0.0.2") + + hosts := []string{"127.0.0.2 foobar", "127.0.0.1 foobar", "127.0.0.3 foobar"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"127.0.0.1 foobar", "127.0.0.3 foobar", "127.0.0.4 foobar"}, hosts)) +} + +func TestUpdateHostsRemoveHost(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-rm", "host1") + + hosts := []string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host1 host4", "127.0.0.3 host1"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + + // Removing host `host1` should remove the entry from each line it appears in. + // If there are no other hosts in the entry, the entry itself should be removed. + assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host3 host2 host4", "127.0.0.1 host4"}, hosts)) +} + +func TestUpdateHostsRemoveHostIP(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-rm", "host1:127.0.0.1") + + hosts := []string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host1 host4", "127.0.0.3 host1", "127.0.0.1 host1"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + + // Removing host `host1` should remove the entry from each line it appears in, + // but only if the IP-address matches. If there are no other hosts in the entry, + // the entry itself should be removed. + assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host4", "127.0.0.3 host1"}, hosts)) +} + +func TestUpdateHostsRemoveAll(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "host-three:127.0.0.4") + flags.Set("host-add", "host-one:127.0.0.5") + flags.Set("host-rm", "host-one") + + hosts := []string{"127.0.0.1 host-one", "127.0.0.2 host-two", "127.0.0.3 host-one"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host-two", "127.0.0.4 host-three", "127.0.0.5 host-one"}, hosts)) +} + +func TestUpdatePortsRmWithProtocol(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "8081:81") + flags.Set("publish-add", "8082:82") + flags.Set("publish-rm", "80") + flags.Set("publish-rm", "81/tcp") + flags.Set("publish-rm", "82/udp") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.NilError(t, err) + assert.Assert(t, is.Len(portConfigs, 2)) + assert.Check(t, is.Equal(uint32(81), portConfigs[0].TargetPort)) + assert.Check(t, is.Equal(uint32(82), portConfigs[1].TargetPort)) +} + +type secretAPIClientMock struct { + listResult []swarm.Secret +} + +func (s secretAPIClientMock) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + return s.listResult, nil +} +func (s secretAPIClientMock) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + return types.SecretCreateResponse{}, nil +} +func (s secretAPIClientMock) SecretRemove(ctx context.Context, id string) error { + return nil +} +func (s secretAPIClientMock) SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) { + return swarm.Secret{}, []byte{}, nil +} +func (s secretAPIClientMock) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + return nil +} + +// TestUpdateSecretUpdateInPlace tests the ability to update 
the "target" of an secret with "docker service update" +// by combining "--secret-rm" and "--secret-add" for the same secret. +func TestUpdateSecretUpdateInPlace(t *testing.T) { + apiClient := secretAPIClientMock{ + listResult: []swarm.Secret{ + { + ID: "tn9qiblgnuuut11eufquw5dev", + Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "foo"}}, + }, + }, + } + + flags := newUpdateCommand(nil).Flags() + flags.Set("secret-add", "source=foo,target=foo2") + flags.Set("secret-rm", "foo") + + secrets := []*swarm.SecretReference{ + { + File: &swarm.SecretReferenceFileTarget{ + Name: "foo", + UID: "0", + GID: "0", + Mode: 292, + }, + SecretID: "tn9qiblgnuuut11eufquw5dev", + SecretName: "foo", + }, + } + + updatedSecrets, err := getUpdatedSecrets(apiClient, flags, secrets) + + assert.NilError(t, err) + assert.Assert(t, is.Len(updatedSecrets, 1)) + assert.Check(t, is.Equal("tn9qiblgnuuut11eufquw5dev", updatedSecrets[0].SecretID)) + assert.Check(t, is.Equal("foo", updatedSecrets[0].SecretName)) + assert.Check(t, is.Equal("foo2", updatedSecrets[0].File.Name)) +} + +func TestUpdateReadOnly(t *testing.T) { + spec := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + cspec := spec.TaskTemplate.ContainerSpec + + // Update with --read-only=true, changed to true + flags := newUpdateCommand(nil).Flags() + flags.Set("read-only", "true") + updateService(nil, nil, flags, spec) + assert.Check(t, cspec.ReadOnly) + + // Update without --read-only, no change + flags = newUpdateCommand(nil).Flags() + updateService(nil, nil, flags, spec) + assert.Check(t, cspec.ReadOnly) + + // Update with --read-only=false, changed to false + flags = newUpdateCommand(nil).Flags() + flags.Set("read-only", "false") + updateService(nil, nil, flags, spec) + assert.Check(t, !cspec.ReadOnly) +} + +func TestUpdateInit(t *testing.T) { + spec := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + cspec := spec.TaskTemplate.ContainerSpec + + // Update with --init=true + flags := newUpdateCommand(nil).Flags() + flags.Set("init", "true") + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal(true, *cspec.Init)) + + // Update without --init, no change + flags = newUpdateCommand(nil).Flags() + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal(true, *cspec.Init)) + + // Update with --init=false + flags = newUpdateCommand(nil).Flags() + flags.Set("init", "false") + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal(false, *cspec.Init)) +} + +func TestUpdateStopSignal(t *testing.T) { + spec := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + cspec := spec.TaskTemplate.ContainerSpec + + // Update with --stop-signal=SIGUSR1 + flags := newUpdateCommand(nil).Flags() + flags.Set("stop-signal", "SIGUSR1") + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal("SIGUSR1", cspec.StopSignal)) + + // Update without --stop-signal, no change + flags = newUpdateCommand(nil).Flags() + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal("SIGUSR1", cspec.StopSignal)) + + // Update with --stop-signal=SIGWINCH + flags = newUpdateCommand(nil).Flags() + flags.Set("stop-signal", "SIGWINCH") + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal("SIGWINCH", cspec.StopSignal)) +} + +func TestUpdateIsolationValid(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + err := flags.Set("isolation", "process") + assert.NilError(t, err) + 
spec := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + assert.Check(t, is.Equal(container.IsolationProcess, spec.TaskTemplate.ContainerSpec.Isolation)) +} + +// TestUpdateLimitsReservations tests that limits and reservations are updated, +// and that values that are not updated are not reset to their default value +func TestUpdateLimitsReservations(t *testing.T) { + spec := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + Resources: &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: 1000000000, + MemoryBytes: 104857600, + }, + Reservations: &swarm.Resources{ + NanoCPUs: 1000000000, + MemoryBytes: 104857600, + }, + }, + }, + } + + flags := newUpdateCommand(nil).Flags() + err := flags.Set(flagLimitCPU, "2") + assert.NilError(t, err) + err = flags.Set(flagReserveCPU, "2") + assert.NilError(t, err) + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600))) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagLimitMemory, "200M") + assert.NilError(t, err) + err = flags.Set(flagReserveMemory, "200M") + assert.NilError(t, err) + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200))) +} + +func TestUpdateIsolationInvalid(t *testing.T) { + // validation depends on daemon os / version so validation should be done on the daemon side + flags := newUpdateCommand(nil).Flags() + err := flags.Set("isolation", "test") + assert.NilError(t, err) + spec := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + assert.Check(t, is.Equal(container.Isolation("test"), spec.TaskTemplate.ContainerSpec.Isolation)) +} + +func TestAddGenericResources(t *testing.T) { + task := &swarm.TaskSpec{} + flags := newUpdateCommand(nil).Flags() + + assert.Check(t, addGenericResources(flags, task)) + + flags.Set(flagGenericResourcesAdd, "foo=1") + assert.Check(t, addGenericResources(flags, task)) + assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 1)) + + // Checks that foo isn't added a 2nd time + flags = newUpdateCommand(nil).Flags() + flags.Set(flagGenericResourcesAdd, "bar=1") + assert.Check(t, addGenericResources(flags, task)) + assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 2)) +} + +func TestRemoveGenericResources(t *testing.T) { + task := &swarm.TaskSpec{} + flags := newUpdateCommand(nil).Flags() + + assert.Check(t, removeGenericResources(flags, task)) + + flags.Set(flagGenericResourcesRemove, "foo") + assert.Check(t, 
is.ErrorContains(removeGenericResources(flags, task), "")) + + flags = newUpdateCommand(nil).Flags() + flags.Set(flagGenericResourcesAdd, "foo=1") + addGenericResources(flags, task) + flags = newUpdateCommand(nil).Flags() + flags.Set(flagGenericResourcesAdd, "bar=1") + addGenericResources(flags, task) + + flags = newUpdateCommand(nil).Flags() + flags.Set(flagGenericResourcesRemove, "foo") + assert.Check(t, removeGenericResources(flags, task)) + assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 1)) +} + +func TestUpdateNetworks(t *testing.T) { + ctx := context.Background() + nws := []types.NetworkResource{ + {Name: "aaa-network", ID: "id555"}, + {Name: "mmm-network", ID: "id999"}, + {Name: "zzz-network", ID: "id111"}, + } + + client := &fakeClient{ + networkInspectFunc: func(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + for _, network := range nws { + if network.ID == networkID || network.Name == networkID { + return network, nil + } + } + return types.NetworkResource{}, fmt.Errorf("network not found: %s", networkID) + }, + } + + svc := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + Networks: []swarm.NetworkAttachmentConfig{ + {Target: "id999"}, + }, + }, + } + + flags := newUpdateCommand(nil).Flags() + err := flags.Set(flagNetworkAdd, "aaa-network") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks)) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagNetworkAdd, "aaa-network") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.Error(t, err, "service is already attached to network aaa-network") + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks)) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagNetworkAdd, "id555") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.Error(t, err, "service is already attached to network id555") + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks)) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagNetworkRemove, "id999") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}}, svc.TaskTemplate.Networks)) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagNetworkAdd, "mmm-network") + assert.NilError(t, err) + err = flags.Set(flagNetworkRemove, "aaa-network") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id999"}}, svc.TaskTemplate.Networks)) +} diff --git a/cli/cli/command/stack/client_test.go b/cli/cli/command/stack/client_test.go new file mode 100644 index 00000000..c028d668 --- /dev/null +++ b/cli/cli/command/stack/client_test.go @@ -0,0 +1,239 @@ +package stack + +import ( + "context" + "strings" + + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" 
+) + +type fakeClient struct { + client.Client + + version string + + services []string + networks []string + secrets []string + configs []string + + removedServices []string + removedNetworks []string + removedSecrets []string + removedConfigs []string + + serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) + networkListFunc func(options types.NetworkListOptions) ([]types.NetworkResource, error) + secretListFunc func(options types.SecretListOptions) ([]swarm.Secret, error) + configListFunc func(options types.ConfigListOptions) ([]swarm.Config, error) + nodeListFunc func(options types.NodeListOptions) ([]swarm.Node, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + nodeInspectWithRaw func(ref string) (swarm.Node, []byte, error) + + serviceUpdateFunc func(serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + + serviceRemoveFunc func(serviceID string) error + networkRemoveFunc func(networkID string) error + secretRemoveFunc func(secretID string) error + configRemoveFunc func(configID string) error +} + +func (cli *fakeClient) ServerVersion(ctx context.Context) (types.Version, error) { + return types.Version{ + Version: "docker-dev", + APIVersion: api.DefaultVersion, + }, nil +} + +func (cli *fakeClient) ClientVersion() string { + return cli.version +} + +func (cli *fakeClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + if cli.serviceListFunc != nil { + return cli.serviceListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + servicesList := []swarm.Service{} + for _, name := range cli.services { + if belongToNamespace(name, namespace) { + servicesList = append(servicesList, serviceFromName(name)) + } + } + return servicesList, nil +} + +func (cli *fakeClient) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + if cli.networkListFunc != nil { + return cli.networkListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + networksList := []types.NetworkResource{} + for _, name := range cli.networks { + if belongToNamespace(name, namespace) { + networksList = append(networksList, networkFromName(name)) + } + } + return networksList, nil +} + +func (cli *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if cli.secretListFunc != nil { + return cli.secretListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + secretsList := []swarm.Secret{} + for _, name := range cli.secrets { + if belongToNamespace(name, namespace) { + secretsList = append(secretsList, secretFromName(name)) + } + } + return secretsList, nil +} + +func (cli *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if cli.configListFunc != nil { + return cli.configListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + configsList := []swarm.Config{} + for _, name := range cli.configs { + if belongToNamespace(name, namespace) { + configsList = append(configsList, configFromName(name)) + } + } + return configsList, nil +} + +func (cli *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + if cli.taskListFunc != nil { + return cli.taskListFunc(options) + } + return []swarm.Task{}, nil +} + +func (cli *fakeClient) NodeList(ctx context.Context, options 
types.NodeListOptions) ([]swarm.Node, error) { + if cli.nodeListFunc != nil { + return cli.nodeListFunc(options) + } + return []swarm.Node{}, nil +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectWithRaw != nil { + return cli.nodeInspectWithRaw(ref) + } + return swarm.Node{}, nil, nil +} + +func (cli *fakeClient) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + if cli.serviceUpdateFunc != nil { + return cli.serviceUpdateFunc(serviceID, version, service, options) + } + + return types.ServiceUpdateResponse{}, nil +} + +func (cli *fakeClient) ServiceRemove(ctx context.Context, serviceID string) error { + if cli.serviceRemoveFunc != nil { + return cli.serviceRemoveFunc(serviceID) + } + + cli.removedServices = append(cli.removedServices, serviceID) + return nil +} + +func (cli *fakeClient) NetworkRemove(ctx context.Context, networkID string) error { + if cli.networkRemoveFunc != nil { + return cli.networkRemoveFunc(networkID) + } + + cli.removedNetworks = append(cli.removedNetworks, networkID) + return nil +} + +func (cli *fakeClient) SecretRemove(ctx context.Context, secretID string) error { + if cli.secretRemoveFunc != nil { + return cli.secretRemoveFunc(secretID) + } + + cli.removedSecrets = append(cli.removedSecrets, secretID) + return nil +} + +func (cli *fakeClient) ConfigRemove(ctx context.Context, configID string) error { + if cli.configRemoveFunc != nil { + return cli.configRemoveFunc(configID) + } + + cli.removedConfigs = append(cli.removedConfigs, configID) + return nil +} + +func serviceFromName(name string) swarm.Service { + return swarm.Service{ + ID: "ID-" + name, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func networkFromName(name string) types.NetworkResource { + return types.NetworkResource{ + ID: "ID-" + name, + Name: name, + } +} + +func secretFromName(name string) swarm.Secret { + return swarm.Secret{ + ID: "ID-" + name, + Spec: swarm.SecretSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func configFromName(name string) swarm.Config { + return swarm.Config{ + ID: "ID-" + name, + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func namespaceFromFilters(filters filters.Args) string { + label := filters.Get("label")[0] + return strings.TrimPrefix(label, convert.LabelNamespace+"=") +} + +func belongToNamespace(id, namespace string) bool { + return strings.HasPrefix(id, namespace+"_") +} + +func objectName(namespace, name string) string { + return namespace + "_" + name +} + +func objectID(name string) string { + return "ID-" + name +} + +func buildObjectIDs(objectNames []string) []string { + IDs := make([]string, len(objectNames)) + for i, name := range objectNames { + IDs[i] = objectID(name) + } + return IDs +} diff --git a/cli/cli/command/stack/cmd.go b/cli/cli/command/stack/cmd.go new file mode 100644 index 00000000..851ac13c --- /dev/null +++ b/cli/cli/command/stack/cmd.go @@ -0,0 +1,128 @@ +package stack + +import ( + "errors" + "fmt" + "io" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +var errUnsupportedAllOrchestrator = fmt.Errorf(`no orchestrator specified: use either 
"kubernetes" or "swarm"`) + +type commonOptions struct { + orchestrator command.Orchestrator +} + +// NewStackCommand returns a cobra command for `stack` subcommands +func NewStackCommand(dockerCli command.Cli) *cobra.Command { + var opts commonOptions + cmd := &cobra.Command{ + Use: "stack [OPTIONS]", + Short: "Manage Docker stacks", + Args: cli.NoArgs, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + configFile := dockerCli.ConfigFile() + if configFile == nil { + configFile = cliconfig.LoadDefaultConfigFile(dockerCli.Err()) + } + orchestrator, err := getOrchestrator(configFile, cmd, dockerCli.Err()) + if err != nil { + return err + } + opts.orchestrator = orchestrator + hideOrchestrationFlags(cmd, orchestrator) + return checkSupportedFlag(cmd, orchestrator) + }, + + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.25", + }, + } + defaultHelpFunc := cmd.HelpFunc() + cmd.SetHelpFunc(func(c *cobra.Command, args []string) { + if err := cmd.PersistentPreRunE(c, args); err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return + } + hideOrchestrationFlags(c, opts.orchestrator) + defaultHelpFunc(c, args) + }) + cmd.AddCommand( + newDeployCommand(dockerCli, &opts), + newListCommand(dockerCli, &opts), + newPsCommand(dockerCli, &opts), + newRemoveCommand(dockerCli, &opts), + newServicesCommand(dockerCli, &opts), + ) + flags := cmd.PersistentFlags() + flags.String("kubeconfig", "", "Kubernetes config file") + flags.SetAnnotation("kubeconfig", "kubernetes", nil) + flags.String("orchestrator", "", "Orchestrator to use (swarm|kubernetes|all)") + return cmd +} + +// NewTopLevelDeployCommand returns a command for `docker deploy` +func NewTopLevelDeployCommand(dockerCli command.Cli) *cobra.Command { + cmd := newDeployCommand(dockerCli, nil) + // Remove the aliases at the top level + cmd.Aliases = []string{} + cmd.Annotations = map[string]string{ + "experimental": "", + "version": "1.25", + } + return cmd +} + +func getOrchestrator(config *configfile.ConfigFile, cmd *cobra.Command, stderr io.Writer) (command.Orchestrator, error) { + var orchestratorFlag string + if o, err := cmd.Flags().GetString("orchestrator"); err == nil { + orchestratorFlag = o + } + return command.GetStackOrchestrator(orchestratorFlag, config.StackOrchestrator, stderr) +} + +func hideOrchestrationFlags(cmd *cobra.Command, orchestrator command.Orchestrator) { + cmd.Flags().VisitAll(func(f *pflag.Flag) { + if _, ok := f.Annotations["kubernetes"]; ok && !orchestrator.HasKubernetes() { + f.Hidden = true + } + if _, ok := f.Annotations["swarm"]; ok && !orchestrator.HasSwarm() { + f.Hidden = true + } + }) + for _, subcmd := range cmd.Commands() { + hideOrchestrationFlags(subcmd, orchestrator) + } +} + +func checkSupportedFlag(cmd *cobra.Command, orchestrator command.Orchestrator) error { + errs := []string{} + cmd.Flags().VisitAll(func(f *pflag.Flag) { + if !f.Changed { + return + } + if _, ok := f.Annotations["kubernetes"]; ok && !orchestrator.HasKubernetes() { + errs = append(errs, fmt.Sprintf(`"--%s" is only supported on a Docker cli with kubernetes features enabled`, f.Name)) + } + if _, ok := f.Annotations["swarm"]; ok && !orchestrator.HasSwarm() { + errs = append(errs, fmt.Sprintf(`"--%s" is only supported on a Docker cli with swarm features enabled`, f.Name)) + } + }) + for _, subcmd := range cmd.Commands() { + if err := checkSupportedFlag(subcmd, orchestrator); err != nil { + errs = append(errs, err.Error()) + } + } + if len(errs) > 0 { + return 
errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/stack/common.go b/cli/cli/command/stack/common.go new file mode 100644 index 00000000..6de410da --- /dev/null +++ b/cli/cli/command/stack/common.go @@ -0,0 +1,31 @@ +package stack + +import ( + "fmt" + "strings" + "unicode" +) + +// validateStackName checks if the provided string is a valid stack name (namespace). +// It currently only does a rudimentary check if the string is empty, or consists +// of only whitespace and quoting characters. +func validateStackName(namespace string) error { + v := strings.TrimFunc(namespace, quotesOrWhitespace) + if v == "" { + return fmt.Errorf("invalid stack name: %q", namespace) + } + return nil +} + +func validateStackNames(namespaces []string) error { + for _, ns := range namespaces { + if err := validateStackName(ns); err != nil { + return err + } + } + return nil +} + +func quotesOrWhitespace(r rune) bool { + return unicode.IsSpace(r) || r == '"' || r == '\'' +} diff --git a/cli/cli/command/stack/deploy.go b/cli/cli/command/stack/deploy.go new file mode 100644 index 00000000..6c083eb2 --- /dev/null +++ b/cli/cli/command/stack/deploy.go @@ -0,0 +1,90 @@ +package stack + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/loader" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newDeployCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + var opts options.Deploy + + cmd := &cobra.Command{ + Use: "deploy [OPTIONS] STACK", + Aliases: []string{"up"}, + Short: "Deploy a new stack or update an existing stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + + commonOrchestrator := command.OrchestratorSwarm // default for top-level deploy command + if common != nil { + commonOrchestrator = common.orchestrator + } + + switch { + case opts.Bundlefile == "" && len(opts.Composefiles) == 0: + return errors.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") + case opts.Bundlefile != "" && len(opts.Composefiles) != 0: + return errors.Errorf("You cannot specify both a bundle file and a Compose file.") + case opts.Bundlefile != "": + if commonOrchestrator != command.OrchestratorSwarm { + return errors.Errorf("bundle files are not supported on another orchestrator than swarm.") + } + return swarm.DeployBundle(context.Background(), dockerCli, opts) + } + + config, err := loader.LoadComposefile(dockerCli, opts) + if err != nil { + return err + } + return RunDeploy(dockerCli, cmd.Flags(), config, commonOrchestrator, opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.Bundlefile, "bundle-file", "", "Path to a Distributed Application Bundle file") + flags.SetAnnotation("bundle-file", "experimental", nil) + flags.SetAnnotation("bundle-file", "swarm", nil) + flags.StringSliceVarP(&opts.Composefiles, "compose-file", "c", []string{}, `Path to a Compose file, or "-" to read from stdin`) + flags.SetAnnotation("compose-file", "version", []string{"1.25"}) + flags.BoolVar(&opts.SendRegistryAuth, "with-registry-auth", false, "Send registry 
authentication details to Swarm agents") + flags.SetAnnotation("with-registry-auth", "swarm", nil) + flags.BoolVar(&opts.Prune, "prune", false, "Prune services that are no longer referenced") + flags.SetAnnotation("prune", "version", []string{"1.27"}) + flags.SetAnnotation("prune", "swarm", nil) + flags.StringVar(&opts.ResolveImage, "resolve-image", swarm.ResolveImageAlways, + `Query the registry to resolve image digest and supported platforms ("`+swarm.ResolveImageAlways+`"|"`+swarm.ResolveImageChanged+`"|"`+swarm.ResolveImageNever+`")`) + flags.SetAnnotation("resolve-image", "version", []string{"1.30"}) + flags.SetAnnotation("resolve-image", "swarm", nil) + kubernetes.AddNamespaceFlag(flags) + return cmd +} + +// RunDeploy performs a stack deploy against the specified orchestrator +func RunDeploy(dockerCli command.Cli, flags *pflag.FlagSet, config *composetypes.Config, commonOrchestrator command.Orchestrator, opts options.Deploy) error { + switch { + case commonOrchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case commonOrchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(flags, commonOrchestrator)) + if err != nil { + return err + } + return kubernetes.RunDeploy(kli, opts, config) + default: + return swarm.RunDeploy(dockerCli, opts, config) + } +} diff --git a/cli/cli/command/stack/deploy_test.go b/cli/cli/command/stack/deploy_test.go new file mode 100644 index 00000000..89dbc6e1 --- /dev/null +++ b/cli/cli/command/stack/deploy_test.go @@ -0,0 +1,17 @@ +package stack + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" +) + +func TestDeployWithEmptyName(t *testing.T) { + cmd := newDeployCommand(test.NewFakeCli(&fakeClient{}), nil) + cmd.SetArgs([]string{"' '"}) + cmd.SetOutput(ioutil.Discard) + + assert.ErrorContains(t, cmd.Execute(), `invalid stack name: "' '"`) +} diff --git a/cli/cli/command/stack/kubernetes/cli.go b/cli/cli/command/stack/kubernetes/cli.go new file mode 100644 index 00000000..bcd0b01e --- /dev/null +++ b/cli/cli/command/stack/kubernetes/cli.go @@ -0,0 +1,126 @@ +package kubernetes + +import ( + "fmt" + "net" + "net/url" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/kubernetes" + cliv1beta1 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1" + flag "github.com/spf13/pflag" + kubeclient "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" +) + +// KubeCli holds kubernetes specifics (client, namespace) with the command.Cli +type KubeCli struct { + command.Cli + kubeConfig *restclient.Config + kubeNamespace string + clientSet *kubeclient.Clientset +} + +// Options contains resolved parameters to initialize kubernetes clients +type Options struct { + Namespace string + Config string + Orchestrator command.Orchestrator +} + +// NewOptions returns an Options initialized with command line flags +func NewOptions(flags *flag.FlagSet, orchestrator command.Orchestrator) Options { + opts := Options{ + Orchestrator: orchestrator, + } + if namespace, err := flags.GetString("namespace"); err == nil { + opts.Namespace = namespace + } + if kubeConfig, err := flags.GetString("kubeconfig"); err == nil { + opts.Config = kubeConfig + } + return opts +} + +// AddNamespaceFlag adds the namespace flag to the given flag set +func AddNamespaceFlag(flags *flag.FlagSet) { + flags.String("namespace", "", "Kubernetes namespace to use") + flags.SetAnnotation("namespace", "kubernetes", nil) +} + +// WrapCli wraps command.Cli with 
kubernetes specifics +func WrapCli(dockerCli command.Cli, opts Options) (*KubeCli, error) { + cli := &KubeCli{ + Cli: dockerCli, + } + clientConfig := kubernetes.NewKubernetesConfig(opts.Config) + + cli.kubeNamespace = opts.Namespace + if opts.Namespace == "" { + configNamespace, _, err := clientConfig.Namespace() + if err != nil { + return nil, err + } + cli.kubeNamespace = configNamespace + } + + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + cli.kubeConfig = config + + clientSet, err := kubeclient.NewForConfig(config) + if err != nil { + return nil, err + } + cli.clientSet = clientSet + + if opts.Orchestrator.HasAll() { + if err := cli.checkHostsMatch(); err != nil { + return nil, err + } + } + return cli, nil +} + +func (c *KubeCli) composeClient() (*Factory, error) { + return NewFactory(c.kubeNamespace, c.kubeConfig, c.clientSet) +} + +func (c *KubeCli) checkHostsMatch() error { + daemonEndpoint, err := url.Parse(c.Client().DaemonHost()) + if err != nil { + return err + } + kubeEndpoint, err := url.Parse(c.kubeConfig.Host) + if err != nil { + return err + } + if daemonEndpoint.Hostname() == kubeEndpoint.Hostname() { + return nil + } + // The daemon can be local in Docker for Desktop, e.g. "npipe", "unix", ... + if daemonEndpoint.Scheme != "tcp" { + ips, err := net.LookupIP(kubeEndpoint.Hostname()) + if err != nil { + return err + } + for _, ip := range ips { + if ip.IsLoopback() { + return nil + } + } + } + fmt.Fprintf(c.Err(), "WARNING: Swarm and Kubernetes hosts do not match (docker host=%s, kubernetes host=%s).\n"+ + " Update $DOCKER_HOST (or pass -H), or use 'kubectl config use-context' to match.\n", daemonEndpoint.Hostname(), kubeEndpoint.Hostname()) + return nil +} + +func (c *KubeCli) stacksv1beta1() (cliv1beta1.StackInterface, error) { + raw, err := newStackV1Beta1(c.kubeConfig, c.kubeNamespace) + if err != nil { + return nil, err + } + return raw.stacks, nil +} diff --git a/cli/cli/command/stack/kubernetes/client.go b/cli/cli/command/stack/kubernetes/client.go new file mode 100644 index 00000000..5024c923 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/client.go @@ -0,0 +1,103 @@ +package kubernetes + +import ( + "github.com/docker/cli/kubernetes" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeclient "k8s.io/client-go/kubernetes" + appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + typesappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" +) + +// Factory is the kubernetes client factory +type Factory struct { + namespace string + config *restclient.Config + coreClientSet *corev1.CoreV1Client + appsClientSet *appsv1beta2.AppsV1beta2Client + clientSet *kubeclient.Clientset +} + +// NewFactory creates a kubernetes client factory +func NewFactory(namespace string, config *restclient.Config, clientSet *kubeclient.Clientset) (*Factory, error) { + coreClientSet, err := corev1.NewForConfig(config) + if err != nil { + return nil, err + } + + appsClientSet, err := appsv1beta2.NewForConfig(config) + if err != nil { + return nil, err + } + + return &Factory{ + namespace: namespace, + config: config, + coreClientSet: coreClientSet, + appsClientSet: appsClientSet, + clientSet: clientSet, + }, nil +} + +// ConfigMaps returns a client for kubernetes's config maps +func (s *Factory) ConfigMaps() corev1.ConfigMapInterface { + return s.coreClientSet.ConfigMaps(s.namespace) +} + +// Secrets returns a client for 
kubernetes's secrets +func (s *Factory) Secrets() corev1.SecretInterface { + return s.coreClientSet.Secrets(s.namespace) +} + +// Services returns a client for kubernetes's services +func (s *Factory) Services() corev1.ServiceInterface { + return s.coreClientSet.Services(s.namespace) +} + +// Pods returns a client for kubernetes's pods +func (s *Factory) Pods() corev1.PodInterface { + return s.coreClientSet.Pods(s.namespace) +} + +// Nodes returns a client for kubernetes's nodes +func (s *Factory) Nodes() corev1.NodeInterface { + return s.coreClientSet.Nodes() +} + +// ReplicationControllers returns a client for kubernetes replication controllers +func (s *Factory) ReplicationControllers() corev1.ReplicationControllerInterface { + return s.coreClientSet.ReplicationControllers(s.namespace) +} + +// ReplicaSets returns a client for kubernetes replica sets +func (s *Factory) ReplicaSets() typesappsv1beta2.ReplicaSetInterface { + return s.appsClientSet.ReplicaSets(s.namespace) +} + +// DaemonSets returns a client for kubernetes daemon sets +func (s *Factory) DaemonSets() typesappsv1beta2.DaemonSetInterface { + return s.appsClientSet.DaemonSets(s.namespace) +} + +// Stacks returns a client for Docker's Stack on Kubernetes +func (s *Factory) Stacks(allNamespaces bool) (StackClient, error) { + version, err := kubernetes.GetStackAPIVersion(s.clientSet) + if err != nil { + return nil, err + } + namespace := s.namespace + if allNamespaces { + namespace = metav1.NamespaceAll + } + + switch version { + case kubernetes.StackAPIV1Beta1: + return newStackV1Beta1(s.config, namespace) + case kubernetes.StackAPIV1Beta2: + return newStackV1Beta2(s.config, namespace) + default: + return nil, errors.Errorf("no supported Stack API version") + } +} diff --git a/cli/cli/command/stack/kubernetes/conversion.go b/cli/cli/command/stack/kubernetes/conversion.go new file mode 100644 index 00000000..83986661 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/conversion.go @@ -0,0 +1,240 @@ +package kubernetes + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/kubernetes/labels" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + appsv1beta2 "k8s.io/api/apps/v1beta2" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Pod conversion +func podToTask(pod apiv1.Pod) swarm.Task { + var startTime time.Time + if pod.Status.StartTime != nil { + startTime = (*pod.Status.StartTime).Time + } + task := swarm.Task{ + ID: string(pod.UID), + NodeID: pod.Spec.NodeName, + Spec: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: getContainerImage(pod.Spec.Containers), + }, + }, + DesiredState: podPhaseToState(pod.Status.Phase), + Status: swarm.TaskStatus{ + State: podPhaseToState(pod.Status.Phase), + Timestamp: startTime, + PortStatus: swarm.PortStatus{ + Ports: getPorts(pod.Spec.Containers), + }, + }, + } + + return task +} + +func podPhaseToState(phase apiv1.PodPhase) swarm.TaskState { + switch phase { + case apiv1.PodPending: + return swarm.TaskStatePending + case apiv1.PodRunning: + return swarm.TaskStateRunning + case apiv1.PodSucceeded: + return swarm.TaskStateComplete + case apiv1.PodFailed: + return swarm.TaskStateFailed + default: + return swarm.TaskState("unknown") + } +} + +func toSwarmProtocol(protocol apiv1.Protocol) swarm.PortConfigProtocol { + switch protocol { + case apiv1.ProtocolTCP: + return 
swarm.PortConfigProtocolTCP + case apiv1.ProtocolUDP: + return swarm.PortConfigProtocolUDP + } + return swarm.PortConfigProtocol("unknown") +} + +func fetchPods(stackName string, pods corev1.PodInterface, f filters.Args) ([]apiv1.Pod, error) { + services := f.Get("service") + // for existing script compatibility, support either <serviceName> or <stackName>_<serviceName> format + stackNamePrefix := stackName + "_" + for _, s := range services { + if strings.HasPrefix(s, stackNamePrefix) { + services = append(services, strings.TrimPrefix(s, stackNamePrefix)) + } + } + listOpts := metav1.ListOptions{LabelSelector: labels.SelectorForStack(stackName, services...)} + var result []apiv1.Pod + podsList, err := pods.List(listOpts) + if err != nil { + return nil, err + } + nodes := f.Get("node") + for _, pod := range podsList.Items { + if filterPod(pod, nodes) && + // name filter is done client side for matching partials + f.FuzzyMatch("name", stackNamePrefix+pod.Name) { + + result = append(result, pod) + } + } + return result, nil +} + +func filterPod(pod apiv1.Pod, nodes []string) bool { + if len(nodes) == 0 { + return true + } + for _, name := range nodes { + if pod.Spec.NodeName == name { + return true + } + } + return false +} + +func getContainerImage(containers []apiv1.Container) string { + if len(containers) == 0 { + return "" + } + return containers[0].Image +} + +func getPorts(containers []apiv1.Container) []swarm.PortConfig { + if len(containers) == 0 || len(containers[0].Ports) == 0 { + return nil + } + ports := make([]swarm.PortConfig, len(containers[0].Ports)) + for i, port := range containers[0].Ports { + ports[i] = swarm.PortConfig{ + PublishedPort: uint32(port.HostPort), + TargetPort: uint32(port.ContainerPort), + Protocol: toSwarmProtocol(port.Protocol), + } + } + return ports +} + +type tasksBySlot []swarm.Task + +func (t tasksBySlot) Len() int { + return len(t) +} + +func (t tasksBySlot) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +func (t tasksBySlot) Less(i, j int) bool { + // Sort by slot. + if t[i].Slot != t[j].Slot { + return t[i].Slot < t[j].Slot + } + + // If same slot, sort by most recent. 
+ return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) +} + +const ( + publishedServiceSuffix = "-published" + publishedOnRandomPortSuffix = "-random-ports" +) + +func convertToServices(replicas *appsv1beta2.ReplicaSetList, daemons *appsv1beta2.DaemonSetList, services *apiv1.ServiceList) ([]swarm.Service, map[string]formatter.ServiceListInfo, error) { + result := make([]swarm.Service, len(replicas.Items)) + infos := make(map[string]formatter.ServiceListInfo, len(replicas.Items)+len(daemons.Items)) + for i, r := range replicas.Items { + s, err := convertToService(r.Labels[labels.ForServiceName], services, r.Spec.Template.Spec.Containers) + if err != nil { + return nil, nil, err + } + result[i] = *s + infos[s.ID] = formatter.ServiceListInfo{ + Mode: "replicated", + Replicas: fmt.Sprintf("%d/%d", r.Status.AvailableReplicas, r.Status.Replicas), + } + } + for _, d := range daemons.Items { + s, err := convertToService(d.Labels[labels.ForServiceName], services, d.Spec.Template.Spec.Containers) + if err != nil { + return nil, nil, err + } + result = append(result, *s) + infos[s.ID] = formatter.ServiceListInfo{ + Mode: "global", + Replicas: fmt.Sprintf("%d/%d", d.Status.NumberReady, d.Status.DesiredNumberScheduled), + } + } + sort.Slice(result, func(i, j int) bool { + return result[i].ID < result[j].ID + }) + return result, infos, nil +} + +func convertToService(serviceName string, services *apiv1.ServiceList, containers []apiv1.Container) (*swarm.Service, error) { + serviceHeadless, err := findService(services, serviceName) + if err != nil { + return nil, err + } + stack, ok := serviceHeadless.Labels[labels.ForStackName] + if ok { + stack += "_" + } + uid := string(serviceHeadless.UID) + s := &swarm.Service{ + ID: uid, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: stack + serviceHeadless.Name, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: getContainerImage(containers), + }, + }, + }, + } + if serviceNodePort, err := findService(services, serviceName+publishedOnRandomPortSuffix); err == nil && serviceNodePort.Spec.Type == apiv1.ServiceTypeNodePort { + s.Endpoint = serviceEndpoint(serviceNodePort, swarm.PortConfigPublishModeHost) + } + if serviceLoadBalancer, err := findService(services, serviceName+publishedServiceSuffix); err == nil && serviceLoadBalancer.Spec.Type == apiv1.ServiceTypeLoadBalancer { + s.Endpoint = serviceEndpoint(serviceLoadBalancer, swarm.PortConfigPublishModeIngress) + } + return s, nil +} + +func findService(services *apiv1.ServiceList, name string) (apiv1.Service, error) { + for _, s := range services.Items { + if s.Name == name { + return s, nil + } + } + return apiv1.Service{}, fmt.Errorf("could not find service '%s'", name) +} + +func serviceEndpoint(service apiv1.Service, publishMode swarm.PortConfigPublishMode) swarm.Endpoint { + configs := make([]swarm.PortConfig, len(service.Spec.Ports)) + for i, p := range service.Spec.Ports { + configs[i] = swarm.PortConfig{ + PublishMode: publishMode, + PublishedPort: uint32(p.Port), + TargetPort: uint32(p.TargetPort.IntValue()), + Protocol: toSwarmProtocol(p.Protocol), + } + } + return swarm.Endpoint{Ports: configs} +} diff --git a/cli/cli/command/stack/kubernetes/conversion_test.go b/cli/cli/command/stack/kubernetes/conversion_test.go new file mode 100644 index 00000000..213e9ea6 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/conversion_test.go @@ -0,0 +1,192 @@ +package kubernetes + +import ( + "testing" + + "github.com/docker/cli/cli/command/formatter" + 
"github.com/docker/cli/kubernetes/labels" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + appsv1beta2 "k8s.io/api/apps/v1beta2" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachineryTypes "k8s.io/apimachinery/pkg/types" + apimachineryUtil "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestReplicasConversionNeedsAService(t *testing.T) { + replicas := appsv1beta2.ReplicaSetList{ + Items: []appsv1beta2.ReplicaSet{makeReplicaSet("unknown", 0, 0)}, + } + services := apiv1.ServiceList{} + _, _, err := convertToServices(&replicas, &appsv1beta2.DaemonSetList{}, &services) + assert.ErrorContains(t, err, "could not find service") +} + +func TestKubernetesServiceToSwarmServiceConversion(t *testing.T) { + testCases := []struct { + replicas *appsv1beta2.ReplicaSetList + services *apiv1.ServiceList + expectedServices []swarm.Service + expectedListInfo map[string]formatter.ServiceListInfo + }{ + // Match replicas with headless stack services + { + &appsv1beta2.ReplicaSetList{ + Items: []appsv1beta2.ReplicaSet{ + makeReplicaSet("service1", 2, 5), + makeReplicaSet("service2", 3, 3), + }, + }, + &apiv1.ServiceList{ + Items: []apiv1.Service{ + makeKubeService("service1", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil), + makeKubeService("service2", "stack", "uid2", apiv1.ServiceTypeClusterIP, nil), + makeKubeService("service3", "other-stack", "uid2", apiv1.ServiceTypeClusterIP, nil), + }, + }, + []swarm.Service{ + makeSwarmService("stack_service1", "uid1", nil), + makeSwarmService("stack_service2", "uid2", nil), + }, + map[string]formatter.ServiceListInfo{ + "uid1": {Mode: "replicated", Replicas: "2/5"}, + "uid2": {Mode: "replicated", Replicas: "3/3"}, + }, + }, + // Headless service and LoadBalancer Service are tied to the same Swarm service + { + &appsv1beta2.ReplicaSetList{ + Items: []appsv1beta2.ReplicaSet{ + makeReplicaSet("service", 1, 1), + }, + }, + &apiv1.ServiceList{ + Items: []apiv1.Service{ + makeKubeService("service", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil), + makeKubeService("service-published", "stack", "uid2", apiv1.ServiceTypeLoadBalancer, []apiv1.ServicePort{ + { + Port: 80, + TargetPort: apimachineryUtil.FromInt(80), + Protocol: apiv1.ProtocolTCP, + }, + }), + }, + }, + []swarm.Service{ + makeSwarmService("stack_service", "uid1", []swarm.PortConfig{ + { + PublishMode: swarm.PortConfigPublishModeIngress, + PublishedPort: 80, + TargetPort: 80, + Protocol: swarm.PortConfigProtocolTCP, + }, + }), + }, + map[string]formatter.ServiceListInfo{ + "uid1": {Mode: "replicated", Replicas: "1/1"}, + }, + }, + // Headless service and NodePort Service are tied to the same Swarm service + + { + &appsv1beta2.ReplicaSetList{ + Items: []appsv1beta2.ReplicaSet{ + makeReplicaSet("service", 1, 1), + }, + }, + &apiv1.ServiceList{ + Items: []apiv1.Service{ + makeKubeService("service", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil), + makeKubeService("service-random-ports", "stack", "uid2", apiv1.ServiceTypeNodePort, []apiv1.ServicePort{ + { + Port: 35666, + TargetPort: apimachineryUtil.FromInt(80), + Protocol: apiv1.ProtocolTCP, + }, + }), + }, + }, + []swarm.Service{ + makeSwarmService("stack_service", "uid1", []swarm.PortConfig{ + { + PublishMode: swarm.PortConfigPublishModeHost, + PublishedPort: 35666, + TargetPort: 80, + Protocol: swarm.PortConfigProtocolTCP, + }, + }), + }, + map[string]formatter.ServiceListInfo{ + "uid1": {Mode: "replicated", Replicas: "1/1"}, + }, + }, + } + + for _, tc := range testCases { + swarmServices, 
listInfo, err := convertToServices(tc.replicas, &appsv1beta2.DaemonSetList{}, tc.services) + assert.NilError(t, err) + assert.DeepEqual(t, tc.expectedServices, swarmServices) + assert.DeepEqual(t, tc.expectedListInfo, listInfo) + } +} + +func makeReplicaSet(service string, available, replicas int32) appsv1beta2.ReplicaSet { + return appsv1beta2.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + labels.ForServiceName: service, + }, + }, + Spec: appsv1beta2.ReplicaSetSpec{ + Template: apiv1.PodTemplateSpec{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Image: "image", + }, + }, + }, + }, + }, + Status: appsv1beta2.ReplicaSetStatus{ + AvailableReplicas: available, + Replicas: replicas, + }, + } +} + +func makeKubeService(service, stack, uid string, serviceType apiv1.ServiceType, ports []apiv1.ServicePort) apiv1.Service { + return apiv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + labels.ForStackName: stack, + }, + Name: service, + UID: apimachineryTypes.UID(uid), + }, + Spec: apiv1.ServiceSpec{ + Type: serviceType, + Ports: ports, + }, + } +} + +func makeSwarmService(service, id string, ports []swarm.PortConfig) swarm.Service { + return swarm.Service{ + ID: id, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: service, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "image", + }, + }, + }, + Endpoint: swarm.Endpoint{ + Ports: ports, + }, + } +} diff --git a/cli/cli/command/stack/kubernetes/convert.go b/cli/cli/command/stack/kubernetes/convert.go new file mode 100644 index 00000000..aa63daf2 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/convert.go @@ -0,0 +1,332 @@ +package kubernetes + +import ( + "io" + "io/ioutil" + "regexp" + "strconv" + "strings" + + "github.com/docker/cli/cli/compose/loader" + composeTypes "github.com/docker/cli/cli/compose/types" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/cli/kubernetes/compose/v1beta1" + "github.com/docker/cli/kubernetes/compose/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func loadStackData(composefile string) (*composetypes.Config, error) { + parsed, err := loader.ParseYAML([]byte(composefile)) + if err != nil { + return nil, err + } + return loader.Load(composetypes.ConfigDetails{ + ConfigFiles: []composetypes.ConfigFile{ + { + Config: parsed, + }, + }, + }) +} + +// Conversions from internal stack to different stack compose component versions. 
+func stackFromV1beta1(in *v1beta1.Stack) (stack, error) { + cfg, err := loadStackData(in.Spec.ComposeFile) + if err != nil { + return stack{}, err + } + return stack{ + name: in.ObjectMeta.Name, + namespace: in.ObjectMeta.Namespace, + composeFile: in.Spec.ComposeFile, + spec: fromComposeConfig(ioutil.Discard, cfg), + }, nil +} + +func stackToV1beta1(s stack) *v1beta1.Stack { + return &v1beta1.Stack{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.name, + }, + Spec: v1beta1.StackSpec{ + ComposeFile: s.composeFile, + }, + } +} + +func stackFromV1beta2(in *v1beta2.Stack) stack { + return stack{ + name: in.ObjectMeta.Name, + namespace: in.ObjectMeta.Namespace, + spec: in.Spec, + } +} + +func stackToV1beta2(s stack) *v1beta2.Stack { + return &v1beta2.Stack{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.name, + }, + Spec: s.spec, + } +} + +func fromComposeConfig(stderr io.Writer, c *composeTypes.Config) *v1beta2.StackSpec { + if c == nil { + return nil + } + warnUnsupportedFeatures(stderr, c) + serviceConfigs := make([]v1beta2.ServiceConfig, len(c.Services)) + for i, s := range c.Services { + serviceConfigs[i] = fromComposeServiceConfig(s) + } + return &v1beta2.StackSpec{ + Services: serviceConfigs, + Secrets: fromComposeSecrets(c.Secrets), + Configs: fromComposeConfigs(c.Configs), + } +} + +func fromComposeSecrets(s map[string]composeTypes.SecretConfig) map[string]v1beta2.SecretConfig { + if s == nil { + return nil + } + m := map[string]v1beta2.SecretConfig{} + for key, value := range s { + m[key] = v1beta2.SecretConfig{ + Name: value.Name, + File: value.File, + External: v1beta2.External{ + Name: value.External.Name, + External: value.External.External, + }, + Labels: value.Labels, + } + } + return m +} + +func fromComposeConfigs(s map[string]composeTypes.ConfigObjConfig) map[string]v1beta2.ConfigObjConfig { + if s == nil { + return nil + } + m := map[string]v1beta2.ConfigObjConfig{} + for key, value := range s { + m[key] = v1beta2.ConfigObjConfig{ + Name: value.Name, + File: value.File, + External: v1beta2.External{ + Name: value.External.Name, + External: value.External.External, + }, + Labels: value.Labels, + } + } + return m +} + +func fromComposeServiceConfig(s composeTypes.ServiceConfig) v1beta2.ServiceConfig { + var userID *int64 + if s.User != "" { + numerical, err := strconv.Atoi(s.User) + if err == nil { + unixUserID := int64(numerical) + userID = &unixUserID + } + } + return v1beta2.ServiceConfig{ + Name: s.Name, + CapAdd: s.CapAdd, + CapDrop: s.CapDrop, + Command: s.Command, + Configs: fromComposeServiceConfigs(s.Configs), + Deploy: v1beta2.DeployConfig{ + Mode: s.Deploy.Mode, + Replicas: s.Deploy.Replicas, + Labels: s.Deploy.Labels, + UpdateConfig: fromComposeUpdateConfig(s.Deploy.UpdateConfig), + Resources: fromComposeResources(s.Deploy.Resources), + RestartPolicy: fromComposeRestartPolicy(s.Deploy.RestartPolicy), + Placement: fromComposePlacement(s.Deploy.Placement), + }, + Entrypoint: s.Entrypoint, + Environment: s.Environment, + ExtraHosts: s.ExtraHosts, + Hostname: s.Hostname, + HealthCheck: fromComposeHealthcheck(s.HealthCheck), + Image: s.Image, + Ipc: s.Ipc, + Labels: s.Labels, + Pid: s.Pid, + Ports: fromComposePorts(s.Ports), + Privileged: s.Privileged, + ReadOnly: s.ReadOnly, + Secrets: fromComposeServiceSecrets(s.Secrets), + StdinOpen: s.StdinOpen, + StopGracePeriod: s.StopGracePeriod, + Tmpfs: s.Tmpfs, + Tty: s.Tty, + User: userID, + Volumes: fromComposeServiceVolumeConfig(s.Volumes), + WorkingDir: s.WorkingDir, + } +} + +func fromComposePorts(ports 
[]composeTypes.ServicePortConfig) []v1beta2.ServicePortConfig { + if ports == nil { + return nil + } + p := make([]v1beta2.ServicePortConfig, len(ports)) + for i, port := range ports { + p[i] = v1beta2.ServicePortConfig{ + Mode: port.Mode, + Target: port.Target, + Published: port.Published, + Protocol: port.Protocol, + } + } + return p +} + +func fromComposeServiceSecrets(secrets []composeTypes.ServiceSecretConfig) []v1beta2.ServiceSecretConfig { + if secrets == nil { + return nil + } + c := make([]v1beta2.ServiceSecretConfig, len(secrets)) + for i, secret := range secrets { + c[i] = v1beta2.ServiceSecretConfig{ + Source: secret.Source, + Target: secret.Target, + UID: secret.UID, + Mode: secret.Mode, + } + } + return c +} + +func fromComposeServiceConfigs(configs []composeTypes.ServiceConfigObjConfig) []v1beta2.ServiceConfigObjConfig { + if configs == nil { + return nil + } + c := make([]v1beta2.ServiceConfigObjConfig, len(configs)) + for i, config := range configs { + c[i] = v1beta2.ServiceConfigObjConfig{ + Source: config.Source, + Target: config.Target, + UID: config.UID, + Mode: config.Mode, + } + } + return c +} + +func fromComposeHealthcheck(h *composeTypes.HealthCheckConfig) *v1beta2.HealthCheckConfig { + if h == nil { + return nil + } + return &v1beta2.HealthCheckConfig{ + Test: h.Test, + Timeout: h.Timeout, + Interval: h.Interval, + Retries: h.Retries, + } +} + +func fromComposePlacement(p composeTypes.Placement) v1beta2.Placement { + return v1beta2.Placement{ + Constraints: fromComposeConstraints(p.Constraints), + } +} + +var constraintEquals = regexp.MustCompile(`([\w\.]*)\W*(==|!=)\W*([\w\.]*)`) + +const ( + swarmOs = "node.platform.os" + swarmArch = "node.platform.arch" + swarmHostname = "node.hostname" + swarmLabelPrefix = "node.labels." 
+) + +func fromComposeConstraints(s []string) *v1beta2.Constraints { + if len(s) == 0 { + return nil + } + constraints := &v1beta2.Constraints{} + for _, constraint := range s { + matches := constraintEquals.FindStringSubmatch(constraint) + if len(matches) == 4 { + key := matches[1] + operator := matches[2] + value := matches[3] + constraint := &v1beta2.Constraint{ + Operator: operator, + Value: value, + } + switch { + case key == swarmOs: + constraints.OperatingSystem = constraint + case key == swarmArch: + constraints.Architecture = constraint + case key == swarmHostname: + constraints.Hostname = constraint + case strings.HasPrefix(key, swarmLabelPrefix): + if constraints.MatchLabels == nil { + constraints.MatchLabels = map[string]v1beta2.Constraint{} + } + constraints.MatchLabels[strings.TrimPrefix(key, swarmLabelPrefix)] = *constraint + } + } + } + return constraints +} + +func fromComposeResources(r composeTypes.Resources) v1beta2.Resources { + return v1beta2.Resources{ + Limits: fromComposeResourcesResource(r.Limits), + Reservations: fromComposeResourcesResource(r.Reservations), + } +} + +func fromComposeResourcesResource(r *composeTypes.Resource) *v1beta2.Resource { + if r == nil { + return nil + } + return &v1beta2.Resource{ + MemoryBytes: int64(r.MemoryBytes), + NanoCPUs: r.NanoCPUs, + } +} + +func fromComposeUpdateConfig(u *composeTypes.UpdateConfig) *v1beta2.UpdateConfig { + if u == nil { + return nil + } + return &v1beta2.UpdateConfig{ + Parallelism: u.Parallelism, + } +} + +func fromComposeRestartPolicy(r *composeTypes.RestartPolicy) *v1beta2.RestartPolicy { + if r == nil { + return nil + } + return &v1beta2.RestartPolicy{ + Condition: r.Condition, + } +} + +func fromComposeServiceVolumeConfig(vs []composeTypes.ServiceVolumeConfig) []v1beta2.ServiceVolumeConfig { + if vs == nil { + return nil + } + volumes := []v1beta2.ServiceVolumeConfig{} + for _, v := range vs { + volumes = append(volumes, v1beta2.ServiceVolumeConfig{ + Type: v.Type, + Source: v.Source, + Target: v.Target, + ReadOnly: v.ReadOnly, + }) + } + return volumes +} diff --git a/cli/cli/command/stack/kubernetes/deploy.go b/cli/cli/command/stack/kubernetes/deploy.go new file mode 100644 index 00000000..62369a81 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/deploy.go @@ -0,0 +1,158 @@ +package kubernetes + +import ( + "fmt" + "io" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/morikuni/aec" +) + +// RunDeploy is the kubernetes implementation of docker stack deploy +func RunDeploy(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Config) error { + cmdOut := dockerCli.Out() + + // Initialize clients + composeClient, err := dockerCli.composeClient() + if err != nil { + return err + } + stacks, err := composeClient.Stacks(false) + if err != nil { + return err + } + + stack, err := stacks.FromCompose(dockerCli.Err(), opts.Namespace, cfg) + if err != nil { + return err + } + + configMaps := composeClient.ConfigMaps() + secrets := composeClient.Secrets() + services := composeClient.Services() + + if err := stacks.IsColliding(services, stack); err != nil { + return err + } + + if err := stack.createFileBasedConfigMaps(configMaps); err != nil { + return err + } + + if err := stack.createFileBasedSecrets(secrets); err != nil { + return err + } + + if err = stacks.CreateOrUpdate(stack); err != nil { + return err + } + + fmt.Fprintln(cmdOut, "Waiting for the stack to be stable and running...") + 
v1beta1Cli, err := dockerCli.stacksv1beta1() + if err != nil { + return err + } + + pods := composeClient.Pods() + watcher := &deployWatcher{ + stacks: v1beta1Cli, + pods: pods, + } + statusUpdates := make(chan serviceStatus) + displayDone := make(chan struct{}) + go func() { + defer close(displayDone) + display := newStatusDisplay(dockerCli.Out()) + for status := range statusUpdates { + display.OnStatus(status) + } + }() + + err = watcher.Watch(stack.name, stack.getServices(), statusUpdates) + close(statusUpdates) + <-displayDone + if err != nil { + return err + } + fmt.Fprintf(cmdOut, "\nStack %s is stable and running\n\n", stack.name) + return nil + +} + +type statusDisplay interface { + OnStatus(serviceStatus) +} +type metaServiceState string + +const ( + metaServiceStateReady = metaServiceState("Ready") + metaServiceStatePending = metaServiceState("Pending") + metaServiceStateFailed = metaServiceState("Failed") +) + +func metaStateFromStatus(status serviceStatus) metaServiceState { + switch { + case status.podsReady > 0: + return metaServiceStateReady + case status.podsPending > 0: + return metaServiceStatePending + default: + return metaServiceStateFailed + } +} + +type forwardOnlyStatusDisplay struct { + o *command.OutStream + states map[string]metaServiceState +} + +func (d *forwardOnlyStatusDisplay) OnStatus(status serviceStatus) { + state := metaStateFromStatus(status) + if d.states[status.name] != state { + d.states[status.name] = state + fmt.Fprintf(d.o, "%s: %s\n", status.name, state) + } +} + +type interactiveStatusDisplay struct { + o *command.OutStream + statuses []serviceStatus +} + +func (d *interactiveStatusDisplay) OnStatus(status serviceStatus) { + b := aec.EmptyBuilder + for ix := 0; ix < len(d.statuses); ix++ { + b = b.Up(1).EraseLine(aec.EraseModes.All) + } + b = b.Column(0) + fmt.Fprint(d.o, b.ANSI) + updated := false + for ix, s := range d.statuses { + if s.name == status.name { + d.statuses[ix] = status + s = status + updated = true + } + displayInteractiveServiceStatus(s, d.o) + } + if !updated { + d.statuses = append(d.statuses, status) + displayInteractiveServiceStatus(status, d.o) + } +} + +func displayInteractiveServiceStatus(status serviceStatus, o io.Writer) { + state := metaStateFromStatus(status) + totalFailed := status.podsFailed + status.podsSucceeded + status.podsUnknown + fmt.Fprintf(o, "%[1]s: %[2]s\t\t[pod status: %[3]d/%[6]d ready, %[4]d/%[6]d pending, %[5]d/%[6]d failed]\n", status.name, state, + status.podsReady, status.podsPending, totalFailed, status.podsTotal) +} + +func newStatusDisplay(o *command.OutStream) statusDisplay { + if !o.IsTerminal() { + return &forwardOnlyStatusDisplay{o: o, states: map[string]metaServiceState{}} + } + return &interactiveStatusDisplay{o: o} +} diff --git a/cli/cli/command/stack/kubernetes/list.go b/cli/cli/command/stack/kubernetes/list.go new file mode 100644 index 00000000..f6efd050 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/list.go @@ -0,0 +1,136 @@ +package kubernetes + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/config/configfile" + "github.com/pkg/errors" + core_v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetStacks lists the kubernetes stacks +func GetStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, 
error) { + if opts.AllNamespaces || len(opts.Namespaces) == 0 { + if isAllNamespacesDisabled(kubeCli.ConfigFile().Kubernetes) { + opts.AllNamespaces = true + } + return getStacksWithAllNamespaces(kubeCli, opts) + } + return getStacksWithNamespaces(kubeCli, opts, removeDuplicates(opts.Namespaces)) +} + +func isAllNamespacesDisabled(kubeCliConfig *configfile.KubernetesConfig) bool { + return kubeCliConfig == nil || kubeCliConfig != nil && kubeCliConfig.AllNamespaces != "disabled" +} + +func getStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) { + composeClient, err := kubeCli.composeClient() + if err != nil { + return nil, err + } + stackSvc, err := composeClient.Stacks(opts.AllNamespaces) + if err != nil { + return nil, err + } + stacks, err := stackSvc.List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + var formattedStacks []*formatter.Stack + for _, stack := range stacks { + formattedStacks = append(formattedStacks, &formatter.Stack{ + Name: stack.name, + Services: len(stack.getServices()), + Orchestrator: "Kubernetes", + Namespace: stack.namespace, + }) + } + return formattedStacks, nil +} + +func getStacksWithAllNamespaces(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) { + stacks, err := getStacks(kubeCli, opts) + if !apierrs.IsForbidden(err) { + return stacks, err + } + namespaces, err2 := getUserVisibleNamespaces(*kubeCli) + if err2 != nil { + return nil, errors.Wrap(err2, "failed to query user visible namespaces") + } + if namespaces == nil { + // UCP API not present, fall back to Kubernetes error + return nil, err + } + opts.AllNamespaces = false + return getStacksWithNamespaces(kubeCli, opts, namespaces) +} + +func getUserVisibleNamespaces(dockerCli command.Cli) ([]string, error) { + host := dockerCli.Client().DaemonHost() + endpoint, err := url.Parse(host) + if err != nil { + return nil, err + } + endpoint.Scheme = "https" + endpoint.Path = "/kubernetesNamespaces" + resp, err := dockerCli.Client().HTTPClient().Get(endpoint.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrapf(err, "received %d status and unable to read response", resp.StatusCode) + } + switch resp.StatusCode { + case http.StatusOK: + nms := &core_v1.NamespaceList{} + if err := json.Unmarshal(body, nms); err != nil { + return nil, errors.Wrapf(err, "unmarshal failed: %s", string(body)) + } + namespaces := make([]string, len(nms.Items)) + for i, namespace := range nms.Items { + namespaces[i] = namespace.Name + } + return namespaces, nil + case http.StatusNotFound: + // UCP API not present + return nil, nil + default: + return nil, fmt.Errorf("received %d status while retrieving namespaces: %s", resp.StatusCode, string(body)) + } +} + +func getStacksWithNamespaces(kubeCli *KubeCli, opts options.List, namespaces []string) ([]*formatter.Stack, error) { + stacks := []*formatter.Stack{} + for _, namespace := range namespaces { + kubeCli.kubeNamespace = namespace + ss, err := getStacks(kubeCli, opts) + if err != nil { + return nil, err + } + stacks = append(stacks, ss...) 
+ } + return stacks, nil +} + +func removeDuplicates(namespaces []string) []string { + found := make(map[string]bool) + results := namespaces[:0] + for _, n := range namespaces { + if !found[n] { + results = append(results, n) + found[n] = true + } + } + return results +} diff --git a/cli/cli/command/stack/kubernetes/ps.go b/cli/cli/command/stack/kubernetes/ps.go new file mode 100644 index 00000000..a4ee14f6 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/ps.go @@ -0,0 +1,112 @@ +package kubernetes + +import ( + "fmt" + "sort" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/task" + "github.com/docker/docker/api/types/swarm" + apiv1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +var supportedPSFilters = map[string]bool{ + "name": true, + "service": true, + "node": true, +} + +// RunPS is the kubernetes implementation of docker stack ps +func RunPS(dockerCli *KubeCli, options options.PS) error { + filters := options.Filter.Value() + if err := filters.Validate(supportedPSFilters); err != nil { + return err + } + client, err := dockerCli.composeClient() + if err != nil { + return err + } + stacks, err := client.Stacks(false) + if err != nil { + return err + } + stackName := options.Namespace + _, err = stacks.Get(stackName) + if apierrs.IsNotFound(err) { + return fmt.Errorf("nothing found in stack: %s", stackName) + } + if err != nil { + return err + } + pods, err := fetchPods(stackName, client.Pods(), filters) + if err != nil { + return err + } + if len(pods) == 0 { + return fmt.Errorf("nothing found in stack: %s", stackName) + } + return printTasks(dockerCli, options, stackName, client, pods) +} + +func printTasks(dockerCli command.Cli, options options.PS, namespace string, client corev1.NodesGetter, pods []apiv1.Pod) error { + format := options.Format + if format == "" { + format = task.DefaultFormat(dockerCli.ConfigFile(), options.Quiet) + } + + tasks := make([]swarm.Task, len(pods)) + for i, pod := range pods { + tasks[i] = podToTask(pod) + } + sort.Stable(tasksBySlot(tasks)) + + names := map[string]string{} + nodes := map[string]string{} + + n, err := listNodes(client, options.NoResolve) + if err != nil { + return err + } + for i, task := range tasks { + nodeValue, err := resolveNode(pods[i].Spec.NodeName, n, options.NoResolve) + if err != nil { + return err + } + names[task.ID] = fmt.Sprintf("%s_%s", namespace, pods[i].Name) + nodes[task.ID] = nodeValue + } + + tasksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewTaskFormat(format, options.Quiet), + Trunc: !options.NoTrunc, + } + + return formatter.TaskWrite(tasksCtx, tasks, names, nodes) +} + +func resolveNode(name string, nodes *apiv1.NodeList, noResolve bool) (string, error) { + // Here we have a name and we need to resolve its identifier. To mimic swarm behavior + // we need to resolve to the id when noResolve is set, otherwise we return the name. 
+ if noResolve { + for _, node := range nodes.Items { + if node.Name == name { + return string(node.UID), nil + } + } + return "", fmt.Errorf("could not find node '%s'", name) + } + return name, nil +} + +func listNodes(client corev1.NodesGetter, noResolve bool) (*apiv1.NodeList, error) { + if noResolve { + return client.Nodes().List(metav1.ListOptions{}) + } + return nil, nil +} diff --git a/cli/cli/command/stack/kubernetes/remove.go b/cli/cli/command/stack/kubernetes/remove.go new file mode 100644 index 00000000..311c7597 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/remove.go @@ -0,0 +1,27 @@ +package kubernetes + +import ( + "fmt" + + "github.com/docker/cli/cli/command/stack/options" + "github.com/pkg/errors" +) + +// RunRemove is the kubernetes implementation of docker stack remove +func RunRemove(dockerCli *KubeCli, opts options.Remove) error { + composeClient, err := dockerCli.composeClient() + if err != nil { + return err + } + stacks, err := composeClient.Stacks(false) + if err != nil { + return err + } + for _, stack := range opts.Namespaces { + fmt.Fprintf(dockerCli.Out(), "Removing stack: %s\n", stack) + if err := stacks.Delete(stack); err != nil { + return errors.Wrapf(err, "Failed to remove stack %s", stack) + } + } + return nil +} diff --git a/cli/cli/command/stack/kubernetes/services.go b/cli/cli/command/stack/kubernetes/services.go new file mode 100644 index 00000000..a2d13e56 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/services.go @@ -0,0 +1,158 @@ +package kubernetes + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/kubernetes/labels" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + appsv1beta2 "k8s.io/api/apps/v1beta2" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var supportedServicesFilters = map[string]bool{ + "mode": true, + "name": true, + "label": true, +} + +func generateSelector(labels map[string][]string) []string { + var result []string + for k, v := range labels { + for _, val := range v { + result = append(result, fmt.Sprintf("%s=%s", k, val)) + } + if len(v) == 0 { + result = append(result, k) + } + } + return result +} + +func parseLabelFilters(rawFilters []string) map[string][]string { + labels := map[string][]string{} + for _, rawLabel := range rawFilters { + v := strings.SplitN(rawLabel, "=", 2) + key := v[0] + if len(v) > 1 { + labels[key] = append(labels[key], v[1]) + } else if _, ok := labels[key]; !ok { + labels[key] = []string{} + } + } + return labels +} + +func generateLabelSelector(f filters.Args, stackName string) string { + selectors := append(generateSelector(parseLabelFilters(f.Get("label"))), labels.SelectorForStack(stackName)) + return strings.Join(selectors, ",") +} + +func getResourcesForServiceList(dockerCli *KubeCli, filters filters.Args, labelSelector string) (*appsv1beta2.ReplicaSetList, *appsv1beta2.DaemonSetList, *corev1.ServiceList, error) { + client, err := dockerCli.composeClient() + if err != nil { + return nil, nil, nil, err + } + modes := filters.Get("mode") + replicas := &appsv1beta2.ReplicaSetList{} + if len(modes) == 0 || filters.ExactMatch("mode", "replicated") { + if replicas, err = client.ReplicaSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil { + return nil, nil, nil, err + } + } + daemons := &appsv1beta2.DaemonSetList{} + if len(modes) == 0 || 
filters.ExactMatch("mode", "global") { + if daemons, err = client.DaemonSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil { + return nil, nil, nil, err + } + } + services, err := client.Services().List(metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return nil, nil, nil, err + } + return replicas, daemons, services, nil +} + +// RunServices is the kubernetes implementation of docker stack services +func RunServices(dockerCli *KubeCli, opts options.Services) error { + filters := opts.Filter.Value() + if err := filters.Validate(supportedServicesFilters); err != nil { + return err + } + client, err := dockerCli.composeClient() + if err != nil { + return err + } + stacks, err := client.Stacks(false) + if err != nil { + return err + } + stackName := opts.Namespace + _, err = stacks.Get(stackName) + if apierrs.IsNotFound(err) { + return fmt.Errorf("nothing found in stack: %s", stackName) + } + if err != nil { + return err + } + + labelSelector := generateLabelSelector(filters, stackName) + replicasList, daemonsList, servicesList, err := getResourcesForServiceList(dockerCli, filters, labelSelector) + if err != nil { + return err + } + + // Convert ReplicaSets, DaemonSets and kubernetes services to swarm services and formatter information + services, info, err := convertToServices(replicasList, daemonsList, servicesList) + if err != nil { + return err + } + services = filterServicesByName(services, filters.Get("name"), stackName) + + if opts.Quiet { + info = map[string]formatter.ServiceListInfo{} + } + + format := opts.Format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !opts.Quiet { + format = dockerCli.ConfigFile().ServicesFormat + } else { + format = formatter.TableFormatKey + } + } + + servicesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewServiceListFormat(format, opts.Quiet), + } + return formatter.ServiceListWrite(servicesCtx, services, info) +} + +func filterServicesByName(services []swarm.Service, names []string, stackName string) []swarm.Service { + if len(names) == 0 { + return services + } + prefix := stackName + "_" + // Accepts unprefixed service names (for compatibility with existing swarm scripts where service names are prefixed by stack names) + for i, n := range names { + if !strings.HasPrefix(n, prefix) { + names[i] = stackName + "_" + n + } + } + // Filter services + result := []swarm.Service{} + for _, s := range services { + for _, n := range names { + if strings.HasPrefix(s.Spec.Name, n) { + result = append(result, s) + } + } + } + return result +} diff --git a/cli/cli/command/stack/kubernetes/services_test.go b/cli/cli/command/stack/kubernetes/services_test.go new file mode 100644 index 00000000..5603eeda --- /dev/null +++ b/cli/cli/command/stack/kubernetes/services_test.go @@ -0,0 +1,138 @@ +package kubernetes + +import ( + "testing" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + "gotest.tools/assert/cmp" +) + +func TestServiceFiltersLabelSelectorGen(t *testing.T) { + cases := []struct { + name string + stackName string + filters filters.Args + expectedSelectorParts []string + }{ + { + name: "no-filter", + stackName: "test", + filters: filters.NewArgs(), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + }, + }, + { + name: "label present filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label-is-present"}, + ), +
expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label-is-present", + }, + }, + { + name: "single value label filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1=test", + }, + }, + { + name: "multi value label filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + filters.KeyValuePair{Key: "label", Value: "label1=test2"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1=test", + "label1=test2", + }, + }, + { + name: "2 different labels filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + filters.KeyValuePair{Key: "label", Value: "label2=test2"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1=test", + "label2=test2", + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + result := generateLabelSelector(c.filters, c.stackName) + for _, toFind := range c.expectedSelectorParts { + assert.Assert(t, cmp.Contains(result, toFind)) + } + }) + } +} +func TestServiceFiltersServiceByName(t *testing.T) { + cases := []struct { + name string + filters []string + services []swarm.Service + expectedServices []swarm.Service + }{ + { + name: "no filter", + filters: []string{}, + services: makeServices("s1", "s2"), + expectedServices: makeServices("s1", "s2"), + }, + { + name: "single-name filter", + filters: []string{"s1"}, + services: makeServices("s1", "s2"), + expectedServices: makeServices("s1"), + }, + { + name: "filter by prefix", + filters: []string{"prefix"}, + services: makeServices("prefix-s1", "prefix-s2", "s2"), + expectedServices: makeServices("prefix-s1", "prefix-s2"), + }, + { + name: "multi-name filter", + filters: []string{"s1", "s2"}, + services: makeServices("s1", "s2", "s3"), + expectedServices: makeServices("s1", "s2"), + }, + { + name: "stack name prefix is valid", + filters: []string{"stack_s1"}, + services: makeServices("s1", "s11", "s2"), + expectedServices: makeServices("s1", "s11"), + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + result := filterServicesByName(c.services, c.filters, "stack") + assert.DeepEqual(t, c.expectedServices, result) + }) + } +} + +func makeServices(names ...string) []swarm.Service { + result := make([]swarm.Service, len(names)) + for i, n := range names { + result[i] = swarm.Service{Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "stack_" + n}}} + } + return result +} diff --git a/cli/cli/command/stack/kubernetes/stack.go b/cli/cli/command/stack/kubernetes/stack.go new file mode 100644 index 00000000..566e7729 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/stack.go @@ -0,0 +1,107 @@ +package kubernetes + +import ( + "io/ioutil" + "path/filepath" + "sort" + + "github.com/docker/cli/kubernetes/compose/v1beta2" + "github.com/docker/cli/kubernetes/labels" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// stack is the main type used by stack commands so they remain independent from kubernetes compose component version. 
+type stack struct { + name string + namespace string + composeFile string + spec *v1beta2.StackSpec +} + +// getServices returns all the stack service names, sorted lexicographically +func (s *stack) getServices() []string { + services := make([]string, len(s.spec.Services)) + for i, service := range s.spec.Services { + services[i] = service.Name + } + sort.Strings(services) + return services +} + +// createFileBasedConfigMaps creates a Kubernetes ConfigMap for each Compose global file-based config. +func (s *stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface) error { + for name, config := range s.spec.Configs { + if config.File == "" { + continue + } + + fileName := filepath.Base(config.File) + content, err := ioutil.ReadFile(config.File) + if err != nil { + return err + } + + if _, err := configMaps.Create(toConfigMap(s.name, name, fileName, content)); err != nil { + return err + } + } + + return nil +} + +// toConfigMap converts a Compose Config to a Kube ConfigMap. +func toConfigMap(stackName, name, key string, content []byte) *apiv1.ConfigMap { + return &apiv1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + labels.ForStackName: stackName, + }, + }, + Data: map[string]string{ + key: string(content), + }, + } +} + +// createFileBasedSecrets creates a Kubernetes Secret for each Compose global file-based secret. +func (s *stack) createFileBasedSecrets(secrets corev1.SecretInterface) error { + for name, secret := range s.spec.Secrets { + if secret.File == "" { + continue + } + + fileName := filepath.Base(secret.File) + content, err := ioutil.ReadFile(secret.File) + if err != nil { + return err + } + + if _, err := secrets.Create(toSecret(s.name, name, fileName, content)); err != nil { + return err + } + } + + return nil +} + +// toSecret converts a Compose Secret to a Kube Secret. +func toSecret(stackName, name, key string, content []byte) *apiv1.Secret { + return &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + labels.ForStackName: stackName, + }, + }, + Data: map[string][]byte{ + key: content, + }, + } +} diff --git a/cli/cli/command/stack/kubernetes/stackclient.go b/cli/cli/command/stack/kubernetes/stackclient.go new file mode 100644 index 00000000..b59a50d4 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/stackclient.go @@ -0,0 +1,197 @@ +package kubernetes + +import ( + "fmt" + "io" + + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/cli/compose/schema" + composetypes "github.com/docker/cli/cli/compose/types" + composev1beta1 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1" + composev1beta2 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2" + v1beta1types "github.com/docker/cli/kubernetes/compose/v1beta1" + "github.com/docker/cli/kubernetes/labels" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" +) + +// StackClient talks to a kubernetes compose component. 
+type StackClient interface { + CreateOrUpdate(s stack) error + Delete(name string) error + Get(name string) (stack, error) + List(opts metav1.ListOptions) ([]stack, error) + IsColliding(servicesClient corev1.ServiceInterface, s stack) error + FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (stack, error) +} + +// stackV1Beta1 implements the StackClient interface and talks to compose component v1beta1. +type stackV1Beta1 struct { + stacks composev1beta1.StackInterface +} + +func newStackV1Beta1(config *rest.Config, namespace string) (*stackV1Beta1, error) { + client, err := composev1beta1.NewForConfig(config) + if err != nil { + return nil, err + } + return &stackV1Beta1{stacks: client.Stacks(namespace)}, nil +} + +func (s *stackV1Beta1) CreateOrUpdate(internalStack stack) error { + // If it already exists, update the stack + if stackBeta1, err := s.stacks.Get(internalStack.name, metav1.GetOptions{}); err == nil { + stackBeta1.Spec.ComposeFile = internalStack.composeFile + _, err := s.stacks.Update(stackBeta1) + return err + } + // Or create it + _, err := s.stacks.Create(stackToV1beta1(internalStack)) + return err +} + +func (s *stackV1Beta1) Delete(name string) error { + return s.stacks.Delete(name, &metav1.DeleteOptions{}) +} + +func (s *stackV1Beta1) Get(name string) (stack, error) { + stackBeta1, err := s.stacks.Get(name, metav1.GetOptions{}) + if err != nil { + return stack{}, err + } + return stackFromV1beta1(stackBeta1) +} + +func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]stack, error) { + list, err := s.stacks.List(opts) + if err != nil { + return nil, err + } + stacks := make([]stack, len(list.Items)) + for i := range list.Items { + stack, err := stackFromV1beta1(&list.Items[i]) + if err != nil { + return nil, err + } + stacks[i] = stack + } + return stacks, nil +} + +// IsColliding verifies that the services defined in the stack do not collide with already deployed services +func (s *stackV1Beta1) IsColliding(servicesClient corev1.ServiceInterface, st stack) error { + for _, srv := range st.getServices() { + if err := verify(servicesClient, st.name, srv); err != nil { + return err + } + } + return nil +} + +// verify checks whether the service is already present in kubernetes. +// If we find the service by name but it doesn't have our label or it has a different value +// than the stack name for the label, we fail (i.e. it will collide) +func verify(services corev1.ServiceInterface, stackName string, service string) error { + svc, err := services.Get(service, metav1.GetOptions{}) + if err == nil { + if key, ok := svc.ObjectMeta.Labels[labels.ForStackName]; ok { + if key != stackName { + return fmt.Errorf("service %s already present in stack named %s", service, key) + } + return nil + } + return fmt.Errorf("service %s already present in the cluster", service) + } + return nil +} + +func (s *stackV1Beta1) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (stack, error) { + cfg.Version = v1beta1types.MaxComposeVersion + st, err := fromCompose(stderr, name, cfg) + if err != nil { + return stack{}, err + } + res, err := yaml.Marshal(cfg) + if err != nil { + return stack{}, err + } + // reload the result to check that it produced a valid 3.5 compose file + resparsedConfig, err := loader.ParseYAML(res) + if err != nil { + return stack{}, err + } + if err = schema.Validate(resparsedConfig, v1beta1types.MaxComposeVersion); err != nil { + return stack{}, errors.Wrapf(err, "the compose yaml file is invalid with v%s", v1beta1types.MaxComposeVersion) + } + + st.composeFile = string(res) + return st, nil +} + +// stackV1Beta2 implements the StackClient interface and talks to compose component v1beta2. +type stackV1Beta2 struct { + stacks composev1beta2.StackInterface +} + +func newStackV1Beta2(config *rest.Config, namespace string) (*stackV1Beta2, error) { + client, err := composev1beta2.NewForConfig(config) + if err != nil { + return nil, err + } + return &stackV1Beta2{stacks: client.Stacks(namespace)}, nil +} + +func (s *stackV1Beta2) CreateOrUpdate(internalStack stack) error { + // If it already exists, update the stack + if stackBeta2, err := s.stacks.Get(internalStack.name, metav1.GetOptions{}); err == nil { + stackBeta2.Spec = internalStack.spec + _, err := s.stacks.Update(stackBeta2) + return err + } + // Or create it + _, err := s.stacks.Create(stackToV1beta2(internalStack)) + return err +} + +func (s *stackV1Beta2) Delete(name string) error { + return s.stacks.Delete(name, &metav1.DeleteOptions{}) +} + +func (s *stackV1Beta2) Get(name string) (stack, error) { + stackBeta2, err := s.stacks.Get(name, metav1.GetOptions{}) + if err != nil { + return stack{}, err + } + return stackFromV1beta2(stackBeta2), nil +} + +func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]stack, error) { + list, err := s.stacks.List(opts) + if err != nil { + return nil, err + } + stacks := make([]stack, len(list.Items)) + for i := range list.Items { + stacks[i] = stackFromV1beta2(&list.Items[i]) + } + return stacks, nil +} + +// IsColliding is handled server-side with the compose API v1beta2, so there is nothing to do here +func (s *stackV1Beta2) IsColliding(servicesClient corev1.ServiceInterface, st stack) error { + return nil +} + +func (s *stackV1Beta2) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (stack, error) { + return fromCompose(stderr, name, cfg) +} + +func fromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (stack, error) { + return stack{ + name: name, + spec: fromComposeConfig(stderr, cfg), + }, nil +} diff --git a/cli/cli/command/stack/kubernetes/stackclient_test.go b/cli/cli/command/stack/kubernetes/stackclient_test.go new file mode 100644 index 00000000..8757f1c4 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/stackclient_test.go @@ -0,0 +1,64 @@ +package kubernetes + +import ( + "io/ioutil" + "testing" + + composetypes "github.com/docker/cli/cli/compose/types" +
"gotest.tools/assert" +) + +func TestFromCompose(t *testing.T) { + stackClient := &stackV1Beta1{} + s, err := stackClient.FromCompose(ioutil.Discard, "foo", &composetypes.Config{ + Version: "3.1", + Filename: "banana", + Services: []composetypes.ServiceConfig{ + { + Name: "foo", + Image: "foo", + }, + { + Name: "bar", + Image: "bar", + }, + }, + }) + assert.NilError(t, err) + assert.Equal(t, "foo", s.name) + assert.Equal(t, string(`version: "3.5" +services: + bar: + image: bar + foo: + image: foo +networks: {} +volumes: {} +secrets: {} +configs: {} +`), s.composeFile) +} + +func TestFromComposeUnsupportedVersion(t *testing.T) { + stackClient := &stackV1Beta1{} + _, err := stackClient.FromCompose(ioutil.Discard, "foo", &composetypes.Config{ + Version: "3.6", + Filename: "banana", + Services: []composetypes.ServiceConfig{ + { + Name: "foo", + Image: "foo", + Volumes: []composetypes.ServiceVolumeConfig{ + { + Type: "tmpfs", + Target: "/app", + Tmpfs: &composetypes.ServiceVolumeTmpfs{ + Size: 10000, + }, + }, + }, + }, + }, + }) + assert.ErrorContains(t, err, "the compose yaml file is invalid with v3.5: services.foo.volumes.0 Additional property tmpfs is not allowed") +} diff --git a/cli/cli/command/stack/kubernetes/testdata/warnings.golden b/cli/cli/command/stack/kubernetes/testdata/warnings.golden new file mode 100644 index 00000000..1ee14b0f --- /dev/null +++ b/cli/cli/command/stack/kubernetes/testdata/warnings.golden @@ -0,0 +1,31 @@ +top-level network "global" is ignored +service "front": network "private" is ignored +service "front": update_config.delay is not supported +service "front": update_config.failure_action is not supported +service "front": update_config.monitor is not supported +service "front": update_config.max_failure_ratio is not supported +service "front": restart_policy.delay is ignored +service "front": restart_policy.max_attempts is ignored +service "front": restart_policy.window is ignored +service "front": container_name is deprecated +service "front": expose is deprecated +service "front": build is ignored +service "front": cgroup_parent is ignored +service "front": devices are ignored +service "front": domainname is ignored +service "front": external_links are ignored +service "front": links are ignored +service "front": mac_address is ignored +service "front": network_mode is ignored +service "front": restart is ignored +service "front": security_opt are ignored +service "front": ulimits are ignored +service "front": depends_on are ignored +service "front": credential_spec is ignored +service "front": dns are ignored +service "front": dns_search are ignored +service "front": env_file are ignored +service "front": stop_signal is ignored +service "front": logging is ignored +service "front": volume.propagation is ignored +service "front": volume.nocopy is ignored diff --git a/cli/cli/command/stack/kubernetes/warnings.go b/cli/cli/command/stack/kubernetes/warnings.go new file mode 100644 index 00000000..eb4598db --- /dev/null +++ b/cli/cli/command/stack/kubernetes/warnings.go @@ -0,0 +1,145 @@ +package kubernetes + +import ( + "fmt" + "io" + + composetypes "github.com/docker/cli/cli/compose/types" +) + +func warnUnsupportedFeatures(stderr io.Writer, cfg *composetypes.Config) { + warnForGlobalNetworks(stderr, cfg) + for _, s := range cfg.Services { + warnForServiceNetworks(stderr, s) + warnForUnsupportedDeploymentStrategy(stderr, s) + warnForUnsupportedRestartPolicy(stderr, s) + warnForDeprecatedProperties(stderr, s) + warnForUnsupportedProperties(stderr, s) + } +} + 
+func warnForGlobalNetworks(stderr io.Writer, config *composetypes.Config) { + for network := range config.Networks { + fmt.Fprintf(stderr, "top-level network %q is ignored\n", network) + } +} + +func warnServicef(stderr io.Writer, service, format string, args ...interface{}) { + fmt.Fprintf(stderr, "service \"%s\": %s\n", service, fmt.Sprintf(format, args...)) +} + +func warnForServiceNetworks(stderr io.Writer, s composetypes.ServiceConfig) { + for network := range s.Networks { + warnServicef(stderr, s.Name, "network %q is ignored", network) + } +} + +func warnForDeprecatedProperties(stderr io.Writer, s composetypes.ServiceConfig) { + if s.ContainerName != "" { + warnServicef(stderr, s.Name, "container_name is deprecated") + } + if len(s.Expose) > 0 { + warnServicef(stderr, s.Name, "expose is deprecated") + } +} + +func warnForUnsupportedDeploymentStrategy(stderr io.Writer, s composetypes.ServiceConfig) { + config := s.Deploy.UpdateConfig + if config == nil { + return + } + if config.Delay != 0 { + warnServicef(stderr, s.Name, "update_config.delay is not supported") + } + if config.FailureAction != "" { + warnServicef(stderr, s.Name, "update_config.failure_action is not supported") + } + if config.Monitor != 0 { + warnServicef(stderr, s.Name, "update_config.monitor is not supported") + } + if config.MaxFailureRatio != 0 { + warnServicef(stderr, s.Name, "update_config.max_failure_ratio is not supported") + } +} + +func warnForUnsupportedRestartPolicy(stderr io.Writer, s composetypes.ServiceConfig) { + policy := s.Deploy.RestartPolicy + if policy == nil { + return + } + + if policy.Delay != nil { + warnServicef(stderr, s.Name, "restart_policy.delay is ignored") + } + if policy.MaxAttempts != nil { + warnServicef(stderr, s.Name, "restart_policy.max_attempts is ignored") + } + if policy.Window != nil { + warnServicef(stderr, s.Name, "restart_policy.window is ignored") + } +} + +func warnForUnsupportedProperties(stderr io.Writer, s composetypes.ServiceConfig) { // nolint: gocyclo + if build := s.Build; build.Context != "" || build.Dockerfile != "" || len(build.Args) > 0 || len(build.Labels) > 0 || len(build.CacheFrom) > 0 || build.Network != "" || build.Target != "" { + warnServicef(stderr, s.Name, "build is ignored") + } + if s.CgroupParent != "" { + warnServicef(stderr, s.Name, "cgroup_parent is ignored") + } + if len(s.Devices) > 0 { + warnServicef(stderr, s.Name, "devices are ignored") + } + if s.DomainName != "" { + warnServicef(stderr, s.Name, "domainname is ignored") + } + if len(s.ExternalLinks) > 0 { + warnServicef(stderr, s.Name, "external_links are ignored") + } + if len(s.Links) > 0 { + warnServicef(stderr, s.Name, "links are ignored") + } + if s.MacAddress != "" { + warnServicef(stderr, s.Name, "mac_address is ignored") + } + if s.NetworkMode != "" { + warnServicef(stderr, s.Name, "network_mode is ignored") + } + if s.Restart != "" { + warnServicef(stderr, s.Name, "restart is ignored") + } + if len(s.SecurityOpt) > 0 { + warnServicef(stderr, s.Name, "security_opt are ignored") + } + if len(s.Ulimits) > 0 { + warnServicef(stderr, s.Name, "ulimits are ignored") + } + if len(s.DependsOn) > 0 { + warnServicef(stderr, s.Name, "depends_on are ignored") + } + if s.CredentialSpec.File != "" { + warnServicef(stderr, s.Name, "credential_spec is ignored") + } + if len(s.DNS) > 0 { + warnServicef(stderr, s.Name, "dns are ignored") + } + if len(s.DNSSearch) > 0 { + warnServicef(stderr, s.Name, "dns_search are ignored") + } + if len(s.EnvFile) > 0 { + warnServicef(stderr, s.Name, "env_file are 
ignored") + } + if s.StopSignal != "" { + warnServicef(stderr, s.Name, "stop_signal is ignored") + } + if s.Logging != nil { + warnServicef(stderr, s.Name, "logging is ignored") + } + for _, m := range s.Volumes { + if m.Volume != nil && m.Volume.NoCopy { + warnServicef(stderr, s.Name, "volume.nocopy is ignored") + } + if m.Bind != nil && m.Bind.Propagation != "" { + warnServicef(stderr, s.Name, "volume.propagation is ignored") + } + } +} diff --git a/cli/cli/command/stack/kubernetes/warnings_test.go b/cli/cli/command/stack/kubernetes/warnings_test.go new file mode 100644 index 00000000..bdb7bf9d --- /dev/null +++ b/cli/cli/command/stack/kubernetes/warnings_test.go @@ -0,0 +1,78 @@ +package kubernetes + +import ( + "bytes" + "testing" + "time" + + composetypes "github.com/docker/cli/cli/compose/types" + "gotest.tools/golden" +) + +func TestWarnings(t *testing.T) { + duration := 5 * time.Second + attempts := uint64(3) + config := &composetypes.Config{ + Version: "3.4", + Services: []composetypes.ServiceConfig{ + { + Name: "front", + Build: composetypes.BuildConfig{ + Context: "ignored", + }, + ContainerName: "ignored", + CgroupParent: "ignored", + CredentialSpec: composetypes.CredentialSpecConfig{File: "ignored"}, + DependsOn: []string{"ignored"}, + Deploy: composetypes.DeployConfig{ + UpdateConfig: &composetypes.UpdateConfig{ + Delay: 5 * time.Second, + FailureAction: "rollback", + Monitor: 10 * time.Second, + MaxFailureRatio: 0.5, + }, + RestartPolicy: &composetypes.RestartPolicy{ + Delay: &duration, + MaxAttempts: &attempts, + Window: &duration, + }, + }, + Devices: []string{"ignored"}, + DNSSearch: []string{"ignored"}, + DNS: []string{"ignored"}, + DomainName: "ignored", + EnvFile: []string{"ignored"}, + Expose: []string{"80"}, + ExternalLinks: []string{"ignored"}, + Image: "dockerdemos/front", + Links: []string{"ignored"}, + Logging: &composetypes.LoggingConfig{Driver: "syslog"}, + MacAddress: "ignored", + Networks: map[string]*composetypes.ServiceNetworkConfig{"private": {}}, + NetworkMode: "ignored", + Restart: "ignored", + SecurityOpt: []string{"ignored"}, + StopSignal: "ignored", + Ulimits: map[string]*composetypes.UlimitsConfig{"nproc": {Hard: 65535}}, + User: "ignored", + Volumes: []composetypes.ServiceVolumeConfig{ + { + Type: "bind", + Bind: &composetypes.ServiceVolumeBind{Propagation: "ignored"}, + }, + { + Type: "volume", + Volume: &composetypes.ServiceVolumeVolume{NoCopy: true}, + }, + }, + }, + }, + Networks: map[string]composetypes.NetworkConfig{ + "global": {}, + }, + } + var buf bytes.Buffer + warnUnsupportedFeatures(&buf, config) + warnings := buf.String() + golden.Assert(t, warnings, "warnings.golden") +} diff --git a/cli/cli/command/stack/kubernetes/watcher.go b/cli/cli/command/stack/kubernetes/watcher.go new file mode 100644 index 00000000..93d3358b --- /dev/null +++ b/cli/cli/command/stack/kubernetes/watcher.go @@ -0,0 +1,255 @@ +package kubernetes + +import ( + "context" + "sync" + "time" + + apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1" + "github.com/docker/cli/kubernetes/labels" + "github.com/pkg/errors" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + runtimeutil "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + podutils "k8s.io/kubernetes/pkg/api/v1/pod" +) + +type stackListWatch interface { + List(opts metav1.ListOptions) (*apiv1beta1.StackList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) +} + 
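For context, a condensed sketch of the calling convention deploy.go uses for the watcher defined below (the helper name watchStack is hypothetical): the caller owns the statusUpdates channel, drains it concurrently, and closes it only after Watch returns, since Watch waits for its informer goroutines to stop before returning.

func watchStack(w *deployWatcher, name string, services []string) error {
	statusUpdates := make(chan serviceStatus)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for status := range statusUpdates {
			_ = status // e.g. forward to a statusDisplay
		}
	}()
	err := w.Watch(name, services, statusUpdates)
	close(statusUpdates) // safe: nothing sends on the channel once Watch has returned
	<-done
	return err
}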
+type podListWatch interface { + List(opts metav1.ListOptions) (*apiv1.PodList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) +} + +// deployWatcher watches a stack deployment +type deployWatcher struct { + pods podListWatch + stacks stackListWatch +} + +// Watch watches a stack deployment and sends service status updates on the statusUpdates channel until every service has a ready pod or the deployment fails +func (w *deployWatcher) Watch(name string, serviceNames []string, statusUpdates chan serviceStatus) error { + errC := make(chan error, 1) + defer close(errC) + + handlers := runtimeutil.ErrorHandlers + + // informer errors are reported using global error handlers + runtimeutil.ErrorHandlers = append(handlers, func(err error) { + errC <- err + }) + defer func() { + runtimeutil.ErrorHandlers = handlers + }() + + ctx, cancel := context.WithCancel(context.Background()) + wg := sync.WaitGroup{} + defer func() { + cancel() + wg.Wait() + }() + wg.Add(2) + go func() { + defer wg.Done() + w.watchStackStatus(ctx, name, errC) + }() + go func() { + defer wg.Done() + w.waitForPods(ctx, name, serviceNames, errC, statusUpdates) + }() + + return <-errC +} + +type stackWatcher struct { + resultChan chan error + stackName string +} + +var _ cache.ResourceEventHandler = &stackWatcher{} + +func (sw *stackWatcher) OnAdd(obj interface{}) { + stack, ok := obj.(*apiv1beta1.Stack) + switch { + case !ok: + sw.resultChan <- errors.Errorf("stack %s has incorrect type", sw.stackName) + case stack.Status.Phase == apiv1beta1.StackFailure: + sw.resultChan <- errors.Errorf("stack %s failed with status %s: %s", sw.stackName, stack.Status.Phase, stack.Status.Message) + } +} + +func (sw *stackWatcher) OnUpdate(oldObj, newObj interface{}) { + sw.OnAdd(newObj) +} + +func (sw *stackWatcher) OnDelete(obj interface{}) { +} + +func (w *deployWatcher) watchStackStatus(ctx context.Context, stackname string, e chan error) { + informer := newStackInformer(w.stacks, stackname) + sw := &stackWatcher{ + resultChan: e, + } + informer.AddEventHandler(sw) + informer.Run(ctx.Done()) +} + +type serviceStatus struct { + name string + podsPending int + podsRunning int + podsSucceeded int + podsFailed int + podsUnknown int + podsReady int + podsTotal int +} + +type podWatcher struct { + stackName string + services map[string]serviceStatus + resultChan chan error + starts map[string]int32 + indexer cache.Indexer + statusUpdates chan serviceStatus +} + +var _ cache.ResourceEventHandler = &podWatcher{} + +func (pw *podWatcher) handlePod(obj interface{}) { + pod, ok := obj.(*apiv1.Pod) + if !ok { + pw.resultChan <- errors.Errorf("Pod has incorrect type in stack %s", pw.stackName) + return + } + serviceName := pod.Labels[labels.ForServiceName] + pw.updateServiceStatus(serviceName) + if pw.allReady() { + select { + case pw.resultChan <- nil: + default: + // result has already been reported, just don't block + } + } +} + +func (pw *podWatcher) updateServiceStatus(serviceName string) { + pods, _ := pw.indexer.ByIndex("byservice", serviceName) + status := serviceStatus{name: serviceName} + for _, obj := range pods { + if pod, ok := obj.(*apiv1.Pod); ok { + switch pod.Status.Phase { + case apiv1.PodPending: + status.podsPending++ + case apiv1.PodRunning: + status.podsRunning++ + case apiv1.PodSucceeded: + status.podsSucceeded++ + case apiv1.PodFailed: + status.podsFailed++ + case apiv1.PodUnknown: + status.podsUnknown++ + } + if podutils.IsPodReady(pod) { + status.podsReady++ + } + } + } + status.podsTotal = len(pods) + oldStatus := pw.services[serviceName] + if oldStatus != status {
pw.statusUpdates <- status + } + pw.services[serviceName] = status +} + +func (pw *podWatcher) allReady() bool { + for _, status := range pw.services { + if status.podsReady == 0 { + return false + } + } + return true +} + +func (pw *podWatcher) OnAdd(obj interface{}) { + pw.handlePod(obj) +} + +func (pw *podWatcher) OnUpdate(oldObj, newObj interface{}) { + pw.handlePod(newObj) +} + +func (pw *podWatcher) OnDelete(obj interface{}) { + pw.handlePod(obj) +} + +func (w *deployWatcher) waitForPods(ctx context.Context, stackName string, serviceNames []string, e chan error, statusUpdates chan serviceStatus) { + informer := newPodInformer(w.pods, stackName, cache.Indexers{ + "byservice": func(obj interface{}) ([]string, error) { + pod, ok := obj.(*apiv1.Pod) + if !ok { + return nil, errors.Errorf("Pod has incorrect type in stack %s", stackName) + } + return []string{pod.Labels[labels.ForServiceName]}, nil + }}) + services := map[string]serviceStatus{} + for _, name := range serviceNames { + services[name] = serviceStatus{name: name} + } + pw := &podWatcher{ + stackName: stackName, + services: services, + resultChan: e, + starts: map[string]int32{}, + indexer: informer.GetIndexer(), + statusUpdates: statusUpdates, + } + informer.AddEventHandler(pw) + informer.Run(ctx.Done()) +} + +func newPodInformer(podsClient podListWatch, stackName string, indexers cache.Indexers) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + options.IncludeUninitialized = true + return podsClient.List(options) + }, + + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + options.IncludeUninitialized = true + return podsClient.Watch(options) + }, + }, + &apiv1.Pod{}, + time.Second*5, + indexers, + ) +} + +func newStackInformer(stacksClient stackListWatch, stackName string) cache.SharedInformer { + return cache.NewSharedInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + return stacksClient.List(options) + }, + + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + return stacksClient.Watch(options) + }, + }, + &apiv1beta1.Stack{}, + time.Second*5, + ) +} diff --git a/cli/cli/command/stack/kubernetes/watcher_test.go b/cli/cli/command/stack/kubernetes/watcher_test.go new file mode 100644 index 00000000..84e51ea3 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/watcher_test.go @@ -0,0 +1,218 @@ +package kubernetes + +import ( + "testing" + + apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1" + composelabels "github.com/docker/cli/kubernetes/labels" + "gotest.tools/assert" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + k8stesting "k8s.io/client-go/testing" +) + +var podsResource = apiv1.SchemeGroupVersion.WithResource("pods") +var podKind = apiv1.SchemeGroupVersion.WithKind("Pod") +var stacksResource = apiv1beta1.SchemeGroupVersion.WithResource("stacks") +var stackKind = apiv1beta1.SchemeGroupVersion.WithKind("Stack") + +type testPodAndStackRepository 
struct { + fake *k8stesting.Fake +} + +func (r *testPodAndStackRepository) stackListWatchForNamespace(ns string) *testStackListWatch { + return &testStackListWatch{fake: r.fake, ns: ns} +} +func (r *testPodAndStackRepository) podListWatchForNamespace(ns string) *testPodListWatch { + return &testPodListWatch{fake: r.fake, ns: ns} +} + +func newTestPodAndStackRepository(initialPods []apiv1.Pod, initialStacks []apiv1beta1.Stack, podWatchHandler, stackWatchHandler k8stesting.WatchReactionFunc) *testPodAndStackRepository { + var scheme = runtime.NewScheme() + var codecs = serializer.NewCodecFactory(scheme) + metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + apiv1.AddToScheme(scheme) + apiv1beta1.AddToScheme(scheme) + + o := k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range initialPods { + if err := o.Add(&obj); err != nil { + panic(err) + } + } + for _, obj := range initialStacks { + if err := o.Add(&obj); err != nil { + panic(err) + } + } + fakePtr := &k8stesting.Fake{} + fakePtr.AddReactor("*", "*", k8stesting.ObjectReaction(o)) + if podWatchHandler != nil { + fakePtr.AddWatchReactor(podsResource.Resource, podWatchHandler) + } + if stackWatchHandler != nil { + fakePtr.AddWatchReactor(stacksResource.Resource, stackWatchHandler) + } + fakePtr.AddWatchReactor("*", k8stesting.DefaultWatchReactor(watch.NewFake(), nil)) + return &testPodAndStackRepository{fake: fakePtr} +} + +type testStackListWatch struct { + fake *k8stesting.Fake + ns string +} + +func (s *testStackListWatch) List(opts metav1.ListOptions) (*apiv1beta1.StackList, error) { + obj, err := s.fake.Invokes(k8stesting.NewListAction(stacksResource, stackKind, s.ns, opts), &apiv1beta1.StackList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := k8stesting.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apiv1beta1.StackList{} + for _, item := range obj.(*apiv1beta1.StackList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} +func (s *testStackListWatch) Watch(opts metav1.ListOptions) (watch.Interface, error) { + return s.fake.InvokesWatch(k8stesting.NewWatchAction(stacksResource, s.ns, opts)) +} + +type testPodListWatch struct { + fake *k8stesting.Fake + ns string +} + +func (p *testPodListWatch) List(opts metav1.ListOptions) (*apiv1.PodList, error) { + obj, err := p.fake.Invokes(k8stesting.NewListAction(podsResource, podKind, p.ns, opts), &apiv1.PodList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := k8stesting.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apiv1.PodList{} + for _, item := range obj.(*apiv1.PodList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err + +} +func (p *testPodListWatch) Watch(opts metav1.ListOptions) (watch.Interface, error) { + return p.fake.InvokesWatch(k8stesting.NewWatchAction(podsResource, p.ns, opts)) +} + +func TestDeployWatchOk(t *testing.T) { + stack := apiv1beta1.Stack{ + ObjectMeta: metav1.ObjectMeta{Name: "test-stack", Namespace: "test-ns"}, + } + + serviceNames := []string{"svc1", "svc2"} + testRepo := newTestPodAndStackRepository(nil, []apiv1beta1.Stack{stack}, func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) { + res := watch.NewFake() + go func() { + pod1 := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test1", + Namespace: "test-ns", + 
Labels: composelabels.ForService("test-stack", "svc1"), + }, + Status: apiv1.PodStatus{ + Phase: apiv1.PodRunning, + Conditions: []apiv1.PodCondition{ + { + Type: apiv1.PodReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + } + pod2 := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test2", + Namespace: "test-ns", + Labels: composelabels.ForService("test-stack", "svc2"), + }, + Status: apiv1.PodStatus{ + Phase: apiv1.PodRunning, + Conditions: []apiv1.PodCondition{ + { + Type: apiv1.PodReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + } + res.Add(pod1) + res.Add(pod2) + }() + + return true, res, nil + }, nil) + + testee := &deployWatcher{ + stacks: testRepo.stackListWatchForNamespace("test-ns"), + pods: testRepo.podListWatchForNamespace("test-ns"), + } + + statusUpdates := make(chan serviceStatus) + go func() { + for range statusUpdates { + } + }() + defer close(statusUpdates) + err := testee.Watch(stack.Name, serviceNames, statusUpdates) + assert.NilError(t, err) +} + +func TestDeployReconcileFailure(t *testing.T) { + stack := apiv1beta1.Stack{ + ObjectMeta: metav1.ObjectMeta{Name: "test-stack", Namespace: "test-ns"}, + } + + serviceNames := []string{"svc1", "svc2"} + testRepo := newTestPodAndStackRepository(nil, []apiv1beta1.Stack{stack}, nil, func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) { + res := watch.NewFake() + go func() { + sfailed := stack + sfailed.Status = apiv1beta1.StackStatus{ + Phase: apiv1beta1.StackFailure, + Message: "test error", + } + res.Modify(&sfailed) + }() + + return true, res, nil + }) + + testee := &deployWatcher{ + stacks: testRepo.stackListWatchForNamespace("test-ns"), + pods: testRepo.podListWatchForNamespace("test-ns"), + } + + statusUpdates := make(chan serviceStatus) + go func() { + for range statusUpdates { + } + }() + defer close(statusUpdates) + err := testee.Watch(stack.Name, serviceNames, statusUpdates) + assert.ErrorContains(t, err, "Failure: test error") +} diff --git a/cli/cli/command/stack/list.go b/cli/cli/command/stack/list.go new file mode 100644 index 00000000..a0aa3d1c --- /dev/null +++ b/cli/cli/command/stack/list.go @@ -0,0 +1,79 @@ +package stack + +import ( + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +func newListCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.List{} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List stacks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(cmd, dockerCli, opts, common.orchestrator) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.Format, "format", "", "Pretty-print stacks using a Go template") + flags.StringSliceVar(&opts.Namespaces, "namespace", []string{}, "Kubernetes namespaces to use") + flags.SetAnnotation("namespace", "kubernetes", nil) + flags.BoolVarP(&opts.AllNamespaces, "all-namespaces", "", false, "List stacks from all Kubernetes namespaces") + flags.SetAnnotation("all-namespaces", "kubernetes", nil) + return cmd +} + +func runList(cmd *cobra.Command, dockerCli command.Cli, opts options.List, orchestrator command.Orchestrator) error { + stacks := []*formatter.Stack{} + if orchestrator.HasSwarm() { + ss, err := 
swarm.GetStacks(dockerCli) + if err != nil { + return err + } + stacks = append(stacks, ss...) + } + if orchestrator.HasKubernetes() { + kubeCli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), orchestrator)) + if err != nil { + return err + } + ss, err := kubernetes.GetStacks(kubeCli, opts) + if err != nil { + return err + } + stacks = append(stacks, ss...) + } + return format(dockerCli, opts, orchestrator, stacks) +} + +func format(dockerCli command.Cli, opts options.List, orchestrator command.Orchestrator, stacks []*formatter.Stack) error { + format := opts.Format + if format == "" || format == formatter.TableFormatKey { + format = formatter.SwarmStackTableFormat + if orchestrator.HasKubernetes() { + format = formatter.KubernetesStackTableFormat + } + } + stackCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.Format(format), + } + sort.Slice(stacks, func(i, j int) bool { + return sortorder.NaturalLess(stacks[i].Name, stacks[j].Name) || + !sortorder.NaturalLess(stacks[j].Name, stacks[i].Name) && + sortorder.NaturalLess(stacks[j].Namespace, stacks[i].Namespace) + }) + return formatter.StackWrite(stackCtx, stacks) +} diff --git a/cli/cli/command/stack/list_test.go b/cli/cli/command/stack/list_test.go new file mode 100644 index 00000000..e646fdbe --- /dev/null +++ b/cli/cli/command/stack/list_test.go @@ -0,0 +1,152 @@ +package stack + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/internal/test" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/golden" +) + +var ( + orchestrator = commonOptions{orchestrator: command.OrchestratorSwarm} +) + +func TestListErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) + expectedError string + }{ + { + args: []string{"foo"}, + expectedError: "accepts no argument", + }, + { + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template parsing error", + }, + { + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{}, errors.Errorf("error getting services") + }, + expectedError: "error getting services", + }, + { + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service()}, nil + }, + expectedError: "cannot get label", + }, + } + + for _, tc := range testCases { + cmd := newListCommand(test.NewFakeCli(&fakeClient{ + serviceListFunc: tc.serviceListFunc, + }), &orchestrator) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestListWithFormat(t *testing.T) { + cli := test.NewFakeCli( + &fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + *Service( + ServiceLabels(map[string]string{ + "com.docker.stack.namespace": "service-name-foo", + }), + )}, nil + }, + }) + cmd := newListCommand(cli, &orchestrator) + cmd.Flags().Set("format", "{{ .Name }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-list-with-format.golden") +} + 
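A standalone sketch (package main, hypothetical, using the same vbom.ml/util/sortorder dependency as list.go) of the natural ordering that the sort.Slice comparator above relies on and that TestListOrder below asserts: numeric runs compare by value, so "service-name-2-foo" sorts before "service-name-10-foo".

package main

import (
	"fmt"
	"sort"

	"vbom.ml/util/sortorder"
)

func main() {
	names := []string{"service-name-10-foo", "service-name-1-foo", "service-name-2-foo"}
	sort.Slice(names, func(i, j int) bool { return sortorder.NaturalLess(names[i], names[j]) })
	fmt.Println(names) // [service-name-1-foo service-name-2-foo service-name-10-foo]
}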
+func TestListWithoutFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + *Service( + ServiceLabels(map[string]string{ + "com.docker.stack.namespace": "service-name-foo", + }), + )}, nil + }, + }) + cmd := newListCommand(cli, &orchestrator) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-list-without-format.golden") +} + +func TestListOrder(t *testing.T) { + usecases := []struct { + golden string + swarmServices []swarm.Service + }{ + { + golden: "stack-list-sort.golden", + swarmServices: []swarm.Service{ + *Service( + ServiceLabels(map[string]string{ + "com.docker.stack.namespace": "service-name-foo", + }), + ), + *Service( + ServiceLabels(map[string]string{ + "com.docker.stack.namespace": "service-name-bar", + }), + ), + }, + }, + { + golden: "stack-list-sort-natural.golden", + swarmServices: []swarm.Service{ + *Service( + ServiceLabels(map[string]string{ + "com.docker.stack.namespace": "service-name-1-foo", + }), + ), + *Service( + ServiceLabels(map[string]string{ + "com.docker.stack.namespace": "service-name-10-foo", + }), + ), + *Service( + ServiceLabels(map[string]string{ + "com.docker.stack.namespace": "service-name-2-foo", + }), + ), + }, + }, + } + + for _, uc := range usecases { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return uc.swarmServices, nil + }, + }) + cmd := newListCommand(cli, &orchestrator) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), uc.golden) + } +} diff --git a/cli/cli/command/stack/loader/loader.go b/cli/cli/command/stack/loader/loader.go new file mode 100644 index 00000000..b4790945 --- /dev/null +++ b/cli/cli/command/stack/loader/loader.go @@ -0,0 +1,152 @@ +package loader + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/cli/compose/schema" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/pkg/errors" +) + +// LoadComposefile parse the composefile specified in the cli and returns its Config and version. +func LoadComposefile(dockerCli command.Cli, opts options.Deploy) (*composetypes.Config, error) { + configDetails, err := getConfigDetails(opts.Composefiles, dockerCli.In()) + if err != nil { + return nil, err + } + + dicts := getDictsFrom(configDetails.ConfigFiles) + config, err := loader.Load(configDetails) + if err != nil { + if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { + return nil, errors.Errorf("Compose file contains unsupported options:\n\n%s\n", + propertyWarnings(fpe.Properties)) + } + + return nil, err + } + + unsupportedProperties := loader.GetUnsupportedProperties(dicts...) + if len(unsupportedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n", + strings.Join(unsupportedProperties, ", ")) + } + + deprecatedProperties := loader.GetDeprecatedProperties(dicts...) 
+ if len(deprecatedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n", + propertyWarnings(deprecatedProperties)) + } + return config, nil +} + +func getDictsFrom(configFiles []composetypes.ConfigFile) []map[string]interface{} { + dicts := []map[string]interface{}{} + + for _, configFile := range configFiles { + dicts = append(dicts, configFile.Config) + } + + return dicts +} + +func propertyWarnings(properties map[string]string) string { + var msgs []string + for name, description := range properties { + msgs = append(msgs, fmt.Sprintf("%s: %s", name, description)) + } + sort.Strings(msgs) + return strings.Join(msgs, "\n\n") +} + +func getConfigDetails(composefiles []string, stdin io.Reader) (composetypes.ConfigDetails, error) { + var details composetypes.ConfigDetails + + if len(composefiles) == 0 { + return details, errors.New("no composefile(s)") + } + + if composefiles[0] == "-" && len(composefiles) == 1 { + workingDir, err := os.Getwd() + if err != nil { + return details, err + } + details.WorkingDir = workingDir + } else { + absPath, err := filepath.Abs(composefiles[0]) + if err != nil { + return details, err + } + details.WorkingDir = filepath.Dir(absPath) + } + + var err error + details.ConfigFiles, err = loadConfigFiles(composefiles, stdin) + if err != nil { + return details, err + } + // Take the first file version (2 files can't have different version) + details.Version = schema.Version(details.ConfigFiles[0].Config) + details.Environment, err = buildEnvironment(os.Environ()) + return details, err +} + +func buildEnvironment(env []string) (map[string]string, error) { + result := make(map[string]string, len(env)) + for _, s := range env { + // if value is empty, s is like "K=", not "K". + if !strings.Contains(s, "=") { + return result, errors.Errorf("unexpected environment %q", s) + } + kv := strings.SplitN(s, "=", 2) + result[kv[0]] = kv[1] + } + return result, nil +} + +func loadConfigFiles(filenames []string, stdin io.Reader) ([]composetypes.ConfigFile, error) { + var configFiles []composetypes.ConfigFile + + for _, filename := range filenames { + configFile, err := loadConfigFile(filename, stdin) + if err != nil { + return configFiles, err + } + configFiles = append(configFiles, *configFile) + } + + return configFiles, nil +} + +func loadConfigFile(filename string, stdin io.Reader) (*composetypes.ConfigFile, error) { + var bytes []byte + var err error + + if filename == "-" { + bytes, err = ioutil.ReadAll(stdin) + } else { + bytes, err = ioutil.ReadFile(filename) + } + if err != nil { + return nil, err + } + + config, err := loader.ParseYAML(bytes) + if err != nil { + return nil, err + } + + return &composetypes.ConfigFile{ + Filename: filename, + Config: config, + }, nil +} diff --git a/cli/cli/command/stack/loader/loader_test.go b/cli/cli/command/stack/loader/loader_test.go new file mode 100644 index 00000000..de524cc5 --- /dev/null +++ b/cli/cli/command/stack/loader/loader_test.go @@ -0,0 +1,47 @@ +package loader + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +func TestGetConfigDetails(t *testing.T) { + content := ` +version: "3.0" +services: + foo: + image: alpine:3.5 +` + file := fs.NewFile(t, "test-get-config-details", fs.WithContent(content)) + defer file.Remove() + + details, err := getConfigDetails([]string{file.Path()}, nil) + assert.NilError(t, err) + assert.Check(t, is.Equal(filepath.Dir(file.Path()), details.WorkingDir)) + 
assert.Assert(t, is.Len(details.ConfigFiles, 1)) + assert.Check(t, is.Equal("3.0", details.ConfigFiles[0].Config["version"])) + assert.Check(t, is.Len(details.Environment, len(os.Environ()))) +} + +func TestGetConfigDetailsStdin(t *testing.T) { + content := ` +version: "3.0" +services: + foo: + image: alpine:3.5 +` + details, err := getConfigDetails([]string{"-"}, strings.NewReader(content)) + assert.NilError(t, err) + cwd, err := os.Getwd() + assert.NilError(t, err) + assert.Check(t, is.Equal(cwd, details.WorkingDir)) + assert.Assert(t, is.Len(details.ConfigFiles, 1)) + assert.Check(t, is.Equal("3.0", details.ConfigFiles[0].Config["version"])) + assert.Check(t, is.Len(details.Environment, len(os.Environ()))) +} diff --git a/cli/cli/command/stack/options/opts.go b/cli/cli/command/stack/options/opts.go new file mode 100644 index 00000000..afcecd99 --- /dev/null +++ b/cli/cli/command/stack/options/opts.go @@ -0,0 +1,43 @@ +package options + +import "github.com/docker/cli/opts" + +// Deploy holds docker stack deploy options +type Deploy struct { + Bundlefile string + Composefiles []string + Namespace string + ResolveImage string + SendRegistryAuth bool + Prune bool +} + +// List holds docker stack ls options +type List struct { + Format string + AllNamespaces bool + Namespaces []string +} + +// PS holds docker stack ps options +type PS struct { + Filter opts.FilterOpt + NoTrunc bool + Namespace string + NoResolve bool + Quiet bool + Format string +} + +// Remove holds docker stack remove options +type Remove struct { + Namespaces []string +} + +// Services holds docker stack services options +type Services struct { + Quiet bool + Format string + Filter opts.FilterOpt + Namespace string +} diff --git a/cli/cli/command/stack/ps.go b/cli/cli/command/stack/ps.go new file mode 100644 index 00000000..4d621535 --- /dev/null +++ b/cli/cli/command/stack/ps.go @@ -0,0 +1,48 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + cliopts "github.com/docker/cli/opts" + "github.com/spf13/cobra" +) + +func newPsCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.PS{Filter: cliopts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] STACK", + Short: "List the tasks in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + + switch { + case common.orchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case common.orchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), common.orchestrator)) + if err != nil { + return err + } + return kubernetes.RunPS(kli, opts) + default: + return swarm.RunPS(dockerCli, opts) + } + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.NoTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.NoResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided") + flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display task IDs") + flags.StringVar(&opts.Format, "format", "", "Pretty-print tasks using a Go template") + kubernetes.AddNamespaceFlag(flags) + return cmd +} diff --git a/cli/cli/command/stack/ps_test.go 
b/cli/cli/command/stack/ps_test.go new file mode 100644 index 00000000..39dd3c85 --- /dev/null +++ b/cli/cli/command/stack/ps_test.go @@ -0,0 +1,171 @@ +package stack + +import ( + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestStackPsErrors(t *testing.T) { + testCases := []struct { + args []string + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + expectedError string + }{ + + { + args: []string{}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"foo", "bar"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"foo"}, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return nil, errors.Errorf("error getting tasks") + }, + expectedError: "error getting tasks", + }, + } + + for _, tc := range testCases { + cmd := newPsCommand(test.NewFakeCli(&fakeClient{ + taskListFunc: tc.taskListFunc, + }), &orchestrator) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestRunPSWithEmptyName(t *testing.T) { + cmd := newPsCommand(test.NewFakeCli(&fakeClient{}), &orchestrator) + cmd.SetArgs([]string{"' '"}) + cmd.SetOutput(ioutil.Discard) + + assert.ErrorContains(t, cmd.Execute(), `invalid stack name: "' '"`) +} + +func TestStackPsEmptyStack(t *testing.T) { + fakeCli := test.NewFakeCli(&fakeClient{ + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{}, nil + }, + }) + cmd := newPsCommand(fakeCli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + cmd.SetOutput(ioutil.Discard) + + assert.Error(t, cmd.Execute(), "nothing found in stack: foo") + assert.Check(t, is.Equal("", fakeCli.OutBuffer().String())) +} + +func TestStackPsWithQuietOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task(TaskID("id-foo"))}, nil + }, + }) + cmd := newPsCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + cmd.Flags().Set("quiet", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-quiet-option.golden") + +} + +func TestStackPsWithNoTruncOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task(TaskID("xn4cypcov06f2w8gsbaf2lst3"))}, nil + }, + }) + cmd := newPsCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + cmd.Flags().Set("no-trunc", "true") + cmd.Flags().Set("format", "{{ .ID }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-no-trunc-option.golden") +} + +func TestStackPsWithNoResolveOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task( + TaskNodeID("id-node-foo"), + )}, nil + }, + nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-name-bar")), nil, nil + }, + }) + cmd := newPsCommand(cli, &orchestrator) + 
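+ // "--no-resolve" means IDs are not mapped to names, so the golden output is
+ // expected to show the raw node ID rather than the name returned by
+ // nodeInspectWithRaw.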
cmd.SetArgs([]string{"foo"}) + cmd.Flags().Set("no-resolve", "true") + cmd.Flags().Set("format", "{{ .Node }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-no-resolve-option.golden") +} + +func TestStackPsWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task(TaskServiceID("service-id-foo"))}, nil + }, + }) + cmd := newPsCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + cmd.Flags().Set("format", "{{ .Name }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-format.golden") +} + +func TestStackPsWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task(TaskServiceID("service-id-foo"))}, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + TasksFormat: "{{ .Name }}", + }) + cmd := newPsCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-config-format.golden") +} + +func TestStackPsWithoutFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task( + TaskID("id-foo"), + TaskServiceID("service-id-foo"), + TaskNodeID("id-node"), + WithTaskSpec(TaskImage("myimage:mytag")), + TaskDesiredState(swarm.TaskStateReady), + WithStatus(TaskState(swarm.TaskStateFailed), Timestamp(time.Now().Add(-2*time.Hour))), + )}, nil + }, + nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-name-bar")), nil, nil + }, + }) + cmd := newPsCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-ps-without-format.golden") +} diff --git a/cli/cli/command/stack/remove.go b/cli/cli/command/stack/remove.go new file mode 100644 index 00000000..737713e4 --- /dev/null +++ b/cli/cli/command/stack/remove.go @@ -0,0 +1,43 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + var opts options.Remove + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] STACK [STACK...]", + Aliases: []string{"remove", "down"}, + Short: "Remove one or more stacks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespaces = args + if err := validateStackNames(opts.Namespaces); err != nil { + return err + } + + switch { + case common.orchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case common.orchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), common.orchestrator)) + if err != nil { + return err + } + return kubernetes.RunRemove(kli, opts) + default: + return swarm.RunRemove(dockerCli, opts) + } + }, + } + flags := cmd.Flags() + kubernetes.AddNamespaceFlag(flags) + return cmd +} diff --git a/cli/cli/command/stack/remove_test.go b/cli/cli/command/stack/remove_test.go new file mode 100644 index 00000000..c7032d84 --- /dev/null +++ 
b/cli/cli/command/stack/remove_test.go @@ -0,0 +1,166 @@ +package stack + +import ( + "errors" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func fakeClientForRemoveStackTest(version string) *fakeClient { + allServices := []string{ + objectName("foo", "service1"), + objectName("foo", "service2"), + objectName("bar", "service1"), + objectName("bar", "service2"), + } + allNetworks := []string{ + objectName("foo", "network1"), + objectName("bar", "network1"), + } + allSecrets := []string{ + objectName("foo", "secret1"), + objectName("foo", "secret2"), + objectName("bar", "secret1"), + } + allConfigs := []string{ + objectName("foo", "config1"), + objectName("foo", "config2"), + objectName("bar", "config1"), + } + return &fakeClient{ + version: version, + services: allServices, + networks: allNetworks, + secrets: allSecrets, + configs: allConfigs, + } +} + +func TestRemoveWithEmptyName(t *testing.T) { + cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{}), &orchestrator) + cmd.SetArgs([]string{"good", "' '", "alsogood"}) + cmd.SetOutput(ioutil.Discard) + + assert.ErrorContains(t, cmd.Execute(), `invalid stack name: "' '"`) +} + +func TestRemoveStackVersion124DoesNotRemoveConfigsOrSecrets(t *testing.T) { + client := fakeClientForRemoveStackTest("1.24") + cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.services), client.removedServices)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.networks), client.removedNetworks)) + assert.Check(t, is.Len(client.removedSecrets, 0)) + assert.Check(t, is.Len(client.removedConfigs, 0)) +} + +func TestRemoveStackVersion125DoesNotRemoveConfigs(t *testing.T) { + client := fakeClientForRemoveStackTest("1.25") + cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.services), client.removedServices)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.networks), client.removedNetworks)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.secrets), client.removedSecrets)) + assert.Check(t, is.Len(client.removedConfigs, 0)) +} + +func TestRemoveStackVersion130RemovesEverything(t *testing.T) { + client := fakeClientForRemoveStackTest("1.30") + cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.services), client.removedServices)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.networks), client.removedNetworks)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.secrets), client.removedSecrets)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.configs), client.removedConfigs)) +} + +func TestRemoveStackSkipEmpty(t *testing.T) { + allServices := []string{objectName("bar", "service1"), objectName("bar", "service2")} + allServiceIDs := buildObjectIDs(allServices) + + allNetworks := []string{objectName("bar", "network1")} + allNetworkIDs := buildObjectIDs(allNetworks) + + allSecrets := []string{objectName("bar", "secret1")} + allSecretIDs := buildObjectIDs(allSecrets) + + allConfigs := []string{objectName("bar", "config1")} + allConfigIDs := buildObjectIDs(allConfigs) + + fakeClient := &fakeClient{ + version: "1.30", + services: allServices, 
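+ // a 1.30 client, so that services, networks, secrets and configs all get removed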
+ networks: allNetworks, + secrets: allSecrets, + configs: allConfigs, + } + fakeCli := test.NewFakeCli(fakeClient) + cmd := newRemoveCommand(fakeCli, &orchestrator) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.NilError(t, cmd.Execute()) + expectedList := []string{"Removing service bar_service1", + "Removing service bar_service2", + "Removing secret bar_secret1", + "Removing config bar_config1", + "Removing network bar_network1\n", + } + assert.Check(t, is.Equal(strings.Join(expectedList, "\n"), fakeCli.OutBuffer().String())) + assert.Check(t, is.Contains(fakeCli.ErrBuffer().String(), "Nothing found in stack: foo\n")) + assert.Check(t, is.DeepEqual(allServiceIDs, fakeClient.removedServices)) + assert.Check(t, is.DeepEqual(allNetworkIDs, fakeClient.removedNetworks)) + assert.Check(t, is.DeepEqual(allSecretIDs, fakeClient.removedSecrets)) + assert.Check(t, is.DeepEqual(allConfigIDs, fakeClient.removedConfigs)) +} + +func TestRemoveContinueAfterError(t *testing.T) { + allServices := []string{objectName("foo", "service1"), objectName("bar", "service1")} + allServiceIDs := buildObjectIDs(allServices) + + allNetworks := []string{objectName("foo", "network1"), objectName("bar", "network1")} + allNetworkIDs := buildObjectIDs(allNetworks) + + allSecrets := []string{objectName("foo", "secret1"), objectName("bar", "secret1")} + allSecretIDs := buildObjectIDs(allSecrets) + + allConfigs := []string{objectName("foo", "config1"), objectName("bar", "config1")} + allConfigIDs := buildObjectIDs(allConfigs) + + removedServices := []string{} + cli := &fakeClient{ + version: "1.30", + services: allServices, + networks: allNetworks, + secrets: allSecrets, + configs: allConfigs, + + serviceRemoveFunc: func(serviceID string) error { + removedServices = append(removedServices, serviceID) + + if strings.Contains(serviceID, "foo") { + return errors.New("") + } + return nil + }, + } + cmd := newRemoveCommand(test.NewFakeCli(cli), &orchestrator) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.Error(t, cmd.Execute(), "Failed to remove some resources from stack: foo") + assert.Check(t, is.DeepEqual(allServiceIDs, removedServices)) + assert.Check(t, is.DeepEqual(allNetworkIDs, cli.removedNetworks)) + assert.Check(t, is.DeepEqual(allSecretIDs, cli.removedSecrets)) + assert.Check(t, is.DeepEqual(allConfigIDs, cli.removedConfigs)) +} diff --git a/cli/cli/command/stack/services.go b/cli/cli/command/stack/services.go new file mode 100644 index 00000000..ca6d42c2 --- /dev/null +++ b/cli/cli/command/stack/services.go @@ -0,0 +1,46 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + cliopts "github.com/docker/cli/opts" + "github.com/spf13/cobra" +) + +func newServicesCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.Services{Filter: cliopts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "services [OPTIONS] STACK", + Short: "List the services in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + + switch { + case common.orchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case common.orchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), 
common.orchestrator)) + if err != nil { + return err + } + return kubernetes.RunServices(kli, opts) + default: + return swarm.RunServices(dockerCli, opts) + } + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display IDs") + flags.StringVar(&opts.Format, "format", "", "Pretty-print services using a Go template") + flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided") + kubernetes.AddNamespaceFlag(flags) + return cmd +} diff --git a/cli/cli/command/stack/services_test.go b/cli/cli/command/stack/services_test.go new file mode 100644 index 00000000..64a58b99 --- /dev/null +++ b/cli/cli/command/stack/services_test.go @@ -0,0 +1,170 @@ +package stack + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestStackServicesErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) + nodeListFunc func(options types.NodeListOptions) ([]swarm.Node, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + expectedError string + }{ + { + args: []string{"foo"}, + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return nil, errors.Errorf("error getting services") + }, + expectedError: "error getting services", + }, + { + args: []string{"foo"}, + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service()}, nil + }, + nodeListFunc: func(options types.NodeListOptions) ([]swarm.Node, error) { + return nil, errors.Errorf("error getting nodes") + }, + expectedError: "error getting nodes", + }, + { + args: []string{"foo"}, + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service()}, nil + }, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return nil, errors.Errorf("error getting tasks") + }, + expectedError: "error getting tasks", + }, + { + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service()}, nil + }, + expectedError: "Template parsing error", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: tc.serviceListFunc, + nodeListFunc: tc.nodeListFunc, + taskListFunc: tc.taskListFunc, + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestRunServicesWithEmptyName(t *testing.T) { + cmd := newServicesCommand(test.NewFakeCli(&fakeClient{}), &orchestrator) + cmd.SetArgs([]string{"' '"}) + cmd.SetOutput(ioutil.Discard) + + assert.ErrorContains(t, cmd.Execute(), `invalid stack name: "' '"`) +} + +func TestStackServicesEmptyServiceList(t *testing.T) { + fakeCli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options 
types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{}, nil + }, + }) + cmd := newServicesCommand(fakeCli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("", fakeCli.OutBuffer().String())) + assert.Check(t, is.Equal("Nothing found in stack: foo\n", fakeCli.ErrBuffer().String())) +} + +func TestStackServicesWithQuietOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service(ServiceID("id-foo"))}, nil + }, + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.Flags().Set("quiet", "true") + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-services-with-quiet-option.golden") +} + +func TestStackServicesWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + *Service(ServiceName("service-name-foo")), + }, nil + }, + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + cmd.Flags().Set("format", "{{ .Name }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-services-with-format.golden") +} + +func TestStackServicesWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + *Service(ServiceName("service-name-foo")), + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + ServicesFormat: "{{ .Name }}", + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-services-with-config-format.golden") +} + +func TestStackServicesWithoutFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service( + ServiceName("name-foo"), + ServiceID("id-foo"), + ReplicatedService(2), + ServiceImage("busybox:latest"), + ServicePort(swarm.PortConfig{ + PublishMode: swarm.PortConfigPublishModeIngress, + PublishedPort: 0, + TargetPort: 3232, + Protocol: swarm.PortConfigProtocolTCP, + }), + )}, nil + }, + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-services-without-format.golden") +} diff --git a/cli/cli/command/stack/swarm/client_test.go b/cli/cli/command/stack/swarm/client_test.go new file mode 100644 index 00000000..7f9375e9 --- /dev/null +++ b/cli/cli/command/stack/swarm/client_test.go @@ -0,0 +1,239 @@ +package swarm + +import ( + "context" + "strings" + + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + + version string + + services []string + networks []string + secrets []string + configs []string + + removedServices []string + removedNetworks []string + removedSecrets []string + removedConfigs []string + + serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) + networkListFunc func(options 
types.NetworkListOptions) ([]types.NetworkResource, error) + secretListFunc func(options types.SecretListOptions) ([]swarm.Secret, error) + configListFunc func(options types.ConfigListOptions) ([]swarm.Config, error) + nodeListFunc func(options types.NodeListOptions) ([]swarm.Node, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + nodeInspectWithRaw func(ref string) (swarm.Node, []byte, error) + + serviceUpdateFunc func(serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + + serviceRemoveFunc func(serviceID string) error + networkRemoveFunc func(networkID string) error + secretRemoveFunc func(secretID string) error + configRemoveFunc func(configID string) error +} + +func (cli *fakeClient) ServerVersion(ctx context.Context) (types.Version, error) { + return types.Version{ + Version: "docker-dev", + APIVersion: api.DefaultVersion, + }, nil +} + +func (cli *fakeClient) ClientVersion() string { + return cli.version +} + +func (cli *fakeClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + if cli.serviceListFunc != nil { + return cli.serviceListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + servicesList := []swarm.Service{} + for _, name := range cli.services { + if belongToNamespace(name, namespace) { + servicesList = append(servicesList, serviceFromName(name)) + } + } + return servicesList, nil +} + +func (cli *fakeClient) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + if cli.networkListFunc != nil { + return cli.networkListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + networksList := []types.NetworkResource{} + for _, name := range cli.networks { + if belongToNamespace(name, namespace) { + networksList = append(networksList, networkFromName(name)) + } + } + return networksList, nil +} + +func (cli *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if cli.secretListFunc != nil { + return cli.secretListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + secretsList := []swarm.Secret{} + for _, name := range cli.secrets { + if belongToNamespace(name, namespace) { + secretsList = append(secretsList, secretFromName(name)) + } + } + return secretsList, nil +} + +func (cli *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if cli.configListFunc != nil { + return cli.configListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + configsList := []swarm.Config{} + for _, name := range cli.configs { + if belongToNamespace(name, namespace) { + configsList = append(configsList, configFromName(name)) + } + } + return configsList, nil +} + +func (cli *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + if cli.taskListFunc != nil { + return cli.taskListFunc(options) + } + return []swarm.Task{}, nil +} + +func (cli *fakeClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + if cli.nodeListFunc != nil { + return cli.nodeListFunc(options) + } + return []swarm.Node{}, nil +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectWithRaw != nil { + return cli.nodeInspectWithRaw(ref) + } + return swarm.Node{}, nil, nil +} + 
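+// The helpers at the bottom of this file implement a purely name-based fake
+// model: objectName("foo", "service1") yields "foo_service1", objectID turns
+// that into "ID-foo_service1", and belongToNamespace treats the "<stack>_"
+// name prefix as stack membership whenever a namespace filter is present.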
+func (cli *fakeClient) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + if cli.serviceUpdateFunc != nil { + return cli.serviceUpdateFunc(serviceID, version, service, options) + } + + return types.ServiceUpdateResponse{}, nil +} + +func (cli *fakeClient) ServiceRemove(ctx context.Context, serviceID string) error { + if cli.serviceRemoveFunc != nil { + return cli.serviceRemoveFunc(serviceID) + } + + cli.removedServices = append(cli.removedServices, serviceID) + return nil +} + +func (cli *fakeClient) NetworkRemove(ctx context.Context, networkID string) error { + if cli.networkRemoveFunc != nil { + return cli.networkRemoveFunc(networkID) + } + + cli.removedNetworks = append(cli.removedNetworks, networkID) + return nil +} + +func (cli *fakeClient) SecretRemove(ctx context.Context, secretID string) error { + if cli.secretRemoveFunc != nil { + return cli.secretRemoveFunc(secretID) + } + + cli.removedSecrets = append(cli.removedSecrets, secretID) + return nil +} + +func (cli *fakeClient) ConfigRemove(ctx context.Context, configID string) error { + if cli.configRemoveFunc != nil { + return cli.configRemoveFunc(configID) + } + + cli.removedConfigs = append(cli.removedConfigs, configID) + return nil +} + +func serviceFromName(name string) swarm.Service { + return swarm.Service{ + ID: "ID-" + name, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func networkFromName(name string) types.NetworkResource { + return types.NetworkResource{ + ID: "ID-" + name, + Name: name, + } +} + +func secretFromName(name string) swarm.Secret { + return swarm.Secret{ + ID: "ID-" + name, + Spec: swarm.SecretSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func configFromName(name string) swarm.Config { + return swarm.Config{ + ID: "ID-" + name, + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func namespaceFromFilters(filters filters.Args) string { + label := filters.Get("label")[0] + return strings.TrimPrefix(label, convert.LabelNamespace+"=") +} + +func belongToNamespace(id, namespace string) bool { + return strings.HasPrefix(id, namespace+"_") +} + +func objectName(namespace, name string) string { + return namespace + "_" + name +} + +func objectID(name string) string { + return "ID-" + name +} + +func buildObjectIDs(objectNames []string) []string { + IDs := make([]string, len(objectNames)) + for i, name := range objectNames { + IDs[i] = objectID(name) + } + return IDs +} diff --git a/cli/cli/command/stack/swarm/common.go b/cli/cli/command/stack/swarm/common.go new file mode 100644 index 00000000..b4193df3 --- /dev/null +++ b/cli/cli/command/stack/swarm/common.go @@ -0,0 +1,50 @@ +package swarm + +import ( + "context" + + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +func getStackFilter(namespace string) filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getStackServiceFilter(namespace string) filters.Args { + return getStackFilter(namespace) +} + +func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args { + filter := opt.Value() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return 
filter +} + +func getAllStacksFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace) + return filter +} + +func getStackServices(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Service, error) { + return apiclient.ServiceList(ctx, types.ServiceListOptions{Filters: getStackServiceFilter(namespace)}) +} + +func getStackNetworks(ctx context.Context, apiclient client.APIClient, namespace string) ([]types.NetworkResource, error) { + return apiclient.NetworkList(ctx, types.NetworkListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackSecrets(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Secret, error) { + return apiclient.SecretList(ctx, types.SecretListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackConfigs(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Config, error) { + return apiclient.ConfigList(ctx, types.ConfigListOptions{Filters: getStackFilter(namespace)}) +} diff --git a/cli/cli/command/stack/swarm/deploy.go b/cli/cli/command/stack/swarm/deploy.go new file mode 100644 index 00000000..d11c328f --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy.go @@ -0,0 +1,80 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/convert" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// Resolve image constants +const ( + defaultNetworkDriver = "overlay" + ResolveImageAlways = "always" + ResolveImageChanged = "changed" + ResolveImageNever = "never" +) + +// RunDeploy is the swarm implementation of docker stack deploy +func RunDeploy(dockerCli command.Cli, opts options.Deploy, cfg *composetypes.Config) error { + ctx := context.Background() + + if err := validateResolveImageFlag(dockerCli, &opts); err != nil { + return err + } + + return deployCompose(ctx, dockerCli, opts, cfg) +} + +// validateResolveImageFlag validates the opts.resolveImage command line option +// and also turns image resolution off if the version is older than 1.30 +func validateResolveImageFlag(dockerCli command.Cli, opts *options.Deploy) error { + if opts.ResolveImage != ResolveImageAlways && opts.ResolveImage != ResolveImageChanged && opts.ResolveImage != ResolveImageNever { + return errors.Errorf("Invalid option %s for flag --resolve-image", opts.ResolveImage) + } + // client side image resolution should not be done when the supported + // server version is older than 1.30 + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.30") { + opts.ResolveImage = ResolveImageNever + } + return nil +} + +// checkDaemonIsSwarmManager does an Info API call to verify that the daemon is +// a swarm manager. This is necessary because we must create networks before we +// create services, but the API call for creating a network does not return a +// proper status code when it can't create a network in the "global" scope. +func checkDaemonIsSwarmManager(ctx context.Context, dockerCli command.Cli) error { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if !info.Swarm.ControlAvailable { + return errors.New("this node is not a swarm manager. 
Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again") + } + return nil +} + +// pruneServices removes services that are no longer referenced in the source +func pruneServices(ctx context.Context, dockerCli command.Cli, namespace convert.Namespace, services map[string]struct{}) { + client := dockerCli.Client() + + oldServices, err := getStackServices(ctx, client, namespace.Name()) + if err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to list services: %s\n", err) + } + + pruneServices := []swarm.Service{} + for _, service := range oldServices { + if _, exists := services[namespace.Descope(service.Spec.Name)]; !exists { + pruneServices = append(pruneServices, service) + } + } + removeServices(ctx, dockerCli, pruneServices) +} diff --git a/cli/cli/command/stack/swarm/deploy_bundlefile.go b/cli/cli/command/stack/swarm/deploy_bundlefile.go new file mode 100644 index 00000000..8db6f66b --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_bundlefile.go @@ -0,0 +1,124 @@ +package swarm + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/bundlefile" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" +) + +// DeployBundle deploy a bundlefile (dab) on a swarm. +func DeployBundle(ctx context.Context, dockerCli command.Cli, opts options.Deploy) error { + bundle, err := loadBundlefile(dockerCli.Err(), opts.Namespace, opts.Bundlefile) + if err != nil { + return err + } + + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.Namespace) + + if opts.Prune { + services := map[string]struct{}{} + for service := range bundle.Services { + services[service] = struct{}{} + } + pruneServices(ctx, dockerCli, namespace, services) + } + + networks := make(map[string]types.NetworkCreate) + for _, service := range bundle.Services { + for _, networkName := range service.Networks { + networks[namespace.Scope(networkName)] = types.NetworkCreate{ + Labels: convert.AddStackLabel(namespace, nil), + } + } + } + + services := make(map[string]swarm.ServiceSpec) + for internalName, service := range bundle.Services { + name := namespace.Scope(internalName) + + var ports []swarm.PortConfig + for _, portSpec := range service.Ports { + ports = append(ports, swarm.PortConfig{ + Protocol: swarm.PortConfigProtocol(portSpec.Protocol), + TargetPort: portSpec.Port, + }) + } + + nets := []swarm.NetworkAttachmentConfig{} + for _, networkName := range service.Networks { + nets = append(nets, swarm.NetworkAttachmentConfig{ + Target: namespace.Scope(networkName), + Aliases: []string{internalName}, + }) + } + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: convert.AddStackLabel(namespace, service.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: service.Image, + Command: service.Command, + Args: service.Args, + Env: service.Env, + // Service Labels will not be copied to Containers + // automatically during the deployment so we apply + // it here. 
+ Labels: convert.AddStackLabel(namespace, nil), + }, + }, + EndpointSpec: &swarm.EndpointSpec{ + Ports: ports, + }, + Networks: nets, + } + + services[internalName] = serviceSpec + } + + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.SendRegistryAuth, opts.ResolveImage) +} + +func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { + defaultPath := fmt.Sprintf("%s.dab", namespace) + + if path == "" { + path = defaultPath + } + if _, err := os.Stat(path); err != nil { + return nil, errors.Errorf( + "Bundle %s not found. Specify the path with --file", + path) + } + + fmt.Fprintf(stderr, "Loading bundle from %s\n", path) + reader, err := os.Open(path) + if err != nil { + return nil, err + } + defer reader.Close() + + bundle, err := bundlefile.LoadFile(reader) + if err != nil { + return nil, errors.Errorf("Error reading %s: %v\n", path, err) + } + return bundle, err +} diff --git a/cli/cli/command/stack/swarm/deploy_bundlefile_test.go b/cli/cli/command/stack/swarm/deploy_bundlefile_test.go new file mode 100644 index 00000000..485271cb --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_bundlefile_test.go @@ -0,0 +1,50 @@ +package swarm + +import ( + "bytes" + "path/filepath" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestLoadBundlefileErrors(t *testing.T) { + testCases := []struct { + namespace string + path string + expectedError string + }{ + { + namespace: "namespace_foo", + expectedError: "Bundle namespace_foo.dab not found", + }, + { + namespace: "namespace_foo", + path: "invalid_path", + expectedError: "Bundle invalid_path not found", + }, + // FIXME: this test never working, testdata file is missing from repo + //{ + // namespace: "namespace_foo", + // path: string(golden.Get(t, "bundlefile_with_invalid_syntax")), + // expectedError: "Error reading", + //}, + } + + for _, tc := range testCases { + _, err := loadBundlefile(&bytes.Buffer{}, tc.namespace, tc.path) + assert.ErrorContains(t, err, tc.expectedError) + } +} + +func TestLoadBundlefile(t *testing.T) { + buf := new(bytes.Buffer) + + namespace := "" + path := filepath.Join("testdata", "bundlefile_with_two_services.dab") + bundleFile, err := loadBundlefile(buf, namespace, path) + + assert.NilError(t, err) + assert.Check(t, is.Equal(len(bundleFile.Services), 2)) +} diff --git a/cli/cli/command/stack/swarm/deploy_composefile.go b/cli/cli/command/stack/swarm/deploy_composefile.go new file mode 100644 index 00000000..e4574fd9 --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_composefile.go @@ -0,0 +1,281 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/convert" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + apiclient "github.com/docker/docker/client" + dockerclient "github.com/docker/docker/client" + "github.com/pkg/errors" +) + +func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Deploy, config *composetypes.Config) error { + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.Namespace) + + if opts.Prune { + services := map[string]struct{}{} + for _, service := range 
config.Services { + services[service.Name] = struct{}{} + } + pruneServices(ctx, dockerCli, namespace, services) + } + + serviceNetworks := getServicesDeclaredNetworks(config.Services) + networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks) + if err := validateExternalNetworks(ctx, dockerCli.Client(), externalNetworks); err != nil { + return err + } + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + + secrets, err := convert.Secrets(namespace, config.Secrets) + if err != nil { + return err + } + if err := createSecrets(ctx, dockerCli, secrets); err != nil { + return err + } + + configs, err := convert.Configs(namespace, config.Configs) + if err != nil { + return err + } + if err := createConfigs(ctx, dockerCli, configs); err != nil { + return err + } + + services, err := convert.Services(namespace, config, dockerCli.Client()) + if err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.SendRegistryAuth, opts.ResolveImage) +} + +func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} { + serviceNetworks := map[string]struct{}{} + for _, serviceConfig := range serviceConfigs { + if len(serviceConfig.Networks) == 0 { + serviceNetworks["default"] = struct{}{} + continue + } + for network := range serviceConfig.Networks { + serviceNetworks[network] = struct{}{} + } + } + return serviceNetworks +} + +func validateExternalNetworks( + ctx context.Context, + client dockerclient.NetworkAPIClient, + externalNetworks []string, +) error { + for _, networkName := range externalNetworks { + if !container.NetworkMode(networkName).IsUserDefined() { + // Networks that are not user defined always exist on all nodes as + // local-scoped networks, so there's no need to inspect them. + continue + } + network, err := client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}) + switch { + case dockerclient.IsErrNotFound(err): + return errors.Errorf("network %q is declared as external, but could not be found. You need to create a swarm-scoped network before the stack is deployed", networkName) + case err != nil: + return err + case network.Scope != "swarm": + return errors.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of \"swarm\"", networkName, network.Scope) + } + } + return nil +} + +func createSecrets( + ctx context.Context, + dockerCli command.Cli, + secrets []swarm.SecretSpec, +) error { + client := dockerCli.Client() + + for _, secretSpec := range secrets { + secret, _, err := client.SecretInspectWithRaw(ctx, secretSpec.Name) + switch { + case err == nil: + // secret already exists, then we update that + if err := client.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec); err != nil { + return errors.Wrapf(err, "failed to update secret %s", secretSpec.Name) + } + case apiclient.IsErrNotFound(err): + // secret does not exist, then we create a new one. 
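+ // (any inspect error other than "not found" falls through to the default
+ // case below and aborts the deploy)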
+ fmt.Fprintf(dockerCli.Out(), "Creating secret %s\n", secretSpec.Name) + if _, err := client.SecretCreate(ctx, secretSpec); err != nil { + return errors.Wrapf(err, "failed to create secret %s", secretSpec.Name) + } + default: + return err + } + } + return nil +} + +func createConfigs( + ctx context.Context, + dockerCli command.Cli, + configs []swarm.ConfigSpec, +) error { + client := dockerCli.Client() + + for _, configSpec := range configs { + config, _, err := client.ConfigInspectWithRaw(ctx, configSpec.Name) + switch { + case err == nil: + // config already exists, then we update that + if err := client.ConfigUpdate(ctx, config.ID, config.Meta.Version, configSpec); err != nil { + return errors.Wrapf(err, "failed to update config %s", configSpec.Name) + } + case apiclient.IsErrNotFound(err): + // config does not exist, then we create a new one. + fmt.Fprintf(dockerCli.Out(), "Creating config %s\n", configSpec.Name) + if _, err := client.ConfigCreate(ctx, configSpec); err != nil { + return errors.Wrapf(err, "failed to create config %s", configSpec.Name) + } + default: + return err + } + } + return nil +} + +func createNetworks( + ctx context.Context, + dockerCli command.Cli, + namespace convert.Namespace, + networks map[string]types.NetworkCreate, +) error { + client := dockerCli.Client() + + existingNetworks, err := getStackNetworks(ctx, client, namespace.Name()) + if err != nil { + return err + } + + existingNetworkMap := make(map[string]types.NetworkResource) + for _, network := range existingNetworks { + existingNetworkMap[network.Name] = network + } + + for name, createOpts := range networks { + if _, exists := existingNetworkMap[name]; exists { + continue + } + + if createOpts.Driver == "" { + createOpts.Driver = defaultNetworkDriver + } + + fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) + if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { + return errors.Wrapf(err, "failed to create network %s", name) + } + } + return nil +} + +func deployServices( + ctx context.Context, + dockerCli command.Cli, + services map[string]swarm.ServiceSpec, + namespace convert.Namespace, + sendAuth bool, + resolveImage string, +) error { + apiClient := dockerCli.Client() + out := dockerCli.Out() + + existingServices, err := getStackServices(ctx, apiClient, namespace.Name()) + if err != nil { + return err + } + + existingServiceMap := make(map[string]swarm.Service) + for _, service := range existingServices { + existingServiceMap[service.Spec.Name] = service + } + + for internalName, serviceSpec := range services { + name := namespace.Scope(internalName) + + encodedAuth := "" + image := serviceSpec.TaskTemplate.ContainerSpec.Image + if sendAuth { + // Retrieve encoded auth token from the image reference + encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + } + + if service, exists := existingServiceMap[name]; exists { + fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) + + updateOpts := types.ServiceUpdateOptions{EncodedRegistryAuth: encodedAuth} + + switch { + case resolveImage == ResolveImageAlways || (resolveImage == ResolveImageChanged && image != service.Spec.Labels[convert.LabelImage]): + // image should be updated by the server using QueryRegistry + updateOpts.QueryRegistry = true + case image == service.Spec.Labels[convert.LabelImage]: + // image has not changed; update the serviceSpec with the + // existing information that was set by QueryRegistry on the + // previous deploy. 
Otherwise this will trigger an incorrect + // service update. + serviceSpec.TaskTemplate.ContainerSpec.Image = service.Spec.TaskTemplate.ContainerSpec.Image + } + + // Stack deploy does not have a `--force` option. Preserve existing ForceUpdate + // value so that tasks are not re-deployed if not updated. + // TODO move this to API client? + serviceSpec.TaskTemplate.ForceUpdate = service.Spec.TaskTemplate.ForceUpdate + + response, err := apiClient.ServiceUpdate( + ctx, + service.ID, + service.Version, + serviceSpec, + updateOpts, + ) + if err != nil { + return errors.Wrapf(err, "failed to update service %s", name) + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + } else { + fmt.Fprintf(out, "Creating service %s\n", name) + + createOpts := types.ServiceCreateOptions{EncodedRegistryAuth: encodedAuth} + + // query registry if flag disabling it was not set + if resolveImage == ResolveImageAlways || resolveImage == ResolveImageChanged { + createOpts.QueryRegistry = true + } + + if _, err := apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil { + return errors.Wrapf(err, "failed to create service %s", name) + } + } + } + return nil +} diff --git a/cli/cli/command/stack/swarm/deploy_composefile_test.go b/cli/cli/command/stack/swarm/deploy_composefile_test.go new file mode 100644 index 00000000..065a4f29 --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_composefile_test.go @@ -0,0 +1,67 @@ +package swarm + +import ( + "context" + "testing" + + "github.com/docker/cli/internal/test/network" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +type notFound struct { + error +} + +func (n notFound) NotFound() bool { + return true +} + +func TestValidateExternalNetworks(t *testing.T) { + var testcases = []struct { + inspectResponse types.NetworkResource + inspectError error + expectedMsg string + network string + }{ + { + inspectError: notFound{}, + expectedMsg: "could not be found. 
You need to create a swarm-scoped network", + }, + { + inspectError: errors.New("Unexpected"), + expectedMsg: "Unexpected", + }, + // FIXME(vdemeester) that doesn't work under windows, the check needs to be smarter + /* + { + inspectError: errors.New("host net does not exist on swarm classic"), + network: "host", + }, + */ + { + network: "user", + expectedMsg: "is not in the right scope", + }, + { + network: "user", + inspectResponse: types.NetworkResource{Scope: "swarm"}, + }, + } + + for _, testcase := range testcases { + fakeClient := &network.FakeClient{ + NetworkInspectFunc: func(_ context.Context, _ string, _ types.NetworkInspectOptions) (types.NetworkResource, error) { + return testcase.inspectResponse, testcase.inspectError + }, + } + networks := []string{testcase.network} + err := validateExternalNetworks(context.Background(), fakeClient, networks) + if testcase.expectedMsg == "" { + assert.NilError(t, err) + } else { + assert.ErrorContains(t, err, testcase.expectedMsg) + } + } +} diff --git a/cli/cli/command/stack/swarm/deploy_test.go b/cli/cli/command/stack/swarm/deploy_test.go new file mode 100644 index 00000000..b1df60dc --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_test.go @@ -0,0 +1,110 @@ +package swarm + +import ( + "context" + "testing" + + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPruneServices(t *testing.T) { + ctx := context.Background() + namespace := convert.NewNamespace("foo") + services := map[string]struct{}{ + "new": {}, + "keep": {}, + } + client := &fakeClient{services: []string{objectName("foo", "keep"), objectName("foo", "remove")}} + dockerCli := test.NewFakeCli(client) + + pruneServices(ctx, dockerCli, namespace, services) + assert.Check(t, is.DeepEqual(buildObjectIDs([]string{objectName("foo", "remove")}), client.removedServices)) +} + +// TestServiceUpdateResolveImageChanged tests that the service's +// image digest, and "ForceUpdate" is preserved if the image did not change in +// the compose file +func TestServiceUpdateResolveImageChanged(t *testing.T) { + namespace := convert.NewNamespace("mystack") + + var ( + receivedOptions types.ServiceUpdateOptions + receivedService swarm.ServiceSpec + ) + + client := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + { + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: namespace.Name() + "_myservice", + Labels: map[string]string{"com.docker.stack.image": "foobar:1.2.3"}, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "foobar:1.2.3@sha256:deadbeef", + }, + ForceUpdate: 123, + }, + }, + }, + }, nil + }, + serviceUpdateFunc: func(serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + receivedOptions = options + receivedService = service + return types.ServiceUpdateResponse{}, nil + }, + }) + + var testcases = []struct { + image string + expectedQueryRegistry bool + expectedImage string + expectedForceUpdate uint64 + }{ + // Image not changed + { + image: "foobar:1.2.3", + expectedQueryRegistry: false, + expectedImage: "foobar:1.2.3@sha256:deadbeef", + expectedForceUpdate: 123, + }, + // Image changed + { + image: "foobar:1.2.4", + expectedQueryRegistry: true, + expectedImage: 
"foobar:1.2.4", + expectedForceUpdate: 123, + }, + } + + ctx := context.Background() + + for _, testcase := range testcases { + t.Logf("Testing image %q", testcase.image) + spec := map[string]swarm.ServiceSpec{ + "myservice": { + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: testcase.image, + }, + }, + }, + } + err := deployServices(ctx, client, spec, namespace, false, ResolveImageChanged) + assert.NilError(t, err) + assert.Check(t, is.Equal(receivedOptions.QueryRegistry, testcase.expectedQueryRegistry)) + assert.Check(t, is.Equal(receivedService.TaskTemplate.ContainerSpec.Image, testcase.expectedImage)) + assert.Check(t, is.Equal(receivedService.TaskTemplate.ForceUpdate, testcase.expectedForceUpdate)) + + receivedService = swarm.ServiceSpec{} + receivedOptions = types.ServiceUpdateOptions{} + } +} diff --git a/cli/cli/command/stack/swarm/list.go b/cli/cli/command/stack/swarm/list.go new file mode 100644 index 00000000..c0c19d0a --- /dev/null +++ b/cli/cli/command/stack/swarm/list.go @@ -0,0 +1,45 @@ +package swarm + +import ( + "context" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// GetStacks lists the swarm stacks. +func GetStacks(dockerCli command.Cli) ([]*formatter.Stack, error) { + services, err := dockerCli.Client().ServiceList( + context.Background(), + types.ServiceListOptions{Filters: getAllStacksFilter()}) + if err != nil { + return nil, err + } + m := make(map[string]*formatter.Stack) + for _, service := range services { + labels := service.Spec.Labels + name, ok := labels[convert.LabelNamespace] + if !ok { + return nil, errors.Errorf("cannot get label %s for service %s", + convert.LabelNamespace, service.ID) + } + ztack, ok := m[name] + if !ok { + m[name] = &formatter.Stack{ + Name: name, + Services: 1, + Orchestrator: "Swarm", + } + } else { + ztack.Services++ + } + } + var stacks []*formatter.Stack + for _, stack := range m { + stacks = append(stacks, stack) + } + return stacks, nil +} diff --git a/cli/cli/command/stack/swarm/ps.go b/cli/cli/command/stack/swarm/ps.go new file mode 100644 index 00000000..5b28a39e --- /dev/null +++ b/cli/cli/command/stack/swarm/ps.go @@ -0,0 +1,35 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/task" + "github.com/docker/docker/api/types" +) + +// RunPS is the swarm implementation of docker stack ps +func RunPS(dockerCli command.Cli, opts options.PS) error { + filter := getStackFilterFromOpt(opts.Namespace, opts.Filter) + + ctx := context.Background() + client := dockerCli.Client() + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + if len(tasks) == 0 { + return fmt.Errorf("nothing found in stack: %s", opts.Namespace) + } + + format := opts.Format + if len(format) == 0 { + format = task.DefaultFormat(dockerCli.ConfigFile(), opts.Quiet) + } + + return task.Print(ctx, dockerCli, tasks, idresolver.New(client, opts.NoResolve), !opts.NoTrunc, opts.Quiet, format) +} diff --git a/cli/cli/command/stack/swarm/remove.go b/cli/cli/command/stack/swarm/remove.go new file mode 100644 index 00000000..4dedef12 --- /dev/null +++ b/cli/cli/command/stack/swarm/remove.go @@ -0,0 +1,140 @@ +package swarm + +import ( + "context" + "fmt" 
+ "sort" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// RunRemove is the swarm implementation of docker stack remove +func RunRemove(dockerCli command.Cli, opts options.Remove) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + for _, namespace := range opts.Namespaces { + services, err := getStackServices(ctx, client, namespace) + if err != nil { + return err + } + + networks, err := getStackNetworks(ctx, client, namespace) + if err != nil { + return err + } + + var secrets []swarm.Secret + if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.25") { + secrets, err = getStackSecrets(ctx, client, namespace) + if err != nil { + return err + } + } + + var configs []swarm.Config + if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.30") { + configs, err = getStackConfigs(ctx, client, namespace) + if err != nil { + return err + } + } + + if len(services)+len(networks)+len(secrets)+len(configs) == 0 { + fmt.Fprintf(dockerCli.Err(), "Nothing found in stack: %s\n", namespace) + continue + } + + hasError := removeServices(ctx, dockerCli, services) + hasError = removeSecrets(ctx, dockerCli, secrets) || hasError + hasError = removeConfigs(ctx, dockerCli, configs) || hasError + hasError = removeNetworks(ctx, dockerCli, networks) || hasError + + if hasError { + errs = append(errs, fmt.Sprintf("Failed to remove some resources from stack: %s", namespace)) + } + } + + if len(errs) > 0 { + return errors.Errorf(strings.Join(errs, "\n")) + } + return nil +} + +func sortServiceByName(services []swarm.Service) func(i, j int) bool { + return func(i, j int) bool { + return services[i].Spec.Name < services[j].Spec.Name + } +} + +func removeServices( + ctx context.Context, + dockerCli command.Cli, + services []swarm.Service, +) bool { + var hasError bool + sort.Slice(services, sortServiceByName(services)) + for _, service := range services { + fmt.Fprintf(dockerCli.Out(), "Removing service %s\n", service.Spec.Name) + if err := dockerCli.Client().ServiceRemove(ctx, service.ID); err != nil { + hasError = true + fmt.Fprintf(dockerCli.Err(), "Failed to remove service %s: %s", service.ID, err) + } + } + return hasError +} + +func removeNetworks( + ctx context.Context, + dockerCli command.Cli, + networks []types.NetworkResource, +) bool { + var hasError bool + for _, network := range networks { + fmt.Fprintf(dockerCli.Out(), "Removing network %s\n", network.Name) + if err := dockerCli.Client().NetworkRemove(ctx, network.ID); err != nil { + hasError = true + fmt.Fprintf(dockerCli.Err(), "Failed to remove network %s: %s", network.ID, err) + } + } + return hasError +} + +func removeSecrets( + ctx context.Context, + dockerCli command.Cli, + secrets []swarm.Secret, +) bool { + var hasError bool + for _, secret := range secrets { + fmt.Fprintf(dockerCli.Out(), "Removing secret %s\n", secret.Spec.Name) + if err := dockerCli.Client().SecretRemove(ctx, secret.ID); err != nil { + hasError = true + fmt.Fprintf(dockerCli.Err(), "Failed to remove secret %s: %s", secret.ID, err) + } + } + return hasError +} + +func removeConfigs( + ctx context.Context, + dockerCli command.Cli, + configs []swarm.Config, +) bool { + var hasError bool + for _, config := range configs { + fmt.Fprintf(dockerCli.Out(), "Removing config %s\n", config.Spec.Name) + if err := 
dockerCli.Client().ConfigRemove(ctx, config.ID); err != nil { + hasError = true + fmt.Fprintf(dockerCli.Err(), "Failed to remove config %s: %s", config.ID, err) + } + } + return hasError +} diff --git a/cli/cli/command/stack/swarm/services.go b/cli/cli/command/stack/swarm/services.go new file mode 100644 index 00000000..07b990ad --- /dev/null +++ b/cli/cli/command/stack/swarm/services.go @@ -0,0 +1,66 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/service" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// RunServices is the swarm implementation of docker stack services +func RunServices(dockerCli command.Cli, opts options.Services) error { + ctx := context.Background() + client := dockerCli.Client() + + filter := getStackFilterFromOpt(opts.Namespace, opts.Filter) + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) + if err != nil { + return err + } + + // if no services in this stack, print message and exit 0 + if len(services) == 0 { + fmt.Fprintf(dockerCli.Err(), "Nothing found in stack: %s\n", opts.Namespace) + return nil + } + + info := map[string]formatter.ServiceListInfo{} + if !opts.Quiet { + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + info = service.GetServicesStatus(services, nodes, tasks) + } + + format := opts.Format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !opts.Quiet { + format = dockerCli.ConfigFile().ServicesFormat + } else { + format = formatter.TableFormatKey + } + } + + servicesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewServiceListFormat(format, opts.Quiet), + } + return formatter.ServiceListWrite(servicesCtx, services, info) +} diff --git a/cli/cli/command/stack/swarm/testdata/bundlefile_with_two_services.dab b/cli/cli/command/stack/swarm/testdata/bundlefile_with_two_services.dab new file mode 100644 index 00000000..ced8180d --- /dev/null +++ b/cli/cli/command/stack/swarm/testdata/bundlefile_with_two_services.dab @@ -0,0 +1,29 @@ +{ + "Services": { + "visualizer": { + "Image": "busybox@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f", + "Networks": [ + "webnet" + ], + "Ports": [ + { + "Port": 8080, + "Protocol": "tcp" + } + ] + }, + "web": { + "Image": "busybox@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f", + "Networks": [ + "webnet" + ], + "Ports": [ + { + "Port": 80, + "Protocol": "tcp" + } + ] + } + }, + "Version": "0.1" +} diff --git a/cli/cli/command/stack/testdata/stack-list-sort-natural.golden b/cli/cli/command/stack/testdata/stack-list-sort-natural.golden new file mode 100644 index 00000000..3090cb9e --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-list-sort-natural.golden @@ -0,0 +1,4 @@ +NAME SERVICES ORCHESTRATOR +service-name-1-foo 1 Swarm +service-name-2-foo 1 Swarm +service-name-10-foo 1 Swarm diff --git a/cli/cli/command/stack/testdata/stack-list-sort.golden b/cli/cli/command/stack/testdata/stack-list-sort.golden new file mode 100644 index 00000000..179ae71d --- /dev/null +++ 
b/cli/cli/command/stack/testdata/stack-list-sort.golden @@ -0,0 +1,3 @@ +NAME SERVICES ORCHESTRATOR +service-name-bar 1 Swarm +service-name-foo 1 Swarm diff --git a/cli/cli/command/stack/testdata/stack-list-with-format.golden b/cli/cli/command/stack/testdata/stack-list-with-format.golden new file mode 100644 index 00000000..b53e6401 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-list-with-format.golden @@ -0,0 +1 @@ +service-name-foo diff --git a/cli/cli/command/stack/testdata/stack-list-without-format.golden b/cli/cli/command/stack/testdata/stack-list-without-format.golden new file mode 100644 index 00000000..37213aaf --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-list-without-format.golden @@ -0,0 +1,2 @@ +NAME SERVICES ORCHESTRATOR +service-name-foo 1 Swarm diff --git a/cli/cli/command/stack/testdata/stack-ps-with-config-format.golden b/cli/cli/command/stack/testdata/stack-ps-with-config-format.golden new file mode 100644 index 00000000..9ecebdaf --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-config-format.golden @@ -0,0 +1 @@ +service-id-foo.1 diff --git a/cli/cli/command/stack/testdata/stack-ps-with-format.golden b/cli/cli/command/stack/testdata/stack-ps-with-format.golden new file mode 100644 index 00000000..9ecebdaf --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-format.golden @@ -0,0 +1 @@ +service-id-foo.1 diff --git a/cli/cli/command/stack/testdata/stack-ps-with-no-resolve-option.golden b/cli/cli/command/stack/testdata/stack-ps-with-no-resolve-option.golden new file mode 100644 index 00000000..b90d743b --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-no-resolve-option.golden @@ -0,0 +1 @@ +id-node-foo diff --git a/cli/cli/command/stack/testdata/stack-ps-with-no-trunc-option.golden b/cli/cli/command/stack/testdata/stack-ps-with-no-trunc-option.golden new file mode 100644 index 00000000..8179bf4d --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-no-trunc-option.golden @@ -0,0 +1 @@ +xn4cypcov06f2w8gsbaf2lst3 diff --git a/cli/cli/command/stack/testdata/stack-ps-with-quiet-option.golden b/cli/cli/command/stack/testdata/stack-ps-with-quiet-option.golden new file mode 100644 index 00000000..e2faeb60 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-quiet-option.golden @@ -0,0 +1 @@ +id-foo diff --git a/cli/cli/command/stack/testdata/stack-ps-without-format.golden b/cli/cli/command/stack/testdata/stack-ps-without-format.golden new file mode 100644 index 00000000..ceb4f841 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-without-format.golden @@ -0,0 +1,2 @@ +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +id-foo service-id-foo.1 myimage:mytag node-name-bar Ready Failed 2 hours ago diff --git a/cli/cli/command/stack/testdata/stack-services-with-config-format.golden b/cli/cli/command/stack/testdata/stack-services-with-config-format.golden new file mode 100644 index 00000000..b53e6401 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-services-with-config-format.golden @@ -0,0 +1 @@ +service-name-foo diff --git a/cli/cli/command/stack/testdata/stack-services-with-format.golden b/cli/cli/command/stack/testdata/stack-services-with-format.golden new file mode 100644 index 00000000..b53e6401 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-services-with-format.golden @@ -0,0 +1 @@ +service-name-foo diff --git a/cli/cli/command/stack/testdata/stack-services-with-quiet-option.golden b/cli/cli/command/stack/testdata/stack-services-with-quiet-option.golden new 
file mode 100644 index 00000000..e2faeb60 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-services-with-quiet-option.golden @@ -0,0 +1 @@ +id-foo diff --git a/cli/cli/command/stack/testdata/stack-services-without-format.golden b/cli/cli/command/stack/testdata/stack-services-without-format.golden new file mode 100644 index 00000000..dcca0dfa --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-services-without-format.golden @@ -0,0 +1,2 @@ +ID NAME MODE REPLICAS IMAGE PORTS +id-foo name-foo replicated 0/2 busybox:latest *:30000->3232/tcp diff --git a/cli/cli/command/stream.go b/cli/cli/command/stream.go new file mode 100644 index 00000000..71a43fa2 --- /dev/null +++ b/cli/cli/command/stream.go @@ -0,0 +1,34 @@ +package command + +import ( + "github.com/docker/docker/pkg/term" +) + +// CommonStream is an input stream used by the DockerCli to read user input +type CommonStream struct { + fd uintptr + isTerminal bool + state *term.State +} + +// FD returns the file descriptor number for this stream +func (s *CommonStream) FD() uintptr { + return s.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (s *CommonStream) IsTerminal() bool { + return s.isTerminal +} + +// RestoreTerminal restores normal mode to the terminal +func (s *CommonStream) RestoreTerminal() { + if s.state != nil { + term.RestoreTerminal(s.fd, s.state) + } +} + +// SetIsTerminal sets the boolean used for isTerminal +func (s *CommonStream) SetIsTerminal(isTerminal bool) { + s.isTerminal = isTerminal +} diff --git a/cli/cli/command/swarm/ca.go b/cli/cli/command/swarm/ca.go new file mode 100644 index 00000000..961e7e89 --- /dev/null +++ b/cli/cli/command/swarm/ca.go @@ -0,0 +1,141 @@ +package swarm + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/swarm/progress" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type caOptions struct { + swarmCAOptions + rootCACert PEMFile + rootCAKey PEMFile + rotate bool + detach bool + quiet bool +} + +func newCACommand(dockerCli command.Cli) *cobra.Command { + opts := caOptions{} + + cmd := &cobra.Command{ + Use: "ca [OPTIONS]", + Short: "Display and rotate the root CA", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runCA(dockerCli, cmd.Flags(), opts) + }, + Annotations: map[string]string{"version": "1.30"}, + } + + flags := cmd.Flags() + addSwarmCAFlags(flags, &opts.swarmCAOptions) + flags.BoolVar(&opts.rotate, flagRotate, false, "Rotate the swarm CA - if no certificate or key are provided, new ones will be generated") + flags.Var(&opts.rootCACert, flagCACert, "Path to the PEM-formatted root CA certificate to use for the new cluster") + flags.Var(&opts.rootCAKey, flagCAKey, "Path to the PEM-formatted root CA key to use for the new cluster") + + flags.BoolVarP(&opts.detach, "detach", "d", false, "Exit immediately instead of waiting for the root rotation to converge") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress progress output") + return cmd +} + +func runCA(dockerCli command.Cli, flags *pflag.FlagSet, opts caOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + swarmInspect, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if !opts.rotate { + for _, f := range []string{flagCACert, flagCAKey, 
flagCertExpiry, flagExternalCA} { + if flags.Changed(f) { + return fmt.Errorf("`--%s` flag requires the `--rotate` flag to update the CA", f) + } + } + return displayTrustRoot(dockerCli.Out(), swarmInspect) + } + + if flags.Changed(flagExternalCA) && len(opts.externalCA.Value()) > 0 && !flags.Changed(flagCACert) { + return fmt.Errorf( + "rotating to an external CA requires the `--%s` flag to specify the external CA's cert - "+ + "to add an external CA with the current root CA certificate, use the `update` command instead", flagCACert) + } + + if flags.Changed(flagCACert) && len(opts.externalCA.Value()) == 0 && !flags.Changed(flagCAKey) { + return fmt.Errorf("the --%s flag requires that a --%s flag and/or --%s flag be provided as well", + flagCACert, flagCAKey, flagExternalCA) + } + + updateSwarmSpec(&swarmInspect.Spec, flags, opts) + if err := client.SwarmUpdate(ctx, swarmInspect.Version, swarmInspect.Spec, swarm.UpdateFlags{}); err != nil { + return err + } + + if opts.detach { + return nil + } + return attach(ctx, dockerCli, opts) +} + +func updateSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet, opts caOptions) { + caCert := opts.rootCACert.Contents() + caKey := opts.rootCAKey.Contents() + opts.mergeSwarmSpecCAFlags(spec, flags, caCert) + + spec.CAConfig.SigningCACert = caCert + spec.CAConfig.SigningCAKey = caKey + + if caKey == "" && caCert == "" { + spec.CAConfig.ForceRotate++ + } +} + +func attach(ctx context.Context, dockerCli command.Cli, opts caOptions) error { + client := dockerCli.Client() + errChan := make(chan error, 1) + pipeReader, pipeWriter := io.Pipe() + + go func() { + errChan <- progress.RootRotationProgress(ctx, client, pipeWriter) + }() + + if opts.quiet { + go io.Copy(ioutil.Discard, pipeReader) + return <-errChan + } + + err := jsonmessage.DisplayJSONMessagesToStream(pipeReader, dockerCli.Out(), nil) + if err == nil { + err = <-errChan + } + if err != nil { + return err + } + + swarmInspect, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + return displayTrustRoot(dockerCli.Out(), swarmInspect) +} + +func displayTrustRoot(out io.Writer, info swarm.Swarm) error { + if info.ClusterInfo.TLSInfo.TrustRoot == "" { + return errors.New("No CA information available") + } + fmt.Fprintln(out, strings.TrimSpace(info.ClusterInfo.TLSInfo.TrustRoot)) + return nil +} diff --git a/cli/cli/command/swarm/ca_test.go b/cli/cli/command/swarm/ca_test.go new file mode 100644 index 00000000..d0eff116 --- /dev/null +++ b/cli/cli/command/swarm/ca_test.go @@ -0,0 +1,300 @@ +package swarm + +import ( + "bytes" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +const ( + cert = ` +-----BEGIN CERTIFICATE----- +MIIBuDCCAV4CCQDOqUYOWdqMdjAKBggqhkjOPQQDAzBjMQswCQYDVQQGEwJVUzEL +MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv +Y2tlcjEPMA0GA1UECwwGRG9ja2VyMQ0wCwYDVQQDDARUZXN0MCAXDTE4MDcwMjIx +MjkxOFoYDzMwMTcxMTAyMjEyOTE4WjBjMQswCQYDVQQGEwJVUzELMAkGA1UECAwC +Q0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRvY2tlcjEPMA0G +A1UECwwGRG9ja2VyMQ0wCwYDVQQDDARUZXN0MFkwEwYHKoZIzj0CAQYIKoZIzj0D +AQcDQgAEgvvZl5Vqpr1e+g5IhoU6TZHgRau+BZETVFTmqyWYajA/mooRQ1MZTozu +s9ZZZA8tzUhIqS36gsFuyIZ4YiAlyjAKBggqhkjOPQQDAwNIADBFAiBQ7pCPQrj8 +8zaItMf0pk8j1NU5XrFqFEZICzvjzUJQBAIhAKq2gFwoTn8KH+cAAXZpAGJPmOsT +zsBT8gBAOHhNA6/2 +-----END CERTIFICATE-----` + key = ` +-----BEGIN EC PRIVATE KEY----- 
+MHcCAQEEICyheZpw70pbgO4hEuwhZTETWyTpNJmJ3TyFaWT6WTRkoAoGCCqGSM49 +AwEHoUQDQgAEgvvZl5Vqpr1e+g5IhoU6TZHgRau+BZETVFTmqyWYajA/mooRQ1MZ +Tozus9ZZZA8tzUhIqS36gsFuyIZ4YiAlyg== +-----END EC PRIVATE KEY-----` +) + +func swarmSpecWithFullCAConfig() *swarm.Spec { + return &swarm.Spec{ + CAConfig: swarm.CAConfig{ + SigningCACert: "cacert", + SigningCAKey: "cakey", + ForceRotate: 1, + NodeCertExpiry: time.Duration(200), + ExternalCAs: []*swarm.ExternalCA{ + { + URL: "https://example.com/ca", + Protocol: swarm.ExternalCAProtocolCFSSL, + CACert: "excacert", + }, + }, + }, + } +} + +func TestDisplayTrustRootNoRoot(t *testing.T) { + buffer := new(bytes.Buffer) + err := displayTrustRoot(buffer, swarm.Swarm{}) + assert.Error(t, err, "No CA information available") +} + +type invalidCATestCases struct { + args []string + errorMsg string +} + +func writeFile(data string) (string, error) { + tmpfile, err := ioutil.TempFile("", "testfile") + if err != nil { + return "", err + } + _, err = tmpfile.Write([]byte(data)) + if err != nil { + return "", err + } + tmpfile.Close() + return tmpfile.Name(), nil +} + +func TestDisplayTrustRootInvalidFlags(t *testing.T) { + // we need an actual PEMfile to test + tmpfile, err := writeFile(cert) + assert.NilError(t, err) + defer os.Remove(tmpfile) + + errorTestCases := []invalidCATestCases{ + { + args: []string{"--ca-cert=" + tmpfile}, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { + args: []string{"--ca-key=" + tmpfile}, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { // to make sure we're not erroring because we didn't provide a CA key along with the CA cert + args: []string{ + "--ca-cert=" + tmpfile, + "--ca-key=" + tmpfile, + }, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { + args: []string{"--cert-expiry=2160h0m0s"}, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { + args: []string{"--external-ca=protocol=cfssl,url=https://some.com/https/url"}, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { // to make sure we're not erroring because we didn't provide a CA cert and external CA + args: []string{ + "--ca-cert=" + tmpfile, + "--external-ca=protocol=cfssl,url=https://some.com/https/url", + }, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { + args: []string{ + "--rotate", + "--external-ca=protocol=cfssl,url=https://some.com/https/url", + }, + errorMsg: "rotating to an external CA requires the `--ca-cert` flag to specify the external CA's cert - " + + "to add an external CA with the current root CA certificate, use the `update` command instead", + }, + { + args: []string{ + "--rotate", + "--ca-cert=" + tmpfile, + }, + errorMsg: "the --ca-cert flag requires that a --ca-key flag and/or --external-ca flag be provided as well", + }, + } + + for _, testCase := range errorTestCases { + cmd := newCACommand( + test.NewFakeCli(&fakeClient{ + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + TLSInfo: swarm.TLSInfo{ + TrustRoot: "root", + }, + }, + }, nil + }, + })) + assert.Check(t, cmd.Flags().Parse(testCase.args)) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), testCase.errorMsg) + } +} + +func TestDisplayTrustRoot(t *testing.T) { + buffer := new(bytes.Buffer) + trustRoot := "trustme" + err := displayTrustRoot(buffer, swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + TLSInfo: swarm.TLSInfo{TrustRoot: trustRoot}, + }, + }) + assert.NilError(t, 
err) + assert.Check(t, is.Equal(trustRoot+"\n", buffer.String())) +} + +type swarmUpdateRecorder struct { + spec swarm.Spec +} + +func (s *swarmUpdateRecorder) swarmUpdate(sp swarm.Spec, _ swarm.UpdateFlags) error { + s.spec = sp + return nil +} + +func swarmInspectFuncWithFullCAConfig() (swarm.Swarm, error) { + return swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + Spec: *swarmSpecWithFullCAConfig(), + }, + }, nil +} + +func TestUpdateSwarmSpecDefaultRotate(t *testing.T) { + s := &swarmUpdateRecorder{} + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: swarmInspectFuncWithFullCAConfig, + swarmUpdateFunc: s.swarmUpdate, + }) + cmd := newCACommand(cli) + cmd.SetArgs([]string{"--rotate", "--detach"}) + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + + expected := swarmSpecWithFullCAConfig() + expected.CAConfig.ForceRotate = 2 + expected.CAConfig.SigningCACert = "" + expected.CAConfig.SigningCAKey = "" + assert.Check(t, is.DeepEqual(*expected, s.spec)) +} + +func TestUpdateSwarmSpecCertAndKey(t *testing.T) { + certfile, err := writeFile(cert) + assert.NilError(t, err) + defer os.Remove(certfile) + + keyfile, err := writeFile(key) + assert.NilError(t, err) + defer os.Remove(keyfile) + + s := &swarmUpdateRecorder{} + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: swarmInspectFuncWithFullCAConfig, + swarmUpdateFunc: s.swarmUpdate, + }) + cmd := newCACommand(cli) + cmd.SetArgs([]string{ + "--rotate", + "--detach", + "--ca-cert=" + certfile, + "--ca-key=" + keyfile, + "--cert-expiry=3m"}) + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + + expected := swarmSpecWithFullCAConfig() + expected.CAConfig.SigningCACert = cert + expected.CAConfig.SigningCAKey = key + expected.CAConfig.NodeCertExpiry = 3 * time.Minute + assert.Check(t, is.DeepEqual(*expected, s.spec)) +} + +func TestUpdateSwarmSpecCertAndExternalCA(t *testing.T) { + certfile, err := writeFile(cert) + assert.NilError(t, err) + defer os.Remove(certfile) + + s := &swarmUpdateRecorder{} + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: swarmInspectFuncWithFullCAConfig, + swarmUpdateFunc: s.swarmUpdate, + }) + cmd := newCACommand(cli) + cmd.SetArgs([]string{ + "--rotate", + "--detach", + "--ca-cert=" + certfile, + "--external-ca=protocol=cfssl,url=https://some.external.ca"}) + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + + expected := swarmSpecWithFullCAConfig() + expected.CAConfig.SigningCACert = cert + expected.CAConfig.SigningCAKey = "" + expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{ + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://some.external.ca", + CACert: cert, + Options: make(map[string]string), + }, + } + assert.Check(t, is.DeepEqual(*expected, s.spec)) +} + +func TestUpdateSwarmSpecCertAndKeyAndExternalCA(t *testing.T) { + certfile, err := writeFile(cert) + assert.NilError(t, err) + defer os.Remove(certfile) + + keyfile, err := writeFile(key) + assert.NilError(t, err) + defer os.Remove(keyfile) + + s := &swarmUpdateRecorder{} + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: swarmInspectFuncWithFullCAConfig, + swarmUpdateFunc: s.swarmUpdate, + }) + cmd := newCACommand(cli) + cmd.SetArgs([]string{ + "--rotate", + "--detach", + "--ca-cert=" + certfile, + "--ca-key=" + keyfile, + "--external-ca=protocol=cfssl,url=https://some.external.ca"}) + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + + expected := swarmSpecWithFullCAConfig() + expected.CAConfig.SigningCACert = cert + 
expected.CAConfig.SigningCAKey = key + expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{ + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://some.external.ca", + CACert: cert, + Options: make(map[string]string), + }, + } + assert.Check(t, is.DeepEqual(*expected, s.spec)) +} diff --git a/cli/cli/command/swarm/client_test.go b/cli/cli/command/swarm/client_test.go new file mode 100644 index 00000000..8695c895 --- /dev/null +++ b/cli/cli/command/swarm/client_test.go @@ -0,0 +1,85 @@ +package swarm + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + infoFunc func() (types.Info, error) + swarmInitFunc func() (string, error) + swarmInspectFunc func() (swarm.Swarm, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + swarmJoinFunc func() error + swarmLeaveFunc func() error + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmUnlockFunc func(req swarm.UnlockRequest) error +} + +func (cli *fakeClient) Info(ctx context.Context) (types.Info, error) { + if cli.infoFunc != nil { + return cli.infoFunc() + } + return types.Info{}, nil +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectFunc != nil { + return cli.nodeInspectFunc() + } + return swarm.Node{}, []byte{}, nil +} + +func (cli *fakeClient) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + if cli.swarmInitFunc != nil { + return cli.swarmInitFunc() + } + return "", nil +} + +func (cli *fakeClient) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + if cli.swarmInspectFunc != nil { + return cli.swarmInspectFunc() + } + return swarm.Swarm{}, nil +} + +func (cli *fakeClient) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + if cli.swarmGetUnlockKeyFunc != nil { + return cli.swarmGetUnlockKeyFunc() + } + return types.SwarmUnlockKeyResponse{}, nil +} + +func (cli *fakeClient) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + if cli.swarmJoinFunc != nil { + return cli.swarmJoinFunc() + } + return nil +} + +func (cli *fakeClient) SwarmLeave(ctx context.Context, force bool) error { + if cli.swarmLeaveFunc != nil { + return cli.swarmLeaveFunc() + } + return nil +} + +func (cli *fakeClient) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + if cli.swarmUpdateFunc != nil { + return cli.swarmUpdateFunc(swarm, flags) + } + return nil +} + +func (cli *fakeClient) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + if cli.swarmUnlockFunc != nil { + return cli.swarmUnlockFunc(req) + } + return nil +} diff --git a/cli/cli/command/swarm/cmd.go b/cli/cli/command/swarm/cmd.go new file mode 100644 index 00000000..89bf5c3c --- /dev/null +++ b/cli/cli/command/swarm/cmd.go @@ -0,0 +1,33 @@ +package swarm + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewSwarmCommand returns a cobra command for `swarm` subcommands +func NewSwarmCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "swarm", + Short: "Manage Swarm", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.24", + "swarm": "", + }, + } + cmd.AddCommand( + 
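+ // Descriptive note: every subcommand below is constructed from the same command.Cli, so each one shares the API client and the CLI's input/output streams.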
newInitCommand(dockerCli), + newJoinCommand(dockerCli), + newJoinTokenCommand(dockerCli), + newUnlockKeyCommand(dockerCli), + newUpdateCommand(dockerCli), + newLeaveCommand(dockerCli), + newUnlockCommand(dockerCli), + newCACommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/swarm/init.go b/cli/cli/command/swarm/init.go new file mode 100644 index 00000000..d9dadd61 --- /dev/null +++ b/cli/cli/command/swarm/init.go @@ -0,0 +1,98 @@ +package swarm + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type initOptions struct { + swarmOptions + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. + advertiseAddr string + dataPathAddr string + forceNewCluster bool + availability string +} + +func newInitCommand(dockerCli command.Cli) *cobra.Command { + opts := initOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "init [OPTIONS]", + Short: "Initialize a swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInit(dockerCli, cmd.Flags(), opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.StringVar(&opts.dataPathAddr, flagDataPathAddr, "", "Address or interface to use for data path traffic (format: )") + flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state") + flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)") + flags.StringVar(&opts.availability, flagAvailability, "active", `Availability of the node ("active"|"pause"|"drain")`) + addSwarmFlags(flags, &opts.swarmOptions) + return cmd +} + +func runInit(dockerCli command.Cli, flags *pflag.FlagSet, opts initOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.InitRequest{ + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + DataPathAddr: opts.dataPathAddr, + ForceNewCluster: opts.forceNewCluster, + Spec: opts.swarmOptions.ToSpec(flags), + AutoLockManagers: opts.swarmOptions.autolock, + } + if flags.Changed(flagAvailability) { + availability := swarm.NodeAvailability(strings.ToLower(opts.availability)) + switch availability { + case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain: + req.Availability = availability + default: + return errors.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) + } + } + + nodeID, err := client.SwarmInit(ctx, req) + if err != nil { + if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") { + return errors.New(err.Error() + " - specify one with --advertise-addr") + } + return err + } + + fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) + + if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil { + return err + } + + fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n") + + if req.AutoLockManagers { + 
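+ // Autolock was requested, so fetch the generated unlock key now and show the operator the command needed to unlock a restarted manager.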
unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(dockerCli.Out(), unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/cli/cli/command/swarm/init_test.go b/cli/cli/command/swarm/init_test.go new file mode 100644 index 00000000..735cc8da --- /dev/null +++ b/cli/cli/command/swarm/init_test.go @@ -0,0 +1,125 @@ +package swarm + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSwarmInitErrorOnAPIFailure(t *testing.T) { + testCases := []struct { + name string + flags map[string]string + swarmInitFunc func() (string, error) + swarmInspectFunc func() (swarm.Swarm, error) + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + expectedError string + }{ + { + name: "init-failed", + swarmInitFunc: func() (string, error) { + return "", errors.Errorf("error initializing the swarm") + }, + expectedError: "error initializing the swarm", + }, + { + name: "init-failed-with-ip-choice", + swarmInitFunc: func() (string, error) { + return "", errors.Errorf("could not choose an IP address to advertise") + }, + expectedError: "could not choose an IP address to advertise - specify one with --advertise-addr", + }, + { + name: "swarm-inspect-after-init-failed", + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "node-inspect-after-init-failed", + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + name: "swarm-get-unlock-key-after-init-failed", + flags: map[string]string{ + flagAutolock: "true", + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting swarm unlock key") + }, + expectedError: "could not fetch unlock key: error getting swarm unlock key", + }, + } + for _, tc := range testCases { + cmd := newInitCommand( + test.NewFakeCli(&fakeClient{ + swarmInitFunc: tc.swarmInitFunc, + swarmInspectFunc: tc.swarmInspectFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + nodeInspectFunc: tc.nodeInspectFunc, + })) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmInit(t *testing.T) { + testCases := []struct { + name string + flags map[string]string + swarmInitFunc func() (string, error) + swarmInspectFunc func() (swarm.Swarm, error) + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + }{ + { + name: "init", + swarmInitFunc: func() (string, error) { + return "nodeID", nil + }, + }, + { + name: "init-autolock", + flags: map[string]string{ + flagAutolock: "true", + }, + swarmInitFunc: func() (string, error) { + return "nodeID", nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInitFunc: 
tc.swarmInitFunc, + swarmInspectFunc: tc.swarmInspectFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + nodeInspectFunc: tc.nodeInspectFunc, + }) + cmd := newInitCommand(cli) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("init-%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/swarm/join.go b/cli/cli/command/swarm/join.go new file mode 100644 index 00000000..d794000d --- /dev/null +++ b/cli/cli/command/swarm/join.go @@ -0,0 +1,87 @@ +package swarm + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type joinOptions struct { + remote string + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. + advertiseAddr string + dataPathAddr string + token string + availability string +} + +func newJoinCommand(dockerCli command.Cli) *cobra.Command { + opts := joinOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "join [OPTIONS] HOST:PORT", + Short: "Join a swarm as a node and/or manager", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runJoin(dockerCli, cmd.Flags(), opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.StringVar(&opts.dataPathAddr, flagDataPathAddr, "", "Address or interface to use for data path traffic (format: )") + flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") + flags.StringVar(&opts.availability, flagAvailability, "active", `Availability of the node ("active"|"pause"|"drain")`) + return cmd +} + +func runJoin(dockerCli command.Cli, flags *pflag.FlagSet, opts joinOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.JoinRequest{ + JoinToken: opts.token, + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + DataPathAddr: opts.dataPathAddr, + RemoteAddrs: []string{opts.remote}, + } + if flags.Changed(flagAvailability) { + availability := swarm.NodeAvailability(strings.ToLower(opts.availability)) + switch availability { + case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain: + req.Availability = availability + default: + return errors.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) + } + } + + err := client.SwarmJoin(ctx, req) + if err != nil { + return err + } + + info, err := client.Info(ctx) + if err != nil { + return err + } + + if info.Swarm.ControlAvailable { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.") + } else { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.") + } + return nil +} diff --git a/cli/cli/command/swarm/join_test.go b/cli/cli/command/swarm/join_test.go new file mode 100644 index 00000000..e70d448d --- /dev/null +++ b/cli/cli/command/swarm/join_test.go @@ -0,0 +1,100 @@ +package swarm + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" 
+ is "gotest.tools/assert/cmp" +) + +func TestSwarmJoinErrors(t *testing.T) { + testCases := []struct { + name string + args []string + swarmJoinFunc func() error + infoFunc func() (types.Info, error) + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"remote1", "remote2"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "join-failed", + args: []string{"remote"}, + swarmJoinFunc: func() error { + return errors.Errorf("error joining the swarm") + }, + expectedError: "error joining the swarm", + }, + { + name: "join-failed-on-init", + args: []string{"remote"}, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + } + for _, tc := range testCases { + cmd := newJoinCommand( + test.NewFakeCli(&fakeClient{ + swarmJoinFunc: tc.swarmJoinFunc, + infoFunc: tc.infoFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmJoin(t *testing.T) { + testCases := []struct { + name string + infoFunc func() (types.Info, error) + expected string + }{ + { + name: "join-as-manager", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + ControlAvailable: true, + }, + }, nil + }, + expected: "This node joined a swarm as a manager.", + }, + { + name: "join-as-worker", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + ControlAvailable: false, + }, + }, nil + }, + expected: "This node joined a swarm as a worker.", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + infoFunc: tc.infoFunc, + }) + cmd := newJoinCommand(cli) + cmd.SetArgs([]string{"remote"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(strings.TrimSpace(cli.OutBuffer().String()), tc.expected)) + } +} diff --git a/cli/cli/command/swarm/join_token.go b/cli/cli/command/swarm/join_token.go new file mode 100644 index 00000000..f8ed93cf --- /dev/null +++ b/cli/cli/command/swarm/join_token.go @@ -0,0 +1,119 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type joinTokenOptions struct { + role string + rotate bool + quiet bool +} + +func newJoinTokenCommand(dockerCli command.Cli) *cobra.Command { + opts := joinTokenOptions{} + + cmd := &cobra.Command{ + Use: "join-token [OPTIONS] (worker|manager)", + Short: "Manage join tokens", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.role = args[0] + return runJoinToken(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.rotate, flagRotate, false, "Rotate join token") + flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func runJoinToken(dockerCli command.Cli, opts joinTokenOptions) error { + worker := opts.role == "worker" + manager := opts.role == "manager" + + if !worker && !manager { + return errors.New("unknown role " + opts.role) + } + + client := dockerCli.Client() + ctx := context.Background() + + if opts.rotate { + flags := swarm.UpdateFlags{ + RotateWorkerToken: worker, + RotateManagerToken: manager, + } + + sw, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if err := 
client.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil { + return err + } + + if !opts.quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", opts.role) + } + } + + // second SwarmInspect in this function, + // this is necessary since SwarmUpdate after first changes the join tokens + sw, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if opts.quiet && worker { + fmt.Fprintln(dockerCli.Out(), sw.JoinTokens.Worker) + return nil + } + + if opts.quiet && manager { + fmt.Fprintln(dockerCli.Out(), sw.JoinTokens.Manager) + return nil + } + + info, err := client.Info(ctx) + if err != nil { + return err + } + + return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager) +} + +func printJoinCommand(ctx context.Context, dockerCli command.Cli, nodeID string, worker bool, manager bool) error { + client := dockerCli.Client() + + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + sw, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if node.ManagerStatus != nil { + if worker { + fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n docker swarm join --token %s %s\n\n", sw.JoinTokens.Worker, node.ManagerStatus.Addr) + } + if manager { + fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n docker swarm join --token %s %s\n\n", sw.JoinTokens.Manager, node.ManagerStatus.Addr) + } + } + + return nil +} diff --git a/cli/cli/command/swarm/join_token_test.go b/cli/cli/command/swarm/join_token_test.go new file mode 100644 index 00000000..1bd7ba25 --- /dev/null +++ b/cli/cli/command/swarm/join_token_test.go @@ -0,0 +1,211 @@ +package swarm + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSwarmJoinTokenErrors(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + infoFunc func() (types.Info, error) + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + nodeInspectFunc func() (swarm.Node, []byte, error) + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"worker", "manager"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "invalid-args", + args: []string{"foo"}, + expectedError: "unknown role foo", + }, + { + name: "swarm-inspect-failed", + args: []string{"worker"}, + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "swarm-inspect-rotate-failed", + args: []string{"worker"}, + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "swarm-update-failed", + args: []string{"worker"}, + flags: map[string]string{ + flagRotate: "true", + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + return errors.Errorf("error updating the swarm") + }, + expectedError: "error updating the swarm", + }, + { + name: "node-inspect-failed", + args: []string{"worker"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") + }, + expectedError: "error inspecting node", + }, + { + name: "info-failed", + args: []string{"worker"}, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + infoFunc: tc.infoFunc, + nodeInspectFunc: tc.nodeInspectFunc, + }) + cmd := newJoinTokenCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmJoinToken(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + infoFunc func() (types.Info, error) + swarmInspectFunc func() (swarm.Swarm, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + }{ + { + name: "worker", + args: []string{"worker"}, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID", + }, + }, nil + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + { + name: "manager", + args: []string{"manager"}, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID", + }, + }, nil + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + { + name: "manager-rotate", + args: 
[]string{"manager"}, + flags: map[string]string{ + flagRotate: "true", + }, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID", + }, + }, nil + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + { + name: "worker-quiet", + args: []string{"worker"}, + flags: map[string]string{ + flagQuiet: "true", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + { + name: "manager-quiet", + args: []string{"manager"}, + flags: map[string]string{ + flagQuiet: "true", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + infoFunc: tc.infoFunc, + nodeInspectFunc: tc.nodeInspectFunc, + }) + cmd := newJoinTokenCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("jointoken-%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/swarm/leave.go b/cli/cli/command/swarm/leave.go new file mode 100644 index 00000000..af6e0753 --- /dev/null +++ b/cli/cli/command/swarm/leave.go @@ -0,0 +1,43 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type leaveOptions struct { + force bool +} + +func newLeaveCommand(dockerCli command.Cli) *cobra.Command { + opts := leaveOptions{} + + cmd := &cobra.Command{ + Use: "leave [OPTIONS]", + Short: "Leave the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLeave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force this node to leave the swarm, ignoring warnings") + return cmd +} + +func runLeave(dockerCli command.Cli, opts leaveOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if err := client.SwarmLeave(ctx, opts.force); err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") + return nil +} diff --git a/cli/cli/command/swarm/leave_test.go b/cli/cli/command/swarm/leave_test.go new file mode 100644 index 00000000..91ee6e24 --- /dev/null +++ b/cli/cli/command/swarm/leave_test.go @@ -0,0 +1,50 @@ +package swarm + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSwarmLeaveErrors(t *testing.T) { + testCases := []struct { + name string + args []string + swarmLeaveFunc func() error + expectedError string + }{ + { + name: "too-many-args", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + name: "leave-failed", + swarmLeaveFunc: func() error { + return errors.Errorf("error leaving the swarm") + }, + expectedError: "error leaving the swarm", + }, + } + for _, tc := range testCases { + cmd := newLeaveCommand( + test.NewFakeCli(&fakeClient{ + swarmLeaveFunc: tc.swarmLeaveFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + 
assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmLeave(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cmd := newLeaveCommand(cli) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("Node left the swarm.", strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/swarm/opts.go b/cli/cli/command/swarm/opts.go new file mode 100644 index 00000000..b2a4de1f --- /dev/null +++ b/cli/cli/command/swarm/opts.go @@ -0,0 +1,273 @@ +package swarm + +import ( + "encoding/csv" + "encoding/pem" + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +const ( + defaultListenAddr = "0.0.0.0:2377" + + flagCertExpiry = "cert-expiry" + flagDispatcherHeartbeat = "dispatcher-heartbeat" + flagListenAddr = "listen-addr" + flagAdvertiseAddr = "advertise-addr" + flagDataPathAddr = "data-path-addr" + flagQuiet = "quiet" + flagRotate = "rotate" + flagToken = "token" + flagTaskHistoryLimit = "task-history-limit" + flagExternalCA = "external-ca" + flagMaxSnapshots = "max-snapshots" + flagSnapshotInterval = "snapshot-interval" + flagAutolock = "autolock" + flagAvailability = "availability" + flagCACert = "ca-cert" + flagCAKey = "ca-key" +) + +type swarmOptions struct { + swarmCAOptions + taskHistoryLimit int64 + dispatcherHeartbeat time.Duration + maxSnapshots uint64 + snapshotInterval uint64 + autolock bool +} + +// NodeAddrOption is a pflag.Value for listening addresses +type NodeAddrOption struct { + addr string +} + +// String prints the representation of this flag +func (a *NodeAddrOption) String() string { + return a.Value() +} + +// Set the value for this flag +func (a *NodeAddrOption) Set(value string) error { + addr, err := opts.ParseTCPAddr(value, a.addr) + if err != nil { + return err + } + a.addr = addr + return nil +} + +// Type returns the type of this flag +func (a *NodeAddrOption) Type() string { + return "node-addr" +} + +// Value returns the value of this option as addr:port +func (a *NodeAddrOption) Value() string { + return strings.TrimPrefix(a.addr, "tcp://") +} + +// NewNodeAddrOption returns a new node address option +func NewNodeAddrOption(addr string) NodeAddrOption { + return NodeAddrOption{addr} +} + +// NewListenAddrOption returns a NodeAddrOption with default values +func NewListenAddrOption() NodeAddrOption { + return NewNodeAddrOption(defaultListenAddr) +} + +// ExternalCAOption is a Value type for parsing external CA specifications. +type ExternalCAOption struct { + values []*swarm.ExternalCA +} + +// Set parses an external CA option. +func (m *ExternalCAOption) Set(value string) error { + parsed, err := parseExternalCA(value) + if err != nil { + return err + } + + m.values = append(m.values, parsed) + return nil +} + +// Type returns the type of this option. +func (m *ExternalCAOption) Type() string { + return "external-ca" +} + +// String returns a string repr of this option. 
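+// Each configured CA is rendered as "protocol: URL" and multiple entries are joined with ", ".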
+func (m *ExternalCAOption) String() string { + externalCAs := []string{} + for _, externalCA := range m.values { + repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) + externalCAs = append(externalCAs, repr) + } + return strings.Join(externalCAs, ", ") +} + +// Value returns the external CAs +func (m *ExternalCAOption) Value() []*swarm.ExternalCA { + return m.values +} + +// PEMFile represents the path to a pem-formatted file +type PEMFile struct { + path, contents string +} + +// Type returns the type of this option. +func (p *PEMFile) Type() string { + return "pem-file" +} + +// String returns the path to the pem file +func (p *PEMFile) String() string { + return p.path +} + +// Set parses a root rotation option +func (p *PEMFile) Set(value string) error { + contents, err := ioutil.ReadFile(value) + if err != nil { + return err + } + if pemBlock, _ := pem.Decode(contents); pemBlock == nil { + return errors.New("file contents must be in PEM format") + } + p.contents, p.path = string(contents), value + return nil +} + +// Contents returns the contents of the PEM file +func (p *PEMFile) Contents() string { + return p.contents +} + +// parseExternalCA parses an external CA specification from the command line, +// such as protocol=cfssl,url=https://example.com. +func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { + csvReader := csv.NewReader(strings.NewReader(caSpec)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + + externalCA := swarm.ExternalCA{ + Options: make(map[string]string), + } + + var ( + hasProtocol bool + hasURL bool + ) + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + + if len(parts) != 2 { + return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key, value := parts[0], parts[1] + + switch strings.ToLower(key) { + case "protocol": + hasProtocol = true + if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { + externalCA.Protocol = swarm.ExternalCAProtocolCFSSL + } else { + return nil, errors.Errorf("unrecognized external CA protocol %s", value) + } + case "url": + hasURL = true + externalCA.URL = value + case "cacert": + cacontents, err := ioutil.ReadFile(value) + if err != nil { + return nil, errors.Wrap(err, "unable to read CA cert for external CA") + } + if pemBlock, _ := pem.Decode(cacontents); pemBlock == nil { + return nil, errors.New("CA cert for external CA must be in PEM format") + } + externalCA.CACert = string(cacontents) + default: + externalCA.Options[key] = value + } + } + + if !hasProtocol { + return nil, errors.New("the external-ca option needs a protocol= parameter") + } + if !hasURL { + return nil, errors.New("the external-ca option needs a url= parameter") + } + + return &externalCA, nil +} + +func addSwarmCAFlags(flags *pflag.FlagSet, opts *swarmCAOptions) { + flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, 90*24*time.Hour, "Validity period for node certificates (ns|us|ms|s|m|h)") + flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") +} + +func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { + flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit") + flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, 5*time.Second, "Dispatcher heartbeat period (ns|us|ms|s|m|h)") + flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain") + 
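+ // The "version" annotation records the minimum API version for the flag (1.25 here); the CLI uses these annotations to hide flags the connected daemon does not support.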
flags.SetAnnotation(flagMaxSnapshots, "version", []string{"1.25"}) + flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots") + flags.SetAnnotation(flagSnapshotInterval, "version", []string{"1.25"}) + addSwarmCAFlags(flags, &opts.swarmCAOptions) +} + +func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) { + if flags.Changed(flagTaskHistoryLimit) { + spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit + } + if flags.Changed(flagDispatcherHeartbeat) { + spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat + } + if flags.Changed(flagMaxSnapshots) { + spec.Raft.KeepOldSnapshots = &opts.maxSnapshots + } + if flags.Changed(flagSnapshotInterval) { + spec.Raft.SnapshotInterval = opts.snapshotInterval + } + if flags.Changed(flagAutolock) { + spec.EncryptionConfig.AutoLockManagers = opts.autolock + } + opts.mergeSwarmSpecCAFlags(spec, flags, caCert) +} + +type swarmCAOptions struct { + nodeCertExpiry time.Duration + externalCA ExternalCAOption +} + +func (opts *swarmCAOptions) mergeSwarmSpecCAFlags(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) { + if flags.Changed(flagCertExpiry) { + spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry + } + if flags.Changed(flagExternalCA) { + spec.CAConfig.ExternalCAs = opts.externalCA.Value() + for _, ca := range spec.CAConfig.ExternalCAs { + ca.CACert = caCert + } + } +} + +func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec { + var spec swarm.Spec + opts.mergeSwarmSpec(&spec, flags, "") + return spec +} diff --git a/cli/cli/command/swarm/opts_test.go b/cli/cli/command/swarm/opts_test.go new file mode 100644 index 00000000..6382d2a0 --- /dev/null +++ b/cli/cli/command/swarm/opts_test.go @@ -0,0 +1,111 @@ +package swarm + +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNodeAddrOptionSetHostAndPort(t *testing.T) { + opt := NewNodeAddrOption("old:123") + addr := "newhost:5555" + assert.NilError(t, opt.Set(addr)) + assert.Check(t, is.Equal(addr, opt.Value())) +} + +func TestNodeAddrOptionSetHostOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("newhost")) + assert.Check(t, is.Equal("newhost:2377", opt.Value())) +} + +func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("::1")) + assert.Check(t, is.Equal("[::1]:2377", opt.Value())) +} + +func TestNodeAddrOptionSetPortOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set(":4545")) + assert.Check(t, is.Equal("0.0.0.0:4545", opt.Value())) +} + +func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { + opt := NewListenAddrOption() + assert.Error(t, opt.Set("http://localhost:4545"), "Invalid proto, expected tcp: http://localhost:4545") +} + +func TestExternalCAOptionErrors(t *testing.T) { + testCases := []struct { + externalCA string + expectedError string + }{ + { + externalCA: "", + expectedError: "EOF", + }, + { + externalCA: "anything", + expectedError: "invalid field 'anything' must be a key=value pair", + }, + { + externalCA: "foo=bar", + expectedError: "the external-ca option needs a protocol= parameter", + }, + { + externalCA: "protocol=baz", + expectedError: "unrecognized external CA protocol baz", + }, + { + externalCA: "protocol=cfssl", + expectedError: "the external-ca option needs a url= parameter", + }, + } + for _, tc := range testCases { + opt := &ExternalCAOption{} + assert.Error(t, 
opt.Set(tc.externalCA), tc.expectedError) + } +} + +func TestExternalCAOption(t *testing.T) { + testCases := []struct { + externalCA string + expected string + }{ + { + externalCA: "protocol=cfssl,url=anything", + expected: "cfssl: anything", + }, + { + externalCA: "protocol=CFSSL,url=anything", + expected: "cfssl: anything", + }, + { + externalCA: "protocol=Cfssl,url=https://example.com", + expected: "cfssl: https://example.com", + }, + { + externalCA: "protocol=Cfssl,url=https://example.com,foo=bar", + expected: "cfssl: https://example.com", + }, + { + externalCA: "protocol=Cfssl,url=https://example.com,foo=bar,foo=baz", + expected: "cfssl: https://example.com", + }, + } + for _, tc := range testCases { + opt := &ExternalCAOption{} + assert.NilError(t, opt.Set(tc.externalCA)) + assert.Check(t, is.Equal(tc.expected, opt.String())) + } +} + +func TestExternalCAOptionMultiple(t *testing.T) { + opt := &ExternalCAOption{} + assert.NilError(t, opt.Set("protocol=cfssl,url=https://example.com")) + assert.NilError(t, opt.Set("protocol=CFSSL,url=anything")) + assert.Check(t, is.Len(opt.Value(), 2)) + assert.Check(t, is.Equal("cfssl: https://example.com, cfssl: anything", opt.String())) +} diff --git a/cli/cli/command/swarm/progress/root_rotation.go b/cli/cli/command/swarm/progress/root_rotation.go new file mode 100644 index 00000000..e72de1d2 --- /dev/null +++ b/cli/cli/command/swarm/progress/root_rotation.go @@ -0,0 +1,120 @@ +package progress + +import ( + "bytes" + "context" + "io" + "os" + "os/signal" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/opencontainers/go-digest" +) + +const ( + certsRotatedStr = " rotated TLS certificates" + rootsRotatedStr = " rotated CA certificates" + // rootsAction has a single space because rootsRotatedStr is one character shorter than certsRotatedStr. + // This makes sure the progress bar are aligned. + certsAction = "" + rootsAction = " " +) + +// RootRotationProgress outputs progress information for convergence of a root rotation. 
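+// It polls SwarmInspect and NodeList roughly every 200ms, drawing one bar for nodes whose
+// certificates come from the desired issuer and one for nodes trusting the desired root,
+// and returns once both have converged or the user interrupts with SIGINT.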
+func RootRotationProgress(ctx context.Context, dclient client.APIClient, progressWriter io.WriteCloser) error { + defer progressWriter.Close() + + progressOut := streamformatter.NewJSONProgressOutput(progressWriter, false) + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, os.Interrupt) + defer signal.Stop(sigint) + + // draw 2 progress bars, 1 for nodes with the correct cert, 1 for nodes with the correct trust root + progress.Update(progressOut, "desired root digest", "") + progress.Update(progressOut, certsRotatedStr, certsAction) + progress.Update(progressOut, rootsRotatedStr, rootsAction) + + var done bool + + for { + info, err := dclient.SwarmInspect(ctx) + if err != nil { + return err + } + + if done { + return nil + } + + nodes, err := dclient.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + done = updateProgress(progressOut, info.ClusterInfo.TLSInfo, nodes, info.ClusterInfo.RootRotationInProgress) + + select { + case <-time.After(200 * time.Millisecond): + case <-sigint: + if !done { + progress.Message(progressOut, "", "Operation continuing in background.") + progress.Message(progressOut, "", "Use `swarmctl cluster inspect default` to check progress.") + } + return nil + } + } +} + +func updateProgress(progressOut progress.Output, desiredTLSInfo swarm.TLSInfo, nodes []swarm.Node, rootRotationInProgress bool) bool { + // write the current desired root cert's digest, because the desired root certs might be too long + progressOut.WriteProgress(progress.Progress{ + ID: "desired root digest", + Action: digest.FromBytes([]byte(desiredTLSInfo.TrustRoot)).String(), + }) + + // If we had reached a converged state, check if we are still converged. + var certsRight, trustRootsRight int64 + for _, n := range nodes { + if bytes.Equal(n.Description.TLSInfo.CertIssuerPublicKey, desiredTLSInfo.CertIssuerPublicKey) && + bytes.Equal(n.Description.TLSInfo.CertIssuerSubject, desiredTLSInfo.CertIssuerSubject) { + certsRight++ + } + + if n.Description.TLSInfo.TrustRoot == desiredTLSInfo.TrustRoot { + trustRootsRight++ + } + } + + total := int64(len(nodes)) + progressOut.WriteProgress(progress.Progress{ + ID: certsRotatedStr, + Action: certsAction, + Current: certsRight, + Total: total, + Units: "nodes", + }) + + rootsProgress := progress.Progress{ + ID: rootsRotatedStr, + Action: rootsAction, + Current: trustRootsRight, + Total: total, + Units: "nodes", + } + + if certsRight == total && !rootRotationInProgress { + progressOut.WriteProgress(rootsProgress) + return certsRight == total && trustRootsRight == total + } + + // we still have certs that need renewing, so display that there are zero roots rotated yet + rootsProgress.Current = 0 + progressOut.WriteProgress(rootsProgress) + return false +} diff --git a/cli/cli/command/swarm/testdata/init-init-autolock.golden b/cli/cli/command/swarm/testdata/init-init-autolock.golden new file mode 100644 index 00000000..cdd3c666 --- /dev/null +++ b/cli/cli/command/swarm/testdata/init-init-autolock.golden @@ -0,0 +1,11 @@ +Swarm initialized: current node (nodeID) is now a manager. + +To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + unlock-key + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. 
diff --git a/cli/cli/command/swarm/testdata/init-init.golden b/cli/cli/command/swarm/testdata/init-init.golden new file mode 100644 index 00000000..6e82be01 --- /dev/null +++ b/cli/cli/command/swarm/testdata/init-init.golden @@ -0,0 +1,4 @@ +Swarm initialized: current node (nodeID) is now a manager. + +To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. + diff --git a/cli/cli/command/swarm/testdata/jointoken-manager-quiet.golden b/cli/cli/command/swarm/testdata/jointoken-manager-quiet.golden new file mode 100644 index 00000000..0c7cfc60 --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-manager-quiet.golden @@ -0,0 +1 @@ +manager-join-token diff --git a/cli/cli/command/swarm/testdata/jointoken-manager-rotate.golden b/cli/cli/command/swarm/testdata/jointoken-manager-rotate.golden new file mode 100644 index 00000000..4a978e76 --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-manager-rotate.golden @@ -0,0 +1,6 @@ +Successfully rotated manager join token. + +To add a manager to this swarm, run the following command: + + docker swarm join --token manager-join-token 127.0.0.1 + diff --git a/cli/cli/command/swarm/testdata/jointoken-manager.golden b/cli/cli/command/swarm/testdata/jointoken-manager.golden new file mode 100644 index 00000000..7bcb7337 --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-manager.golden @@ -0,0 +1,4 @@ +To add a manager to this swarm, run the following command: + + docker swarm join --token manager-join-token 127.0.0.1 + diff --git a/cli/cli/command/swarm/testdata/jointoken-worker-quiet.golden b/cli/cli/command/swarm/testdata/jointoken-worker-quiet.golden new file mode 100644 index 00000000..b445e191 --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-worker-quiet.golden @@ -0,0 +1 @@ +worker-join-token diff --git a/cli/cli/command/swarm/testdata/jointoken-worker.golden b/cli/cli/command/swarm/testdata/jointoken-worker.golden new file mode 100644 index 00000000..e6c3ab9a --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-worker.golden @@ -0,0 +1,4 @@ +To add a worker to this swarm, run the following command: + + docker swarm join --token worker-join-token 127.0.0.1 + diff --git a/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden new file mode 100644 index 00000000..ed53505e --- /dev/null +++ b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden @@ -0,0 +1 @@ +unlock-key diff --git a/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden new file mode 100644 index 00000000..ed53505e --- /dev/null +++ b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden @@ -0,0 +1 @@ +unlock-key diff --git a/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden new file mode 100644 index 00000000..89152b86 --- /dev/null +++ b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden @@ -0,0 +1,9 @@ +Successfully rotated manager unlock key. + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + unlock-key + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. 
diff --git a/cli/cli/command/swarm/testdata/unlockkeys-unlock-key.golden b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key.golden new file mode 100644 index 00000000..8316df47 --- /dev/null +++ b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key.golden @@ -0,0 +1,7 @@ +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + unlock-key + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. diff --git a/cli/cli/command/swarm/testdata/update-all-flags-quiet.golden b/cli/cli/command/swarm/testdata/update-all-flags-quiet.golden new file mode 100644 index 00000000..3d195a25 --- /dev/null +++ b/cli/cli/command/swarm/testdata/update-all-flags-quiet.golden @@ -0,0 +1 @@ +Swarm updated. diff --git a/cli/cli/command/swarm/testdata/update-autolock-unlock-key.golden b/cli/cli/command/swarm/testdata/update-autolock-unlock-key.golden new file mode 100644 index 00000000..a077b9e1 --- /dev/null +++ b/cli/cli/command/swarm/testdata/update-autolock-unlock-key.golden @@ -0,0 +1,8 @@ +Swarm updated. +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + unlock-key + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. diff --git a/cli/cli/command/swarm/testdata/update-noargs.golden b/cli/cli/command/swarm/testdata/update-noargs.golden new file mode 100644 index 00000000..a2ce7589 --- /dev/null +++ b/cli/cli/command/swarm/testdata/update-noargs.golden @@ -0,0 +1,14 @@ +Update the swarm + +Usage: + update [OPTIONS] [flags] + +Flags: + --autolock Change manager autolocking setting (true|false) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + -h, --help help for update + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) diff --git a/cli/cli/command/swarm/unlock.go b/cli/cli/command/swarm/unlock.go new file mode 100644 index 00000000..7d0dce68 --- /dev/null +++ b/cli/cli/command/swarm/unlock.go @@ -0,0 +1,74 @@ +package swarm + +import ( + "bufio" + "context" + "fmt" + "io" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/crypto/ssh/terminal" +) + +func newUnlockCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "unlock", + Short: "Unlock swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUnlock(dockerCli) + }, + } + + return cmd +} + +func runUnlock(dockerCli command.Cli) error { + client := dockerCli.Client() + ctx := context.Background() + + // First see if the node is actually part of a swarm, and if it is actually locked first. + // If it's in any other state than locked, don't ask for the key. 
+ info, err := client.Info(ctx) + if err != nil { + return err + } + + switch info.Swarm.LocalNodeState { + case swarm.LocalNodeStateInactive: + return errors.New("Error: This node is not part of a swarm") + case swarm.LocalNodeStateLocked: + break + default: + return errors.New("Error: swarm is not locked") + } + + key, err := readKey(dockerCli.In(), "Please enter unlock key: ") + if err != nil { + return err + } + req := swarm.UnlockRequest{ + UnlockKey: key, + } + + return client.SwarmUnlock(ctx, req) +} + +func readKey(in *command.InStream, prompt string) (string, error) { + if in.IsTerminal() { + fmt.Print(prompt) + dt, err := terminal.ReadPassword(int(in.FD())) + fmt.Println() + return string(dt), err + } + key, err := bufio.NewReader(in).ReadString('\n') + if err == io.EOF { + err = nil + } + return strings.TrimSpace(key), err +} diff --git a/cli/cli/command/swarm/unlock_key.go b/cli/cli/command/swarm/unlock_key.go new file mode 100644 index 00000000..be5d9ea2 --- /dev/null +++ b/cli/cli/command/swarm/unlock_key.go @@ -0,0 +1,89 @@ +package swarm + +import ( + "context" + "fmt" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type unlockKeyOptions struct { + rotate bool + quiet bool +} + +func newUnlockKeyCommand(dockerCli command.Cli) *cobra.Command { + opts := unlockKeyOptions{} + + cmd := &cobra.Command{ + Use: "unlock-key [OPTIONS]", + Short: "Manage the unlock key", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUnlockKey(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.rotate, flagRotate, false, "Rotate unlock key") + flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func runUnlockKey(dockerCli command.Cli, opts unlockKeyOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.rotate { + flags := swarm.UpdateFlags{RotateManagerUnlockKey: true} + + sw, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if !sw.Spec.EncryptionConfig.AutoLockManagers { + return errors.New("cannot rotate because autolock is not turned on") + } + + if err := client.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil { + return err + } + + if !opts.quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated manager unlock key.\n\n") + } + } + + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + + if unlockKeyResp.UnlockKey == "" { + return errors.New("no unlock key is set") + } + + if opts.quiet { + fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey) + return nil + } + + printUnlockCommand(dockerCli.Out(), unlockKeyResp.UnlockKey) + return nil +} + +func printUnlockCommand(out io.Writer, unlockKey string) { + if len(unlockKey) > 0 { + fmt.Fprintf(out, "To unlock a swarm manager after it restarts, "+ + "run the `docker swarm unlock`\ncommand and provide the following key:\n\n %s\n\n"+ + "Please remember to store this key in a password manager, since without it you\n"+ + "will not be able to restart the manager.\n", unlockKey) + } +} diff --git a/cli/cli/command/swarm/unlock_key_test.go b/cli/cli/command/swarm/unlock_key_test.go new file mode 100644 index 00000000..d28921a1 --- /dev/null +++ b/cli/cli/command/swarm/unlock_key_test.go @@ -0,0 +1,171 @@ +package swarm + +import ( + "fmt" + "io/ioutil" + "testing" + + 
"github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSwarmUnlockKeyErrors(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + expectedError string + }{ + { + name: "too-many-args", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + name: "swarm-inspect-rotate-failed", + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "swarm-rotate-no-autolock-failed", + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + expectedError: "cannot rotate because autolock is not turned on", + }, + { + name: "swarm-update-failed", + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(Autolock()), nil + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + return errors.Errorf("error updating the swarm") + }, + expectedError: "error updating the swarm", + }, + { + name: "swarm-get-unlock-key-failed", + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting unlock key") + }, + expectedError: "error getting unlock key", + }, + { + name: "swarm-no-unlock-key-failed", + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "", + }, nil + }, + expectedError: "no unlock key is set", + }, + } + for _, tc := range testCases { + cmd := newUnlockKeyCommand( + test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmUnlockKey(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + }{ + { + name: "unlock-key", + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + { + name: "unlock-key-quiet", + flags: map[string]string{ + flagQuiet: "true", + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + { + name: "unlock-key-rotate", + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(Autolock()), nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + 
return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + { + name: "unlock-key-rotate-quiet", + flags: map[string]string{ + flagQuiet: "true", + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(Autolock()), nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + }) + cmd := newUnlockKeyCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("unlockkeys-%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/swarm/unlock_test.go b/cli/cli/command/swarm/unlock_test.go new file mode 100644 index 00000000..8eb2ecd4 --- /dev/null +++ b/cli/cli/command/swarm/unlock_test.go @@ -0,0 +1,98 @@ +package swarm + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestSwarmUnlockErrors(t *testing.T) { + testCases := []struct { + name string + args []string + swarmUnlockFunc func(req swarm.UnlockRequest) error + infoFunc func() (types.Info, error) + expectedError string + }{ + { + name: "too-many-args", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + name: "is-not-part-of-a-swarm", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + LocalNodeState: swarm.LocalNodeStateInactive, + }, + }, nil + }, + expectedError: "This node is not part of a swarm", + }, + { + name: "is-not-locked", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + LocalNodeState: swarm.LocalNodeStateActive, + }, + }, nil + }, + expectedError: "Error: swarm is not locked", + }, + { + name: "unlockrequest-failed", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + LocalNodeState: swarm.LocalNodeStateLocked, + }, + }, nil + }, + swarmUnlockFunc: func(req swarm.UnlockRequest) error { + return errors.Errorf("error unlocking the swarm") + }, + expectedError: "error unlocking the swarm", + }, + } + for _, tc := range testCases { + cmd := newUnlockCommand( + test.NewFakeCli(&fakeClient{ + infoFunc: tc.infoFunc, + swarmUnlockFunc: tc.swarmUnlockFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmUnlock(t *testing.T) { + input := "unlockKey" + dockerCli := test.NewFakeCli(&fakeClient{ + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + LocalNodeState: swarm.LocalNodeStateLocked, + }, + }, nil + }, + swarmUnlockFunc: func(req swarm.UnlockRequest) error { + if req.UnlockKey != input { + return errors.Errorf("Invalid unlock key") + } + return nil + }, + }) + dockerCli.SetIn(command.NewInStream(ioutil.NopCloser(strings.NewReader(input)))) + cmd := newUnlockCommand(dockerCli) + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/swarm/update.go b/cli/cli/command/swarm/update.go new file mode 100644 index 00000000..6b9ad728 --- /dev/null +++ 
b/cli/cli/command/swarm/update.go @@ -0,0 +1,71 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli command.Cli) *cobra.Command { + opts := swarmOptions{} + + cmd := &cobra.Command{ + Use: "update [OPTIONS]", + Short: "Update the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), opts) + }, + PreRunE: func(cmd *cobra.Command, args []string) error { + if cmd.Flags().NFlag() == 0 { + return pflag.ErrHelp + } + return nil + }, + } + + cmd.Flags().BoolVar(&opts.autolock, flagAutolock, false, "Change manager autolocking setting (true|false)") + addSwarmFlags(cmd.Flags(), &opts) + return cmd +} + +func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, opts swarmOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var updateFlags swarm.UpdateFlags + + swarmInspect, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + prevAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers + + opts.mergeSwarmSpec(&swarmInspect.Spec, flags, swarmInspect.ClusterInfo.TLSInfo.TrustRoot) + + curAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers + + err = client.SwarmUpdate(ctx, swarmInspect.Version, swarmInspect.Spec, updateFlags) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Swarm updated.") + + if curAutoLock && !prevAutoLock { + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(dockerCli.Out(), unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/cli/cli/command/swarm/update_test.go b/cli/cli/command/swarm/update_test.go new file mode 100644 index 00000000..20a5624a --- /dev/null +++ b/cli/cli/command/swarm/update_test.go @@ -0,0 +1,185 @@ +package swarm + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSwarmUpdateErrors(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + expectedError string + }{ + { + name: "too-many-args", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + name: "swarm-inspect-error", + flags: map[string]string{ + flagTaskHistoryLimit: "10", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "swarm-update-error", + flags: map[string]string{ + flagTaskHistoryLimit: "10", + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + return errors.Errorf("error updating the swarm") + }, + expectedError: "error updating the swarm", + }, + { + name: "swarm-unlockkey-error", + flags: map[string]string{ + flagAutolock: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting unlock key") + }, + expectedError: "error getting unlock key", + }, + } + for _, tc := range testCases { + cmd := newUpdateCommand( + test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmUpdate(t *testing.T) { + swarmInfo := Swarm() + swarmInfo.ClusterInfo.TLSInfo.TrustRoot = "trustroot" + + testCases := []struct { + name string + args []string + flags map[string]string + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + }{ + { + name: "noargs", + }, + { + name: "all-flags-quiet", + flags: map[string]string{ + flagTaskHistoryLimit: "10", + flagDispatcherHeartbeat: "10s", + flagCertExpiry: "20s", + flagExternalCA: "protocol=cfssl,url=https://example.com.", + flagMaxSnapshots: "10", + flagSnapshotInterval: "100", + flagAutolock: "true", + flagQuiet: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *swarmInfo, nil + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + if *swarm.Orchestration.TaskHistoryRetentionLimit != 10 { + return errors.Errorf("historyLimit not correctly set") + } + heartbeatDuration, err := time.ParseDuration("10s") + if err != nil { + return err + } + if swarm.Dispatcher.HeartbeatPeriod != heartbeatDuration { + return errors.Errorf("heartbeatPeriodLimit not correctly set") + } + certExpiryDuration, err := time.ParseDuration("20s") + if err != nil { + return err + } + if swarm.CAConfig.NodeCertExpiry != certExpiryDuration { + return errors.Errorf("certExpiry not correctly set") + } + if len(swarm.CAConfig.ExternalCAs) != 1 || swarm.CAConfig.ExternalCAs[0].CACert != "trustroot" { + return errors.Errorf("externalCA not correctly set") + } + if *swarm.Raft.KeepOldSnapshots != 10 { + return 
errors.Errorf("keepOldSnapshots not correctly set") + } + if swarm.Raft.SnapshotInterval != 100 { + return errors.Errorf("snapshotInterval not correctly set") + } + if !swarm.EncryptionConfig.AutoLockManagers { + return errors.Errorf("autolock not correctly set") + } + return nil + }, + }, + { + name: "autolock-unlock-key", + flags: map[string]string{ + flagTaskHistoryLimit: "10", + flagAutolock: "true", + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + if *swarm.Orchestration.TaskHistoryRetentionLimit != 10 { + return errors.Errorf("historyLimit not correctly set") + } + return nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + }) + cmd := newUpdateCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("update-%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/system/client_test.go b/cli/cli/command/system/client_test.go new file mode 100644 index 00000000..20d8dc38 --- /dev/null +++ b/cli/cli/command/system/client_test.go @@ -0,0 +1,23 @@ +package system + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + + version string + serverVersion func(ctx context.Context) (types.Version, error) +} + +func (cli *fakeClient) ServerVersion(ctx context.Context) (types.Version, error) { + return cli.serverVersion(ctx) +} + +func (cli *fakeClient) ClientVersion() string { + return cli.version +} diff --git a/cli/cli/command/system/cmd.go b/cli/cli/command/system/cmd.go new file mode 100644 index 00000000..7b9d6819 --- /dev/null +++ b/cli/cli/command/system/cmd.go @@ -0,0 +1,25 @@ +package system + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewSystemCommand returns a cobra command for `system` subcommands +func NewSystemCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "system", + Short: "Manage Docker", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewEventsCommand(dockerCli), + NewInfoCommand(dockerCli), + newDiskUsageCommand(dockerCli), + newPruneCommand(dockerCli), + ) + + return cmd +} diff --git a/cli/cli/command/system/df.go b/cli/cli/command/system/df.go new file mode 100644 index 00000000..43a2d74c --- /dev/null +++ b/cli/cli/command/system/df.go @@ -0,0 +1,70 @@ +package system + +import ( + "context" + "errors" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type diskUsageOptions struct { + verbose bool + format string +} + +// newDiskUsageCommand creates a new cobra.Command for `docker df` +func newDiskUsageCommand(dockerCli command.Cli) *cobra.Command { + var opts diskUsageOptions + + cmd := &cobra.Command{ + Use: "df [OPTIONS]", + Short: "Show docker disk usage", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return 
runDiskUsage(dockerCli, opts) + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + + return cmd +} + +func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error { + if opts.verbose && len(opts.format) != 0 { + return errors.New("the verbose and the format options conflict") + } + + du, err := dockerCli.Client().DiskUsage(context.Background()) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + format = formatter.TableFormatKey + } + + duCtx := formatter.DiskUsageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewDiskUsageFormat(format), + }, + LayersSize: du.LayersSize, + BuilderSize: du.BuilderSize, + BuildCache: du.BuildCache, + Images: du.Images, + Containers: du.Containers, + Volumes: du.Volumes, + Verbose: opts.verbose, + } + + return duCtx.Write() +} diff --git a/cli/cli/command/system/events.go b/cli/cli/command/system/events.go new file mode 100644 index 00000000..37de9722 --- /dev/null +++ b/cli/cli/command/system/events.go @@ -0,0 +1,142 @@ +package system + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "sort" + "strings" + "text/template" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/cli/templates" + "github.com/docker/docker/api/types" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/spf13/cobra" +) + +type eventsOptions struct { + since string + until string + filter opts.FilterOpt + format string +} + +// NewEventsCommand creates a new cobra.Command for `docker events` +func NewEventsCommand(dockerCli command.Cli) *cobra.Command { + options := eventsOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "events [OPTIONS]", + Short: "Get real time events from the server", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runEvents(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.StringVar(&options.since, "since", "", "Show all events created since timestamp") + flags.StringVar(&options.until, "until", "", "Stream events until this timestamp") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + flags.StringVar(&options.format, "format", "", "Format the output using the given Go template") + + return cmd +} + +func runEvents(dockerCli command.Cli, options *eventsOptions) error { + tmpl, err := makeTemplate(options.format) + if err != nil { + return cli.StatusError{ + StatusCode: 64, + Status: "Error parsing format: " + err.Error()} + } + eventOptions := types.EventsOptions{ + Since: options.since, + Until: options.until, + Filters: options.filter.Value(), + } + + ctx, cancel := context.WithCancel(context.Background()) + events, errs := dockerCli.Client().Events(ctx, eventOptions) + defer cancel() + + out := dockerCli.Out() + + for { + select { + case event := <-events: + if err := handleEvent(out, event, tmpl); err != nil { + return err + } + case err := <-errs: + if err == io.EOF { + return nil + } + return err + } + } +} + +func handleEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + if tmpl == nil { + return prettyPrintEvent(out, event) + } + + return formatEvent(out, event, tmpl) +} + +func makeTemplate(format string) 
(*template.Template, error) { + if format == "" { + return nil, nil + } + tmpl, err := templates.Parse(format) + if err != nil { + return tmpl, err + } + // we execute the template for an empty message, so as to validate + // a bad template like "{{.badFieldString}}" + return tmpl, tmpl.Execute(ioutil.Discard, &eventtypes.Message{}) +} + +// rfc3339NanoFixed is similar to time.RFC3339Nano, except it pads nanoseconds +// zeros to maintain a fixed number of characters +const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// prettyPrintEvent prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. +func prettyPrintEvent(out io.Writer, event eventtypes.Message) error { + if event.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, event.TimeNano).Format(rfc3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(event.Time, 0).Format(rfc3339NanoFixed)) + } + + fmt.Fprintf(out, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + var keys []string + for k := range event.Actor.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := event.Actor.Attributes[k] + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(out, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(out, "\n") + return nil +} + +func formatEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + defer out.Write([]byte{'\n'}) + return tmpl.Execute(out, event) +} diff --git a/cli/cli/command/system/info.go b/cli/cli/command/system/info.go new file mode 100644 index 00000000..73aeda63 --- /dev/null +++ b/cli/cli/command/system/info.go @@ -0,0 +1,360 @@ +package system + +import ( + "context" + "fmt" + "io" + "sort" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/debug" + "github.com/docker/cli/templates" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type infoOptions struct { + format string +} + +// NewInfoCommand creates a new cobra.Command for `docker info` +func NewInfoCommand(dockerCli command.Cli) *cobra.Command { + var opts infoOptions + + cmd := &cobra.Command{ + Use: "info [OPTIONS]", + Short: "Display system-wide information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInfo(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInfo(dockerCli command.Cli, opts *infoOptions) error { + ctx := context.Background() + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if opts.format == "" { + return prettyPrintInfo(dockerCli, info) + } + return formatInfo(dockerCli, info, opts.format) +} + +// nolint: gocyclo +func prettyPrintInfo(dockerCli command.Cli, info types.Info) error { + fmt.Fprintln(dockerCli.Out(), "Containers:", info.Containers) + fmt.Fprintln(dockerCli.Out(), " Running:", info.ContainersRunning) + fmt.Fprintln(dockerCli.Out(), " Paused:", info.ContainersPaused) + fmt.Fprintln(dockerCli.Out(), " Stopped:", info.ContainersStopped) + fmt.Fprintln(dockerCli.Out(), "Images:", info.Images) + fprintlnNonEmpty(dockerCli.Out(), "Server Version:", 
info.ServerVersion) + fprintlnNonEmpty(dockerCli.Out(), "Storage Driver:", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) + } + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1]) + } + } + fprintlnNonEmpty(dockerCli.Out(), "Logging Driver:", info.LoggingDriver) + fprintlnNonEmpty(dockerCli.Out(), "Cgroup Driver:", info.CgroupDriver) + + fmt.Fprintln(dockerCli.Out(), "Plugins:") + fmt.Fprintln(dockerCli.Out(), " Volume:", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintln(dockerCli.Out(), " Network:", strings.Join(info.Plugins.Network, " ")) + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintln(dockerCli.Out(), " Authorization:", strings.Join(info.Plugins.Authorization, " ")) + } + + fmt.Fprintln(dockerCli.Out(), " Log:", strings.Join(info.Plugins.Log, " ")) + + fmt.Fprintln(dockerCli.Out(), "Swarm:", info.Swarm.LocalNodeState) + printSwarmInfo(dockerCli, info) + + if len(info.Runtimes) > 0 { + fmt.Fprint(dockerCli.Out(), "Runtimes:") + for name := range info.Runtimes { + fmt.Fprintf(dockerCli.Out(), " %s", name) + } + fmt.Fprint(dockerCli.Out(), "\n") + fmt.Fprintln(dockerCli.Out(), "Default Runtime:", info.DefaultRuntime) + } + + if info.OSType == "linux" { + fmt.Fprintln(dockerCli.Out(), "Init Binary:", info.InitBinary) + + for _, ci := range []struct { + Name string + Commit types.Commit + }{ + {"containerd", info.ContainerdCommit}, + {"runc", info.RuncCommit}, + {"init", info.InitCommit}, + } { + fmt.Fprintf(dockerCli.Out(), "%s version: %s", ci.Name, ci.Commit.ID) + if ci.Commit.ID != ci.Commit.Expected { + fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected) + } + fmt.Fprint(dockerCli.Out(), "\n") + } + if len(info.SecurityOptions) != 0 { + kvs, err := types.DecodeSecurityOptions(info.SecurityOptions) + if err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), "Security Options:") + for _, so := range kvs { + fmt.Fprintln(dockerCli.Out(), " "+so.Name) + for _, o := range so.Options { + switch o.Key { + case "profile": + if o.Value != "default" { + fmt.Fprintln(dockerCli.Err(), " WARNING: You're not using the default seccomp profile") + } + fmt.Fprintln(dockerCli.Out(), " Profile:", o.Value) + } + } + } + } + } + + // Isolation only has meaning on a Windows daemon. 
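+	// The daemon reports its default isolation technology here; on Windows this is typically "process" or "hyperv".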
+ if info.OSType == "windows" { + fmt.Fprintln(dockerCli.Out(), "Default Isolation:", info.Isolation) + } + + fprintlnNonEmpty(dockerCli.Out(), "Kernel Version:", info.KernelVersion) + fprintlnNonEmpty(dockerCli.Out(), "Operating System:", info.OperatingSystem) + fprintlnNonEmpty(dockerCli.Out(), "OSType:", info.OSType) + fprintlnNonEmpty(dockerCli.Out(), "Architecture:", info.Architecture) + fmt.Fprintln(dockerCli.Out(), "CPUs:", info.NCPU) + fmt.Fprintln(dockerCli.Out(), "Total Memory:", units.BytesSize(float64(info.MemTotal))) + fprintlnNonEmpty(dockerCli.Out(), "Name:", info.Name) + fprintlnNonEmpty(dockerCli.Out(), "ID:", info.ID) + fmt.Fprintln(dockerCli.Out(), "Docker Root Dir:", info.DockerRootDir) + fmt.Fprintln(dockerCli.Out(), "Debug Mode (client):", debug.IsEnabled()) + fmt.Fprintln(dockerCli.Out(), "Debug Mode (server):", info.Debug) + + if info.Debug { + fmt.Fprintln(dockerCli.Out(), " File Descriptors:", info.NFd) + fmt.Fprintln(dockerCli.Out(), " Goroutines:", info.NGoroutines) + fmt.Fprintln(dockerCli.Out(), " System Time:", info.SystemTime) + fmt.Fprintln(dockerCli.Out(), " EventsListeners:", info.NEventsListener) + } + + fprintlnNonEmpty(dockerCli.Out(), "HTTP Proxy:", info.HTTPProxy) + fprintlnNonEmpty(dockerCli.Out(), "HTTPS Proxy:", info.HTTPSProxy) + fprintlnNonEmpty(dockerCli.Out(), "No Proxy:", info.NoProxy) + + if info.IndexServerAddress != "" { + u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintln(dockerCli.Out(), "Username:", u) + } + fmt.Fprintln(dockerCli.Out(), "Registry:", info.IndexServerAddress) + } + + if info.Labels != nil { + fmt.Fprintln(dockerCli.Out(), "Labels:") + for _, lbl := range info.Labels { + fmt.Fprintln(dockerCli.Out(), " "+lbl) + } + } + + fmt.Fprintln(dockerCli.Out(), "Experimental:", info.ExperimentalBuild) + fprintlnNonEmpty(dockerCli.Out(), "Cluster Store:", info.ClusterStore) + fprintlnNonEmpty(dockerCli.Out(), "Cluster Advertise:", info.ClusterAdvertise) + + if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { + fmt.Fprintln(dockerCli.Out(), "Insecure Registries:") + for _, registry := range info.RegistryConfig.IndexConfigs { + if !registry.Secure { + fmt.Fprintln(dockerCli.Out(), " "+registry.Name) + } + } + + for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { + mask, _ := registry.Mask.Size() + fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) + } + } + + if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 { + fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:") + for _, mirror := range info.RegistryConfig.Mirrors { + fmt.Fprintln(dockerCli.Out(), " "+mirror) + } + } + + fmt.Fprintln(dockerCli.Out(), "Live Restore Enabled:", info.LiveRestoreEnabled) + fmt.Fprint(dockerCli.Out(), "\n") + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + printStorageDriverWarnings(dockerCli, info) + + if !info.MemoryLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") + } + if 
!info.CPUCfsPeriod { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + return nil +} + +func printSwarmInfo(dockerCli command.Cli, info types.Info) { + if info.Swarm.LocalNodeState == swarm.LocalNodeStateInactive || info.Swarm.LocalNodeState == swarm.LocalNodeStateLocked { + return + } + fmt.Fprintln(dockerCli.Out(), " NodeID:", info.Swarm.NodeID) + if info.Swarm.Error != "" { + fmt.Fprintln(dockerCli.Out(), " Error:", info.Swarm.Error) + } + fmt.Fprintln(dockerCli.Out(), " Is Manager:", info.Swarm.ControlAvailable) + if info.Swarm.Cluster != nil && info.Swarm.ControlAvailable && info.Swarm.Error == "" && info.Swarm.LocalNodeState != swarm.LocalNodeStateError { + fmt.Fprintln(dockerCli.Out(), " ClusterID:", info.Swarm.Cluster.ID) + fmt.Fprintln(dockerCli.Out(), " Managers:", info.Swarm.Managers) + fmt.Fprintln(dockerCli.Out(), " Nodes:", info.Swarm.Nodes) + fmt.Fprintln(dockerCli.Out(), " Orchestration:") + taskHistoryRetentionLimit := int64(0) + if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil { + taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit + } + fmt.Fprintln(dockerCli.Out(), " Task History Retention Limit:", taskHistoryRetentionLimit) + fmt.Fprintln(dockerCli.Out(), " Raft:") + fmt.Fprintln(dockerCli.Out(), " Snapshot Interval:", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) + if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil { + fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots) + } + fmt.Fprintln(dockerCli.Out(), " Heartbeat Tick:", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) + fmt.Fprintln(dockerCli.Out(), " Election Tick:", info.Swarm.Cluster.Spec.Raft.ElectionTick) + fmt.Fprintln(dockerCli.Out(), " Dispatcher:") + fmt.Fprintln(dockerCli.Out(), " Heartbeat Period:", units.HumanDuration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod)) + fmt.Fprintln(dockerCli.Out(), " CA Configuration:") + fmt.Fprintln(dockerCli.Out(), " Expiry Duration:", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) + fmt.Fprintln(dockerCli.Out(), " Force Rotate:", info.Swarm.Cluster.Spec.CAConfig.ForceRotate) + if caCert := strings.TrimSpace(info.Swarm.Cluster.Spec.CAConfig.SigningCACert); caCert != "" { + fmt.Fprintf(dockerCli.Out(), " Signing CA Certificate: \n%s\n\n", caCert) + } + if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { + fmt.Fprintln(dockerCli.Out(), " External CAs:") + for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL) + } + } + fmt.Fprintln(dockerCli.Out(), " Autolock Managers:", info.Swarm.Cluster.Spec.EncryptionConfig.AutoLockManagers) + fmt.Fprintln(dockerCli.Out(), " Root Rotation In Progress:", info.Swarm.Cluster.RootRotationInProgress) + } + fmt.Fprintln(dockerCli.Out(), " Node Address:", info.Swarm.NodeAddr) + if len(info.Swarm.RemoteManagers) > 0 { + managers := []string{} + for _, 
entry := range info.Swarm.RemoteManagers { + managers = append(managers, entry.Addr) + } + sort.Strings(managers) + fmt.Fprintln(dockerCli.Out(), " Manager Addresses:") + for _, entry := range managers { + fmt.Fprintf(dockerCli.Out(), " %s\n", entry) + } + } +} + +func printStorageDriverWarnings(dockerCli command.Cli, info types.Info) { + if info.DriverStatus == nil { + return + } + + for _, pair := range info.DriverStatus { + if pair[0] == "Data loop file" { + fmt.Fprintf(dockerCli.Err(), "WARNING: %s: usage of loopback devices is "+ + "strongly discouraged for production use.\n "+ + "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.\n", info.Driver) + } + if pair[0] == "Supports d_type" && pair[1] == "false" { + backingFs := getBackingFs(info) + + msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", info.Driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" + } + msg += " Running without d_type support will not be supported in future releases." + fmt.Fprintln(dockerCli.Err(), msg) + } + } +} + +func getBackingFs(info types.Info) string { + if info.DriverStatus == nil { + return "" + } + + for _, pair := range info.DriverStatus { + if pair[0] == "Backing Filesystem" { + return pair[1] + } + } + return "" +} + +func formatInfo(dockerCli command.Cli, info types.Info, format string) error { + tmpl, err := templates.Parse(format) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + err = tmpl.Execute(dockerCli.Out(), info) + dockerCli.Out().Write([]byte{'\n'}) + return err +} + +func fprintlnNonEmpty(w io.Writer, label, value string) { + if value != "" { + fmt.Fprintln(w, label, value) + } +} diff --git a/cli/cli/command/system/info_test.go b/cli/cli/command/system/info_test.go new file mode 100644 index 00000000..600e79ac --- /dev/null +++ b/cli/cli/command/system/info_test.go @@ -0,0 +1,238 @@ +package system + +import ( + "encoding/base64" + "net" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +// helper function that base64 decodes a string and ignores the error +func base64Decode(val string) []byte { + decoded, _ := base64.StdEncoding.DecodeString(val) + return decoded +} + +var sampleInfoNoSwarm = types.Info{ + ID: "EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX", + Containers: 0, + ContainersRunning: 0, + ContainersPaused: 0, + ContainersStopped: 0, + Images: 0, + Driver: "aufs", + DriverStatus: [][2]string{ + {"Root Dir", "/var/lib/docker/aufs"}, + {"Backing Filesystem", "extfs"}, + {"Dirs", "0"}, + {"Dirperm1 Supported", "true"}, + }, + SystemStatus: nil, + Plugins: types.PluginsInfo{ + Volume: []string{"local"}, + Network: []string{"bridge", "host", "macvlan", "null", "overlay"}, + Authorization: nil, + Log: []string{"awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"}, + }, + MemoryLimit: true, + SwapLimit: true, + KernelMemory: true, + CPUCfsPeriod: true, + CPUCfsQuota: true, + CPUShares: true, + CPUSet: true, + IPv4Forwarding: true, + BridgeNfIptables: true, + BridgeNfIP6tables: true, + Debug: true, + NFd: 33, + OomKillDisable: true, + NGoroutines: 135, + 
SystemTime: "2017-08-24T17:44:34.077811894Z", + LoggingDriver: "json-file", + CgroupDriver: "cgroupfs", + NEventsListener: 0, + KernelVersion: "4.4.0-87-generic", + OperatingSystem: "Ubuntu 16.04.3 LTS", + OSType: "linux", + Architecture: "x86_64", + IndexServerAddress: "https://index.docker.io/v1/", + RegistryConfig: ®istry.ServiceConfig{ + AllowNondistributableArtifactsCIDRs: nil, + AllowNondistributableArtifactsHostnames: nil, + InsecureRegistryCIDRs: []*registry.NetIPNet{ + { + IP: net.ParseIP("127.0.0.0"), + Mask: net.IPv4Mask(255, 0, 0, 0), + }, + }, + IndexConfigs: map[string]*registry.IndexInfo{ + "docker.io": { + Name: "docker.io", + Mirrors: nil, + Secure: true, + Official: true, + }, + }, + Mirrors: nil, + }, + NCPU: 2, + MemTotal: 2097356800, + DockerRootDir: "/var/lib/docker", + HTTPProxy: "", + HTTPSProxy: "", + NoProxy: "", + Name: "system-sample", + Labels: []string{"provider=digitalocean"}, + ExperimentalBuild: false, + ServerVersion: "17.06.1-ce", + ClusterStore: "", + ClusterAdvertise: "", + Runtimes: map[string]types.Runtime{ + "runc": { + Path: "docker-runc", + Args: nil, + }, + }, + DefaultRuntime: "runc", + Swarm: swarm.Info{LocalNodeState: "inactive"}, + LiveRestoreEnabled: false, + Isolation: "", + InitBinary: "docker-init", + ContainerdCommit: types.Commit{ + ID: "6e23458c129b551d5c9871e5174f6b1b7f6d1170", + Expected: "6e23458c129b551d5c9871e5174f6b1b7f6d1170", + }, + RuncCommit: types.Commit{ + ID: "810190ceaa507aa2727d7ae6f4790c76ec150bd2", + Expected: "810190ceaa507aa2727d7ae6f4790c76ec150bd2", + }, + InitCommit: types.Commit{ + ID: "949e6fa", + Expected: "949e6fa", + }, + SecurityOptions: []string{"name=apparmor", "name=seccomp,profile=default"}, +} + +var sampleSwarmInfo = swarm.Info{ + NodeID: "qo2dfdig9mmxqkawulggepdih", + NodeAddr: "165.227.107.89", + LocalNodeState: "active", + ControlAvailable: true, + Error: "", + RemoteManagers: []swarm.Peer{ + { + NodeID: "qo2dfdig9mmxqkawulggepdih", + Addr: "165.227.107.89:2377", + }, + }, + Nodes: 1, + Managers: 1, + Cluster: &swarm.ClusterInfo{ + ID: "9vs5ygs0gguyyec4iqf2314c0", + Meta: swarm.Meta{ + Version: swarm.Version{Index: 11}, + CreatedAt: time.Date(2017, 8, 24, 17, 34, 19, 278062352, time.UTC), + UpdatedAt: time.Date(2017, 8, 24, 17, 34, 42, 398815481, time.UTC), + }, + Spec: swarm.Spec{ + Annotations: swarm.Annotations{ + Name: "default", + Labels: nil, + }, + Orchestration: swarm.OrchestrationConfig{ + TaskHistoryRetentionLimit: &[]int64{5}[0], + }, + Raft: swarm.RaftConfig{ + SnapshotInterval: 10000, + KeepOldSnapshots: &[]uint64{0}[0], + LogEntriesForSlowFollowers: 500, + ElectionTick: 3, + HeartbeatTick: 1, + }, + Dispatcher: swarm.DispatcherConfig{ + HeartbeatPeriod: 5000000000, + }, + CAConfig: swarm.CAConfig{ + NodeCertExpiry: 7776000000000000, + }, + TaskDefaults: swarm.TaskDefaults{}, + EncryptionConfig: swarm.EncryptionConfig{ + AutoLockManagers: true, + }, + }, + TLSInfo: swarm.TLSInfo{ + TrustRoot: ` +-----BEGIN CERTIFICATE----- +MIIBajCCARCgAwIBAgIUaFCW5xsq8eyiJ+Pmcv3MCflMLnMwCgYIKoZIzj0EAwIw +EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwODI0MTcyOTAwWhcNMzcwODE5MTcy +OTAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH +A0IABDy7NebyUJyUjWJDBUdnZoV6GBxEGKO4TZPNDwnxDxJcUdLVaB7WGa4/DLrW +UfsVgh1JGik2VTiLuTMA1tLlNPOjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQl16XFtaaXiUAwEuJptJlDjfKskDAKBggqhkjO +PQQDAgNIADBFAiEAo9fTQNM5DP9bHVcTJYfl2Cay1bFu1E+lnpmN+EYJfeACIGKH +1pCUkZ+D0IB6CiEZGWSHyLuXPM1rlP+I5KuS7sB8 +-----END CERTIFICATE----- +`, + CertIssuerSubject: 
base64Decode("MBMxETAPBgNVBAMTCHN3YXJtLWNh"), + CertIssuerPublicKey: base64Decode( + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEPLs15vJQnJSNYkMFR2dmhXoYHEQYo7hNk80PCfEPElxR0tVoHtYZrj8MutZR+xWCHUkaKTZVOIu5MwDW0uU08w=="), + }, + RootRotationInProgress: false, + }, +} + +func TestPrettyPrintInfo(t *testing.T) { + infoWithSwarm := sampleInfoNoSwarm + infoWithSwarm.Swarm = sampleSwarmInfo + + infoWithWarningsLinux := sampleInfoNoSwarm + infoWithWarningsLinux.MemoryLimit = false + infoWithWarningsLinux.SwapLimit = false + infoWithWarningsLinux.KernelMemory = false + infoWithWarningsLinux.OomKillDisable = false + infoWithWarningsLinux.CPUCfsQuota = false + infoWithWarningsLinux.CPUCfsPeriod = false + infoWithWarningsLinux.CPUShares = false + infoWithWarningsLinux.CPUSet = false + infoWithWarningsLinux.IPv4Forwarding = false + infoWithWarningsLinux.BridgeNfIptables = false + infoWithWarningsLinux.BridgeNfIP6tables = false + + for _, tc := range []struct { + dockerInfo types.Info + expectedGolden string + warningsGolden string + }{ + { + dockerInfo: sampleInfoNoSwarm, + expectedGolden: "docker-info-no-swarm", + }, + { + dockerInfo: infoWithSwarm, + expectedGolden: "docker-info-with-swarm", + }, + { + dockerInfo: infoWithWarningsLinux, + expectedGolden: "docker-info-no-swarm", + warningsGolden: "docker-info-warnings", + }, + } { + cli := test.NewFakeCli(&fakeClient{}) + assert.NilError(t, prettyPrintInfo(cli, tc.dockerInfo)) + golden.Assert(t, cli.OutBuffer().String(), tc.expectedGolden+".golden") + if tc.warningsGolden != "" { + golden.Assert(t, cli.ErrBuffer().String(), tc.warningsGolden+".golden") + } else { + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) + } + } +} diff --git a/cli/cli/command/system/inspect.go b/cli/cli/command/system/inspect.go new file mode 100644 index 00000000..b49b4b33 --- /dev/null +++ b/cli/cli/command/system/inspect.go @@ -0,0 +1,218 @@ +package system + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + inspectType string + size bool + ids []string +} + +// NewInspectCommand creates a new cobra.Command for `docker inspect` +func NewInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NAME|ID [NAME|ID...]", + Short: "Return low-level information on Docker objects", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.ids = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.StringVar(&opts.inspectType, "type", "", "Return JSON for specified type") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes if the type is container") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + var elementSearcher inspect.GetRefFunc + switch opts.inspectType { + case "", "container", "image", "node", "network", "service", "volume", "task", "plugin", "secret": + elementSearcher = inspectAll(context.Background(), dockerCli, opts.size, opts.inspectType) + default: + return errors.Errorf("%q is not a valid value for --type", opts.inspectType) + } + return 
inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, elementSearcher) +} + +func inspectContainers(ctx context.Context, dockerCli command.Cli, getSize bool) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize) + } +} + +func inspectImages(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ImageInspectWithRaw(ctx, ref) + } +} + +func inspectNetwork(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NetworkInspectWithRaw(ctx, ref, types.NetworkInspectOptions{}) + } +} + +func inspectNode(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NodeInspectWithRaw(ctx, ref) + } +} + +func inspectService(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + // Service inspect shows defaults values in empty fields. + return dockerCli.Client().ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true}) + } +} + +func inspectTasks(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().TaskInspectWithRaw(ctx, ref) + } +} + +func inspectVolume(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().VolumeInspectWithRaw(ctx, ref) + } +} + +func inspectPlugin(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().PluginInspectWithRaw(ctx, ref) + } +} + +func inspectSecret(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().SecretInspectWithRaw(ctx, ref) + } +} + +func inspectAll(ctx context.Context, dockerCli command.Cli, getSize bool, typeConstraint string) inspect.GetRefFunc { + var inspectAutodetect = []struct { + objectType string + isSizeSupported bool + isSwarmObject bool + objectInspector func(string) (interface{}, []byte, error) + }{ + { + objectType: "container", + isSizeSupported: true, + objectInspector: inspectContainers(ctx, dockerCli, getSize), + }, + { + objectType: "image", + objectInspector: inspectImages(ctx, dockerCli), + }, + { + objectType: "network", + objectInspector: inspectNetwork(ctx, dockerCli), + }, + { + objectType: "volume", + objectInspector: inspectVolume(ctx, dockerCli), + }, + { + objectType: "service", + isSwarmObject: true, + objectInspector: inspectService(ctx, dockerCli), + }, + { + objectType: "task", + isSwarmObject: true, + objectInspector: inspectTasks(ctx, dockerCli), + }, + { + objectType: "node", + isSwarmObject: true, + objectInspector: inspectNode(ctx, dockerCli), + }, + { + objectType: "plugin", + objectInspector: inspectPlugin(ctx, dockerCli), + }, + { + objectType: "secret", + isSwarmObject: true, + objectInspector: inspectSecret(ctx, dockerCli), + }, + } + + // isSwarmManager does an Info API call to verify that the daemon is + // a swarm manager. 
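+	// The result is consulted only when no --type constraint was given and a
+	// swarm-scoped object (service, task, node, secret) is about to be tried;
+	// it is cached in isSwarmSupported inside the returned GetRefFunc, so the
+	// Info endpoint is queried at most once per reference being inspected.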
+ isSwarmManager := func() bool { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return false + } + return info.Swarm.ControlAvailable + } + + return func(ref string) (interface{}, []byte, error) { + const ( + swarmSupportUnknown = iota + swarmSupported + swarmUnsupported + ) + + isSwarmSupported := swarmSupportUnknown + + for _, inspectData := range inspectAutodetect { + if typeConstraint != "" && inspectData.objectType != typeConstraint { + continue + } + if typeConstraint == "" && inspectData.isSwarmObject { + if isSwarmSupported == swarmSupportUnknown { + if isSwarmManager() { + isSwarmSupported = swarmSupported + } else { + isSwarmSupported = swarmUnsupported + } + } + if isSwarmSupported == swarmUnsupported { + continue + } + } + v, raw, err := inspectData.objectInspector(ref) + if err != nil { + if typeConstraint == "" && isErrSkippable(err) { + continue + } + return v, raw, err + } + if getSize && !inspectData.isSizeSupported { + fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.objectType) + } + return v, raw, err + } + return nil, nil, errors.Errorf("Error: No such object: %s", ref) + } +} + +func isErrSkippable(err error) bool { + return apiclient.IsErrNotFound(err) || + strings.Contains(err.Error(), "not supported") || + strings.Contains(err.Error(), "invalid reference format") +} diff --git a/cli/cli/command/system/prune.go b/cli/cli/command/system/prune.go new file mode 100644 index 00000000..d0369170 --- /dev/null +++ b/cli/cli/command/system/prune.go @@ -0,0 +1,139 @@ +package system + +import ( + "bytes" + "context" + "fmt" + "text/template" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/container" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/command/network" + "github.com/docker/cli/cli/command/volume" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/versions" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool + pruneBuildCache bool + pruneVolumes bool + filter opts.FilterOpt +} + +// newPruneCommand creates a new cobra.Command for `docker prune` +func newPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt(), pruneBuildCache: true} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused data", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPrune(dockerCli, options) + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images not just dangling ones") + flags.BoolVar(&options.pruneVolumes, "volumes", false, "Prune volumes") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'label==')") + // "filter" flag is available in 1.28 (docker 17.04) and up + flags.SetAnnotation("filter", "version", []string{"1.28"}) + + return cmd +} + +const confirmationTemplate = `WARNING! This will remove: +{{- range $_, $warning := . 
}} + - {{ $warning }} +{{- end }} +Are you sure you want to continue?` + +// runBuildCachePrune executes a prune command for build cache +func runBuildCachePrune(dockerCli command.Cli, _ opts.FilterOpt) (uint64, string, error) { + report, err := dockerCli.Client().BuildCachePrune(context.Background()) + if err != nil { + return 0, "", err + } + return report.SpaceReclaimed, "", nil +} + +func runPrune(dockerCli command.Cli, options pruneOptions) error { + // TODO version this once "until" filter is supported for volumes + if options.pruneVolumes && options.filter.Value().Include("until") { + return fmt.Errorf(`ERROR: The "until" filter is not supported with "--volumes"`) + } + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.31") { + options.pruneBuildCache = false + } + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), confirmationMessage(options)) { + return nil + } + imagePrune := func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) { + return image.RunPrune(dockerCli, options.all, options.filter) + } + pruneFuncs := []func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error){ + container.RunPrune, + network.RunPrune, + } + if options.pruneVolumes { + pruneFuncs = append(pruneFuncs, volume.RunPrune) + } + pruneFuncs = append(pruneFuncs, imagePrune) + if options.pruneBuildCache { + pruneFuncs = append(pruneFuncs, runBuildCachePrune) + } + + var spaceReclaimed uint64 + for _, pruneFn := range pruneFuncs { + spc, output, err := pruneFn(dockerCli, options.filter) + if err != nil { + return err + } + spaceReclaimed += spc + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + } + + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + + return nil +} + +// confirmationMessage constructs a confirmation message that depends on the cli options. +func confirmationMessage(options pruneOptions) string { + t := template.Must(template.New("confirmation message").Parse(confirmationTemplate)) + + warnings := []string{ + "all stopped containers", + "all networks not used by at least one container", + } + if options.pruneVolumes { + warnings = append(warnings, "all volumes not used by at least one container") + } + if options.all { + warnings = append(warnings, "all images without at least one container associated to them") + } else { + warnings = append(warnings, "all dangling images") + } + if options.pruneBuildCache { + warnings = append(warnings, "all build cache") + } + if len(options.filter.String()) > 0 { + warnings = append(warnings, "Elements to be pruned will be filtered with:") + warnings = append(warnings, "label="+options.filter.String()) + } + + var buffer bytes.Buffer + t.Execute(&buffer, &warnings) + return buffer.String() +} diff --git a/cli/cli/command/system/prune_test.go b/cli/cli/command/system/prune_test.go new file mode 100644 index 00000000..c0b5cafd --- /dev/null +++ b/cli/cli/command/system/prune_test.go @@ -0,0 +1,22 @@ +package system + +import ( + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPrunePromptPre131DoesNotIncludeBuildCache(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{version: "1.30"}) + cmd := newPruneCommand(cli) + assert.NilError(t, cmd.Execute()) + expected := `WARNING! This will remove: + - all stopped containers + - all networks not used by at least one container + - all dangling images +Are you sure you want to continue? 
[y/N] ` + assert.Check(t, is.Equal(expected, cli.OutBuffer().String())) + +} diff --git a/cli/cli/command/system/testdata/docker-client-version.golden b/cli/cli/command/system/testdata/docker-client-version.golden new file mode 100644 index 00000000..04cc88a6 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-client-version.golden @@ -0,0 +1,44 @@ +Client: + Version: 18.99.5-ce + API version: 1.38 + Go version: go1.10.2 + Git commit: deadbeef + Built: Wed May 30 22:21:05 2018 + OS/Arch: linux/amd64 + Experimental: true + +Server: Docker Enterprise Edition (EE) 2.0 + Engine: + Version: 17.06.2-ee-15 + API version: 1.30 (minimum version 1.12) + Go version: go1.8.7 + Git commit: 64ddfa6 + Built: Mon Jul 9 23:38:38 2018 + OS/Arch: linux/amd64 + Experimental: false + Universal Control Plane: + Version: 17.06.2-ee-15 + ApiVersion: 1.30 + Arch: amd64 + BuildTime: Mon Jul 2 21:24:07 UTC 2018 + GitCommit: 4513922 + GoVersion: go1.9.4 + MinApiVersion: 1.20 + Os: linux + Version: 3.0.3-tp2 + Kubernetes: + Version: 1.8+ + buildDate: 2018-04-26T16:51:21Z + compiler: gc + gitCommit: 8d637aedf46b9c21dde723e29c645b9f27106fa5 + gitTreeState: clean + gitVersion: v1.8.11-docker-8d637ae + goVersion: go1.8.3 + major: 1 + minor: 8+ + platform: linux/amd64 + Calico: + Version: v3.0.8 + cni: v2.0.6 + kube-controllers: v2.0.5 + node: v3.0.8 diff --git a/cli/cli/command/system/testdata/docker-info-no-swarm.golden b/cli/cli/command/system/testdata/docker-info-no-swarm.golden new file mode 100644 index 00000000..7a3e9667 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-no-swarm.golden @@ -0,0 +1,51 @@ +Containers: 0 + Running: 0 + Paused: 0 + Stopped: 0 +Images: 0 +Server Version: 17.06.1-ce +Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Backing Filesystem: extfs + Dirs: 0 + Dirperm1 Supported: true +Logging Driver: json-file +Cgroup Driver: cgroupfs +Plugins: + Volume: local + Network: bridge host macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file logentries splunk syslog +Swarm: inactive +Runtimes: runc +Default Runtime: runc +Init Binary: docker-init +containerd version: 6e23458c129b551d5c9871e5174f6b1b7f6d1170 +runc version: 810190ceaa507aa2727d7ae6f4790c76ec150bd2 +init version: 949e6fa +Security Options: + apparmor + seccomp + Profile: default +Kernel Version: 4.4.0-87-generic +Operating System: Ubuntu 16.04.3 LTS +OSType: linux +Architecture: x86_64 +CPUs: 2 +Total Memory: 1.953GiB +Name: system-sample +ID: EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX +Docker Root Dir: /var/lib/docker +Debug Mode (client): false +Debug Mode (server): true + File Descriptors: 33 + Goroutines: 135 + System Time: 2017-08-24T17:44:34.077811894Z + EventsListeners: 0 +Registry: https://index.docker.io/v1/ +Labels: + provider=digitalocean +Experimental: false +Insecure Registries: + 127.0.0.0/8 +Live Restore Enabled: false + diff --git a/cli/cli/command/system/testdata/docker-info-warnings.golden b/cli/cli/command/system/testdata/docker-info-warnings.golden new file mode 100644 index 00000000..a7a4d792 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-warnings.golden @@ -0,0 +1,11 @@ +WARNING: No memory limit support +WARNING: No swap limit support +WARNING: No kernel memory limit support +WARNING: No oom kill disable support +WARNING: No cpu cfs quota support +WARNING: No cpu cfs period support +WARNING: No cpu shares support +WARNING: No cpuset support +WARNING: IPv4 forwarding is disabled +WARNING: bridge-nf-call-iptables is disabled +WARNING: 
bridge-nf-call-ip6tables is disabled diff --git a/cli/cli/command/system/testdata/docker-info-with-swarm.golden b/cli/cli/command/system/testdata/docker-info-with-swarm.golden new file mode 100644 index 00000000..17bb70fa --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-with-swarm.golden @@ -0,0 +1,73 @@ +Containers: 0 + Running: 0 + Paused: 0 + Stopped: 0 +Images: 0 +Server Version: 17.06.1-ce +Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Backing Filesystem: extfs + Dirs: 0 + Dirperm1 Supported: true +Logging Driver: json-file +Cgroup Driver: cgroupfs +Plugins: + Volume: local + Network: bridge host macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file logentries splunk syslog +Swarm: active + NodeID: qo2dfdig9mmxqkawulggepdih + Is Manager: true + ClusterID: 9vs5ygs0gguyyec4iqf2314c0 + Managers: 1 + Nodes: 1 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Force Rotate: 0 + Autolock Managers: true + Root Rotation In Progress: false + Node Address: 165.227.107.89 + Manager Addresses: + 165.227.107.89:2377 +Runtimes: runc +Default Runtime: runc +Init Binary: docker-init +containerd version: 6e23458c129b551d5c9871e5174f6b1b7f6d1170 +runc version: 810190ceaa507aa2727d7ae6f4790c76ec150bd2 +init version: 949e6fa +Security Options: + apparmor + seccomp + Profile: default +Kernel Version: 4.4.0-87-generic +Operating System: Ubuntu 16.04.3 LTS +OSType: linux +Architecture: x86_64 +CPUs: 2 +Total Memory: 1.953GiB +Name: system-sample +ID: EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX +Docker Root Dir: /var/lib/docker +Debug Mode (client): false +Debug Mode (server): true + File Descriptors: 33 + Goroutines: 135 + System Time: 2017-08-24T17:44:34.077811894Z + EventsListeners: 0 +Registry: https://index.docker.io/v1/ +Labels: + provider=digitalocean +Experimental: false +Insecure Registries: + 127.0.0.0/8 +Live Restore Enabled: false + diff --git a/cli/cli/command/system/version.go b/cli/cli/command/system/version.go new file mode 100644 index 00000000..7593b11b --- /dev/null +++ b/cli/cli/command/system/version.go @@ -0,0 +1,270 @@ +package system + +import ( + "context" + "fmt" + "runtime" + "sort" + "text/tabwriter" + "text/template" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/kubernetes" + "github.com/docker/cli/templates" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + kubernetesClient "k8s.io/client-go/kubernetes" +) + +var versionTemplate = `{{with .Client -}} +Client:{{if ne .Platform.Name ""}} {{.Platform.Name}}{{end}} + Version: {{.Version}} + API version: {{.APIVersion}}{{if ne .APIVersion .DefaultAPIVersion}} (downgraded from {{.DefaultAPIVersion}}){{end}} + Go version: {{.GoVersion}} + Git commit: {{.GitCommit}} + Built: {{.BuildTime}} + OS/Arch: {{.Os}}/{{.Arch}} + Experimental: {{.Experimental}} +{{- end}} + +{{- if .ServerOK}}{{with .Server}} + +Server:{{if ne .Platform.Name ""}} {{.Platform.Name}}{{end}} + {{- range $component := .Components}} + {{$component.Name}}: + {{- if eq $component.Name "Engine" }} + Version: {{.Version}} + API version: {{index .Details "ApiVersion"}} (minimum version {{index .Details "MinAPIVersion"}}) + Go version: {{index .Details "GoVersion"}} + Git 
commit: {{index .Details "GitCommit"}} + Built: {{index .Details "BuildTime"}} + OS/Arch: {{index .Details "Os"}}/{{index .Details "Arch"}} + Experimental: {{index .Details "Experimental"}} + {{- else }} + Version: {{$component.Version}} + {{- $detailsOrder := getDetailsOrder $component}} + {{- range $key := $detailsOrder}} + {{$key}}: {{index $component.Details $key}} + {{- end}} + {{- end}} + {{- end}} + {{- end}}{{- end}}` + +type versionOptions struct { + format string + kubeConfig string +} + +// versionInfo contains version information of both the Client, and Server +type versionInfo struct { + Client clientVersion + Server *types.Version +} + +type clientVersion struct { + Platform struct{ Name string } `json:",omitempty"` + + Version string + APIVersion string `json:"ApiVersion"` + DefaultAPIVersion string `json:"DefaultAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + BuildTime string `json:",omitempty"` + Experimental bool +} + +type kubernetesVersion struct { + Kubernetes string + StackAPI string +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v versionInfo) ServerOK() bool { + return v.Server != nil +} + +// NewVersionCommand creates a new cobra.Command for `docker version` +func NewVersionCommand(dockerCli command.Cli) *cobra.Command { + var opts versionOptions + + cmd := &cobra.Command{ + Use: "version [OPTIONS]", + Short: "Show the Docker version information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runVersion(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.StringVar(&opts.kubeConfig, "kubeconfig", "", "Kubernetes config file") + flags.SetAnnotation("kubeconfig", "kubernetes", nil) + + return cmd +} + +func reformatDate(buildTime string) string { + t, errTime := time.Parse(time.RFC3339Nano, buildTime) + if errTime == nil { + return t.Format(time.ANSIC) + } + return buildTime +} + +func runVersion(dockerCli command.Cli, opts *versionOptions) error { + var err error + tmpl, err := newVersionTemplate(opts.format) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + orchestrator, err := command.GetStackOrchestrator("", dockerCli.ConfigFile().StackOrchestrator, dockerCli.Err()) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + vd := versionInfo{ + Client: clientVersion{ + Platform: struct{ Name string }{cli.PlatformName}, + Version: cli.Version, + APIVersion: dockerCli.Client().ClientVersion(), + DefaultAPIVersion: dockerCli.DefaultVersion(), + GoVersion: runtime.Version(), + GitCommit: cli.GitCommit, + BuildTime: reformatDate(cli.BuildTime), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + Experimental: dockerCli.ClientInfo().HasExperimental, + }, + } + + sv, err := dockerCli.Client().ServerVersion(context.Background()) + if err == nil { + vd.Server = &sv + var kubeVersion *kubernetesVersion + if orchestrator.HasKubernetes() { + kubeVersion = getKubernetesVersion(opts.kubeConfig) + } + foundEngine := false + foundKubernetes := false + for _, component := range sv.Components { + switch component.Name { + case "Engine": + foundEngine = true + buildTime, ok := component.Details["BuildTime"] + if ok { + component.Details["BuildTime"] = reformatDate(buildTime) + } + case "Kubernetes": + foundKubernetes = true + if 
_, ok := component.Details["StackAPI"]; !ok && kubeVersion != nil { + component.Details["StackAPI"] = kubeVersion.StackAPI + } + } + } + + if !foundEngine { + vd.Server.Components = append(vd.Server.Components, types.ComponentVersion{ + Name: "Engine", + Version: sv.Version, + Details: map[string]string{ + "ApiVersion": sv.APIVersion, + "MinAPIVersion": sv.MinAPIVersion, + "GitCommit": sv.GitCommit, + "GoVersion": sv.GoVersion, + "Os": sv.Os, + "Arch": sv.Arch, + "BuildTime": reformatDate(vd.Server.BuildTime), + "Experimental": fmt.Sprintf("%t", sv.Experimental), + }, + }) + } + if !foundKubernetes && kubeVersion != nil { + vd.Server.Components = append(vd.Server.Components, types.ComponentVersion{ + Name: "Kubernetes", + Version: kubeVersion.Kubernetes, + Details: map[string]string{ + "StackAPI": kubeVersion.StackAPI, + }, + }) + } + } + if err2 := prettyPrintVersion(dockerCli, vd, tmpl); err2 != nil && err == nil { + err = err2 + } + return err +} + +func prettyPrintVersion(dockerCli command.Cli, vd versionInfo, tmpl *template.Template) error { + t := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 1, ' ', 0) + err := tmpl.Execute(t, vd) + t.Write([]byte("\n")) + t.Flush() + return err +} + +func newVersionTemplate(templateFormat string) (*template.Template, error) { + if templateFormat == "" { + templateFormat = versionTemplate + } + tmpl := templates.New("version").Funcs(template.FuncMap{"getDetailsOrder": getDetailsOrder}) + tmpl, err := tmpl.Parse(templateFormat) + + return tmpl, errors.Wrap(err, "Template parsing error") +} + +func getDetailsOrder(v types.ComponentVersion) []string { + out := make([]string, 0, len(v.Details)) + for k := range v.Details { + out = append(out, k) + } + sort.Strings(out) + return out +} + +func getKubernetesVersion(kubeConfig string) *kubernetesVersion { + version := kubernetesVersion{ + Kubernetes: "Unknown", + StackAPI: "Unknown", + } + clientConfig := kubernetes.NewKubernetesConfig(kubeConfig) + config, err := clientConfig.ClientConfig() + if err != nil { + logrus.Debugf("failed to get Kubernetes configuration: %s", err) + return &version + } + kubeClient, err := kubernetesClient.NewForConfig(config) + if err != nil { + logrus.Debugf("failed to get Kubernetes client: %s", err) + return &version + } + version.StackAPI = getStackVersion(kubeClient) + version.Kubernetes = getKubernetesServerVersion(kubeClient) + return &version +} + +func getStackVersion(client *kubernetesClient.Clientset) string { + apiVersion, err := kubernetes.GetStackAPIVersion(client) + if err != nil { + logrus.Debugf("failed to get Stack API version: %s", err) + return "Unknown" + } + return string(apiVersion) +} + +func getKubernetesServerVersion(client *kubernetesClient.Clientset) string { + kubeVersion, err := client.DiscoveryClient.ServerVersion() + if err != nil { + logrus.Debugf("failed to get Kubernetes server version: %s", err) + return "Unknown" + } + return kubeVersion.String() +} diff --git a/cli/cli/command/system/version_test.go b/cli/cli/command/system/version_test.go new file mode 100644 index 00000000..0a4f47e3 --- /dev/null +++ b/cli/cli/command/system/version_test.go @@ -0,0 +1,113 @@ +package system + +import ( + "context" + "fmt" + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" +) + +func TestVersionWithoutServer(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serverVersion: func(ctx context.Context) (types.Version, error) 
{ + return types.Version{}, fmt.Errorf("no server") + }, + }) + cmd := NewVersionCommand(cli) + cmd.SetOutput(cli.Err()) + assert.ErrorContains(t, cmd.Execute(), "no server") + out := cli.OutBuffer().String() + // TODO: use an assertion like e2e/image/build_test.go:assertBuildOutput() + // instead of contains/not contains + assert.Check(t, is.Contains(out, "Client:")) + assert.Assert(t, !strings.Contains(out, "Server:"), "actual: %s", out) +} + +func TestVersionAlign(t *testing.T) { + vi := versionInfo{ + Client: clientVersion{ + Version: "18.99.5-ce", + APIVersion: "1.38", + DefaultAPIVersion: "1.38", + GitCommit: "deadbeef", + GoVersion: "go1.10.2", + Os: "linux", + Arch: "amd64", + BuildTime: "Wed May 30 22:21:05 2018", + Experimental: true, + }, + Server: &types.Version{}, + } + + vi.Server.Platform.Name = "Docker Enterprise Edition (EE) 2.0" + + vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{ + Name: "Engine", + Version: "17.06.2-ee-15", + Details: map[string]string{ + "ApiVersion": "1.30", + "MinAPIVersion": "1.12", + "GitCommit": "64ddfa6", + "GoVersion": "go1.8.7", + "Os": "linux", + "Arch": "amd64", + "BuildTime": "Mon Jul 9 23:38:38 2018", + "Experimental": "false", + }, + }) + + vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{ + Name: "Universal Control Plane", + Version: "17.06.2-ee-15", + Details: map[string]string{ + "Version": "3.0.3-tp2", + "ApiVersion": "1.30", + "Arch": "amd64", + "BuildTime": "Mon Jul 2 21:24:07 UTC 2018", + "GitCommit": "4513922", + "GoVersion": "go1.9.4", + "MinApiVersion": "1.20", + "Os": "linux", + }, + }) + + vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{ + Name: "Kubernetes", + Version: "1.8+", + Details: map[string]string{ + "buildDate": "2018-04-26T16:51:21Z", + "compiler": "gc", + "gitCommit": "8d637aedf46b9c21dde723e29c645b9f27106fa5", + "gitTreeState": "clean", + "gitVersion": "v1.8.11-docker-8d637ae", + "goVersion": "go1.8.3", + "major": "1", + "minor": "8+", + "platform": "linux/amd64", + }, + }) + + vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{ + Name: "Calico", + Version: "v3.0.8", + Details: map[string]string{ + "cni": "v2.0.6", + "kube-controllers": "v2.0.5", + "node": "v3.0.8", + }, + }) + + cli := test.NewFakeCli(&fakeClient{}) + tmpl, err := newVersionTemplate("") + assert.NilError(t, err) + assert.NilError(t, prettyPrintVersion(cli, vi, tmpl)) + assert.Check(t, golden.String(cli.OutBuffer().String(), "docker-client-version.golden")) + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) +} diff --git a/cli/cli/command/task/client_test.go b/cli/cli/command/task/client_test.go new file mode 100644 index 00000000..9aa84977 --- /dev/null +++ b/cli/cli/command/task/client_test.go @@ -0,0 +1,29 @@ +package task + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.APIClient + nodeInspectWithRaw func(ref string) (swarm.Node, []byte, error) + serviceInspectWithRaw func(ref string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectWithRaw != nil { + return cli.nodeInspectWithRaw(ref) + } + return swarm.Node{}, nil, nil +} + +func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, ref string, options types.ServiceInspectOptions) 
(swarm.Service, []byte, error) { + if cli.serviceInspectWithRaw != nil { + return cli.serviceInspectWithRaw(ref, options) + } + return swarm.Service{}, nil, nil +} diff --git a/cli/cli/command/task/print.go b/cli/cli/command/task/print.go new file mode 100644 index 00000000..0f430e50 --- /dev/null +++ b/cli/cli/command/task/print.go @@ -0,0 +1,93 @@ +package task + +import ( + "context" + "fmt" + "sort" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/docker/api/types/swarm" +) + +type tasksBySlot []swarm.Task + +func (t tasksBySlot) Len() int { + return len(t) +} + +func (t tasksBySlot) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +func (t tasksBySlot) Less(i, j int) bool { + // Sort by slot. + if t[i].Slot != t[j].Slot { + return t[i].Slot < t[j].Slot + } + + // If same slot, sort by most recent. + return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) +} + +// Print task information in a format. +// Besides this, command `docker node ps ` +// and `docker stack ps` will call this, too. +func Print(ctx context.Context, dockerCli command.Cli, tasks []swarm.Task, resolver *idresolver.IDResolver, trunc, quiet bool, format string) error { + sort.Stable(tasksBySlot(tasks)) + + names := map[string]string{} + nodes := map[string]string{} + + tasksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewTaskFormat(format, quiet), + Trunc: trunc, + } + + prevName := "" + for _, task := range tasks { + serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) + if err != nil { + return err + } + + nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) + if err != nil { + return err + } + + var name string + if task.Slot != 0 { + name = fmt.Sprintf("%v.%v", serviceName, task.Slot) + } else { + name = fmt.Sprintf("%v.%v", serviceName, task.NodeID) + } + + // Indent the name if necessary + indentedName := name + if name == prevName { + indentedName = fmt.Sprintf(" \\_ %s", indentedName) + } + prevName = name + + names[task.ID] = name + if tasksCtx.Format.IsTable() { + names[task.ID] = indentedName + } + nodes[task.ID] = nodeValue + } + + return formatter.TaskWrite(tasksCtx, tasks, names, nodes) +} + +// DefaultFormat returns the default format from the config file, or table +// format if nothing is set in the config. +func DefaultFormat(configFile *configfile.ConfigFile, quiet bool) string { + if len(configFile.TasksFormat) > 0 && !quiet { + return configFile.TasksFormat + } + return formatter.TableFormatKey +} diff --git a/cli/cli/command/task/print_test.go b/cli/cli/command/task/print_test.go new file mode 100644 index 00000000..6fa6e586 --- /dev/null +++ b/cli/cli/command/task/print_test.go @@ -0,0 +1,128 @@ +package task + +import ( + "context" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/internal/test" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestTaskPrintWithQuietOption(t *testing.T) { + quiet := true + trunc := false + noResolve := true + apiClient := &fakeClient{} + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{*Task(TaskID("id-foo"))} + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, formatter.TableFormatKey) + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-quiet-option.golden") +} + +func TestTaskPrintWithNoTruncOption(t *testing.T) { + quiet := false + trunc := false + noResolve := true + apiClient := &fakeClient{} + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task(TaskID("id-foo-yov6omdek8fg3k5stosyp2m50")), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .ID }}") + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-no-trunc-option.golden") +} + +func TestTaskPrintWithGlobalService(t *testing.T) { + quiet := false + trunc := false + noResolve := true + apiClient := &fakeClient{} + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task(TaskServiceID("service-id-foo"), TaskNodeID("node-id-bar"), TaskSlot(0)), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .Name }}") + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-global-service.golden") +} + +func TestTaskPrintWithReplicatedService(t *testing.T) { + quiet := false + trunc := false + noResolve := true + apiClient := &fakeClient{} + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task(TaskServiceID("service-id-foo"), TaskSlot(1)), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .Name }}") + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-replicated-service.golden") +} + +func TestTaskPrintWithIndentation(t *testing.T) { + quiet := false + trunc := false + noResolve := false + apiClient := &fakeClient{ + serviceInspectWithRaw: func(ref string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return *Service(ServiceName("service-name-foo")), nil, nil + }, + nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-name-bar")), nil, nil + }, + } + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task( + TaskID("id-foo"), + TaskServiceID("service-id-foo"), + TaskNodeID("id-node"), + WithTaskSpec(TaskImage("myimage:mytag")), + TaskDesiredState(swarm.TaskStateReady), + WithStatus(TaskState(swarm.TaskStateFailed), Timestamp(time.Now().Add(-2*time.Hour))), + ), + *Task( + TaskID("id-bar"), + TaskServiceID("service-id-foo"), + TaskNodeID("id-node"), + WithTaskSpec(TaskImage("myimage:mytag")), + TaskDesiredState(swarm.TaskStateReady), + WithStatus(TaskState(swarm.TaskStateFailed), Timestamp(time.Now().Add(-2*time.Hour))), + ), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, formatter.TableFormatKey) + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-indentation.golden") +} + +func TestTaskPrintWithResolution(t *testing.T) { + quiet := false + trunc := false + noResolve := false + apiClient := 
&fakeClient{ + serviceInspectWithRaw: func(ref string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return *Service(ServiceName("service-name-foo")), nil, nil + }, + nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-name-bar")), nil, nil + }, + } + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task(TaskServiceID("service-id-foo"), TaskSlot(1)), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .Name }} {{ .Node }}") + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-resolution.golden") +} diff --git a/cli/cli/command/task/testdata/task-print-with-global-service.golden b/cli/cli/command/task/testdata/task-print-with-global-service.golden new file mode 100644 index 00000000..fbc81248 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-global-service.golden @@ -0,0 +1 @@ +service-id-foo.node-id-bar diff --git a/cli/cli/command/task/testdata/task-print-with-indentation.golden b/cli/cli/command/task/testdata/task-print-with-indentation.golden new file mode 100644 index 00000000..8fa174a4 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-indentation.golden @@ -0,0 +1,3 @@ +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +id-foo service-name-foo.1 myimage:mytag node-name-bar Ready Failed 2 hours ago +id-bar \_ service-name-foo.1 myimage:mytag node-name-bar Ready Failed 2 hours ago diff --git a/cli/cli/command/task/testdata/task-print-with-no-trunc-option.golden b/cli/cli/command/task/testdata/task-print-with-no-trunc-option.golden new file mode 100644 index 00000000..184d2de2 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-no-trunc-option.golden @@ -0,0 +1 @@ +id-foo-yov6omdek8fg3k5stosyp2m50 diff --git a/cli/cli/command/task/testdata/task-print-with-quiet-option.golden b/cli/cli/command/task/testdata/task-print-with-quiet-option.golden new file mode 100644 index 00000000..e2faeb60 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-quiet-option.golden @@ -0,0 +1 @@ +id-foo diff --git a/cli/cli/command/task/testdata/task-print-with-replicated-service.golden b/cli/cli/command/task/testdata/task-print-with-replicated-service.golden new file mode 100644 index 00000000..9ecebdaf --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-replicated-service.golden @@ -0,0 +1 @@ +service-id-foo.1 diff --git a/cli/cli/command/task/testdata/task-print-with-resolution.golden b/cli/cli/command/task/testdata/task-print-with-resolution.golden new file mode 100644 index 00000000..747d1af4 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-resolution.golden @@ -0,0 +1 @@ +service-name-foo.1 node-name-bar diff --git a/cli/cli/command/trust.go b/cli/cli/command/trust.go new file mode 100644 index 00000000..65f24085 --- /dev/null +++ b/cli/cli/command/trust.go @@ -0,0 +1,15 @@ +package command + +import ( + "github.com/spf13/pflag" +) + +// AddTrustVerificationFlags adds content trust flags to the provided flagset +func AddTrustVerificationFlags(fs *pflag.FlagSet, v *bool, trusted bool) { + fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image verification") +} + +// AddTrustSigningFlags adds "signing" flags to the provided flagset +func AddTrustSigningFlags(fs *pflag.FlagSet, v *bool, trusted bool) { + fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image signing") +} diff --git a/cli/cli/command/trust/cmd.go 
b/cli/cli/command/trust/cmd.go new file mode 100644 index 00000000..bb6ceace --- /dev/null +++ b/cli/cli/command/trust/cmd.go @@ -0,0 +1,25 @@ +package trust + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewTrustCommand returns a cobra command for `trust` subcommands +func NewTrustCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "trust", + Short: "Manage trust on Docker images", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newRevokeCommand(dockerCli), + newSignCommand(dockerCli), + newTrustKeyCommand(dockerCli), + newTrustSignerCommand(dockerCli), + newInspectCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/trust/common.go b/cli/cli/command/trust/common.go new file mode 100644 index 00000000..9173e6ee --- /dev/null +++ b/cli/cli/command/trust/common.go @@ -0,0 +1,167 @@ +package trust + +import ( + "context" + "encoding/hex" + "fmt" + "sort" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +// trustTagKey represents a unique signed tag and hex-encoded hash pair +type trustTagKey struct { + SignedTag string + Digest string +} + +// trustTagRow encodes all human-consumable information for a signed tag, including signers +type trustTagRow struct { + trustTagKey + Signers []string +} + +type trustTagRowList []trustTagRow + +func (tagComparator trustTagRowList) Len() int { + return len(tagComparator) +} + +func (tagComparator trustTagRowList) Less(i, j int) bool { + return tagComparator[i].SignedTag < tagComparator[j].SignedTag +} + +func (tagComparator trustTagRowList) Swap(i, j int) { + tagComparator[i], tagComparator[j] = tagComparator[j], tagComparator[i] +} + +// trustRepo represents consumable information about a trusted repository +type trustRepo struct { + Name string + SignedTags trustTagRowList + Signers []trustSigner + AdminstrativeKeys []trustSigner +} + +// trustSigner represents a trusted signer in a trusted repository +// a signer is defined by a name and list of trustKeys +type trustSigner struct { + Name string `json:",omitempty"` + Keys []trustKey `json:",omitempty"` +} + +// trustKey contains information about trusted keys +type trustKey struct { + ID string `json:",omitempty"` +} + +// lookupTrustInfo returns processed signature and role information about a notary repository. +// This information is to be pretty printed or serialized into a machine-readable format. 
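+// The returned values are, in order: the released tag/signature rows, the
+// administrative (root and targets) roles with their signatures, and the raw
+// delegation roles; prettyPrintTrustInfo and getRepoTrustInfo consume all three.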
+func lookupTrustInfo(cli command.Cli, remote string) (trustTagRowList, []client.RoleWithSignatures, []data.Role, error) { + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), remote) + if err != nil { + return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, err + } + tag := imgRefAndAuth.Tag() + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly) + if err != nil { + return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, trust.NotaryError(imgRefAndAuth.Reference().Name(), err) + } + + if err = clearChangeList(notaryRepo); err != nil { + return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, err + } + defer clearChangeList(notaryRepo) + + // Retrieve all released signatures, match them, and pretty print them + allSignedTargets, err := notaryRepo.GetAllTargetMetadataByName(tag) + if err != nil { + logrus.Debug(trust.NotaryError(remote, err)) + // print an empty table if we don't have signed targets, but have an initialized notary repo + if _, ok := err.(client.ErrNoSuchTarget); !ok { + return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf("No signatures or cannot access %s", remote) + } + } + signatureRows := matchReleasedSignatures(allSignedTargets) + + // get the administrative roles + adminRolesWithSigs, err := notaryRepo.ListRoles() + if err != nil { + return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf("No signers for %s", remote) + } + + // get delegation roles with the canonical key IDs + delegationRoles, err := notaryRepo.GetDelegationRoles() + if err != nil { + logrus.Debugf("no delegation roles found, or error fetching them for %s: %v", remote, err) + } + + return signatureRows, adminRolesWithSigs, delegationRoles, nil +} + +func formatAdminRole(roleWithSigs client.RoleWithSignatures) string { + adminKeyList := roleWithSigs.KeyIDs + sort.Strings(adminKeyList) + + var role string + switch roleWithSigs.Name { + case data.CanonicalTargetsRole: + role = "Repository Key" + case data.CanonicalRootRole: + role = "Root Key" + default: + return "" + } + return fmt.Sprintf("%s:\t%s\n", role, strings.Join(adminKeyList, ", ")) +} + +func getDelegationRoleToKeyMap(rawDelegationRoles []data.Role) map[string][]string { + signerRoleToKeyIDs := make(map[string][]string) + for _, delRole := range rawDelegationRoles { + switch delRole.Name { + case trust.ReleasesRole, data.CanonicalRootRole, data.CanonicalSnapshotRole, data.CanonicalTargetsRole, data.CanonicalTimestampRole: + continue + default: + signerRoleToKeyIDs[notaryRoleToSigner(delRole.Name)] = delRole.KeyIDs + } + } + return signerRoleToKeyIDs +} + +// aggregate all signers for a "released" hash+tagname pair. To be "released," the tag must have been +// signed into the "targets" or "targets/releases" role. 
Output is sorted by tag name +func matchReleasedSignatures(allTargets []client.TargetSignedStruct) trustTagRowList { + signatureRows := trustTagRowList{} + // do a first pass to get filter on tags signed into "targets" or "targets/releases" + releasedTargetRows := map[trustTagKey][]string{} + for _, tgt := range allTargets { + if isReleasedTarget(tgt.Role.Name) { + releasedKey := trustTagKey{tgt.Target.Name, hex.EncodeToString(tgt.Target.Hashes[notary.SHA256])} + releasedTargetRows[releasedKey] = []string{} + } + } + + // now fill out all signers on released keys + for _, tgt := range allTargets { + targetKey := trustTagKey{tgt.Target.Name, hex.EncodeToString(tgt.Target.Hashes[notary.SHA256])} + // only considered released targets + if _, ok := releasedTargetRows[targetKey]; ok && !isReleasedTarget(tgt.Role.Name) { + releasedTargetRows[targetKey] = append(releasedTargetRows[targetKey], notaryRoleToSigner(tgt.Role.Name)) + } + } + + // compile the final output as a sorted slice + for targetKey, signers := range releasedTargetRows { + signatureRows = append(signatureRows, trustTagRow{targetKey, signers}) + } + sort.Sort(signatureRows) + return signatureRows +} diff --git a/cli/cli/command/trust/helpers.go b/cli/cli/command/trust/helpers.go new file mode 100644 index 00000000..b2819d2e --- /dev/null +++ b/cli/cli/command/trust/helpers.go @@ -0,0 +1,47 @@ +package trust + +import ( + "strings" + + "github.com/docker/cli/cli/trust" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +const releasedRoleName = "Repo Admin" +const releasesRoleTUFName = "targets/releases" + +// isReleasedTarget checks if a role name is "released": +// either targets/releases or targets TUF roles +func isReleasedTarget(role data.RoleName) bool { + return role == data.CanonicalTargetsRole || role == trust.ReleasesRole +} + +// notaryRoleToSigner converts TUF role name to a human-understandable signer name +func notaryRoleToSigner(tufRole data.RoleName) string { + // don't show a signer for "targets" or "targets/releases" + if isReleasedTarget(data.RoleName(tufRole.String())) { + return releasedRoleName + } + return strings.TrimPrefix(tufRole.String(), "targets/") +} + +// clearChangelist clears the notary staging changelist. +func clearChangeList(notaryRepo client.Repository) error { + cl, err := notaryRepo.GetChangelist() + if err != nil { + return err + } + return cl.Clear("") +} + +// getOrGenerateRootKeyAndInitRepo initializes the notary repository +// with a remotely managed snapshot key. The initialization will use +// an existing root key if one is found, else a new one will be generated. 
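+// Root key lookup/creation is delegated to getOrGenerateNotaryKey with the
+// canonical root role, and data.CanonicalSnapshotRole is passed to Initialize
+// so the snapshot key stays remotely (server) managed, as noted above.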
+func getOrGenerateRootKeyAndInitRepo(notaryRepo client.Repository) error { + rootKey, err := getOrGenerateNotaryKey(notaryRepo, data.CanonicalRootRole) + if err != nil { + return err + } + return notaryRepo.Initialize([]string{rootKey.ID()}, data.CanonicalSnapshotRole) +} diff --git a/cli/cli/command/trust/helpers_test.go b/cli/cli/command/trust/helpers_test.go new file mode 100644 index 00000000..fab61214 --- /dev/null +++ b/cli/cli/command/trust/helpers_test.go @@ -0,0 +1,24 @@ +package trust + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustpinning" + "gotest.tools/assert" +) + +func TestGetOrGenerateNotaryKeyAndInitRepo(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + + err = getOrGenerateRootKeyAndInitRepo(notaryRepo) + assert.Error(t, err, "client is offline") +} diff --git a/cli/cli/command/trust/inspect.go b/cli/cli/command/trust/inspect.go new file mode 100644 index 00000000..9f10878a --- /dev/null +++ b/cli/cli/command/trust/inspect.go @@ -0,0 +1,115 @@ +package trust + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/tuf/data" +) + +type inspectOptions struct { + remotes []string + // FIXME(n4ss): this is consistent with `docker service inspect` but we should provide + // a `--format` flag too. 
(format and pretty-print should be exclusive) + prettyPrint bool +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + options := inspectOptions{} + cmd := &cobra.Command{ + Use: "inspect IMAGE[:TAG] [IMAGE[:TAG]...]", + Short: "Return low-level information about keys and signatures", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.remotes = args + + return runInspect(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&options.prettyPrint, "pretty", false, "Print the information in a human friendly format") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + if opts.prettyPrint { + var err error + + for index, remote := range opts.remotes { + if err = prettyPrintTrustInfo(dockerCli, remote); err != nil { + return err + } + + // Additional separator between the inspection output of each image + if index < len(opts.remotes)-1 { + fmt.Fprint(dockerCli.Out(), "\n\n") + } + } + + return err + } + + getRefFunc := func(ref string) (interface{}, []byte, error) { + i, err := getRepoTrustInfo(dockerCli, ref) + return nil, i, err + } + return inspect.Inspect(dockerCli.Out(), opts.remotes, "", getRefFunc) +} + +func getRepoTrustInfo(cli command.Cli, remote string) ([]byte, error) { + signatureRows, adminRolesWithSigs, delegationRoles, err := lookupTrustInfo(cli, remote) + if err != nil { + return []byte{}, err + } + // process the signatures to include repo admin if signed by the base targets role + for idx, sig := range signatureRows { + if len(sig.Signers) == 0 { + signatureRows[idx].Signers = append(sig.Signers, releasedRoleName) + } + } + + signerList, adminList := []trustSigner{}, []trustSigner{} + + signerRoleToKeyIDs := getDelegationRoleToKeyMap(delegationRoles) + + for signerName, signerKeys := range signerRoleToKeyIDs { + signerKeyList := []trustKey{} + for _, keyID := range signerKeys { + signerKeyList = append(signerKeyList, trustKey{ID: keyID}) + } + signerList = append(signerList, trustSigner{signerName, signerKeyList}) + } + sort.Slice(signerList, func(i, j int) bool { return signerList[i].Name > signerList[j].Name }) + + for _, adminRole := range adminRolesWithSigs { + switch adminRole.Name { + case data.CanonicalRootRole: + rootKeys := []trustKey{} + for _, keyID := range adminRole.KeyIDs { + rootKeys = append(rootKeys, trustKey{ID: keyID}) + } + adminList = append(adminList, trustSigner{"Root", rootKeys}) + case data.CanonicalTargetsRole: + targetKeys := []trustKey{} + for _, keyID := range adminRole.KeyIDs { + targetKeys = append(targetKeys, trustKey{ID: keyID}) + } + adminList = append(adminList, trustSigner{"Repository", targetKeys}) + } + } + sort.Slice(adminList, func(i, j int) bool { return adminList[i].Name > adminList[j].Name }) + + return json.Marshal(trustRepo{ + Name: remote, + SignedTags: signatureRows, + Signers: signerList, + AdminstrativeKeys: adminList, + }) +} diff --git a/cli/cli/command/trust/inspect_pretty.go b/cli/cli/command/trust/inspect_pretty.go new file mode 100644 index 00000000..af146d20 --- /dev/null +++ b/cli/cli/command/trust/inspect_pretty.go @@ -0,0 +1,90 @@ +package trust + +import ( + "fmt" + "io" + "sort" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/theupdateframework/notary/client" +) + +func prettyPrintTrustInfo(cli command.Cli, remote string) error { + signatureRows, adminRolesWithSigs, delegationRoles, err := lookupTrustInfo(cli, remote) + if err != nil { + return 
err + } + + if len(signatureRows) > 0 { + fmt.Fprintf(cli.Out(), "\nSignatures for %s\n\n", remote) + + if err := printSignatures(cli.Out(), signatureRows); err != nil { + return err + } + } else { + fmt.Fprintf(cli.Out(), "\nNo signatures for %s\n\n", remote) + } + signerRoleToKeyIDs := getDelegationRoleToKeyMap(delegationRoles) + + // If we do not have additional signers, do not display + if len(signerRoleToKeyIDs) > 0 { + fmt.Fprintf(cli.Out(), "\nList of signers and their keys for %s\n\n", remote) + if err := printSignerInfo(cli.Out(), signerRoleToKeyIDs); err != nil { + return err + } + } + + // This will always have the root and targets information + fmt.Fprintf(cli.Out(), "\nAdministrative keys for %s\n\n", remote) + printSortedAdminKeys(cli.Out(), adminRolesWithSigs) + return nil +} + +func printSortedAdminKeys(out io.Writer, adminRoles []client.RoleWithSignatures) { + sort.Slice(adminRoles, func(i, j int) bool { return adminRoles[i].Name > adminRoles[j].Name }) + for _, adminRole := range adminRoles { + if formattedAdminRole := formatAdminRole(adminRole); formattedAdminRole != "" { + fmt.Fprintf(out, " %s", formattedAdminRole) + } + } +} + +// pretty print with ordered rows +func printSignatures(out io.Writer, signatureRows trustTagRowList) error { + trustTagCtx := formatter.Context{ + Output: out, + Format: formatter.NewTrustTagFormat(), + } + // convert the formatted type before printing + formattedTags := []formatter.SignedTagInfo{} + for _, sigRow := range signatureRows { + formattedSigners := sigRow.Signers + if len(formattedSigners) == 0 { + formattedSigners = append(formattedSigners, fmt.Sprintf("(%s)", releasedRoleName)) + } + formattedTags = append(formattedTags, formatter.SignedTagInfo{ + Name: sigRow.SignedTag, + Digest: sigRow.Digest, + Signers: formattedSigners, + }) + } + return formatter.TrustTagWrite(trustTagCtx, formattedTags) +} + +func printSignerInfo(out io.Writer, roleToKeyIDs map[string][]string) error { + signerInfoCtx := formatter.Context{ + Output: out, + Format: formatter.NewSignerInfoFormat(), + Trunc: true, + } + formattedSignerInfo := formatter.SignerInfoList{} + for name, keyIDs := range roleToKeyIDs { + formattedSignerInfo = append(formattedSignerInfo, formatter.SignerInfo{ + Name: name, + Keys: keyIDs, + }) + } + sort.Sort(formattedSignerInfo) + return formatter.SignerInfoWrite(signerInfoCtx, formattedSignerInfo) +} diff --git a/cli/cli/command/trust/inspect_pretty_test.go b/cli/cli/command/trust/inspect_pretty_test.go new file mode 100644 index 00000000..8c5141b3 --- /dev/null +++ b/cli/cli/command/trust/inspect_pretty_test.go @@ -0,0 +1,442 @@ +package trust + +import ( + "encoding/hex" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/internal/test" + notaryfake "github.com/docker/cli/internal/test/notary" + dockerClient "github.com/docker/docker/client" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +// TODO(n4ss): remove common tests with the regular inspect command + +type fakeClient struct { + dockerClient.Client +} + +func TestTrustInspectPrettyCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires at least 1 argument", + }, + { + name: "sha-reference", + args: 
[]string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: "invalid-img-reference", + args: []string{"ALPINE"}, + expectedError: "invalid reference format", + }, + } + for _, tc := range testCases { + cmd := newInspectCommand( + test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + cmd.Flags().Set("pretty", "true") + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestTrustInspectPrettyCommandOfflineErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"nonexistent-reg-name.io/image"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access nonexistent-reg-name.io/image") + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd = newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"nonexistent-reg-name.io/image:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access nonexistent-reg-name.io/image") +} + +func TestTrustInspectPrettyCommandUninitializedErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetUninitializedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"reg/unsigned-img"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access reg/unsigned-img") + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetUninitializedNotaryRepository) + cmd = newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"reg/unsigned-img:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access reg/unsigned-img:tag") +} + +func TestTrustInspectPrettyCommandEmptyNotaryRepoErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"reg/img:unsigned-tag"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "No signatures for reg/img:unsigned-tag")) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Administrative keys for reg/img")) + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd = newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"reg/img"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "No signatures for reg/img")) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Administrative keys for reg/img")) +} + +func TestTrustInspectPrettyCommandFullRepoWithoutSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedWithNoSignersNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"signed-repo"}) + assert.NilError(t, cmd.Execute()) + + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-pretty-full-repo-no-signers.golden") +} + +func 
TestTrustInspectPrettyCommandOneTagWithoutSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedWithNoSignersNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"signed-repo:green"}) + assert.NilError(t, cmd.Execute()) + + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-pretty-one-tag-no-signers.golden") +} + +func TestTrustInspectPrettyCommandFullRepoWithSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"signed-repo"}) + assert.NilError(t, cmd.Execute()) + + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-pretty-full-repo-with-signers.golden") +} + +func TestTrustInspectPrettyCommandUnsignedTagInSignedRepo(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"signed-repo:unsigned"}) + assert.NilError(t, cmd.Execute()) + + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-pretty-unsigned-tag-with-signers.golden") +} + +func TestNotaryRoleToSigner(t *testing.T) { + assert.Check(t, is.Equal(releasedRoleName, notaryRoleToSigner(data.CanonicalTargetsRole))) + assert.Check(t, is.Equal(releasedRoleName, notaryRoleToSigner(trust.ReleasesRole))) + assert.Check(t, is.Equal("signer", notaryRoleToSigner("targets/signer"))) + assert.Check(t, is.Equal("docker/signer", notaryRoleToSigner("targets/docker/signer"))) + + // It's nonsense for other base roles to have signed off on a target, but this function leaves role names intact + for _, role := range data.BaseRoles { + if role == data.CanonicalTargetsRole { + continue + } + assert.Check(t, is.Equal(role.String(), notaryRoleToSigner(role))) + } + assert.Check(t, is.Equal("notarole", notaryRoleToSigner(data.RoleName("notarole")))) +} + +// check if a role name is "released": either targets/releases or targets TUF roles +func TestIsReleasedTarget(t *testing.T) { + assert.Check(t, isReleasedTarget(trust.ReleasesRole)) + for _, role := range data.BaseRoles { + assert.Check(t, is.Equal(role == data.CanonicalTargetsRole, isReleasedTarget(role))) + } + assert.Check(t, !isReleasedTarget(data.RoleName("targets/not-releases"))) + assert.Check(t, !isReleasedTarget(data.RoleName("random"))) + assert.Check(t, !isReleasedTarget(data.RoleName("targets/releases/subrole"))) +} + +// creates a mock delegation with a given name and no keys +func mockDelegationRoleWithName(name string) data.DelegationRole { + baseRole := data.NewBaseRole( + data.RoleName(name), + notary.MinThreshold, + ) + return data.DelegationRole{BaseRole: baseRole, Paths: []string{}} +} + +func TestMatchEmptySignatures(t *testing.T) { + // first try empty targets + emptyTgts := []client.TargetSignedStruct{} + + matchedSigRows := matchReleasedSignatures(emptyTgts) + assert.Check(t, is.Len(matchedSigRows, 0)) +} + +func TestMatchUnreleasedSignatures(t *testing.T) { + // try an "unreleased" target with 3 signatures, 0 rows will appear + unreleasedTgts := []client.TargetSignedStruct{} + + tgt := client.Target{Name: "unreleased", Hashes: data.Hashes{notary.SHA256: []byte("hash")}} + for _, unreleasedRole := range []string{"targets/a", "targets/b", "targets/c"} { + unreleasedTgts = append(unreleasedTgts, client.TargetSignedStruct{Role: 
mockDelegationRoleWithName(unreleasedRole), Target: tgt}) + } + + matchedSigRows := matchReleasedSignatures(unreleasedTgts) + assert.Check(t, is.Len(matchedSigRows, 0)) +} + +func TestMatchOneReleasedSingleSignature(t *testing.T) { + // now try only 1 "released" target with no additional sigs, 1 row will appear with 0 signers + oneReleasedTgt := []client.TargetSignedStruct{} + + // make and append the "released" target to our mock input + releasedTgt := client.Target{Name: "released", Hashes: data.Hashes{notary.SHA256: []byte("released-hash")}} + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: releasedTgt}) + + // make and append 3 non-released signatures on the "unreleased" target + unreleasedTgt := client.Target{Name: "unreleased", Hashes: data.Hashes{notary.SHA256: []byte("hash")}} + for _, unreleasedRole := range []string{"targets/a", "targets/b", "targets/c"} { + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: unreleasedTgt}) + } + + matchedSigRows := matchReleasedSignatures(oneReleasedTgt) + assert.Check(t, is.Len(matchedSigRows, 1)) + + outputRow := matchedSigRows[0] + // Empty signers because "targets/releases" doesn't show up + assert.Check(t, is.Len(outputRow.Signers, 0)) + assert.Check(t, is.Equal(releasedTgt.Name, outputRow.SignedTag)) + assert.Check(t, is.Equal(hex.EncodeToString(releasedTgt.Hashes[notary.SHA256]), outputRow.Digest)) +} + +func TestMatchOneReleasedMultiSignature(t *testing.T) { + // now try only 1 "released" target with 3 additional sigs, 1 row will appear with 3 signers + oneReleasedTgt := []client.TargetSignedStruct{} + + // make and append the "released" target to our mock input + releasedTgt := client.Target{Name: "released", Hashes: data.Hashes{notary.SHA256: []byte("released-hash")}} + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: releasedTgt}) + + // make and append 3 non-released signatures on both the "released" and "unreleased" targets + unreleasedTgt := client.Target{Name: "unreleased", Hashes: data.Hashes{notary.SHA256: []byte("hash")}} + for _, unreleasedRole := range []string{"targets/a", "targets/b", "targets/c"} { + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: unreleasedTgt}) + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: releasedTgt}) + } + + matchedSigRows := matchReleasedSignatures(oneReleasedTgt) + assert.Check(t, is.Len(matchedSigRows, 1)) + + outputRow := matchedSigRows[0] + // We should have three signers + assert.Check(t, is.DeepEqual(outputRow.Signers, []string{"a", "b", "c"})) + assert.Check(t, is.Equal(releasedTgt.Name, outputRow.SignedTag)) + assert.Check(t, is.Equal(hex.EncodeToString(releasedTgt.Hashes[notary.SHA256]), outputRow.Digest)) +} + +func TestMatchMultiReleasedMultiSignature(t *testing.T) { + // now try 3 "released" targets with additional sigs to show 3 rows as follows: + // target-a is signed by targets/releases and targets/a - a will be the signer + // target-b is signed by targets/releases, targets/a, targets/b - a and b will be the signers + // target-c is signed by targets/releases, targets/a, targets/b, targets/c - a, b, and c will be the signers + multiReleasedTgts := []client.TargetSignedStruct{} + // make target-a, 
target-b, and target-c
+	targetA := client.Target{Name: "target-a", Hashes: data.Hashes{notary.SHA256: []byte("target-a-hash")}}
+	targetB := client.Target{Name: "target-b", Hashes: data.Hashes{notary.SHA256: []byte("target-b-hash")}}
+	targetC := client.Target{Name: "target-c", Hashes: data.Hashes{notary.SHA256: []byte("target-c-hash")}}
+
+	// have targets/releases "sign" on all of these targets so they are released
+	multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: targetA})
+	multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: targetB})
+	multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: targetC})
+
+	// targets/a signs off on all three targets (target-a, target-b, target-c):
+	for _, tgt := range []client.Target{targetA, targetB, targetC} {
+		multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/a"), Target: tgt})
+	}
+
+	// targets/b signs off on the final two targets (target-b, target-c):
+	for _, tgt := range []client.Target{targetB, targetC} {
+		multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/b"), Target: tgt})
+	}
+
+	// targets/c only signs off on the last target (target-c):
+	multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/c"), Target: targetC})
+
+	matchedSigRows := matchReleasedSignatures(multiReleasedTgts)
+	assert.Check(t, is.Len(matchedSigRows, 3))
+
+	// note that the output is sorted by tag name, so we can reliably index to validate data:
+	outputTargetA := matchedSigRows[0]
+	assert.Check(t, is.DeepEqual(outputTargetA.Signers, []string{"a"}))
+	assert.Check(t, is.Equal(targetA.Name, outputTargetA.SignedTag))
+	assert.Check(t, is.Equal(hex.EncodeToString(targetA.Hashes[notary.SHA256]), outputTargetA.Digest))
+
+	outputTargetB := matchedSigRows[1]
+	assert.Check(t, is.DeepEqual(outputTargetB.Signers, []string{"a", "b"}))
+	assert.Check(t, is.Equal(targetB.Name, outputTargetB.SignedTag))
+	assert.Check(t, is.Equal(hex.EncodeToString(targetB.Hashes[notary.SHA256]), outputTargetB.Digest))
+
+	outputTargetC := matchedSigRows[2]
+	assert.Check(t, is.DeepEqual(outputTargetC.Signers, []string{"a", "b", "c"}))
+	assert.Check(t, is.Equal(targetC.Name, outputTargetC.SignedTag))
+	assert.Check(t, is.Equal(hex.EncodeToString(targetC.Hashes[notary.SHA256]), outputTargetC.Digest))
+}
+
+func TestMatchReleasedSignatureFromTargets(t *testing.T) {
+	// now try only 1 "released" target with no additional sigs, one row will appear
+	oneReleasedTgt := []client.TargetSignedStruct{}
+	// make and append the "released" target to our mock input
+	releasedTgt := client.Target{Name: "released", Hashes: data.Hashes{notary.SHA256: []byte("released-hash")}}
+	oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(data.CanonicalTargetsRole.String()), Target: releasedTgt})
+	matchedSigRows := matchReleasedSignatures(oneReleasedTgt)
+	assert.Check(t, is.Len(matchedSigRows, 1))
+	outputRow := matchedSigRows[0]
+	// Empty signers because "targets" doesn't show up
+	assert.Check(t, is.Len(outputRow.Signers, 0))
+	assert.Check(t, is.Equal(releasedTgt.Name, outputRow.SignedTag))
+	assert.Check(t,
is.Equal(hex.EncodeToString(releasedTgt.Hashes[notary.SHA256]), outputRow.Digest)) +} + +func TestGetSignerRolesWithKeyIDs(t *testing.T) { + roles := []data.Role{ + { + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: "targets/alice", + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key21", "key22"}, + }, + Name: "targets/releases", + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key31"}, + }, + Name: data.CanonicalTargetsRole, + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key41", "key01"}, + }, + Name: data.CanonicalRootRole, + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key51"}, + }, + Name: data.CanonicalSnapshotRole, + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key61"}, + }, + Name: data.CanonicalTimestampRole, + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key71", "key72"}, + }, + Name: "targets/bob", + }, + } + expectedSignerRoleToKeyIDs := map[string][]string{ + "alice": {"key11"}, + "bob": {"key71", "key72"}, + } + + var roleWithSigs []client.RoleWithSignatures + for _, role := range roles { + roleWithSig := client.RoleWithSignatures{Role: role, Signatures: nil} + roleWithSigs = append(roleWithSigs, roleWithSig) + } + signerRoleToKeyIDs := getDelegationRoleToKeyMap(roles) + assert.Check(t, is.DeepEqual(expectedSignerRoleToKeyIDs, signerRoleToKeyIDs)) +} + +func TestFormatAdminRole(t *testing.T) { + aliceRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: "targets/alice", + } + aliceRoleWithSigs := client.RoleWithSignatures{Role: aliceRole, Signatures: nil} + assert.Check(t, is.Equal("", formatAdminRole(aliceRoleWithSigs))) + + releasesRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: "targets/releases", + } + releasesRoleWithSigs := client.RoleWithSignatures{Role: releasesRole, Signatures: nil} + assert.Check(t, is.Equal("", formatAdminRole(releasesRoleWithSigs))) + + timestampRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: data.CanonicalTimestampRole, + } + timestampRoleWithSigs := client.RoleWithSignatures{Role: timestampRole, Signatures: nil} + assert.Check(t, is.Equal("", formatAdminRole(timestampRoleWithSigs))) + + snapshotRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: data.CanonicalSnapshotRole, + } + snapshotRoleWithSigs := client.RoleWithSignatures{Role: snapshotRole, Signatures: nil} + assert.Check(t, is.Equal("", formatAdminRole(snapshotRoleWithSigs))) + + rootRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: data.CanonicalRootRole, + } + rootRoleWithSigs := client.RoleWithSignatures{Role: rootRole, Signatures: nil} + assert.Check(t, is.Equal("Root Key:\tkey11\n", formatAdminRole(rootRoleWithSigs))) + + targetsRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key99", "abc", "key11"}, + }, + Name: data.CanonicalTargetsRole, + } + targetsRoleWithSigs := client.RoleWithSignatures{Role: targetsRole, Signatures: nil} + assert.Check(t, is.Equal("Repository Key:\tabc, key11, key99\n", formatAdminRole(targetsRoleWithSigs))) +} diff --git a/cli/cli/command/trust/inspect_test.go b/cli/cli/command/trust/inspect_test.go new file mode 100644 index 00000000..3cf6e0eb --- /dev/null +++ b/cli/cli/command/trust/inspect_test.go @@ -0,0 +1,131 @@ +package trust + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "gotest.tools/assert" + 
"gotest.tools/golden" +) + +func TestTrustInspectCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires at least 1 argument", + }, + { + name: "sha-reference", + args: []string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: "invalid-img-reference", + args: []string{"ALPINE"}, + expectedError: "invalid reference format", + }, + } + for _, tc := range testCases { + cmd := newInspectCommand( + test.NewFakeCli(&fakeClient{})) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestTrustInspectCommandOfflineErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetOfflineNotaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"nonexistent-reg-name.io/image"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access nonexistent-reg-name.io/image") + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetOfflineNotaryRepository) + cmd = newInspectCommand(cli) + cmd.SetArgs([]string{"nonexistent-reg-name.io/image:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access nonexistent-reg-name.io/image") +} + +func TestTrustInspectCommandUninitializedErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetUninitializedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"reg/unsigned-img"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access reg/unsigned-img") + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-uninitialized.golden") + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetUninitializedNotaryRepository) + cmd = newInspectCommand(cli) + cmd.SetArgs([]string{"reg/unsigned-img:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access reg/unsigned-img:tag") + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-uninitialized.golden") +} + +func TestTrustInspectCommandEmptyNotaryRepo(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetEmptyTargetsNotaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"reg/img:unsigned-tag"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-empty-repo.golden") +} + +func TestTrustInspectCommandFullRepoWithoutSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetLoadedWithNoSignersNotaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"signed-repo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-full-repo-no-signers.golden") +} + +func TestTrustInspectCommandOneTagWithoutSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetLoadedWithNoSignersNotaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"signed-repo:green"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-one-tag-no-signers.golden") +} + +func 
TestTrustInspectCommandFullRepoWithSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetLoadedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"signed-repo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-full-repo-with-signers.golden") +} + +func TestTrustInspectCommandMultipleFullReposWithSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetLoadedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"signed-repo", "signed-repo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-multiple-repos-with-signers.golden") +} + +func TestTrustInspectCommandUnsignedTagInSignedRepo(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetLoadedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"signed-repo:unsigned"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-unsigned-tag-with-signers.golden") +} diff --git a/cli/cli/command/trust/key.go b/cli/cli/command/trust/key.go new file mode 100644 index 00000000..f57b44c7 --- /dev/null +++ b/cli/cli/command/trust/key.go @@ -0,0 +1,22 @@ +package trust + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// newTrustKeyCommand returns a cobra command for `trust key` subcommands +func newTrustKeyCommand(dockerCli command.Streams) *cobra.Command { + cmd := &cobra.Command{ + Use: "key", + Short: "Manage keys for signing Docker images", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newKeyGenerateCommand(dockerCli), + newKeyLoadCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/trust/key_generate.go b/cli/cli/command/trust/key_generate.go new file mode 100644 index 00000000..47223c3d --- /dev/null +++ b/cli/cli/command/trust/key_generate.go @@ -0,0 +1,134 @@ +package trust + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/tuf/data" + tufutils "github.com/theupdateframework/notary/tuf/utils" +) + +type keyGenerateOptions struct { + name string + directory string +} + +func newKeyGenerateCommand(dockerCli command.Streams) *cobra.Command { + options := keyGenerateOptions{} + cmd := &cobra.Command{ + Use: "generate NAME", + Short: "Generate and load a signing key-pair", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.name = args[0] + return setupPassphraseAndGenerateKeys(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.StringVar(&options.directory, "dir", "", "Directory to generate key in, defaults to current directory") + return cmd +} + +// key names can use lowercase alphanumeric + _ + - characters +var validKeyName = regexp.MustCompile(`^[a-z0-9][a-z0-9\_\-]*$`).MatchString + +// validate that all of the key names are unique and are alphanumeric + _ + - +// and that we do not already have public key files in the target dir on disk +func validateKeyArgs(keyName string, targetDir string) error { + if !validKeyName(keyName) { + 
return fmt.Errorf("key name \"%s\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", keyName) + } + + pubKeyFileName := keyName + ".pub" + if _, err := os.Stat(targetDir); err != nil { + return fmt.Errorf("public key path does not exist: \"%s\"", targetDir) + } + targetPath := filepath.Join(targetDir, pubKeyFileName) + if _, err := os.Stat(targetPath); err == nil { + return fmt.Errorf("public key file already exists: \"%s\"", targetPath) + } + return nil +} + +func setupPassphraseAndGenerateKeys(streams command.Streams, opts keyGenerateOptions) error { + targetDir := opts.directory + if targetDir == "" { + cwd, err := os.Getwd() + if err != nil { + return err + } + targetDir = cwd + } + return validateAndGenerateKey(streams, opts.name, targetDir) +} + +func validateAndGenerateKey(streams command.Streams, keyName string, workingDir string) error { + freshPassRetGetter := func() notary.PassRetriever { return trust.GetPassphraseRetriever(streams.In(), streams.Out()) } + if err := validateKeyArgs(keyName, workingDir); err != nil { + return err + } + fmt.Fprintf(streams.Out(), "Generating key for %s...\n", keyName) + // Automatically load the private key to local storage for use + privKeyFileStore, err := trustmanager.NewKeyFileStore(trust.GetTrustDirectory(), freshPassRetGetter()) + if err != nil { + return err + } + + pubPEM, err := generateKeyAndOutputPubPEM(keyName, privKeyFileStore) + if err != nil { + fmt.Fprintf(streams.Out(), err.Error()) + return errors.Wrapf(err, "failed to generate key for %s", keyName) + } + + // Output the public key to a file in the CWD or specified dir + writtenPubFile, err := writePubKeyPEMToDir(pubPEM, keyName, workingDir) + if err != nil { + return err + } + fmt.Fprintf(streams.Out(), "Successfully generated and loaded private key. 
Corresponding public key available: %s\n", writtenPubFile) + + return nil +} + +func generateKeyAndOutputPubPEM(keyName string, privKeyStore trustmanager.KeyStore) (pem.Block, error) { + privKey, err := tufutils.GenerateKey(data.ECDSAKey) + if err != nil { + return pem.Block{}, err + } + + privKeyStore.AddKey(trustmanager.KeyInfo{Role: data.RoleName(keyName)}, privKey) + if err != nil { + return pem.Block{}, err + } + + pubKey := data.PublicKeyFromPrivate(privKey) + return pem.Block{ + Type: "PUBLIC KEY", + Headers: map[string]string{ + "role": keyName, + }, + Bytes: pubKey.Public(), + }, nil +} + +func writePubKeyPEMToDir(pubPEM pem.Block, keyName, workingDir string) (string, error) { + // Output the public key to a file in the CWD or specified dir + pubFileName := strings.Join([]string{keyName, "pub"}, ".") + pubFilePath := filepath.Join(workingDir, pubFileName) + if err := ioutil.WriteFile(pubFilePath, pem.EncodeToMemory(&pubPEM), notary.PrivNoExecPerms); err != nil { + return "", errors.Wrapf(err, "failed to write public key to %s", pubFilePath) + } + return pubFilePath, nil +} diff --git a/cli/cli/command/trust/key_generate_test.go b/cli/cli/command/trust/key_generate_test.go new file mode 100644 index 00000000..b4c798d8 --- /dev/null +++ b/cli/cli/command/trust/key_generate_test.go @@ -0,0 +1,134 @@ +package trust + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/internal/test" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustmanager" + tufutils "github.com/theupdateframework/notary/tuf/utils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustKeyGenerateErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"key-1", "key-2"}, + expectedError: "requires exactly 1 argument", + }, + } + + tmpDir, err := ioutil.TempDir("", "docker-key-generate-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cmd := newKeyGenerateCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestGenerateKeySuccess(t *testing.T) { + pubKeyCWD, err := ioutil.TempDir("", "pub-keys-") + assert.NilError(t, err) + defer os.RemoveAll(pubKeyCWD) + + privKeyStorageDir, err := ioutil.TempDir("", "priv-keys-") + assert.NilError(t, err) + defer os.RemoveAll(privKeyStorageDir) + + passwd := "password" + cannedPasswordRetriever := passphrase.ConstantRetriever(passwd) + // generate a single key + keyName := "alice" + privKeyFileStore, err := trustmanager.NewKeyFileStore(privKeyStorageDir, cannedPasswordRetriever) + assert.NilError(t, err) + + pubKeyPEM, err := generateKeyAndOutputPubPEM(keyName, privKeyFileStore) + assert.NilError(t, err) + + assert.Check(t, is.Equal(keyName, pubKeyPEM.Headers["role"])) + // the default GUN is empty + assert.Check(t, is.Equal("", pubKeyPEM.Headers["gun"])) + // assert public key header + assert.Check(t, is.Equal("PUBLIC KEY", pubKeyPEM.Type)) + + // check that an appropriate ~//private/.key file exists + expectedPrivKeyDir := filepath.Join(privKeyStorageDir, notary.PrivDir) + _, err = 
os.Stat(expectedPrivKeyDir) + assert.NilError(t, err) + + keyFiles, err := ioutil.ReadDir(expectedPrivKeyDir) + assert.NilError(t, err) + assert.Check(t, is.Len(keyFiles, 1)) + privKeyFilePath := filepath.Join(expectedPrivKeyDir, keyFiles[0].Name()) + + // verify the key content + privFrom, _ := os.OpenFile(privKeyFilePath, os.O_RDONLY, notary.PrivExecPerms) + defer privFrom.Close() + fromBytes, _ := ioutil.ReadAll(privFrom) + privKeyPEM, _ := pem.Decode(fromBytes) + assert.Check(t, is.Equal(keyName, privKeyPEM.Headers["role"])) + // the default GUN is empty + assert.Check(t, is.Equal("", privKeyPEM.Headers["gun"])) + // assert encrypted header + assert.Check(t, is.Equal("ENCRYPTED PRIVATE KEY", privKeyPEM.Type)) + // check that the passphrase matches + _, err = tufutils.ParsePKCS8ToTufKey(privKeyPEM.Bytes, []byte(passwd)) + assert.NilError(t, err) + + // check that the public key exists at the correct path if we use the helper: + returnedPath, err := writePubKeyPEMToDir(pubKeyPEM, keyName, pubKeyCWD) + assert.NilError(t, err) + expectedPubKeyPath := filepath.Join(pubKeyCWD, keyName+".pub") + assert.Check(t, is.Equal(returnedPath, expectedPubKeyPath)) + _, err = os.Stat(expectedPubKeyPath) + assert.NilError(t, err) + // check that the public key is the only file output in CWD + cwdKeyFiles, err := ioutil.ReadDir(pubKeyCWD) + assert.NilError(t, err) + assert.Check(t, is.Len(cwdKeyFiles, 1)) +} + +func TestValidateKeyArgs(t *testing.T) { + pubKeyCWD, err := ioutil.TempDir("", "pub-keys-") + assert.NilError(t, err) + defer os.RemoveAll(pubKeyCWD) + + err = validateKeyArgs("a", pubKeyCWD) + assert.NilError(t, err) + + err = validateKeyArgs("a/b", pubKeyCWD) + assert.Error(t, err, "key name \"a/b\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character") + + err = validateKeyArgs("-", pubKeyCWD) + assert.Error(t, err, "key name \"-\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character") + + assert.NilError(t, ioutil.WriteFile(filepath.Join(pubKeyCWD, "a.pub"), []byte("abc"), notary.PrivExecPerms)) + err = validateKeyArgs("a", pubKeyCWD) + assert.Error(t, err, fmt.Sprintf("public key file already exists: \"%s\"", filepath.Join(pubKeyCWD, "a.pub"))) + + err = validateKeyArgs("a", "/random/dir/") + assert.Error(t, err, "public key path does not exist: \"/random/dir/\"") +} diff --git a/cli/cli/command/trust/key_load.go b/cli/cli/command/trust/key_load.go new file mode 100644 index 00000000..9263cbda --- /dev/null +++ b/cli/cli/command/trust/key_load.go @@ -0,0 +1,115 @@ +package trust + +import ( + "bytes" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/trustmanager" + tufutils "github.com/theupdateframework/notary/tuf/utils" +) + +const ( + nonOwnerReadWriteMask = 0077 +) + +type keyLoadOptions struct { + keyName string +} + +func newKeyLoadCommand(dockerCli command.Streams) *cobra.Command { + var options keyLoadOptions + cmd := &cobra.Command{ + Use: "load [OPTIONS] KEYFILE", + Short: "Load a private key file for signing", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return loadPrivKey(dockerCli, args[0], options) + }, + } + flags := cmd.Flags() + 
flags.StringVar(&options.keyName, "name", "signer", "Name for the loaded key") + return cmd +} + +func loadPrivKey(streams command.Streams, keyPath string, options keyLoadOptions) error { + // validate the key name if provided + if options.keyName != "" && !validKeyName(options.keyName) { + return fmt.Errorf("key name \"%s\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", options.keyName) + } + trustDir := trust.GetTrustDirectory() + keyFileStore, err := storage.NewPrivateKeyFileStorage(trustDir, notary.KeyExtension) + if err != nil { + return err + } + privKeyImporters := []trustmanager.Importer{keyFileStore} + + fmt.Fprintf(streams.Out(), "Loading key from \"%s\"...\n", keyPath) + + // Always use a fresh passphrase retriever for each import + passRet := trust.GetPassphraseRetriever(streams.In(), streams.Out()) + keyBytes, err := getPrivKeyBytesFromPath(keyPath) + if err != nil { + return errors.Wrapf(err, "refusing to load key from %s", keyPath) + } + if err := loadPrivKeyBytesToStore(keyBytes, privKeyImporters, keyPath, options.keyName, passRet); err != nil { + return errors.Wrapf(err, "error importing key from %s", keyPath) + } + fmt.Fprintf(streams.Out(), "Successfully imported key from %s\n", keyPath) + return nil +} + +func getPrivKeyBytesFromPath(keyPath string) ([]byte, error) { + fileInfo, err := os.Stat(keyPath) + if err != nil { + return nil, err + } + if fileInfo.Mode()&nonOwnerReadWriteMask != 0 { + return nil, fmt.Errorf("private key file %s must not be readable or writable by others", keyPath) + } + + from, err := os.OpenFile(keyPath, os.O_RDONLY, notary.PrivExecPerms) + if err != nil { + return nil, err + } + defer from.Close() + + return ioutil.ReadAll(from) +} + +func loadPrivKeyBytesToStore(privKeyBytes []byte, privKeyImporters []trustmanager.Importer, keyPath, keyName string, passRet notary.PassRetriever) error { + var err error + if _, _, err = tufutils.ExtractPrivateKeyAttributes(privKeyBytes); err != nil { + return fmt.Errorf("provided file %s is not a supported private key - to add a signer's public key use docker trust signer add", keyPath) + } + if privKeyBytes, err = decodePrivKeyIfNecessary(privKeyBytes, passRet); err != nil { + return errors.Wrapf(err, "cannot load key from provided file %s", keyPath) + } + // Make a reader, rewind the file pointer + return trustmanager.ImportKeys(bytes.NewReader(privKeyBytes), privKeyImporters, keyName, "", passRet) +} + +func decodePrivKeyIfNecessary(privPemBytes []byte, passRet notary.PassRetriever) ([]byte, error) { + pemBlock, _ := pem.Decode(privPemBytes) + _, containsDEKInfo := pemBlock.Headers["DEK-Info"] + if containsDEKInfo || pemBlock.Type == "ENCRYPTED PRIVATE KEY" { + // if we do not have enough information to properly import, try to decrypt the key + if _, ok := pemBlock.Headers["path"]; !ok { + privKey, _, err := trustmanager.GetPasswdDecryptBytes(passRet, privPemBytes, "", "encrypted") + if err != nil { + return []byte{}, fmt.Errorf("could not decrypt key") + } + privPemBytes = privKey.Private() + } + } + return privPemBytes, nil +} diff --git a/cli/cli/command/trust/key_load_test.go b/cli/cli/command/trust/key_load_test.go new file mode 100644 index 00000000..e0e35aab --- /dev/null +++ b/cli/cli/command/trust/key_load_test.go @@ -0,0 +1,253 @@ +package trust + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/internal/test" + 
"github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/trustmanager" + tufutils "github.com/theupdateframework/notary/tuf/utils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestTrustKeyLoadErrors(t *testing.T) { + noSuchFile := "stat iamnotakey: no such file or directory" + if runtime.GOOS == "windows" { + noSuchFile = "CreateFile iamnotakey: The system cannot find the file specified." + } + testCases := []struct { + name string + args []string + expectedError string + expectedOutput string + }{ + { + name: "not-enough-args", + expectedError: "exactly 1 argument", + expectedOutput: "", + }, + { + name: "too-many-args", + args: []string{"iamnotakey", "alsonotakey"}, + expectedError: "exactly 1 argument", + expectedOutput: "", + }, + { + name: "not-a-key", + args: []string{"iamnotakey"}, + expectedError: "refusing to load key from iamnotakey: " + noSuchFile, + expectedOutput: "Loading key from \"iamnotakey\"...\n", + }, + { + name: "bad-key-name", + args: []string{"iamnotakey", "--name", "KEYNAME"}, + expectedError: "key name \"KEYNAME\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", + expectedOutput: "", + }, + } + tmpDir, err := ioutil.TempDir("", "docker-key-load-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cmd := newKeyLoadCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + assert.Check(t, is.Contains(cli.OutBuffer().String(), tc.expectedOutput)) + } +} + +var rsaPrivKeyFixture = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAs7yVMzCw8CBZPoN+QLdx3ZzbVaHnouHIKu+ynX60IZ3stpbb +6rowu78OWON252JcYJqe++2GmdIgbBhg+mZDwhX0ZibMVztJaZFsYL+Ch/2J9KqD +A5NtE1s/XdhYoX5hsv7W4ok9jLFXRYIMj+T4exJRlR4f4GP9p0fcqPWd9/enPnlJ +JFTmu0DXJTZUMVS1UrXUy5t/DPXdrwyl8pM7VCqO3bqK7jqE6mWawdTkEeiku1fJ +ydP0285uiYTbj1Q38VVhPwXzMuLbkaUgRJhCI4BcjfQIjtJLbWpS+VdhUEvtgMVx +XJMKxCVGG69qjXyj9TjI7pxanb/bWglhovJN9wIDAQABAoIBAQCSnMsLxbUfOxPx +RWuwOLN+NZxIvtfnastQEtSdWiRvo5Xa3zYmw5hLHa8DXRC57+cwug/jqr54LQpb +gotg1hiBck05In7ezTK2FXTVeoJskal91bUnLpP0DSOkVnz9xszFKNF6Wr7FTEfH +IC1FF16Fbcz0mW0hKg9X6+uYOzqPcKpQRwli5LAwhT18Alf9h4/3NCeKotiJyr2J +xvcEH1eY2m2c/jQZurBkys7qBC3+i8LJEOW8MBQt7mxajwfbU91wtP2YoqMcoYiS +zsPbYp7Ui2t4G9Yn+OJw+uj4RGP1Bo4nSyRxWDtg+8Zug/JYU6/s+8kVRpiGffd3 +T1GvoxUhAoGBAOnPDWG/g1xlJf65Rh71CxMs638zhYbIloU2K4Rqr05DHe7GryTS +9hLVrwhHddK+KwfVbR8HFMPo1DC/NVbuKt8StTAadAu3HsC088gWd28nOiGAWuvH +Bo3x/DYQGYwGFfoo4rzCOgMj6DJjXmcWEXNv3NDMoXoYpkxa0g6zZDyHAoGBAMTL +t7EUneJT+Mm7wyL1I5bmaT/HFwqoUQB2ccBPVD8p1el62NgLdfhOa8iNlBVhMrlh +2aTjrMlSPcjr9sCgKrLcenSWw+2qFsf4+SmV01ntB9kWes2phXpnB0ynXIcbeG05 ++BLxbqDTVV0Iqh4r/dGeplyV2WyL3mTpkT3hRq8RAoGAZ93degEUICWnHWO9LN97 +Dge0joua0+ekRoVsC6VBP6k9UOfewqMdQfy/hxQH2Zk1kINVuKTyqp1yNj2bOoUP +co3jA/2cc9/jv4QjkE26vRxWDK/ytC90T/aiLno0fyns9XbYUzaNgvuemVPfijgZ +hIi7Nd7SFWWB6wWlr3YuH10CgYEAwh7JVa2mh8iZEjVaKTNyJbmmfDjgq6yYKkKr +ti0KRzv3O9Xn7ERx27tPaobtWaGFLYQt8g57NCMhuv23aw8Sz1fYmwTUw60Rx7P5 +42FdF8lOAn/AJvpfJfxXIO+9v7ADPIr//3+TxqRwAdM4K4btWkaKh61wyTe26gfT +MxzyYmECgYAnlU5zsGyiZqwoXVktkhtZrE7Qu0SoztzFb8KpvFNmMTPF1kAAYmJY +GIhbizeGJ3h4cUdozKmt8ZWIt6uFDEYCqEA7XF4RH75dW25x86mpIPO7iRl9eisY +IsLeMYqTIwXAwGx6Ka9v5LOL1kzcHQ2iVj6+QX+yoptSft1dYa9jOA== +-----END RSA PRIVATE KEY-----`) + 
+const rsaPrivKeyID = "ee69e8e07a14756ad5ff0aca2336b37f86b0ac1710d1f3e94440081e080aecd7" + +var ecPrivKeyFixture = []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEINfxKtDH3ug7ZIQPDyeAzujCdhw36D+bf9ToPE1A7YEyoAoGCCqGSM49 +AwEHoUQDQgAEUIH9AYtrcDFzZrFJBdJZkn21d+4cH3nzy2O6Q/ct4BjOBKa+WCdR +tPo78bA+C/7t81ADQO8Jqaj59W50rwoqDQ== +-----END EC PRIVATE KEY-----`) + +const ecPrivKeyID = "46157cb0becf9c72c3219e11d4692424fef9bf4460812ccc8a71a3dfcafc7e60" + +var testKeys = map[string][]byte{ + ecPrivKeyID: ecPrivKeyFixture, + rsaPrivKeyID: rsaPrivKeyFixture, +} + +func TestLoadKeyFromPath(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") + for keyID, keyBytes := range testKeys { + t.Run(fmt.Sprintf("load-key-id-%s-from-path", keyID), func(t *testing.T) { + testLoadKeyFromPath(t, keyID, keyBytes) + }) + } +} + +func testLoadKeyFromPath(t *testing.T, privKeyID string, privKeyFixture []byte) { + privKeyDir, err := ioutil.TempDir("", "key-load-test-") + assert.NilError(t, err) + defer os.RemoveAll(privKeyDir) + privKeyFilepath := filepath.Join(privKeyDir, "privkey.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, notary.PrivNoExecPerms)) + + keyStorageDir, err := ioutil.TempDir("", "loaded-keys-") + assert.NilError(t, err) + defer os.RemoveAll(keyStorageDir) + + passwd := "password" + cannedPasswordRetriever := passphrase.ConstantRetriever(passwd) + keyFileStore, err := storage.NewPrivateKeyFileStorage(keyStorageDir, notary.KeyExtension) + assert.NilError(t, err) + privKeyImporters := []trustmanager.Importer{keyFileStore} + + // get the privKeyBytes + privKeyBytes, err := getPrivKeyBytesFromPath(privKeyFilepath) + assert.NilError(t, err) + + // import the key to our keyStorageDir + assert.Check(t, loadPrivKeyBytesToStore(privKeyBytes, privKeyImporters, privKeyFilepath, "signer-name", cannedPasswordRetriever)) + + // check that the appropriate ~//private/.key file exists + expectedImportKeyPath := filepath.Join(keyStorageDir, notary.PrivDir, privKeyID+"."+notary.KeyExtension) + _, err = os.Stat(expectedImportKeyPath) + assert.NilError(t, err) + + // verify the key content + from, _ := os.OpenFile(expectedImportKeyPath, os.O_RDONLY, notary.PrivExecPerms) + defer from.Close() + fromBytes, _ := ioutil.ReadAll(from) + keyPEM, _ := pem.Decode(fromBytes) + assert.Check(t, is.Equal("signer-name", keyPEM.Headers["role"])) + // the default GUN is empty + assert.Check(t, is.Equal("", keyPEM.Headers["gun"])) + // assert encrypted header + assert.Check(t, is.Equal("ENCRYPTED PRIVATE KEY", keyPEM.Type)) + + decryptedKey, err := tufutils.ParsePKCS8ToTufKey(keyPEM.Bytes, []byte(passwd)) + assert.NilError(t, err) + fixturePEM, _ := pem.Decode(privKeyFixture) + assert.Check(t, is.DeepEqual(fixturePEM.Bytes, decryptedKey.Private())) +} + +func TestLoadKeyTooPermissive(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") + for keyID, keyBytes := range testKeys { + t.Run(fmt.Sprintf("load-key-id-%s-too-permissive", keyID), func(t *testing.T) { + testLoadKeyTooPermissive(t, keyBytes) + }) + } +} + +func testLoadKeyTooPermissive(t *testing.T, privKeyFixture []byte) { + privKeyDir, err := ioutil.TempDir("", "key-load-test-") + assert.NilError(t, err) + defer os.RemoveAll(privKeyDir) + privKeyFilepath := filepath.Join(privKeyDir, "privkey477.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0477)) + + keyStorageDir, err := ioutil.TempDir("", "loaded-keys-") + assert.NilError(t, err) + defer os.RemoveAll(keyStorageDir) + + // import the key to our keyStorageDir + _, err 
= getPrivKeyBytesFromPath(privKeyFilepath) + expected := fmt.Sprintf("private key file %s must not be readable or writable by others", privKeyFilepath) + assert.Error(t, err, expected) + + privKeyFilepath = filepath.Join(privKeyDir, "privkey667.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0677)) + + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + expected = fmt.Sprintf("private key file %s must not be readable or writable by others", privKeyFilepath) + assert.Error(t, err, expected) + + privKeyFilepath = filepath.Join(privKeyDir, "privkey777.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0777)) + + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + expected = fmt.Sprintf("private key file %s must not be readable or writable by others", privKeyFilepath) + assert.Error(t, err, expected) + + privKeyFilepath = filepath.Join(privKeyDir, "privkey400.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0400)) + + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + assert.NilError(t, err) + + privKeyFilepath = filepath.Join(privKeyDir, "privkey600.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0600)) + + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + assert.NilError(t, err) +} + +var pubKeyFixture = []byte(`-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUIH9AYtrcDFzZrFJBdJZkn21d+4c +H3nzy2O6Q/ct4BjOBKa+WCdRtPo78bA+C/7t81ADQO8Jqaj59W50rwoqDQ== +-----END PUBLIC KEY-----`) + +func TestLoadPubKeyFailure(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") + pubKeyDir, err := ioutil.TempDir("", "key-load-test-pubkey-") + assert.NilError(t, err) + defer os.RemoveAll(pubKeyDir) + pubKeyFilepath := filepath.Join(pubKeyDir, "pubkey.pem") + assert.NilError(t, ioutil.WriteFile(pubKeyFilepath, pubKeyFixture, notary.PrivNoExecPerms)) + keyStorageDir, err := ioutil.TempDir("", "loaded-keys-") + assert.NilError(t, err) + defer os.RemoveAll(keyStorageDir) + + passwd := "password" + cannedPasswordRetriever := passphrase.ConstantRetriever(passwd) + keyFileStore, err := storage.NewPrivateKeyFileStorage(keyStorageDir, notary.KeyExtension) + assert.NilError(t, err) + privKeyImporters := []trustmanager.Importer{keyFileStore} + + pubKeyBytes, err := getPrivKeyBytesFromPath(pubKeyFilepath) + assert.NilError(t, err) + + // import the key to our keyStorageDir - it should fail + err = loadPrivKeyBytesToStore(pubKeyBytes, privKeyImporters, pubKeyFilepath, "signer-name", cannedPasswordRetriever) + expected := fmt.Sprintf("provided file %s is not a supported private key - to add a signer's public key use docker trust signer add", pubKeyFilepath) + assert.Error(t, err, expected) +} diff --git a/cli/cli/command/trust/revoke.go b/cli/cli/command/trust/revoke.go new file mode 100644 index 00000000..31437b03 --- /dev/null +++ b/cli/cli/command/trust/revoke.go @@ -0,0 +1,125 @@ +package trust + +import ( + "context" + "fmt" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +type revokeOptions struct { + forceYes bool +} + +func newRevokeCommand(dockerCli command.Cli) *cobra.Command { + options := revokeOptions{} + cmd := &cobra.Command{ + Use: "revoke [OPTIONS] IMAGE[:TAG]", + Short: "Remove trust for an image", + Args: 
cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return revokeTrust(dockerCli, args[0], options) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&options.forceYes, "yes", "y", false, "Do not prompt for confirmation") + return cmd +} + +func revokeTrust(cli command.Cli, remote string, options revokeOptions) error { + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), remote) + if err != nil { + return err + } + tag := imgRefAndAuth.Tag() + if imgRefAndAuth.Tag() == "" && imgRefAndAuth.Digest() != "" { + return fmt.Errorf("cannot use a digest reference for IMAGE:TAG") + } + if imgRefAndAuth.Tag() == "" && !options.forceYes { + deleteRemote := command.PromptForConfirmation(os.Stdin, cli.Out(), fmt.Sprintf("Please confirm you would like to delete all signature data for %s?", remote)) + if !deleteRemote { + fmt.Fprintf(cli.Out(), "\nAborting action.\n") + return nil + } + } + + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull) + if err != nil { + return err + } + + if err = clearChangeList(notaryRepo); err != nil { + return err + } + defer clearChangeList(notaryRepo) + if err := revokeSignature(notaryRepo, tag); err != nil { + return errors.Wrapf(err, "could not remove signature for %s", remote) + } + fmt.Fprintf(cli.Out(), "Successfully deleted signature for %s\n", remote) + return nil +} + +func revokeSignature(notaryRepo client.Repository, tag string) error { + if tag != "" { + // Revoke signature for the specified tag + if err := revokeSingleSig(notaryRepo, tag); err != nil { + return err + } + } else { + // revoke all signatures for the image, as no tag was given + if err := revokeAllSigs(notaryRepo); err != nil { + return err + } + } + + // Publish change + return notaryRepo.Publish() +} + +func revokeSingleSig(notaryRepo client.Repository, tag string) error { + releasedTargetWithRole, err := notaryRepo.GetTargetByName(tag, trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return err + } + releasedTarget := releasedTargetWithRole.Target + return getSignableRolesForTargetAndRemove(releasedTarget, notaryRepo) +} + +func revokeAllSigs(notaryRepo client.Repository) error { + releasedTargetWithRoleList, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return err + } + + if len(releasedTargetWithRoleList) == 0 { + return fmt.Errorf("no signed tags to remove") + } + + // we need all the roles that signed each released target so we can remove from all roles. + for _, releasedTargetWithRole := range releasedTargetWithRoleList { + // remove from all roles + if err := getSignableRolesForTargetAndRemove(releasedTargetWithRole.Target, notaryRepo); err != nil { + return err + } + } + return nil +} + +// get all the roles that signed the target and removes it from all roles. +func getSignableRolesForTargetAndRemove(releasedTarget client.Target, notaryRepo client.Repository) error { + signableRoles, err := trust.GetSignableRoles(notaryRepo, &releasedTarget) + if err != nil { + return err + } + // remove from all roles + return notaryRepo.RemoveTarget(releasedTarget.Name, signableRoles...) 
+} diff --git a/cli/cli/command/trust/revoke_test.go b/cli/cli/command/trust/revoke_test.go new file mode 100644 index 00000000..b0a43876 --- /dev/null +++ b/cli/cli/command/trust/revoke_test.go @@ -0,0 +1,148 @@ +package trust + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustpinning" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustRevokeCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"remote1", "remote2"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "sha-reference", + args: []string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: "invalid-img-reference", + args: []string{"ALPINE"}, + expectedError: "invalid reference format", + }, + { + name: "digest-reference", + args: []string{"ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2"}, + expectedError: "cannot use a digest reference for IMAGE:TAG", + }, + } + for _, tc := range testCases { + cmd := newRevokeCommand( + test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestTrustRevokeCommandOfflineErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetOfflineNotaryRepository) + cmd := newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] \nAborting action.")) + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetOfflineNotaryRepository) + cmd = newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image", "-y"}) + cmd.SetOutput(ioutil.Discard) + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetOfflineNotaryRepository) + cmd = newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "could not remove signature for reg-name.io/image:tag: client is offline") +} + +func TestTrustRevokeCommandUninitializedErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetUninitializedNotaryRepository) + cmd := newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Please confirm you would like to delete all signature data for reg-name.io/image? 
[y/N] \nAborting action.")) + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetUninitializedNotaryRepository) + cmd = newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image", "-y"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "could not remove signature for reg-name.io/image: does not have trust data for") + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetUninitializedNotaryRepository) + cmd = newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "could not remove signature for reg-name.io/image:tag: does not have trust data for") +} + +func TestTrustRevokeCommandEmptyNotaryRepo(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetEmptyTargetsNotaryRepository) + cmd := newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] \nAborting action.")) + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetEmptyTargetsNotaryRepository) + cmd = newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image", "-y"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "could not remove signature for reg-name.io/image: no signed tags to remove") + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetEmptyTargetsNotaryRepository) + cmd = newRevokeCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "could not remove signature for reg-name.io/image:tag: No valid trust data for tag") +} + +func TestNewRevokeTrustAllSigConfirmation(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notary.GetEmptyTargetsNotaryRepository) + cmd := newRevokeCommand(cli) + cmd.SetArgs([]string{"alpine"}) + assert.NilError(t, cmd.Execute()) + + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Please confirm you would like to delete all signature data for alpine? 
[y/N] \nAborting action.")) +} + +func TestGetSignableRolesForTargetAndRemoveError(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever("password"), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + target := client.Target{} + err = getSignableRolesForTargetAndRemove(target, notaryRepo) + assert.Error(t, err, "client is offline") +} diff --git a/cli/cli/command/trust/sign.go b/cli/cli/command/trust/sign.go new file mode 100644 index 00000000..234a057c --- /dev/null +++ b/cli/cli/command/trust/sign.go @@ -0,0 +1,247 @@ +package trust + +import ( + "context" + "fmt" + "io" + "path" + "sort" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +type signOptions struct { + local bool + imageName string +} + +func newSignCommand(dockerCli command.Cli) *cobra.Command { + options := signOptions{} + cmd := &cobra.Command{ + Use: "sign IMAGE:TAG", + Short: "Sign an image", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.imageName = args[0] + return runSignImage(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVar(&options.local, "local", false, "Sign a locally tagged image") + return cmd +} + +func runSignImage(cli command.Cli, options signOptions) error { + imageName := options.imageName + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), imageName) + if err != nil { + return err + } + if err := validateTag(imgRefAndAuth); err != nil { + return err + } + + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull) + if err != nil { + return trust.NotaryError(imgRefAndAuth.Reference().Name(), err) + } + if err = clearChangeList(notaryRepo); err != nil { + return err + } + defer clearChangeList(notaryRepo) + + // get the latest repository metadata so we can figure out which roles to sign + if _, err = notaryRepo.ListTargets(); err != nil { + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + // before initializing a new repo, check that the image exists locally: + if err := checkLocalImageExistence(ctx, cli, imageName); err != nil { + return err + } + + userRole := data.RoleName(path.Join(data.CanonicalTargetsRole.String(), imgRefAndAuth.AuthConfig().Username)) + if err := initNotaryRepoWithSigners(notaryRepo, userRole); err != nil { + return trust.NotaryError(imgRefAndAuth.Reference().Name(), err) + } + + fmt.Fprintf(cli.Out(), "Created signer: %s\n", imgRefAndAuth.AuthConfig().Username) + fmt.Fprintf(cli.Out(), "Finished initializing signed repository for %s\n", imageName) + default: + return trust.NotaryError(imgRefAndAuth.RepoInfo().Name.Name(), err) + } + } + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, imgRefAndAuth.RepoInfo().Index, "push") + target, err := createTarget(notaryRepo, imgRefAndAuth.Tag()) + if err != nil || options.local { + switch err := err.(type) { + // If the error is nil then the local flag is set + case client.ErrNoSuchTarget, client.ErrRepositoryNotExist, nil: + // Fail fast if the image doesn't exist 
locally + if err := checkLocalImageExistence(ctx, cli, imageName); err != nil { + return err + } + fmt.Fprintf(cli.Err(), "Signing and pushing trust data for local image %s, may overwrite remote trust data\n", imageName) + return image.TrustedPush(ctx, cli, imgRefAndAuth.RepoInfo(), imgRefAndAuth.Reference(), *imgRefAndAuth.AuthConfig(), requestPrivilege) + default: + return err + } + } + return signAndPublishToTarget(cli.Out(), imgRefAndAuth, notaryRepo, target) +} + +func signAndPublishToTarget(out io.Writer, imgRefAndAuth trust.ImageRefAndAuth, notaryRepo client.Repository, target client.Target) error { + tag := imgRefAndAuth.Tag() + fmt.Fprintf(out, "Signing and pushing trust metadata for %s\n", imgRefAndAuth.Name()) + existingSigInfo, err := getExistingSignatureInfoForReleasedTag(notaryRepo, tag) + if err != nil { + return err + } + err = image.AddTargetToAllSignableRoles(notaryRepo, &target) + if err == nil { + prettyPrintExistingSignatureInfo(out, existingSigInfo) + err = notaryRepo.Publish() + } + if err != nil { + return errors.Wrapf(err, "failed to sign %s:%s", imgRefAndAuth.RepoInfo().Name.Name(), tag) + } + fmt.Fprintf(out, "Successfully signed %s:%s\n", imgRefAndAuth.RepoInfo().Name.Name(), tag) + return nil +} + +func validateTag(imgRefAndAuth trust.ImageRefAndAuth) error { + tag := imgRefAndAuth.Tag() + if tag == "" { + if imgRefAndAuth.Digest() != "" { + return fmt.Errorf("cannot use a digest reference for IMAGE:TAG") + } + return fmt.Errorf("No tag specified for %s", imgRefAndAuth.Name()) + } + return nil +} + +func checkLocalImageExistence(ctx context.Context, cli command.Cli, imageName string) error { + _, _, err := cli.Client().ImageInspectWithRaw(ctx, imageName) + return err +} + +func createTarget(notaryRepo client.Repository, tag string) (client.Target, error) { + target := &client.Target{} + var err error + if tag == "" { + return *target, fmt.Errorf("No tag specified") + } + target.Name = tag + target.Hashes, target.Length, err = getSignedManifestHashAndSize(notaryRepo, tag) + return *target, err +} + +func getSignedManifestHashAndSize(notaryRepo client.Repository, tag string) (data.Hashes, int64, error) { + targets, err := notaryRepo.GetAllTargetMetadataByName(tag) + if err != nil { + return nil, 0, err + } + return getReleasedTargetHashAndSize(targets, tag) +} + +func getReleasedTargetHashAndSize(targets []client.TargetSignedStruct, tag string) (data.Hashes, int64, error) { + for _, tgt := range targets { + if isReleasedTarget(tgt.Role.Name) { + return tgt.Target.Hashes, tgt.Target.Length, nil + } + } + return nil, 0, client.ErrNoSuchTarget(tag) +} + +func getExistingSignatureInfoForReleasedTag(notaryRepo client.Repository, tag string) (trustTagRow, error) { + targets, err := notaryRepo.GetAllTargetMetadataByName(tag) + if err != nil { + return trustTagRow{}, err + } + releasedTargetInfoList := matchReleasedSignatures(targets) + if len(releasedTargetInfoList) == 0 { + return trustTagRow{}, nil + } + return releasedTargetInfoList[0], nil +} + +func prettyPrintExistingSignatureInfo(out io.Writer, existingSigInfo trustTagRow) { + sort.Strings(existingSigInfo.Signers) + joinedSigners := strings.Join(existingSigInfo.Signers, ", ") + fmt.Fprintf(out, "Existing signatures for tag %s digest %s from:\n%s\n", existingSigInfo.SignedTag, existingSigInfo.Digest, joinedSigners) +} + +func initNotaryRepoWithSigners(notaryRepo client.Repository, newSigner data.RoleName) error { + rootKey, err := getOrGenerateNotaryKey(notaryRepo, data.CanonicalRootRole) + if err != nil { + 
return err + } + rootKeyID := rootKey.ID() + + // Initialize the notary repository with a remotely managed snapshot key + if err := notaryRepo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return err + } + + signerKey, err := getOrGenerateNotaryKey(notaryRepo, newSigner) + if err != nil { + return err + } + if err := addStagedSigner(notaryRepo, newSigner, []data.PublicKey{signerKey}); err != nil { + return errors.Wrapf(err, "could not add signer to repo: %s", strings.TrimPrefix(newSigner.String(), "targets/")) + } + + return notaryRepo.Publish() +} + +// generates an ECDSA key without a GUN for the specified role +func getOrGenerateNotaryKey(notaryRepo client.Repository, role data.RoleName) (data.PublicKey, error) { + // use the signer name in the PEM headers if this is a delegation key + if data.IsDelegation(role) { + role = data.RoleName(notaryRoleToSigner(role)) + } + keys := notaryRepo.GetCryptoService().ListKeys(role) + var err error + var key data.PublicKey + // always select the first key by ID + if len(keys) > 0 { + sort.Strings(keys) + keyID := keys[0] + privKey, _, err := notaryRepo.GetCryptoService().GetPrivateKey(keyID) + if err != nil { + return nil, err + } + key = data.PublicKeyFromPrivate(privKey) + } else { + key, err = notaryRepo.GetCryptoService().Create(role, "", data.ECDSAKey) + if err != nil { + return nil, err + } + } + return key, nil +} + +// stages changes to add a signer with the specified name and key(s). Adds to targets/ and targets/releases +func addStagedSigner(notaryRepo client.Repository, newSigner data.RoleName, signerKeys []data.PublicKey) error { + // create targets/ + if err := notaryRepo.AddDelegationRoleAndKeys(newSigner, signerKeys); err != nil { + return err + } + if err := notaryRepo.AddDelegationPaths(newSigner, []string{""}); err != nil { + return err + } + + // create targets/releases + if err := notaryRepo.AddDelegationRoleAndKeys(trust.ReleasesRole, signerKeys); err != nil { + return err + } + return notaryRepo.AddDelegationPaths(trust.ReleasesRole, []string{""}) +} diff --git a/cli/cli/command/trust/sign_test.go b/cli/cli/command/trust/sign_test.go new file mode 100644 index 00000000..5a7b6b5f --- /dev/null +++ b/cli/cli/command/trust/sign_test.go @@ -0,0 +1,309 @@ +package trust + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "os" + "runtime" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/internal/test" + notaryfake "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/client/changelist" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustpinning" + "github.com/theupdateframework/notary/tuf/data" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +const passwd = "password" + +func TestTrustSignCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"image", "tag"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "sha-reference", + args: []string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: "invalid-img-reference", + args: []string{"ALPINE:latest"}, + 
expectedError: "invalid reference format", + }, + { + name: "no-tag", + args: []string{"reg/img"}, + expectedError: "No tag specified for reg/img", + }, + { + name: "digest-reference", + args: []string{"ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2"}, + expectedError: "cannot use a digest reference for IMAGE:TAG", + }, + } + // change to a tmpdir + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + for _, tc := range testCases { + cmd := newSignCommand( + test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestTrustSignCommandOfflineErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd := newSignCommand(cli) + cmd.SetArgs([]string{"reg-name.io/image:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "client is offline") +} + +func TestGetOrGenerateNotaryKey(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + + // repo is empty, try making a root key + rootKeyA, err := getOrGenerateNotaryKey(notaryRepo, data.CanonicalRootRole) + assert.NilError(t, err) + assert.Check(t, rootKeyA != nil) + + // we should only have one newly generated key + allKeys := notaryRepo.GetCryptoService().ListAllKeys() + assert.Check(t, is.Len(allKeys, 1)) + assert.Check(t, notaryRepo.GetCryptoService().GetKey(rootKeyA.ID()) != nil) + + // this time we should get back the same key if we ask for another root key + rootKeyB, err := getOrGenerateNotaryKey(notaryRepo, data.CanonicalRootRole) + assert.NilError(t, err) + assert.Check(t, rootKeyB != nil) + + // we should only have one newly generated key + allKeys = notaryRepo.GetCryptoService().ListAllKeys() + assert.Check(t, is.Len(allKeys, 1)) + assert.Check(t, notaryRepo.GetCryptoService().GetKey(rootKeyB.ID()) != nil) + + // The key we retrieved should be identical to the one we generated + assert.Check(t, is.DeepEqual(rootKeyA.Public(), rootKeyB.Public())) + + // Now also try with a delegation key + releasesKey, err := getOrGenerateNotaryKey(notaryRepo, data.RoleName(trust.ReleasesRole)) + assert.NilError(t, err) + assert.Check(t, releasesKey != nil) + + // we should now have two keys + allKeys = notaryRepo.GetCryptoService().ListAllKeys() + assert.Check(t, is.Len(allKeys, 2)) + assert.Check(t, notaryRepo.GetCryptoService().GetKey(releasesKey.ID()) != nil) + // The key we retrieved should be identical to the one we generated + assert.Check(t, releasesKey != rootKeyA) + assert.Check(t, releasesKey != rootKeyB) +} + +func TestAddStageSigners(t *testing.T) { + skip.If(t, runtime.GOOS == "windows", "FIXME: not supported currently") + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + + // stage targets/user + userRole := data.RoleName("targets/user") + userKey := data.NewPublicKey("algoA", []byte("a")) + err = addStagedSigner(notaryRepo, userRole, 
[]data.PublicKey{userKey}) + assert.NilError(t, err) + // check the changelist for four total changes: two on targets/releases and two on targets/user + cl, err := notaryRepo.GetChangelist() + assert.NilError(t, err) + changeList := cl.List() + assert.Check(t, is.Len(changeList, 4)) + // ordering is deterministic: + + // first change is for targets/user key creation + newSignerKeyChange := changeList[0] + expectedJSON, err := json.Marshal(&changelist.TUFDelegation{ + NewThreshold: notary.MinThreshold, + AddKeys: data.KeyList([]data.PublicKey{userKey}), + }) + assert.NilError(t, err) + expectedChange := changelist.NewTUFChange( + changelist.ActionCreate, + userRole, + changelist.TypeTargetsDelegation, + "", // no path for delegations + expectedJSON, + ) + assert.Check(t, is.DeepEqual(expectedChange, newSignerKeyChange)) + + // second change is for targets/user getting all paths + newSignerPathsChange := changeList[1] + expectedJSON, err = json.Marshal(&changelist.TUFDelegation{ + AddPaths: []string{""}, + }) + assert.NilError(t, err) + expectedChange = changelist.NewTUFChange( + changelist.ActionCreate, + userRole, + changelist.TypeTargetsDelegation, + "", // no path for delegations + expectedJSON, + ) + assert.Check(t, is.DeepEqual(expectedChange, newSignerPathsChange)) + + releasesRole := data.RoleName("targets/releases") + + // third change is for targets/releases key creation + releasesKeyChange := changeList[2] + expectedJSON, err = json.Marshal(&changelist.TUFDelegation{ + NewThreshold: notary.MinThreshold, + AddKeys: data.KeyList([]data.PublicKey{userKey}), + }) + assert.NilError(t, err) + expectedChange = changelist.NewTUFChange( + changelist.ActionCreate, + releasesRole, + changelist.TypeTargetsDelegation, + "", // no path for delegations + expectedJSON, + ) + assert.Check(t, is.DeepEqual(expectedChange, releasesKeyChange)) + + // fourth change is for targets/releases getting all paths + releasesPathsChange := changeList[3] + expectedJSON, err = json.Marshal(&changelist.TUFDelegation{ + AddPaths: []string{""}, + }) + assert.NilError(t, err) + expectedChange = changelist.NewTUFChange( + changelist.ActionCreate, + releasesRole, + changelist.TypeTargetsDelegation, + "", // no path for delegations + expectedJSON, + ) + assert.Check(t, is.DeepEqual(expectedChange, releasesPathsChange)) +} + +func TestGetSignedManifestHashAndSize(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + target := &client.Target{} + target.Hashes, target.Length, err = getSignedManifestHashAndSize(notaryRepo, "test") + assert.Error(t, err, "client is offline") +} + +func TestGetReleasedTargetHashAndSize(t *testing.T) { + oneReleasedTgt := []client.TargetSignedStruct{} + // make and append 3 non-released signatures on the "unreleased" target + unreleasedTgt := client.Target{Name: "unreleased", Hashes: data.Hashes{notary.SHA256: []byte("hash")}} + for _, unreleasedRole := range []string{"targets/a", "targets/b", "targets/c"} { + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: unreleasedTgt}) + } + _, _, err := getReleasedTargetHashAndSize(oneReleasedTgt, "unreleased") + assert.Error(t, err, "No valid trust data for unreleased") + releasedTgt := client.Target{Name: "released", 
Hashes: data.Hashes{notary.SHA256: []byte("released-hash")}} + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: releasedTgt}) + hash, _, _ := getReleasedTargetHashAndSize(oneReleasedTgt, "unreleased") + assert.Check(t, is.DeepEqual(data.Hashes{notary.SHA256: []byte("released-hash")}, hash)) + +} + +func TestCreateTarget(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + _, err = createTarget(notaryRepo, "") + assert.Error(t, err, "No tag specified") + _, err = createTarget(notaryRepo, "1") + assert.Error(t, err, "client is offline") +} + +func TestGetExistingSignatureInfoForReleasedTag(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + _, err = getExistingSignatureInfoForReleasedTag(notaryRepo, "test") + assert.Error(t, err, "client is offline") +} + +func TestPrettyPrintExistingSignatureInfo(t *testing.T) { + buf := bytes.NewBuffer(nil) + signers := []string{"Bob", "Alice", "Carol"} + existingSig := trustTagRow{trustTagKey{"tagName", "abc123"}, signers} + prettyPrintExistingSignatureInfo(buf, existingSig) + + assert.Check(t, is.Contains(buf.String(), "Existing signatures for tag tagName digest abc123 from:\nAlice, Bob, Carol")) +} + +func TestSignCommandChangeListIsCleanedOnError(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + config.SetDir(tmpDir) + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + cmd := newSignCommand(cli) + cmd.SetArgs([]string{"ubuntu:latest"}) + cmd.SetOutput(ioutil.Discard) + + err = cmd.Execute() + assert.Assert(t, err != nil) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "docker.io/library/ubuntu", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + cl, err := notaryRepo.GetChangelist() + assert.NilError(t, err) + assert.Check(t, is.Equal(len(cl.List()), 0)) +} + +func TestSignCommandLocalFlag(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd := newSignCommand(cli) + cmd.SetArgs([]string{"--local", "reg-name.io/image:red"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "error during connect: Get /images/reg-name.io/image:red/json: unsupported protocol scheme") + +} diff --git a/cli/cli/command/trust/signer.go b/cli/cli/command/trust/signer.go new file mode 100644 index 00000000..807ad6c9 --- /dev/null +++ b/cli/cli/command/trust/signer.go @@ -0,0 +1,22 @@ +package trust + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// newTrustSignerCommand returns a cobra command for `trust signer` subcommands +func newTrustSignerCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "signer", + Short: "Manage entities who can sign Docker images", + Args: cli.NoArgs, + RunE: 
command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newSignerAddCommand(dockerCli), + newSignerRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/trust/signer_add.go b/cli/cli/command/trust/signer_add.go new file mode 100644 index 00000000..304aeec9 --- /dev/null +++ b/cli/cli/command/trust/signer_add.go @@ -0,0 +1,141 @@ +package trust + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "regexp" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/opts" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" + tufutils "github.com/theupdateframework/notary/tuf/utils" +) + +type signerAddOptions struct { + keys opts.ListOpts + signer string + repos []string +} + +func newSignerAddCommand(dockerCli command.Cli) *cobra.Command { + var options signerAddOptions + cmd := &cobra.Command{ + Use: "add OPTIONS NAME REPOSITORY [REPOSITORY...] ", + Short: "Add a signer", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.signer = args[0] + options.repos = args[1:] + return addSigner(dockerCli, options) + }, + } + flags := cmd.Flags() + options.keys = opts.NewListOpts(nil) + flags.Var(&options.keys, "key", "Path to the signer's public key file") + return cmd +} + +var validSignerName = regexp.MustCompile(`^[a-z0-9][a-z0-9\_\-]*$`).MatchString + +func addSigner(cli command.Cli, options signerAddOptions) error { + signerName := options.signer + if !validSignerName(signerName) { + return fmt.Errorf("signer name \"%s\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", signerName) + } + if signerName == "releases" { + return fmt.Errorf("releases is a reserved keyword, please use a different signer name") + } + + if options.keys.Len() == 0 { + return fmt.Errorf("path to a public key must be provided using the `--key` flag") + } + signerPubKeys, err := ingestPublicKeys(options.keys.GetAll()) + if err != nil { + return err + } + var errRepos []string + for _, repoName := range options.repos { + fmt.Fprintf(cli.Out(), "Adding signer \"%s\" to %s...\n", signerName, repoName) + if err := addSignerToRepo(cli, signerName, repoName, signerPubKeys); err != nil { + fmt.Fprintln(cli.Err(), err.Error()+"\n") + errRepos = append(errRepos, repoName) + } else { + fmt.Fprintf(cli.Out(), "Successfully added signer: %s to %s\n\n", signerName, repoName) + } + } + if len(errRepos) > 0 { + return fmt.Errorf("Failed to add signer to: %s", strings.Join(errRepos, ", ")) + } + return nil +} + +func addSignerToRepo(cli command.Cli, signerName string, repoName string, signerPubKeys []data.PublicKey) error { + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), repoName) + if err != nil { + return err + } + + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull) + if err != nil { + return trust.NotaryError(imgRefAndAuth.Reference().Name(), err) + } + + if _, err = notaryRepo.ListTargets(); err != nil { + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + fmt.Fprintf(cli.Out(), "Initializing signed repository for %s...\n", repoName) + if err := getOrGenerateRootKeyAndInitRepo(notaryRepo); err != nil { + return 
trust.NotaryError(repoName, err) + } + fmt.Fprintf(cli.Out(), "Successfully initialized %q\n", repoName) + default: + return trust.NotaryError(repoName, err) + } + } + + newSignerRoleName := data.RoleName(path.Join(data.CanonicalTargetsRole.String(), signerName)) + + if err := addStagedSigner(notaryRepo, newSignerRoleName, signerPubKeys); err != nil { + return errors.Wrapf(err, "could not add signer to repo: %s", strings.TrimPrefix(newSignerRoleName.String(), "targets/")) + } + + return notaryRepo.Publish() +} + +func ingestPublicKeys(pubKeyPaths []string) ([]data.PublicKey, error) { + pubKeys := []data.PublicKey{} + for _, pubKeyPath := range pubKeyPaths { + // Read public key bytes from PEM file, limit to 1 MiB + pubKeyFile, err := os.OpenFile(pubKeyPath, os.O_RDONLY, 0666) + if err != nil { + return nil, errors.Wrap(err, "unable to read public key from file") + } + defer pubKeyFile.Close() + // limit reads to 1 MiB (1<<20 bytes) + l := io.LimitReader(pubKeyFile, 1<<20) + pubKeyBytes, err := ioutil.ReadAll(l) + if err != nil { + return nil, errors.Wrap(err, "unable to read public key from file") + } + + // Parse PEM bytes into type PublicKey + pubKey, err := tufutils.ParsePEMPublicKey(pubKeyBytes) + if err != nil { + return nil, errors.Wrapf(err, "could not parse public key from file: %s", pubKeyPath) + } + pubKeys = append(pubKeys, pubKey) + } + return pubKeys, nil +} diff --git a/cli/cli/command/trust/signer_add_test.go b/cli/cli/command/trust/signer_add_test.go new file mode 100644 index 00000000..64121e29 --- /dev/null +++ b/cli/cli/command/trust/signer_add_test.go @@ -0,0 +1,147 @@ +package trust + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/internal/test" + notaryfake "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustSignerAddErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires at least 2 argument", + }, + { + name: "no-key", + args: []string{"foo", "bar"}, + expectedError: "path to a public key must be provided using the `--key` flag", + }, + { + name: "reserved-releases-signer-add", + args: []string{"releases", "my-image", "--key", "/path/to/key"}, + expectedError: "releases is a reserved keyword, please use a different signer name", + }, + { + name: "disallowed-chars", + args: []string{"ali/ce", "my-image", "--key", "/path/to/key"}, + expectedError: "signer name \"ali/ce\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", + }, + { + name: "no-upper-case", + args: []string{"Alice", "my-image", "--key", "/path/to/key"}, + expectedError: "signer name \"Alice\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", + }, + { + name: "start-with-letter", + args: []string{"_alice", "my-image", "--key", "/path/to/key"}, + expectedError: "signer name \"_alice\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", + }, + } + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd := newSignerAddCommand(cli) + 
cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSignerAddCommandNoTargetsKey(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + tmpfile, err := ioutil.TempFile("", "pemfile") + assert.NilError(t, err) + defer os.Remove(tmpfile.Name()) + + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd := newSignerAddCommand(cli) + cmd.SetArgs([]string{"--key", tmpfile.Name(), "alice", "alpine", "linuxkit/alpine"}) + + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), fmt.Sprintf("could not parse public key from file: %s: no valid public key found", tmpfile.Name())) +} + +func TestSignerAddCommandBadKeyPath(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd := newSignerAddCommand(cli) + cmd.SetArgs([]string{"--key", "/path/to/key.pem", "alice", "alpine"}) + + cmd.SetOutput(ioutil.Discard) + expectedError := "unable to read public key from file: open /path/to/key.pem: no such file or directory" + if runtime.GOOS == "windows" { + expectedError = "unable to read public key from file: open /path/to/key.pem: The system cannot find the path specified." + } + assert.Error(t, cmd.Execute(), expectedError) +} + +func TestSignerAddCommandInvalidRepoName(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + pubKeyDir, err := ioutil.TempDir("", "key-load-test-pubkey-") + assert.NilError(t, err) + defer os.RemoveAll(pubKeyDir) + pubKeyFilepath := filepath.Join(pubKeyDir, "pubkey.pem") + assert.NilError(t, ioutil.WriteFile(pubKeyFilepath, pubKeyFixture, notary.PrivNoExecPerms)) + + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetUninitializedNotaryRepository) + cmd := newSignerAddCommand(cli) + imageName := "870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd" + cmd.SetArgs([]string{"--key", pubKeyFilepath, "alice", imageName}) + + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), "Failed to add signer to: 870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd") + expectedErr := fmt.Sprintf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings\n\n", imageName) + + assert.Check(t, is.Equal(expectedErr, cli.ErrBuffer().String())) +} + +func TestIngestPublicKeys(t *testing.T) { + // Call with a bad path + _, err := ingestPublicKeys([]string{"foo", "bar"}) + expectedError := "unable to read public key from file: open foo: no such file or directory" + if runtime.GOOS == "windows" { + expectedError = "unable to read public key from file: open foo: The system cannot find the file specified." 
+ } + assert.Error(t, err, expectedError) + // Call with real file path + tmpfile, err := ioutil.TempFile("", "pemfile") + assert.NilError(t, err) + defer os.Remove(tmpfile.Name()) + _, err = ingestPublicKeys([]string{tmpfile.Name()}) + assert.Error(t, err, fmt.Sprintf("could not parse public key from file: %s: no valid public key found", tmpfile.Name())) +} diff --git a/cli/cli/command/trust/signer_remove.go b/cli/cli/command/trust/signer_remove.go new file mode 100644 index 00000000..d4e8eec4 --- /dev/null +++ b/cli/cli/command/trust/signer_remove.go @@ -0,0 +1,142 @@ +package trust + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +type signerRemoveOptions struct { + signer string + repos []string + forceYes bool +} + +func newSignerRemoveCommand(dockerCli command.Cli) *cobra.Command { + options := signerRemoveOptions{} + cmd := &cobra.Command{ + Use: "remove [OPTIONS] NAME REPOSITORY [REPOSITORY...]", + Short: "Remove a signer", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.signer = args[0] + options.repos = args[1:] + return removeSigner(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&options.forceYes, "force", "f", false, "Do not prompt for confirmation before removing the most recent signer") + return cmd +} + +func removeSigner(cli command.Cli, options signerRemoveOptions) error { + var errRepos []string + for _, repo := range options.repos { + fmt.Fprintf(cli.Out(), "Removing signer \"%s\" from %s...\n", options.signer, repo) + if _, err := removeSingleSigner(cli, repo, options.signer, options.forceYes); err != nil { + fmt.Fprintln(cli.Err(), err.Error()+"\n") + errRepos = append(errRepos, repo) + } + } + if len(errRepos) > 0 { + return fmt.Errorf("Error removing signer from: %s", strings.Join(errRepos, ", ")) + } + return nil +} + +func isLastSignerForReleases(roleWithSig data.Role, allRoles []client.RoleWithSignatures) (bool, error) { + var releasesRoleWithSigs client.RoleWithSignatures + for _, role := range allRoles { + if role.Name == releasesRoleTUFName { + releasesRoleWithSigs = role + break + } + } + counter := len(releasesRoleWithSigs.Signatures) + if counter == 0 { + return false, fmt.Errorf("All signed tags are currently revoked, use docker trust sign to fix") + } + for _, signature := range releasesRoleWithSigs.Signatures { + for _, key := range roleWithSig.KeyIDs { + if signature.KeyID == key { + counter-- + } + } + } + return counter < releasesRoleWithSigs.Threshold, nil +} + +// removeSingleSigner attempts to remove a single signer and returns whether signer removal happened. +// The signer not being removed doesn't necessarily raise an error e.g. user choosing "No" when prompted for confirmation. 
+func removeSingleSigner(cli command.Cli, repoName, signerName string, forceYes bool) (bool, error) { + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), repoName) + if err != nil { + return false, err + } + + signerDelegation := data.RoleName("targets/" + signerName) + if signerDelegation == releasesRoleTUFName { + return false, fmt.Errorf("releases is a reserved keyword and cannot be removed") + } + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull) + if err != nil { + return false, trust.NotaryError(imgRefAndAuth.Reference().Name(), err) + } + delegationRoles, err := notaryRepo.GetDelegationRoles() + if err != nil { + return false, errors.Wrapf(err, "error retrieving signers for %s", repoName) + } + var role data.Role + for _, delRole := range delegationRoles { + if delRole.Name == signerDelegation { + role = delRole + break + } + } + if role.Name == "" { + return false, fmt.Errorf("No signer %s for repository %s", signerName, repoName) + } + allRoles, err := notaryRepo.ListRoles() + if err != nil { + return false, err + } + if ok, err := isLastSignerForReleases(role, allRoles); ok && !forceYes { + removeSigner := command.PromptForConfirmation(os.Stdin, cli.Out(), fmt.Sprintf("The signer \"%s\" signed the last released version of %s. "+ + "Removing this signer will make %s unpullable. "+ + "Are you sure you want to continue?", + signerName, repoName, repoName, + )) + + if !removeSigner { + fmt.Fprintf(cli.Out(), "\nAborting action.\n") + return false, nil + } + } else if err != nil { + return false, err + } + if err = notaryRepo.RemoveDelegationKeys(releasesRoleTUFName, role.KeyIDs); err != nil { + return false, err + } + if err = notaryRepo.RemoveDelegationRole(signerDelegation); err != nil { + return false, err + } + + if err = notaryRepo.Publish(); err != nil { + return false, err + } + + fmt.Fprintf(cli.Out(), "Successfully removed %s from %s\n\n", signerName, repoName) + + return true, nil +} diff --git a/cli/cli/command/trust/signer_remove_test.go b/cli/cli/command/trust/signer_remove_test.go new file mode 100644 index 00000000..0feec874 --- /dev/null +++ b/cli/cli/command/trust/signer_remove_test.go @@ -0,0 +1,128 @@ +package trust + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + notaryfake "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustSignerRemoveErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args-0", + expectedError: "requires at least 2 arguments", + }, + { + name: "not-enough-args-1", + args: []string{"user"}, + expectedError: "requires at least 2 arguments", + }, + } + for _, tc := range testCases { + cmd := newSignerRemoveCommand( + test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } + testCasesWithOutput := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-an-image", + args: []string{"user", "notanimage"}, + expectedError: "error retrieving signers for notanimage", + }, + { + name: "sha-reference", + args: []string{"user", "870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: 
"invalid-img-reference", + args: []string{"user", "ALPINE"}, + expectedError: "invalid reference format", + }, + } + for _, tc := range testCasesWithOutput { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd := newSignerRemoveCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + cmd.Execute() + assert.Check(t, is.Contains(cli.ErrBuffer().String(), tc.expectedError)) + } + +} + +func TestRemoveSingleSigner(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + removed, err := removeSingleSigner(cli, "signed-repo", "test", true) + assert.Error(t, err, "No signer test for repository signed-repo") + assert.Equal(t, removed, false, "No signer should be removed") + + removed, err = removeSingleSigner(cli, "signed-repo", "releases", true) + assert.Error(t, err, "releases is a reserved keyword and cannot be removed") + assert.Equal(t, removed, false, "No signer should be removed") +} + +func TestRemoveMultipleSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + err := removeSigner(cli, signerRemoveOptions{signer: "test", repos: []string{"signed-repo", "signed-repo"}, forceYes: true}) + assert.Error(t, err, "Error removing signer from: signed-repo, signed-repo") + assert.Check(t, is.Contains(cli.ErrBuffer().String(), + "No signer test for repository signed-repo")) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Removing signer \"test\" from signed-repo...\n")) +} +func TestRemoveLastSignerWarning(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + + err := removeSigner(cli, signerRemoveOptions{signer: "alice", repos: []string{"signed-repo"}, forceYes: false}) + assert.NilError(t, err) + assert.Check(t, is.Contains(cli.OutBuffer().String(), + "The signer \"alice\" signed the last released version of signed-repo. "+ + "Removing this signer will make signed-repo unpullable. "+ + "Are you sure you want to continue? 
[y/N]")) +} + +func TestIsLastSignerForReleases(t *testing.T) { + role := data.Role{} + releaserole := client.RoleWithSignatures{} + releaserole.Name = releasesRoleTUFName + releaserole.Threshold = 1 + allrole := []client.RoleWithSignatures{releaserole} + lastsigner, _ := isLastSignerForReleases(role, allrole) + assert.Check(t, is.Equal(false, lastsigner)) + + role.KeyIDs = []string{"deadbeef"} + sig := data.Signature{} + sig.KeyID = "deadbeef" + releaserole.Signatures = []data.Signature{sig} + releaserole.Threshold = 1 + allrole = []client.RoleWithSignatures{releaserole} + lastsigner, _ = isLastSignerForReleases(role, allrole) + assert.Check(t, is.Equal(true, lastsigner)) + + sig.KeyID = "8badf00d" + releaserole.Signatures = []data.Signature{sig} + releaserole.Threshold = 1 + allrole = []client.RoleWithSignatures{releaserole} + lastsigner, _ = isLastSignerForReleases(role, allrole) + assert.Check(t, is.Equal(false, lastsigner)) +} diff --git a/cli/cli/command/trust/testdata/trust-inspect-empty-repo.golden b/cli/cli/command/trust/testdata/trust-inspect-empty-repo.golden new file mode 100644 index 00000000..ae6fd9c8 --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-empty-repo.golden @@ -0,0 +1,25 @@ +[ + { + "Name": "reg/img:unsigned-tag", + "SignedTags": [], + "Signers": [], + "AdminstrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-full-repo-no-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-full-repo-no-signers.golden new file mode 100644 index 00000000..cda9b40e --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-full-repo-no-signers.golden @@ -0,0 +1,33 @@ +[ + { + "Name": "signed-repo", + "SignedTags": [ + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + } + ], + "Signers": [], + "AdminstrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-full-repo-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-full-repo-with-signers.golden new file mode 100644 index 00000000..496b312b --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-full-repo-with-signers.golden @@ -0,0 +1,65 @@ +[ + { + "Name": "signed-repo", + "SignedTags": [ + { + "SignedTag": "blue", + "Digest": "626c75652d646967657374", + "Signers": [ + "alice" + ] + }, + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + }, + { + "SignedTag": "red", + "Digest": "7265642d646967657374", + "Signers": [ + "alice", + "bob" + ] + } + ], + "Signers": [ + { + "Name": "bob", + "Keys": [ + { + "ID": "B" + } + ] + }, + { + "Name": "alice", + "Keys": [ + { + "ID": "A" + } + ] + } + ], + "AdminstrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-multiple-repos-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-multiple-repos-with-signers.golden new file mode 100644 index 00000000..fd87979e --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-multiple-repos-with-signers.golden @@ -0,0 +1,128 @@ +[ + { + "Name": "signed-repo", + 
"SignedTags": [ + { + "SignedTag": "blue", + "Digest": "626c75652d646967657374", + "Signers": [ + "alice" + ] + }, + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + }, + { + "SignedTag": "red", + "Digest": "7265642d646967657374", + "Signers": [ + "alice", + "bob" + ] + } + ], + "Signers": [ + { + "Name": "bob", + "Keys": [ + { + "ID": "B" + } + ] + }, + { + "Name": "alice", + "Keys": [ + { + "ID": "A" + } + ] + } + ], + "AdminstrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + }, + { + "Name": "signed-repo", + "SignedTags": [ + { + "SignedTag": "blue", + "Digest": "626c75652d646967657374", + "Signers": [ + "alice" + ] + }, + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + }, + { + "SignedTag": "red", + "Digest": "7265642d646967657374", + "Signers": [ + "alice", + "bob" + ] + } + ], + "Signers": [ + { + "Name": "bob", + "Keys": [ + { + "ID": "B" + } + ] + }, + { + "Name": "alice", + "Keys": [ + { + "ID": "A" + } + ] + } + ], + "AdminstrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-one-tag-no-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-one-tag-no-signers.golden new file mode 100644 index 00000000..b1745d5c --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-one-tag-no-signers.golden @@ -0,0 +1,33 @@ +[ + { + "Name": "signed-repo:green", + "SignedTags": [ + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + } + ], + "Signers": [], + "AdminstrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-no-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-no-signers.golden new file mode 100644 index 00000000..9f3ada08 --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-no-signers.golden @@ -0,0 +1,10 @@ + +Signatures for signed-repo + +SIGNED TAG DIGEST SIGNERS +green 677265656e2d646967657374 (Repo Admin) + +Administrative keys for signed-repo + + Repository Key: targetsID + Root Key: rootID diff --git a/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-with-signers.golden new file mode 100644 index 00000000..49b1efd2 --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-with-signers.golden @@ -0,0 +1,18 @@ + +Signatures for signed-repo + +SIGNED TAG DIGEST SIGNERS +blue 626c75652d646967657374 alice +green 677265656e2d646967657374 (Repo Admin) +red 7265642d646967657374 alice, bob + +List of signers and their keys for signed-repo + +SIGNER KEYS +alice A +bob B + +Administrative keys for signed-repo + + Repository Key: targetsID + Root Key: rootID diff --git a/cli/cli/command/trust/testdata/trust-inspect-pretty-one-tag-no-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-pretty-one-tag-no-signers.golden new file mode 100644 index 00000000..b5857289 --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-pretty-one-tag-no-signers.golden @@ 
-0,0 +1,10 @@ + +Signatures for signed-repo:green + +SIGNED TAG DIGEST SIGNERS +green 677265656e2d646967657374 (Repo Admin) + +Administrative keys for signed-repo:green + + Repository Key: targetsID + Root Key: rootID diff --git a/cli/cli/command/trust/testdata/trust-inspect-pretty-unsigned-tag-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-pretty-unsigned-tag-with-signers.golden new file mode 100644 index 00000000..302a6b5e --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-pretty-unsigned-tag-with-signers.golden @@ -0,0 +1,14 @@ + +No signatures for signed-repo:unsigned + + +List of signers and their keys for signed-repo:unsigned + +SIGNER KEYS +alice A +bob B + +Administrative keys for signed-repo:unsigned + + Repository Key: targetsID + Root Key: rootID diff --git a/cli/cli/command/trust/testdata/trust-inspect-uninitialized.golden b/cli/cli/command/trust/testdata/trust-inspect-uninitialized.golden new file mode 100644 index 00000000..fe51488c --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-uninitialized.golden @@ -0,0 +1 @@ +[] diff --git a/cli/cli/command/trust/testdata/trust-inspect-unsigned-tag-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-unsigned-tag-with-signers.golden new file mode 100644 index 00000000..8c0a84eb --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-unsigned-tag-with-signers.golden @@ -0,0 +1,42 @@ +[ + { + "Name": "signed-repo:unsigned", + "SignedTags": [], + "Signers": [ + { + "Name": "bob", + "Keys": [ + { + "ID": "B" + } + ] + }, + { + "Name": "alice", + "Keys": [ + { + "ID": "A" + } + ] + } + ], + "AdminstrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/utils.go b/cli/cli/command/utils.go new file mode 100644 index 00000000..dc543e7d --- /dev/null +++ b/cli/cli/command/utils.go @@ -0,0 +1,127 @@ +package command + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/system" + "github.com/spf13/pflag" +) + +// CopyToFile writes the content of the reader to the specified file +func CopyToFile(outfile string, r io.Reader) error { + // We use sequential file access here to avoid depleting the standby list + // on Windows. On Linux, this is a call directly to ioutil.TempFile + tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// capitalizeFirst capitalizes the first character of string +func capitalizeFirst(s string) string { + switch l := len(s); l { + case 0: + return s + case 1: + return strings.ToLower(s) + default: + return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) + } +} + +// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. +func PrettyPrint(i interface{}) string { + switch t := i.(type) { + case nil: + return "None" + case string: + return capitalizeFirst(t) + default: + return capitalizeFirst(fmt.Sprintf("%s", t)) + } +} + +// PromptForConfirmation requests and checks confirmation from user. 
+// This will display the provided message followed by ' [y/N] '. If +// the user inputs 'y' or 'Y' it returns true, otherwise false. If no +// message is provided, "Are you sure you want to proceed? [y/N] " +// will be used instead. +func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool { + if message == "" { + message = "Are you sure you want to proceed?" + } + message += " [y/N] " + + fmt.Fprintf(outs, message) + + // On Windows, force the use of the regular OS stdin stream. + if runtime.GOOS == "windows" { + ins = NewInStream(os.Stdin) + } + + reader := bufio.NewReader(ins) + answer, _, _ := reader.ReadLine() + return strings.ToLower(string(answer)) == "y" +} + +// PruneFilters returns consolidated prune filters obtained from config.json and the CLI +func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args { + if dockerCli.ConfigFile() == nil { + return pruneFilters + } + for _, f := range dockerCli.ConfigFile().PruneFilters { + parts := strings.SplitN(f, "=", 2) + if len(parts) != 2 { + continue + } + if parts[0] == "label" { + // The CLI label filter supersedes the one from config.json. + // If the CLI label filter conflicts with config.json, + // skip adding the label! filter from config.json. + if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) { + continue + } + } else if parts[0] == "label!" { + // The CLI label! filter supersedes the one from config.json. + // If the CLI label! filter conflicts with config.json, + // skip adding the label filter from config.json. + if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) { + continue + } + } + pruneFilters.Add(parts[0], parts[1]) + } + + return pruneFilters +} + +// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later. +func AddPlatformFlag(flags *pflag.FlagSet, target *string) { + flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable") + flags.SetAnnotation("platform", "version", []string{"1.32"}) + flags.SetAnnotation("platform", "experimental", nil) +} diff --git a/cli/cli/command/volume/client_test.go b/cli/cli/command/volume/client_test.go new file mode 100644 index 00000000..644cad60 --- /dev/null +++ b/cli/cli/command/volume/client_test.go @@ -0,0 +1,54 @@ +package volume + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + volumeCreateFunc func(volumetypes.VolumeCreateBody) (types.Volume, error) + volumeInspectFunc func(volumeID string) (types.Volume, error) + volumeListFunc func(filter filters.Args) (volumetypes.VolumeListOKBody, error) + volumeRemoveFunc func(volumeID string, force bool) error + volumePruneFunc func(filter filters.Args) (types.VolumesPruneReport, error) +} + +func (c *fakeClient) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { + if c.volumeCreateFunc != nil { + return c.volumeCreateFunc(options) + } + return types.Volume{}, nil +} + +func (c *fakeClient) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + if c.volumeInspectFunc != nil { + return c.volumeInspectFunc(volumeID) + } + return types.Volume{}, nil +} + +func (c *fakeClient) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + if c.volumeListFunc != nil { + return c.volumeListFunc(filter) + } + return 
volumetypes.VolumeListOKBody{}, nil +} + +func (c *fakeClient) VolumesPrune(ctx context.Context, filter filters.Args) (types.VolumesPruneReport, error) { + if c.volumePruneFunc != nil { + return c.volumePruneFunc(filter) + } + return types.VolumesPruneReport{}, nil +} + +func (c *fakeClient) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + if c.volumeRemoveFunc != nil { + return c.volumeRemoveFunc(volumeID, force) + } + return nil +} diff --git a/cli/cli/command/volume/cmd.go b/cli/cli/command/volume/cmd.go new file mode 100644 index 00000000..b2a552ae --- /dev/null +++ b/cli/cli/command/volume/cmd.go @@ -0,0 +1,26 @@ +package volume + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewVolumeCommand returns a cobra command for `volume` subcommands +func NewVolumeCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "volume COMMAND", + Short: "Manage volumes", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{"version": "1.21"}, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/volume/create.go b/cli/cli/command/volume/create.go new file mode 100644 index 00000000..b25ed155 --- /dev/null +++ b/cli/cli/command/volume/create.go @@ -0,0 +1,69 @@ +package volume + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + options := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(opts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] [VOLUME]", + Short: "Create a volume", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + if options.name != "" { + return errors.Errorf("Conflicting options: either specify --name or provide positional arg, not both\n") + } + options.name = args[0] + } + return runCreate(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.StringVarP(&options.driver, "driver", "d", "local", "Specify volume driver name") + flags.StringVar(&options.name, "name", "", "Specify volume name") + flags.Lookup("name").Hidden = true + flags.VarP(&options.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&options.labels, "label", "Set metadata for a volume") + + return cmd +} + +func runCreate(dockerCli command.Cli, options createOptions) error { + client := dockerCli.Client() + + volReq := volumetypes.VolumeCreateBody{ + Driver: options.driver, + DriverOpts: options.driverOpts.GetAll(), + Name: options.name, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + } + + vol, err := client.VolumeCreate(context.Background(), volReq) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) + return nil +} diff --git a/cli/cli/command/volume/create_test.go b/cli/cli/command/volume/create_test.go new file mode 100644 index 00000000..a0646ed1 --- /dev/null +++ b/cli/cli/command/volume/create_test.go @@ -0,0 +1,126 @@ +package volume 
+ +import ( + "io/ioutil" + "reflect" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestVolumeCreateErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + volumeCreateFunc func(volumetypes.VolumeCreateBody) (types.Volume, error) + expectedError string + }{ + { + args: []string{"volumeName"}, + flags: map[string]string{ + "name": "volumeName", + }, + expectedError: "Conflicting options: either specify --name or provide positional arg, not both", + }, + { + args: []string{"too", "many"}, + expectedError: "requires at most 1 argument", + }, + { + volumeCreateFunc: func(createBody volumetypes.VolumeCreateBody) (types.Volume, error) { + return types.Volume{}, errors.Errorf("error creating volume") + }, + expectedError: "error creating volume", + }, + } + for _, tc := range testCases { + cmd := newCreateCommand( + test.NewFakeCli(&fakeClient{ + volumeCreateFunc: tc.volumeCreateFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestVolumeCreateWithName(t *testing.T) { + name := "foo" + cli := test.NewFakeCli(&fakeClient{ + volumeCreateFunc: func(body volumetypes.VolumeCreateBody) (types.Volume, error) { + if body.Name != name { + return types.Volume{}, errors.Errorf("expected name %q, got %q", name, body.Name) + } + return types.Volume{ + Name: body.Name, + }, nil + }, + }) + + buf := cli.OutBuffer() + + // Test by flags + cmd := newCreateCommand(cli) + cmd.Flags().Set("name", name) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(name, strings.TrimSpace(buf.String()))) + + // Then by args + buf.Reset() + cmd = newCreateCommand(cli) + cmd.SetArgs([]string{name}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(name, strings.TrimSpace(buf.String()))) +} + +func TestVolumeCreateWithFlags(t *testing.T) { + expectedDriver := "foo" + expectedOpts := map[string]string{ + "bar": "1", + "baz": "baz", + } + expectedLabels := map[string]string{ + "lbl1": "v1", + "lbl2": "v2", + } + name := "banana" + + cli := test.NewFakeCli(&fakeClient{ + volumeCreateFunc: func(body volumetypes.VolumeCreateBody) (types.Volume, error) { + if body.Name != "" { + return types.Volume{}, errors.Errorf("expected empty name, got %q", body.Name) + } + if body.Driver != expectedDriver { + return types.Volume{}, errors.Errorf("expected driver %q, got %q", expectedDriver, body.Driver) + } + if !reflect.DeepEqual(body.DriverOpts, expectedOpts) { + return types.Volume{}, errors.Errorf("expected drivers opts %v, got %v", expectedOpts, body.DriverOpts) + } + if !reflect.DeepEqual(body.Labels, expectedLabels) { + return types.Volume{}, errors.Errorf("expected labels %v, got %v", expectedLabels, body.Labels) + } + return types.Volume{ + Name: name, + }, nil + }, + }) + + cmd := newCreateCommand(cli) + cmd.Flags().Set("driver", "foo") + cmd.Flags().Set("opt", "bar=1") + cmd.Flags().Set("opt", "baz=baz") + cmd.Flags().Set("label", "lbl1=v1") + cmd.Flags().Set("label", "lbl2=v2") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(name, strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/volume/inspect.go b/cli/cli/command/volume/inspect.go new file mode 100644 index 
index 00000000..52cfb0f0
--- /dev/null
+++ b/cli/cli/command/volume/inspect.go
@@ -0,0 +1,46 @@
+package volume
+
+import (
+  "context"
+
+  "github.com/docker/cli/cli"
+  "github.com/docker/cli/cli/command"
+  "github.com/docker/cli/cli/command/inspect"
+  "github.com/spf13/cobra"
+)
+
+type inspectOptions struct {
+  format string
+  names []string
+}
+
+func newInspectCommand(dockerCli command.Cli) *cobra.Command {
+  var opts inspectOptions
+
+  cmd := &cobra.Command{
+    Use: "inspect [OPTIONS] VOLUME [VOLUME...]",
+    Short: "Display detailed information on one or more volumes",
+    Args: cli.RequiresMinArgs(1),
+    RunE: func(cmd *cobra.Command, args []string) error {
+      opts.names = args
+      return runInspect(dockerCli, opts)
+    },
+  }
+
+  cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
+
+  return cmd
+}
+
+func runInspect(dockerCli command.Cli, opts inspectOptions) error {
+  client := dockerCli.Client()
+
+  ctx := context.Background()
+
+  getVolFunc := func(name string) (interface{}, []byte, error) {
+    i, err := client.VolumeInspect(ctx, name)
+    return i, nil, err
+  }
+
+  return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc)
+}
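The tests that follow exercise runInspect through a fake client; as a rough illustration of what the command does against a real daemon, the sketch below fetches a volume with VolumeInspect and renders it through a Go template, similar in spirit to the --format flag. It assumes github.com/docker/docker/client and a reachable daemon; the volume name and template string are illustrative, and the real command goes through the cli/command/inspect helper rather than text/template.

package main

import (
  "context"
  "log"
  "os"
  "text/template"

  "github.com/docker/docker/client"
)

func main() {
  apiClient, err := client.NewClientWithOpts(client.FromEnv)
  if err != nil {
    log.Fatal(err)
  }
  // VolumeInspect returns the same types.Volume value the command renders.
  vol, err := apiClient.VolumeInspect(context.Background(), "example-volume") // hypothetical name
  if err != nil {
    log.Fatal(err)
  }
  // A plain Go template is close enough to show the idea behind --format.
  tmpl := template.Must(template.New("volume").Parse("{{.Name}}: {{.Driver}}\n"))
  if err := tmpl.Execute(os.Stdout, vol); err != nil {
    log.Fatal(err)
  }
}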
"multiple-volume-with-labels", + args: []string{"foo", "bar"}, + volumeInspectFunc: func(volumeID string) (types.Volume, error) { + return *Volume(VolumeName(volumeID), VolumeLabels(map[string]string{ + "foo": "bar", + })), nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + volumeInspectFunc: tc.volumeInspectFunc, + }) + cmd := newInspectCommand(cli) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-inspect-without-format.%s.golden", tc.name)) + } +} + +func TestVolumeInspectWithFormat(t *testing.T) { + volumeInspectFunc := func(volumeID string) (types.Volume, error) { + return *Volume(VolumeLabels(map[string]string{ + "foo": "bar", + })), nil + } + testCases := []struct { + name string + format string + args []string + volumeInspectFunc func(volumeID string) (types.Volume, error) + }{ + { + name: "simple-template", + format: "{{.Name}}", + args: []string{"foo"}, + volumeInspectFunc: volumeInspectFunc, + }, + { + name: "json-template", + format: "{{json .Labels}}", + args: []string{"foo"}, + volumeInspectFunc: volumeInspectFunc, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + volumeInspectFunc: tc.volumeInspectFunc, + }) + cmd := newInspectCommand(cli) + cmd.SetArgs(tc.args) + cmd.Flags().Set("format", tc.format) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-inspect-with-format.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/volume/list.go b/cli/cli/command/volume/list.go new file mode 100644 index 00000000..55875e01 --- /dev/null +++ b/cli/cli/command/volume/list.go @@ -0,0 +1,73 @@ +package volume + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type byVolumeName []*types.Volume + +func (r byVolumeName) Len() int { return len(r) } +func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byVolumeName) Less(i, j int) bool { + return r[i].Name < r[j].Name +} + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List volumes", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display volume names") + flags.StringVar(&options.format, "format", "", "Pretty-print volumes using a Go template") + flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 
'dangling=true')") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + volumes, err := client.VolumeList(context.Background(), options.filter.Value()) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().VolumesFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byVolumeName(volumes.Volumes)) + + volumeCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewVolumeFormat(format, options.quiet), + } + return formatter.VolumeWrite(volumeCtx, volumes.Volumes) +} diff --git a/cli/cli/command/volume/list_test.go b/cli/cli/command/volume/list_test.go new file mode 100644 index 00000000..62d7d956 --- /dev/null +++ b/cli/cli/command/volume/list_test.go @@ -0,0 +1,111 @@ +package volume + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestVolumeListErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + volumeListFunc func(filter filters.Args) (volumetypes.VolumeListOKBody, error) + expectedError string + }{ + { + args: []string{"foo"}, + expectedError: "accepts no argument", + }, + { + volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) { + return volumetypes.VolumeListOKBody{}, errors.Errorf("error listing volumes") + }, + expectedError: "error listing volumes", + }, + } + for _, tc := range testCases { + cmd := newListCommand( + test.NewFakeCli(&fakeClient{ + volumeListFunc: tc.volumeListFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestVolumeListWithoutFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) { + return volumetypes.VolumeListOKBody{ + Volumes: []*types.Volume{ + Volume(), + Volume(VolumeName("foo"), VolumeDriver("bar")), + Volume(VolumeName("baz"), VolumeLabels(map[string]string{ + "foo": "bar", + })), + }, + }, nil + }, + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "volume-list-without-format.golden") +} + +func TestVolumeListWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) { + return volumetypes.VolumeListOKBody{ + Volumes: []*types.Volume{ + Volume(), + Volume(VolumeName("foo"), VolumeDriver("bar")), + Volume(VolumeName("baz"), VolumeLabels(map[string]string{ + "foo": "bar", + })), + }, + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + VolumesFormat: "{{ .Name }} {{ .Driver }} {{ .Labels }}", + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "volume-list-with-config-format.golden") +} + +func TestVolumeListWithFormat(t *testing.T) { + cli := 
+  cli := test.NewFakeCli(&fakeClient{
+    volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) {
+      return volumetypes.VolumeListOKBody{
+        Volumes: []*types.Volume{
+          Volume(),
+          Volume(VolumeName("foo"), VolumeDriver("bar")),
+          Volume(VolumeName("baz"), VolumeLabels(map[string]string{
+            "foo": "bar",
+          })),
+        },
+      }, nil
+    },
+  })
+  cmd := newListCommand(cli)
+  cmd.Flags().Set("format", "{{ .Name }} {{ .Driver }} {{ .Labels }}")
+  assert.NilError(t, cmd.Execute())
+  golden.Assert(t, cli.OutBuffer().String(), "volume-list-with-format.golden")
+}
diff --git a/cli/cli/command/volume/prune.go b/cli/cli/command/volume/prune.go
new file mode 100644
index 00000000..012c549f
--- /dev/null
+++ b/cli/cli/command/volume/prune.go
@@ -0,0 +1,78 @@
+package volume
+
+import (
+  "context"
+  "fmt"
+
+  "github.com/docker/cli/cli"
+  "github.com/docker/cli/cli/command"
+  "github.com/docker/cli/opts"
+  units "github.com/docker/go-units"
+  "github.com/spf13/cobra"
+)
+
+type pruneOptions struct {
+  force bool
+  filter opts.FilterOpt
+}
+
+// NewPruneCommand returns a new cobra prune command for volumes
+func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
+  options := pruneOptions{filter: opts.NewFilterOpt()}
+
+  cmd := &cobra.Command{
+    Use: "prune [OPTIONS]",
+    Short: "Remove all unused local volumes",
+    Args: cli.NoArgs,
+    RunE: func(cmd *cobra.Command, args []string) error {
+      spaceReclaimed, output, err := runPrune(dockerCli, options)
+      if err != nil {
+        return err
+      }
+      if output != "" {
+        fmt.Fprintln(dockerCli.Out(), output)
+      }
+      fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
+      return nil
+    },
+    Annotations: map[string]string{"version": "1.25"},
+  }
+
+  flags := cmd.Flags()
+  flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
+  flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'label=