From d5f625071218e218a066ca6cf28ec1c292338447 Mon Sep 17 00:00:00 2001 From: Arnaud Rebillout Date: Mon, 4 Nov 2019 09:06:27 +0000 Subject: [PATCH] Import docker.io_19.03.4+dfsg2.orig.tar.xz [dgit import orig docker.io_19.03.4+dfsg2.orig.tar.xz] --- CHANGELOG.md | 216 + CONTRIBUTING.md | 126 + Makefile | 56 + README.md | 94 + VERSION | 1 + cli/.dockerignore | 6 + cli/.mailmap | 504 + cli/AUTHORS | 716 + cli/CONTRIBUTING.md | 365 + cli/Jenkinsfile | 13 + cli/LICENSE | 191 + cli/MAINTAINERS | 136 + cli/Makefile | 102 + cli/NOTICE | 19 + cli/README.md | 69 + cli/TESTING.md | 85 + cli/VERSION | 1 + cli/appveyor.yml | 23 + cli/circle.yml | 133 + cli/cli-plugins/examples/helloworld/main.go | 106 + cli/cli-plugins/manager/candidate.go | 23 + cli/cli-plugins/manager/candidate_test.go | 101 + cli/cli-plugins/manager/cobra.go | 60 + cli/cli-plugins/manager/error.go | 43 + cli/cli-plugins/manager/error_test.go | 24 + cli/cli-plugins/manager/manager.go | 209 + cli/cli-plugins/manager/manager_test.go | 114 + cli/cli-plugins/manager/manager_unix.go | 8 + cli/cli-plugins/manager/manager_windows.go | 11 + cli/cli-plugins/manager/metadata.go | 28 + cli/cli-plugins/manager/plugin.go | 112 + cli/cli-plugins/manager/suffix_unix.go | 10 + cli/cli-plugins/manager/suffix_windows.go | 26 + cli/cli-plugins/plugin/plugin.go | 161 + cli/cli/cobra.go | 343 + cli/cli/cobra_test.go | 88 + cli/cli/command/builder/cmd.go | 25 + cli/cli/command/builder/prune.go | 96 + cli/cli/command/bundlefile/bundlefile.go | 70 + cli/cli/command/bundlefile/bundlefile_test.go | 78 + cli/cli/command/checkpoint/client_test.go | 36 + cli/cli/command/checkpoint/cmd.go | 28 + cli/cli/command/checkpoint/create.go | 57 + cli/cli/command/checkpoint/create_test.go | 72 + cli/cli/command/checkpoint/formatter.go | 55 + cli/cli/command/checkpoint/formatter_test.go | 53 + cli/cli/command/checkpoint/list.go | 54 + cli/cli/command/checkpoint/list_test.go | 67 + cli/cli/command/checkpoint/remove.go | 44 + 
cli/cli/command/checkpoint/remove_test.go | 65 + .../checkpoint-list-with-options.golden | 2 + cli/cli/command/cli.go | 547 + cli/cli/command/cli_options.go | 105 + cli/cli/command/cli_options_test.go | 37 + cli/cli/command/cli_test.go | 325 + cli/cli/command/commands/commands.go | 146 + cli/cli/command/config/client_test.go | 45 + cli/cli/command/config/cmd.go | 29 + cli/cli/command/config/create.go | 88 + cli/cli/command/config/create_test.go | 143 + cli/cli/command/config/formatter.go | 172 + cli/cli/command/config/formatter_test.go | 65 + cli/cli/command/config/inspect.go | 68 + cli/cli/command/config/inspect_test.go | 172 + cli/cli/command/config/ls.go | 72 + cli/cli/command/config/ls_test.go | 158 + cli/cli/command/config/remove.go | 55 + cli/cli/command/config/remove_test.go | 79 + .../testdata/config-create-with-name.golden | 1 + .../config-inspect-pretty.simple.golden | 8 + ...g-inspect-with-format.json-template.golden | 1 + ...inspect-with-format.simple-template.golden | 1 + ...format.multiple-configs-with-labels.golden | 26 + ...nspect-without-format.single-config.golden | 12 + .../config/testdata/config-list-sort.golden | 4 + .../config-list-with-config-format.golden | 2 + .../testdata/config-list-with-filter.golden | 3 + .../testdata/config-list-with-format.golden | 2 + .../config-list-with-quiet-option.golden | 2 + cli/cli/command/container/attach.go | 181 + cli/cli/command/container/attach_test.go | 129 + cli/cli/command/container/client_test.go | 145 + cli/cli/command/container/cmd.go | 45 + cli/cli/command/container/commit.go | 75 + cli/cli/command/container/cp.go | 313 + cli/cli/command/container/cp_test.go | 201 + cli/cli/command/container/create.go | 277 + cli/cli/command/container/create_test.go | 280 + cli/cli/command/container/diff.go | 47 + cli/cli/command/container/exec.go | 214 + cli/cli/command/container/exec_test.go | 227 + cli/cli/command/container/export.go | 62 + cli/cli/command/container/export_test.go | 49 + 
cli/cli/command/container/formatter_diff.go | 73 + .../command/container/formatter_diff_test.go | 61 + cli/cli/command/container/formatter_stats.go | 225 + .../command/container/formatter_stats_test.go | 302 + cli/cli/command/container/hijack.go | 208 + cli/cli/command/container/inspect.go | 47 + cli/cli/command/container/kill.go | 56 + cli/cli/command/container/list.go | 140 + cli/cli/command/container/list_test.go | 164 + cli/cli/command/container/logs.go | 80 + cli/cli/command/container/logs_test.go | 62 + cli/cli/command/container/opts.go | 1034 + cli/cli/command/container/opts_test.go | 856 + cli/cli/command/container/pause.go | 49 + cli/cli/command/container/port.go | 78 + cli/cli/command/container/prune.go | 78 + cli/cli/command/container/ps_test.go | 119 + cli/cli/command/container/rename.go | 51 + cli/cli/command/container/restart.go | 62 + cli/cli/command/container/rm.go | 73 + cli/cli/command/container/run.go | 312 + cli/cli/command/container/run_test.go | 74 + cli/cli/command/container/start.go | 202 + cli/cli/command/container/stats.go | 245 + cli/cli/command/container/stats_helpers.go | 240 + .../command/container/stats_helpers_test.go | 47 + cli/cli/command/container/stats_unit_test.go | 30 + cli/cli/command/container/stop.go | 67 + ...container-create-localhost-dns-ipv6.golden | 1 + .../container-create-localhost-dns.golden | 1 + ...-oom-kill-true-without-memory-limit.golden | 1 + ...reate-oom-kill-without-memory-limit.golden | 1 + .../container-list-format-name-name.golden | 2 + .../container-list-format-with-arg.golden | 2 + .../container-list-with-config-format.golden | 2 + .../container-list-with-format.golden | 2 + ...tainer-list-without-format-no-trunc.golden | 3 + .../container-list-without-format.golden | 6 + cli/cli/command/container/testdata/utf16.env | Bin 0 -> 54 bytes .../command/container/testdata/utf16be.env | Bin 0 -> 54 bytes cli/cli/command/container/testdata/utf8.env | 3 + cli/cli/command/container/testdata/valid.env | 1 + 
.../command/container/testdata/valid.label | 1 + cli/cli/command/container/top.go | 57 + cli/cli/command/container/tty.go | 126 + cli/cli/command/container/tty_test.go | 30 + cli/cli/command/container/unpause.go | 50 + cli/cli/command/container/update.go | 140 + cli/cli/command/container/utils.go | 162 + cli/cli/command/container/utils_test.go | 70 + cli/cli/command/container/wait.go | 53 + cli/cli/command/context.go | 27 + cli/cli/command/context/cmd.go | 49 + cli/cli/command/context/create.go | 199 + cli/cli/command/context/create_test.go | 365 + cli/cli/command/context/export-import_test.go | 110 + cli/cli/command/context/export.go | 110 + cli/cli/command/context/import.go | 51 + cli/cli/command/context/inspect.go | 64 + cli/cli/command/context/inspect_test.go | 24 + cli/cli/command/context/list.go | 96 + cli/cli/command/context/list_test.go | 47 + cli/cli/command/context/options.go | 219 + cli/cli/command/context/remove.go | 68 + cli/cli/command/context/remove_test.go | 73 + .../command/context/testdata/inspect.golden | 31 + cli/cli/command/context/testdata/list.golden | 5 + .../context/testdata/quiet-list.golden | 3 + .../command/context/testdata/test-kubeconfig | 19 + cli/cli/command/context/update.go | 144 + cli/cli/command/context/update_test.go | 102 + cli/cli/command/context/use.go | 48 + cli/cli/command/context/use_test.go | 49 + cli/cli/command/defaultcontextstore.go | 215 + cli/cli/command/defaultcontextstore_test.go | 190 + cli/cli/command/engine/activate.go | 209 + cli/cli/command/engine/activate_test.go | 148 + cli/cli/command/engine/activate_unix.go | 13 + cli/cli/command/engine/activate_windows.go | 9 + cli/cli/command/engine/auth.go | 34 + cli/cli/command/engine/check.go | 125 + cli/cli/command/engine/check_test.go | 114 + cli/cli/command/engine/client_test.go | 101 + cli/cli/command/engine/cmd.go | 23 + cli/cli/command/engine/cmd_test.go | 14 + cli/cli/command/engine/init.go | 10 + cli/cli/command/engine/licenses.go | 155 + 
cli/cli/command/engine/licenses_test.go | 257 + .../command/engine/testdata/check-all.golden | 11 + .../testdata/check-no-downgrades.golden | 6 + .../testdata/check-no-prerelease.golden | 8 + .../engine/testdata/check-patches-only.golden | 4 + .../expired-hub-license-display-only.golden | 3 + .../expired-license-display-only.golden | 1 + cli/cli/command/engine/update.go | 55 + cli/cli/command/engine/update_test.go | 40 + cli/cli/command/engine/updates.go | 74 + cli/cli/command/engine/updates_test.go | 144 + cli/cli/command/events_utils.go | 47 + cli/cli/command/formatter/buildcache.go | 177 + cli/cli/command/formatter/container.go | 329 + cli/cli/command/formatter/container_test.go | 658 + cli/cli/command/formatter/context.go | 90 + cli/cli/command/formatter/custom.go | 55 + cli/cli/command/formatter/custom_test.go | 12 + cli/cli/command/formatter/disk_usage.go | 476 + cli/cli/command/formatter/disk_usage_test.go | 119 + cli/cli/command/formatter/displayutils.go | 61 + .../command/formatter/displayutils_test.go | 31 + cli/cli/command/formatter/formatter.go | 119 + cli/cli/command/formatter/image.go | 272 + cli/cli/command/formatter/image_test.go | 356 + cli/cli/command/formatter/reflect.go | 68 + cli/cli/command/formatter/reflect_test.go | 66 + ...ainer-context-write-special-headers.golden | 3 + .../disk-usage-context-write-custom.golden | 5 + .../testdata/disk-usage-raw-format.golden | 24 + cli/cli/command/formatter/volume.go | 121 + cli/cli/command/formatter/volume_test.go | 183 + cli/cli/command/idresolver/client_test.go | 29 + cli/cli/command/idresolver/idresolver.go | 70 + cli/cli/command/idresolver/idresolver_test.go | 146 + cli/cli/command/image/build.go | 711 + cli/cli/command/image/build/context.go | 440 + cli/cli/command/image/build/context_test.go | 416 + cli/cli/command/image/build/context_unix.go | 11 + .../command/image/build/context_windows.go | 17 + cli/cli/command/image/build/dockerignore.go | 39 + cli/cli/command/image/build_buildkit.go | 497 + 
cli/cli/command/image/build_session.go | 161 + cli/cli/command/image/build_test.go | 278 + cli/cli/command/image/client_test.go | 124 + cli/cli/command/image/cmd.go | 33 + cli/cli/command/image/formatter_history.go | 110 + .../command/image/formatter_history_test.go | 228 + cli/cli/command/image/history.go | 64 + cli/cli/command/image/history_test.go | 105 + cli/cli/command/image/import.go | 90 + cli/cli/command/image/import_test.go | 97 + cli/cli/command/image/inspect.go | 44 + cli/cli/command/image/inspect_test.go | 88 + cli/cli/command/image/list.go | 96 + cli/cli/command/image/list_test.go | 98 + cli/cli/command/image/load.go | 76 + cli/cli/command/image/load_test.go | 101 + cli/cli/command/image/prune.go | 101 + cli/cli/command/image/prune_test.go | 102 + cli/cli/command/image/pull.go | 86 + cli/cli/command/image/pull_test.go | 130 + cli/cli/command/image/push.go | 70 + cli/cli/command/image/push_test.go | 71 + cli/cli/command/image/remove.go | 86 + cli/cli/command/image/remove_test.go | 134 + cli/cli/command/image/save.go | 61 + cli/cli/command/image/save_test.go | 108 + cli/cli/command/image/tag.go | 41 + cli/cli/command/image/tag_test.go | 41 + .../command/image/testdata/Dockerfile.test | 3 + cli/cli/command/image/testdata/gittar.test | Bin 0 -> 10240 bytes .../history-command-success.non-human.golden | 2 + ...tory-command-success.quiet-no-trunc.golden | 1 + .../history-command-success.quiet.golden | 1 + .../history-command-success.simple.golden | 2 + .../testdata/import-command-success.input.txt | 1 + .../inspect-command-success.format.golden | 1 + ...inspect-command-success.simple-many.golden | 56 + .../inspect-command-success.simple.golden | 29 + .../list-command-success.filters.golden | 1 + .../list-command-success.format.golden | 0 .../list-command-success.match-name.golden | 1 + .../list-command-success.quiet-format.golden | 0 .../list-command-success.simple.golden | 1 + .../load-command-success.input-file.golden | 1 + 
.../testdata/load-command-success.input.txt | 1 + .../testdata/load-command-success.json.golden | 1 + .../load-command-success.simple.golden | 1 + .../testdata/prune-command-success.all.golden | 2 + ...prune-command-success.force-deleted.golden | 4 + ...rune-command-success.force-untagged.golden | 4 + .../prune-command-success.label-filter.golden | 1 + .../pull-command-success.simple-no-tag.golden | 2 + .../pull-command-success.simple-quiet.golden | 2 + .../pull-command-success.simple.golden | 1 + ...-success.Image Deleted and Untagged.golden | 2 + ...emove-command-success.Image Deleted.golden | 1 + ...move-command-success.Image Untagged.golden | 1 + ...s.Image not found with force option.golden | 0 cli/cli/command/image/testdata/tar.test | Bin 0 -> 10240 bytes cli/cli/command/image/trust.go | 363 + cli/cli/command/image/trust_test.go | 73 + cli/cli/command/inspect/inspector.go | 199 + cli/cli/command/inspect/inspector_test.go | 259 + cli/cli/command/manifest/annotate.go | 97 + cli/cli/command/manifest/annotate_test.go | 77 + cli/cli/command/manifest/client_test.go | 56 + cli/cli/command/manifest/cmd.go | 45 + cli/cli/command/manifest/create_list.go | 82 + cli/cli/command/manifest/create_test.go | 116 + cli/cli/command/manifest/inspect.go | 148 + cli/cli/command/manifest/inspect_test.go | 146 + cli/cli/command/manifest/push.go | 281 + cli/cli/command/manifest/push_test.go | 69 + .../manifest/testdata/inspect-annotate.golden | 32 + .../testdata/inspect-manifest-list.golden | 24 + .../manifest/testdata/inspect-manifest.golden | 16 + cli/cli/command/manifest/util.go | 81 + cli/cli/command/network/client_test.go | 45 + cli/cli/command/network/cmd.go | 29 + cli/cli/command/network/connect.go | 85 + cli/cli/command/network/connect_test.go | 70 + cli/cli/command/network/create.go | 248 + cli/cli/command/network/create_test.go | 174 + cli/cli/command/network/disconnect.go | 41 + cli/cli/command/network/disconnect_test.go | 41 + cli/cli/command/network/formatter.go | 120 + 
cli/cli/command/network/formatter_test.go | 215 + cli/cli/command/network/inspect.go | 48 + cli/cli/command/network/list.go | 72 + cli/cli/command/network/list_test.go | 94 + cli/cli/command/network/prune.go | 76 + cli/cli/command/network/remove.go | 53 + .../network/testdata/network-list-sort.golden | 3 + .../network/testdata/network-list.golden | 2 + cli/cli/command/node/client_test.go | 77 + cli/cli/command/node/cmd.go | 60 + cli/cli/command/node/demote.go | 36 + cli/cli/command/node/demote_test.go | 84 + cli/cli/command/node/formatter.go | 334 + cli/cli/command/node/formatter_test.go | 350 + cli/cli/command/node/inspect.go | 72 + cli/cli/command/node/inspect_test.go | 118 + cli/cli/command/node/list.go | 78 + cli/cli/command/node/list_test.go | 141 + cli/cli/command/node/opts.go | 23 + cli/cli/command/node/promote.go | 36 + cli/cli/command/node/promote_test.go | 84 + cli/cli/command/node/ps.go | 104 + cli/cli/command/node/ps_test.go | 151 + cli/cli/command/node/remove.go | 56 + cli/cli/command/node/remove_test.go | 44 + .../node-inspect-pretty.manager-leader.golden | 24 + .../node-inspect-pretty.manager.golden | 24 + .../node-inspect-pretty.simple.golden | 22 + .../testdata/node-list-format-flag.golden | 2 + .../node-list-format-from-config.golden | 3 + .../node/testdata/node-list-sort.golden | 4 + .../node/testdata/node-ps.simple.golden | 2 + .../node/testdata/node-ps.with-errors.golden | 4 + cli/cli/command/node/update.go | 120 + cli/cli/command/node/update_test.go | 169 + cli/cli/command/orchestrator.go | 84 + cli/cli/command/orchestrator_test.go | 101 + cli/cli/command/plugin/client_test.go | 76 + cli/cli/command/plugin/cmd.go | 32 + cli/cli/command/plugin/create.go | 128 + cli/cli/command/plugin/create_test.go | 123 + cli/cli/command/plugin/disable.go | 36 + cli/cli/command/plugin/disable_test.go | 58 + cli/cli/command/plugin/enable.go | 48 + cli/cli/command/plugin/enable_test.go | 70 + cli/cli/command/plugin/formatter.go | 94 + 
cli/cli/command/plugin/formatter_test.go | 185 + cli/cli/command/plugin/inspect.go | 43 + cli/cli/command/plugin/inspect_test.go | 150 + cli/cli/command/plugin/install.go | 174 + cli/cli/command/plugin/install_test.go | 141 + cli/cli/command/plugin/list.go | 70 + cli/cli/command/plugin/list_test.go | 174 + cli/cli/command/plugin/push.go | 76 + cli/cli/command/plugin/remove.go | 54 + cli/cli/command/plugin/remove_test.go | 71 + cli/cli/command/plugin/set.go | 22 + ...plugin-inspect-multiple-with-format.golden | 2 + .../plugin-inspect-single-with-format.golden | 1 + ...lugin-inspect-single-without-format.golden | 54 + .../plugin/testdata/plugin-list-sort.golden | 3 + .../testdata/plugin-list-with-format.golden | 1 + .../plugin-list-with-no-trunc-option.golden | 1 + .../plugin-list-with-quiet-option.golden | 1 + .../plugin-list-without-format.golden | 2 + cli/cli/command/plugin/upgrade.go | 90 + cli/cli/command/registry.go | 210 + cli/cli/command/registry/formatter_search.go | 104 + .../command/registry/formatter_search_test.go | 282 + cli/cli/command/registry/login.go | 190 + cli/cli/command/registry/login_test.go | 182 + cli/cli/command/registry/logout.go | 76 + cli/cli/command/registry/search.go | 97 + .../search-context-write-stars-table.golden | 2 + .../search-context-write-table.golden | 3 + cli/cli/command/registry_test.go | 151 + cli/cli/command/secret/client_test.go | 45 + cli/cli/command/secret/cmd.go | 29 + cli/cli/command/secret/create.go | 109 + cli/cli/command/secret/create_test.go | 169 + cli/cli/command/secret/formatter.go | 179 + cli/cli/command/secret/formatter_test.go | 65 + cli/cli/command/secret/inspect.go | 65 + cli/cli/command/secret/inspect_test.go | 173 + cli/cli/command/secret/ls.go | 69 + cli/cli/command/secret/ls_test.go | 160 + cli/cli/command/secret/remove.go | 53 + cli/cli/command/secret/remove_test.go | 79 + .../testdata/secret-create-with-name.golden | 1 + .../secret-inspect-pretty.simple.golden | 7 + 
...t-inspect-with-format.json-template.golden | 1 + ...inspect-with-format.simple-template.golden | 1 + ...format.multiple-secrets-with-labels.golden | 26 + ...nspect-without-format.single-secret.golden | 12 + .../secret/testdata/secret-list-sort.golden | 4 + .../secret-list-with-config-format.golden | 2 + .../testdata/secret-list-with-filter.golden | 3 + .../testdata/secret-list-with-format.golden | 2 + .../secret-list-with-quiet-option.golden | 2 + cli/cli/command/service/client_test.go | 77 + cli/cli/command/service/cmd.go | 34 + cli/cli/command/service/create.go | 181 + cli/cli/command/service/create_test.go | 271 + cli/cli/command/service/formatter.go | 682 + cli/cli/command/service/formatter_test.go | 360 + .../command/service/generic_resource_opts.go | 105 + .../service/generic_resource_opts_test.go | 23 + cli/cli/command/service/helpers.go | 33 + cli/cli/command/service/inspect.go | 93 + cli/cli/command/service/inspect_test.go | 170 + cli/cli/command/service/list.go | 142 + cli/cli/command/service/list_test.go | 28 + cli/cli/command/service/logs.go | 349 + cli/cli/command/service/opts.go | 949 + cli/cli/command/service/opts_test.go | 302 + cli/cli/command/service/parse.go | 158 + cli/cli/command/service/progress/progress.go | 504 + .../command/service/progress/progress_test.go | 375 + cli/cli/command/service/ps.go | 155 + cli/cli/command/service/ps_test.go | 135 + cli/cli/command/service/remove.go | 48 + cli/cli/command/service/rollback.go | 64 + cli/cli/command/service/rollback_test.go | 104 + cli/cli/command/service/scale.go | 122 + .../testdata/service-context-write-raw.golden | 14 + .../service/testdata/service-list-sort.golden | 3 + cli/cli/command/service/trust.go | 87 + cli/cli/command/service/update.go | 1344 + cli/cli/command/service/update_test.go | 1250 + cli/cli/command/stack/client_test.go | 250 + cli/cli/command/stack/cmd.go | 132 + cli/cli/command/stack/common.go | 50 + cli/cli/command/stack/deploy.go | 81 + 
cli/cli/command/stack/deploy_test.go | 17 + cli/cli/command/stack/formatter/formatter.go | 88 + .../command/stack/formatter/formatter_test.go | 74 + cli/cli/command/stack/kubernetes/cli.go | 144 + cli/cli/command/stack/kubernetes/client.go | 107 + .../command/stack/kubernetes/conversion.go | 240 + .../stack/kubernetes/conversion_test.go | 192 + cli/cli/command/stack/kubernetes/convert.go | 567 + .../command/stack/kubernetes/convert_test.go | 346 + cli/cli/command/stack/kubernetes/deploy.go | 171 + .../command/stack/kubernetes/deploy_test.go | 299 + cli/cli/command/stack/kubernetes/list.go | 136 + cli/cli/command/stack/kubernetes/ps.go | 112 + cli/cli/command/stack/kubernetes/remove.go | 27 + cli/cli/command/stack/kubernetes/services.go | 159 + .../command/stack/kubernetes/services_test.go | 138 + cli/cli/command/stack/kubernetes/stack.go | 161 + .../command/stack/kubernetes/stackclient.go | 274 + .../stack/kubernetes/stackclient_test.go | 60 + .../testdata/compose-with-expose.yml | 9 + .../testdata/compose-with-pull-policy.yml | 6 + .../testdata/compose-with-pull-secret.yml | 6 + .../command/stack/kubernetes/testdata/config | 1 + .../command/stack/kubernetes/testdata/secret | 1 + .../stack/kubernetes/testdata/warnings.golden | 31 + cli/cli/command/stack/kubernetes/warnings.go | 145 + .../command/stack/kubernetes/warnings_test.go | 78 + cli/cli/command/stack/kubernetes/watcher.go | 254 + .../command/stack/kubernetes/watcher_test.go | 218 + cli/cli/command/stack/list.go | 80 + cli/cli/command/stack/list_test.go | 130 + cli/cli/command/stack/loader/loader.go | 152 + cli/cli/command/stack/loader/loader_test.go | 47 + cli/cli/command/stack/options/opts.go | 43 + cli/cli/command/stack/ps.go | 44 + cli/cli/command/stack/ps_test.go | 184 + cli/cli/command/stack/remove.go | 39 + cli/cli/command/stack/remove_test.go | 166 + cli/cli/command/stack/services.go | 42 + cli/cli/command/stack/services_test.go | 170 + cli/cli/command/stack/swarm/client_test.go | 239 + 
cli/cli/command/stack/swarm/common.go | 50 + cli/cli/command/stack/swarm/deploy.go | 80 + .../command/stack/swarm/deploy_bundlefile.go | 124 + .../stack/swarm/deploy_bundlefile_test.go | 50 + .../command/stack/swarm/deploy_composefile.go | 281 + .../stack/swarm/deploy_composefile_test.go | 67 + cli/cli/command/stack/swarm/deploy_test.go | 110 + cli/cli/command/stack/swarm/list.go | 45 + cli/cli/command/stack/swarm/ps.go | 35 + cli/cli/command/stack/swarm/remove.go | 140 + cli/cli/command/stack/swarm/services.go | 66 + .../testdata/bundlefile_with_two_services.dab | 29 + .../testdata/stack-list-sort-natural.golden | 4 + .../stack/testdata/stack-list-sort.golden | 3 + .../testdata/stack-list-with-format.golden | 1 + .../testdata/stack-list-without-format.golden | 2 + .../stack-ps-with-config-format.golden | 1 + .../testdata/stack-ps-with-format.golden | 1 + .../stack-ps-with-no-resolve-option.golden | 1 + .../stack-ps-with-no-trunc-option.golden | 1 + .../stack-ps-with-quiet-option.golden | 1 + .../testdata/stack-ps-without-format.golden | 2 + .../stack-services-with-config-format.golden | 1 + .../stack-services-with-format.golden | 1 + .../stack-services-with-quiet-option.golden | 1 + .../stack-services-without-format.golden | 2 + cli/cli/command/streams.go | 23 + cli/cli/command/swarm/ca.go | 141 + cli/cli/command/swarm/ca_test.go | 300 + cli/cli/command/swarm/client_test.go | 85 + cli/cli/command/swarm/cmd.go | 33 + cli/cli/command/swarm/init.go | 117 + cli/cli/command/swarm/init_test.go | 125 + cli/cli/command/swarm/join.go | 88 + cli/cli/command/swarm/join_test.go | 100 + cli/cli/command/swarm/join_token.go | 119 + cli/cli/command/swarm/join_token_test.go | 211 + cli/cli/command/swarm/leave.go | 43 + cli/cli/command/swarm/leave_test.go | 50 + cli/cli/command/swarm/opts.go | 276 + cli/cli/command/swarm/opts_test.go | 111 + .../command/swarm/progress/root_rotation.go | 120 + .../swarm/testdata/init-init-autolock.golden | 11 + 
.../command/swarm/testdata/init-init.golden | 4 + .../testdata/jointoken-manager-quiet.golden | 1 + .../testdata/jointoken-manager-rotate.golden | 6 + .../swarm/testdata/jointoken-manager.golden | 4 + .../testdata/jointoken-worker-quiet.golden | 1 + .../swarm/testdata/jointoken-worker.golden | 4 + .../unlockkeys-unlock-key-quiet.golden | 1 + .../unlockkeys-unlock-key-rotate-quiet.golden | 1 + .../unlockkeys-unlock-key-rotate.golden | 9 + .../testdata/unlockkeys-unlock-key.golden | 7 + .../testdata/update-all-flags-quiet.golden | 1 + .../update-autolock-unlock-key.golden | 8 + .../swarm/testdata/update-noargs.golden | 14 + cli/cli/command/swarm/unlock.go | 75 + cli/cli/command/swarm/unlock_key.go | 89 + cli/cli/command/swarm/unlock_key_test.go | 171 + cli/cli/command/swarm/unlock_test.go | 98 + cli/cli/command/swarm/update.go | 71 + cli/cli/command/swarm/update_test.go | 185 + cli/cli/command/system/client_test.go | 23 + cli/cli/command/system/cmd.go | 26 + cli/cli/command/system/df.go | 72 + cli/cli/command/system/dial_stdio.go | 128 + cli/cli/command/system/events.go | 142 + cli/cli/command/system/info.go | 491 + cli/cli/command/system/info_test.go | 398 + cli/cli/command/system/inspect.go | 218 + cli/cli/command/system/prune.go | 148 + cli/cli/command/system/prune_test.go | 51 + .../testdata/docker-client-version.golden | 44 + .../system/testdata/docker-info-badsec.golden | 52 + .../testdata/docker-info-badsec.json.golden | 1 + .../docker-info-daemon-warnings.json.golden | 1 + .../system/testdata/docker-info-errors.golden | 5 + .../testdata/docker-info-errors.json.golden | 1 + .../docker-info-legacy-warnings.json.golden | 1 + .../testdata/docker-info-no-swarm.golden | 54 + .../testdata/docker-info-no-swarm.json.golden | 1 + .../docker-info-plugins-warnings.golden | 1 + .../testdata/docker-info-plugins.golden | 57 + .../testdata/docker-info-plugins.json.golden | 1 + .../testdata/docker-info-warnings.golden | 11 + .../testdata/docker-info-with-swarm.golden | 76 + 
.../docker-info-with-swarm.json.golden | 1 + cli/cli/command/system/version.go | 286 + cli/cli/command/system/version_test.go | 113 + cli/cli/command/task/client_test.go | 29 + cli/cli/command/task/formatter.go | 149 + cli/cli/command/task/formatter_test.go | 107 + cli/cli/command/task/print.go | 93 + cli/cli/command/task/print_test.go | 128 + .../task-context-write-table-custom.golden | 3 + .../task-print-with-global-service.golden | 1 + .../task-print-with-indentation.golden | 3 + .../task-print-with-no-trunc-option.golden | 1 + .../task-print-with-quiet-option.golden | 1 + .../task-print-with-replicated-service.golden | 1 + .../task-print-with-resolution.golden | 1 + cli/cli/command/testdata/ca.pem | 18 + cli/cli/command/trust.go | 15 + cli/cli/command/trust/cmd.go | 25 + cli/cli/command/trust/common.go | 156 + cli/cli/command/trust/common_test.go | 33 + cli/cli/command/trust/formatter.go | 132 + cli/cli/command/trust/formatter_test.go | 240 + cli/cli/command/trust/helpers.go | 47 + cli/cli/command/trust/helpers_test.go | 24 + cli/cli/command/trust/inspect.go | 115 + cli/cli/command/trust/inspect_pretty.go | 93 + cli/cli/command/trust/inspect_pretty_test.go | 476 + cli/cli/command/trust/inspect_test.go | 152 + cli/cli/command/trust/key.go | 22 + cli/cli/command/trust/key_generate.go | 134 + cli/cli/command/trust/key_generate_test.go | 134 + cli/cli/command/trust/key_load.go | 118 + cli/cli/command/trust/key_load_test.go | 253 + cli/cli/command/trust/revoke.go | 125 + cli/cli/command/trust/revoke_test.go | 156 + cli/cli/command/trust/sign.go | 247 + cli/cli/command/trust/sign_test.go | 309 + cli/cli/command/trust/signer.go | 22 + cli/cli/command/trust/signer_add.go | 141 + cli/cli/command/trust/signer_add_test.go | 147 + cli/cli/command/trust/signer_remove.go | 143 + cli/cli/command/trust/signer_remove_test.go | 128 + .../testdata/trust-inspect-empty-repo.golden | 25 + .../trust-inspect-full-repo-no-signers.golden | 33 + 
...rust-inspect-full-repo-with-signers.golden | 65 + ...inspect-multiple-repos-with-signers.golden | 128 + .../trust-inspect-one-tag-no-signers.golden | 33 + ...inspect-pretty-full-repo-no-signers.golden | 10 + ...spect-pretty-full-repo-with-signers.golden | 18 + ...t-inspect-pretty-one-tag-no-signers.golden | 10 + ...ct-pretty-unsigned-tag-with-signers.golden | 14 + .../trust-inspect-uninitialized.golden | 1 + ...t-inspect-unsigned-tag-with-signers.golden | 42 + cli/cli/command/utils.go | 194 + cli/cli/command/utils_test.go | 33 + cli/cli/command/volume/client_test.go | 54 + cli/cli/command/volume/cmd.go | 26 + cli/cli/command/volume/create.go | 69 + cli/cli/command/volume/create_test.go | 126 + cli/cli/command/volume/inspect.go | 46 + cli/cli/command/volume/inspect_test.go | 141 + cli/cli/command/volume/list.go | 67 + cli/cli/command/volume/list_test.go | 129 + cli/cli/command/volume/prune.go | 78 + cli/cli/command/volume/prune_test.go | 119 + cli/cli/command/volume/remove.go | 69 + cli/cli/command/volume/remove_test.go | 44 + ...e-inspect-with-format.json-template.golden | 1 + ...inspect-with-format.simple-template.golden | 1 + ...-format.multiple-volume-with-labels.golden | 22 + ...nspect-without-format.single-volume.golden | 10 + .../volume/testdata/volume-list-sort.golden | 3 + .../volume-list-with-config-format.golden | 3 + .../testdata/volume-list-with-format.golden | 3 + .../volume-list-without-format.golden | 4 + .../volume/testdata/volume-prune-no.golden | 2 + .../volume/testdata/volume-prune-yes.golden | 7 + .../volume-prune.deletedVolumes.golden | 6 + .../volume/testdata/volume-prune.empty.golden | 1 + cli/cli/compose/convert/compose.go | 199 + cli/cli/compose/convert/compose_test.go | 171 + cli/cli/compose/convert/service.go | 684 + cli/cli/compose/convert/service_test.go | 631 + cli/cli/compose/convert/volume.go | 162 + cli/cli/compose/convert/volume_test.go | 361 + .../compose/interpolation/interpolation.go | 163 + 
.../interpolation/interpolation_test.go | 147 + cli/cli/compose/loader/example1.env | 8 + cli/cli/compose/loader/example2.env | 4 + cli/cli/compose/loader/full-example.yml | 409 + cli/cli/compose/loader/full-struct_test.go | 1451 + cli/cli/compose/loader/interpolate.go | 71 + cli/cli/compose/loader/loader.go | 941 + cli/cli/compose/loader/loader_test.go | 1751 + cli/cli/compose/loader/merge.go | 233 + cli/cli/compose/loader/merge_test.go | 1016 + cli/cli/compose/loader/types_test.go | 43 + cli/cli/compose/loader/volume.go | 122 + cli/cli/compose/loader/volume_test.go | 223 + cli/cli/compose/loader/windows_path.go | 66 + cli/cli/compose/loader/windows_path_test.go | 61 + cli/cli/compose/schema/bindata.go | 564 + .../schema/data/config_schema_v3.0.json | 384 + .../schema/data/config_schema_v3.1.json | 429 + .../schema/data/config_schema_v3.2.json | 476 + .../schema/data/config_schema_v3.3.json | 540 + .../schema/data/config_schema_v3.4.json | 548 + .../schema/data/config_schema_v3.5.json | 577 + .../schema/data/config_schema_v3.6.json | 586 + .../schema/data/config_schema_v3.7.json | 606 + .../schema/data/config_schema_v3.8.json | 617 + cli/cli/compose/schema/data/doc.go | 8 + cli/cli/compose/schema/schema.go | 168 + cli/cli/compose/schema/schema_test.go | 280 + cli/cli/compose/template/template.go | 245 + cli/cli/compose/template/template_test.go | 283 + cli/cli/compose/types/types.go | 524 + cli/cli/config/config.go | 136 + cli/cli/config/config_test.go | 594 + cli/cli/config/configfile/file.go | 385 + cli/cli/config/configfile/file_test.go | 498 + .../testdata/plugin-config-2.golden | 12 + .../configfile/testdata/plugin-config.golden | 12 + cli/cli/config/credentials/credentials.go | 17 + cli/cli/config/credentials/default_store.go | 21 + .../credentials/default_store_darwin.go | 5 + .../config/credentials/default_store_linux.go | 13 + .../credentials/default_store_unsupported.go | 7 + .../credentials/default_store_windows.go | 5 + 
cli/cli/config/credentials/file_store.go | 81 + cli/cli/config/credentials/file_store_test.go | 136 + cli/cli/config/credentials/native_store.go | 143 + .../config/credentials/native_store_test.go | 277 + cli/cli/config/types/authconfig.go | 22 + cli/cli/connhelper/commandconn/commandconn.go | 283 + .../commandconn/commandconn_unix_test.go | 44 + .../connhelper/commandconn/pdeathsig_linux.go | 10 + .../commandconn/pdeathsig_nolinux.go | 10 + .../connhelper/commandconn/session_unix.go | 13 + .../connhelper/commandconn/session_windows.go | 8 + cli/cli/connhelper/connhelper.go | 55 + cli/cli/connhelper/ssh/ssh.go | 63 + cli/cli/connhelper/ssh/ssh_test.go | 64 + cli/cli/context/docker/constants.go | 6 + cli/cli/context/docker/load.go | 166 + cli/cli/context/endpoint.go | 7 + cli/cli/context/kubernetes/constants.go | 6 + cli/cli/context/kubernetes/endpoint_test.go | 196 + cli/cli/context/kubernetes/load.go | 135 + cli/cli/context/kubernetes/load_test.go | 25 + cli/cli/context/kubernetes/save.go | 61 + .../kubernetes/testdata/eks-kubeconfig | 23 + .../kubernetes/testdata/gke-kubeconfig | 23 + .../kubernetes/testdata/test-kubeconfig | 20 + cli/cli/context/store/doc.go | 22 + cli/cli/context/store/io_utils.go | 29 + cli/cli/context/store/io_utils_test.go | 24 + cli/cli/context/store/metadata_test.go | 143 + cli/cli/context/store/metadatastore.go | 153 + cli/cli/context/store/store.go | 499 + cli/cli/context/store/store_test.go | 258 + cli/cli/context/store/storeconfig.go | 53 + cli/cli/context/store/storeconfig_test.go | 31 + cli/cli/context/store/tlsstore.go | 99 + cli/cli/context/store/tlsstore_test.go | 79 + cli/cli/context/tlsdata.go | 98 + cli/cli/debug/debug.go | 26 + cli/cli/debug/debug_test.go | 43 + cli/cli/error.go | 33 + cli/cli/flags/client.go | 12 + cli/cli/flags/common.go | 122 + cli/cli/flags/common_test.go | 43 + cli/cli/manifest/store/store.go | 180 + cli/cli/manifest/store/store_test.go | 136 + cli/cli/manifest/types/types.go | 114 + 
cli/cli/registry/client/client.go | 211 + cli/cli/registry/client/endpoint.go | 133 + cli/cli/registry/client/fetcher.go | 308 + cli/cli/required.go | 107 + cli/cli/required_test.go | 138 + cli/cli/streams/in.go | 56 + cli/cli/streams/out.go | 50 + cli/cli/streams/stream.go | 34 + cli/cli/trust/trust.go | 388 + cli/cli/trust/trust_test.go | 61 + cli/cli/version/version.go | 10 + cli/cli/winresources/res_windows.go | 18 + cli/cmd/docker/docker.go | 484 + cli/cmd/docker/docker_test.go | 66 + cli/cmd/docker/docker_windows.go | 3 + cli/codecov.yml | 22 + cli/contrib/completion/bash/docker | 5623 ++ cli/contrib/completion/fish/docker.fish | 564 + cli/contrib/completion/powershell/readme.txt | 2 + cli/contrib/completion/zsh/REVIEWERS | 2 + cli/contrib/completion/zsh/_docker | 3038 + cli/docker.Makefile | 156 + cli/dockerfiles/Dockerfile.binary-native | 10 + cli/dockerfiles/Dockerfile.cross | 6 + cli/dockerfiles/Dockerfile.dev | 31 + cli/dockerfiles/Dockerfile.e2e | 43 + cli/dockerfiles/Dockerfile.lint | 20 + cli/dockerfiles/Dockerfile.shellcheck | 5 + cli/docs/README.md | 30 + cli/docs/deprecated.md | 418 + cli/docs/extend/EBS_volume.md | 165 + cli/docs/extend/cli_plugins.md | 124 + cli/docs/extend/config.md | 237 + .../extend/images/authz_additional_info.png | Bin 0 -> 45916 bytes cli/docs/extend/images/authz_allow.png | Bin 0 -> 33505 bytes cli/docs/extend/images/authz_chunked.png | Bin 0 -> 33168 bytes .../extend/images/authz_connection_hijack.png | Bin 0 -> 38780 bytes cli/docs/extend/images/authz_deny.png | Bin 0 -> 27099 bytes cli/docs/extend/index.md | 263 + cli/docs/extend/legacy_plugins.md | 104 + cli/docs/extend/plugin_api.md | 195 + cli/docs/extend/plugins_authorization.md | 259 + cli/docs/extend/plugins_graphdriver.md | 403 + cli/docs/extend/plugins_logging.md | 219 + cli/docs/extend/plugins_metrics.md | 84 + cli/docs/extend/plugins_network.md | 78 + cli/docs/extend/plugins_services.md | 185 + cli/docs/extend/plugins_volume.md | 359 + 
cli/docs/reference/builder.md | 2103 + cli/docs/reference/commandline/attach.md | 160 + cli/docs/reference/commandline/build.md | 615 + cli/docs/reference/commandline/cli.md | 384 + cli/docs/reference/commandline/commit.md | 117 + cli/docs/reference/commandline/container.md | 61 + .../reference/commandline/container_prune.md | 126 + .../reference/commandline/context_create.md | 121 + .../reference/commandline/context_export.md | 31 + .../reference/commandline/context_import.md | 26 + .../reference/commandline/context_inspect.md | 69 + cli/docs/reference/commandline/context_ls.md | 30 + cli/docs/reference/commandline/context_rm.md | 28 + .../reference/commandline/context_update.md | 60 + cli/docs/reference/commandline/context_use.md | 26 + cli/docs/reference/commandline/cp.md | 118 + cli/docs/reference/commandline/create.md | 261 + cli/docs/reference/commandline/deploy.md | 111 + cli/docs/reference/commandline/diff.md | 67 + cli/docs/reference/commandline/dockerd.md | 1560 + cli/docs/reference/commandline/events.md | 421 + cli/docs/reference/commandline/exec.md | 125 + cli/docs/reference/commandline/export.md | 48 + cli/docs/reference/commandline/history.md | 87 + cli/docs/reference/commandline/image.md | 47 + cli/docs/reference/commandline/image_prune.md | 213 + cli/docs/reference/commandline/images.md | 353 + cli/docs/reference/commandline/import.md | 98 + cli/docs/reference/commandline/index.md | 197 + cli/docs/reference/commandline/info.md | 251 + cli/docs/reference/commandline/inspect.md | 122 + cli/docs/reference/commandline/kill.md | 71 + cli/docs/reference/commandline/load.md | 63 + cli/docs/reference/commandline/login.md | 190 + cli/docs/reference/commandline/logout.md | 36 + cli/docs/reference/commandline/logs.md | 85 + cli/docs/reference/commandline/manifest.md | 274 + cli/docs/reference/commandline/network.md | 51 + .../reference/commandline/network_connect.md | 117 + .../reference/commandline/network_create.md | 240 + 
.../commandline/network_disconnect.md | 48 + .../reference/commandline/network_inspect.md | 307 + cli/docs/reference/commandline/network_ls.md | 250 + .../reference/commandline/network_prune.md | 104 + cli/docs/reference/commandline/network_rm.md | 68 + cli/docs/reference/commandline/node.md | 42 + cli/docs/reference/commandline/node_demote.md | 47 + .../reference/commandline/node_inspect.md | 167 + cli/docs/reference/commandline/node_ls.md | 172 + .../reference/commandline/node_promote.md | 44 + cli/docs/reference/commandline/node_ps.md | 149 + cli/docs/reference/commandline/node_rm.md | 80 + cli/docs/reference/commandline/node_update.md | 77 + cli/docs/reference/commandline/pause.md | 48 + cli/docs/reference/commandline/plugin.md | 44 + .../reference/commandline/plugin_create.md | 66 + .../reference/commandline/plugin_disable.md | 69 + .../reference/commandline/plugin_enable.md | 68 + .../reference/commandline/plugin_inspect.md | 166 + .../reference/commandline/plugin_install.md | 75 + cli/docs/reference/commandline/plugin_ls.md | 118 + cli/docs/reference/commandline/plugin_push.md | 57 + cli/docs/reference/commandline/plugin_rm.md | 63 + cli/docs/reference/commandline/plugin_set.md | 172 + .../reference/commandline/plugin_upgrade.md | 100 + cli/docs/reference/commandline/port.md | 47 + cli/docs/reference/commandline/ps.md | 434 + cli/docs/reference/commandline/pull.md | 255 + cli/docs/reference/commandline/push.md | 82 + cli/docs/reference/commandline/rename.md | 35 + cli/docs/reference/commandline/restart.md | 32 + cli/docs/reference/commandline/rm.md | 100 + cli/docs/reference/commandline/rmi.md | 116 + cli/docs/reference/commandline/run.md | 866 + cli/docs/reference/commandline/save.md | 70 + cli/docs/reference/commandline/search.md | 202 + cli/docs/reference/commandline/secret.md | 45 + .../reference/commandline/secret_create.md | 99 + .../reference/commandline/secret_inspect.md | 95 + cli/docs/reference/commandline/secret_ls.md | 157 + 
cli/docs/reference/commandline/secret_rm.md | 54 + cli/docs/reference/commandline/service.md | 42 + .../reference/commandline/service_create.md | 1022 + .../reference/commandline/service_inspect.md | 171 + .../reference/commandline/service_logs.md | 86 + cli/docs/reference/commandline/service_ls.md | 165 + cli/docs/reference/commandline/service_ps.md | 195 + cli/docs/reference/commandline/service_rm.md | 61 + .../reference/commandline/service_rollback.md | 94 + .../reference/commandline/service_scale.md | 106 + .../reference/commandline/service_update.md | 313 + cli/docs/reference/commandline/stack.md | 41 + .../reference/commandline/stack_deploy.md | 148 + cli/docs/reference/commandline/stack_ls.md | 81 + cli/docs/reference/commandline/stack_ps.md | 233 + cli/docs/reference/commandline/stack_rm.md | 81 + .../reference/commandline/stack_services.md | 122 + cli/docs/reference/commandline/start.md | 34 + cli/docs/reference/commandline/stats.md | 175 + cli/docs/reference/commandline/stop.md | 37 + cli/docs/reference/commandline/swarm.md | 41 + cli/docs/reference/commandline/swarm_ca.md | 122 + cli/docs/reference/commandline/swarm_init.md | 205 + cli/docs/reference/commandline/swarm_join.md | 133 + .../reference/commandline/swarm_join_token.md | 115 + cli/docs/reference/commandline/swarm_leave.md | 72 + .../reference/commandline/swarm_unlock.md | 49 + .../reference/commandline/swarm_unlock_key.md | 92 + .../reference/commandline/swarm_update.md | 52 + cli/docs/reference/commandline/system.md | 37 + cli/docs/reference/commandline/system_df.md | 140 + .../reference/commandline/system_events.md | 345 + .../reference/commandline/system_prune.md | 155 + cli/docs/reference/commandline/tag.md | 84 + cli/docs/reference/commandline/top.md | 25 + .../reference/commandline/trust_inspect.md | 470 + .../commandline/trust_key_generate.md | 67 + .../reference/commandline/trust_key_load.md | 57 + .../reference/commandline/trust_revoke.md | 130 + 
cli/docs/reference/commandline/trust_sign.md | 184 + .../reference/commandline/trust_signer_add.md | 211 + .../commandline/trust_signer_remove.md | 172 + cli/docs/reference/commandline/unpause.md | 45 + cli/docs/reference/commandline/update.md | 128 + cli/docs/reference/commandline/version.md | 75 + cli/docs/reference/commandline/volume.md | 48 + .../reference/commandline/volume_create.md | 125 + .../reference/commandline/volume_inspect.md | 61 + cli/docs/reference/commandline/volume_ls.md | 199 + .../reference/commandline/volume_prune.md | 73 + cli/docs/reference/commandline/volume_rm.md | 48 + cli/docs/reference/commandline/wait.md | 58 + cli/docs/reference/glossary.md | 373 + cli/docs/reference/index.md | 20 + cli/docs/reference/run.md | 1615 + cli/docs/yaml/Dockerfile | 4 + cli/docs/yaml/generate.go | 105 + cli/docs/yaml/yaml.go | 270 + cli/e2e/cli-plugins/config_test.go | 34 + cli/e2e/cli-plugins/dial_test.go | 30 + cli/e2e/cli-plugins/flags_test.go | 258 + cli/e2e/cli-plugins/help_test.go | 109 + cli/e2e/cli-plugins/main_test.go | 17 + cli/e2e/cli-plugins/plugins/badmeta/main.go | 19 + .../plugins/nopersistentprerun/main.go | 36 + cli/e2e/cli-plugins/run_test.go | 247 + .../testdata/docker-badmeta-err.golden | 2 + .../testdata/docker-help-badmeta-err.golden | 1 + .../docker-help-helloworld-goodbye.golden | 4 + .../testdata/docker-help-helloworld.golden | 17 + .../docker-help-nonexistent-err.golden | 1 + .../testdata/docker-nonexistent-err.golden | 2 + cli/e2e/cli-plugins/util_test.go | 30 + cli/e2e/compose-env.connhelper-ssh.yaml | 9 + cli/e2e/compose-env.experimental.yaml | 6 + cli/e2e/compose-env.yaml | 28 + cli/e2e/container/attach_test.go | 31 + cli/e2e/container/create_test.go | 103 + cli/e2e/container/kill_test.go | 50 + cli/e2e/container/main_test.go | 17 + cli/e2e/container/run_test.go | 61 + ...run-attached-from-remote-and-remove.golden | 4 + cli/e2e/context/context_test.go | 21 + cli/e2e/context/main_test.go | 17 + 
cli/e2e/context/testdata/context-ls.golden | 3 + .../testdata/test-dockerconfig/config.json | 7 + .../meta.json | 1 + cli/e2e/context/testdata/test-kubeconfig | 20 + cli/e2e/image/build_test.go | 143 + cli/e2e/image/main_test.go | 17 + cli/e2e/image/pull_test.go | 80 + cli/e2e/image/push_test.go | 392 + cli/e2e/image/testdata/notary/delgkey1.crt | 21 + cli/e2e/image/testdata/notary/delgkey1.key | 27 + cli/e2e/image/testdata/notary/delgkey2.crt | 21 + cli/e2e/image/testdata/notary/delgkey2.key | 27 + cli/e2e/image/testdata/notary/delgkey3.crt | 21 + cli/e2e/image/testdata/notary/delgkey3.key | 27 + cli/e2e/image/testdata/notary/delgkey4.crt | 21 + cli/e2e/image/testdata/notary/delgkey4.key | 27 + cli/e2e/image/testdata/notary/gen.sh | 18 + cli/e2e/image/testdata/notary/localhost.cert | 19 + cli/e2e/image/testdata/notary/localhost.key | 27 + .../pull-with-content-trust-err.golden | 1 + .../testdata/pull-with-content-trust.golden | 5 + .../push-with-content-trust-err.golden | 0 cli/e2e/internal/fixtures/fixtures.go | 135 + cli/e2e/plugin/basic/basic.go | 34 + cli/e2e/plugin/main_test.go | 17 + cli/e2e/plugin/trust_test.go | 114 + cli/e2e/stack/deploy_test.go | 44 + cli/e2e/stack/help_test.go | 24 + cli/e2e/stack/main_test.go | 17 + cli/e2e/stack/remove_test.go | 85 + cli/e2e/stack/testdata/data | 1 + cli/e2e/stack/testdata/full-stack.yml | 9 + .../stack-deploy-help-kubernetes.golden | 14 + .../testdata/stack-deploy-help-swarm.golden | 19 + .../stack-deploy-with-names-kubernetes.golden | 7 + .../stack-deploy-with-names-swarm.golden | 7 + .../testdata/stack-deploy-with-names.golden | 7 + .../stack-remove-kubernetes-success.golden | 1 + .../stack-remove-swarm-success.golden | 3 + .../testdata/stack-with-named-resources.yml | 30 + cli/e2e/system/inspect_test.go | 18 + cli/e2e/system/main_test.go | 17 + cli/e2e/testdata/Dockerfile.connhelper-ssh | 12 + .../testdata/Dockerfile.evil-notary-server | 4 + cli/e2e/testdata/Dockerfile.notary-server | 4 + 
cli/e2e/testdata/connhelper-ssh/entrypoint.sh | 8 + .../testdata/notary-evil/notary-config.json | 19 + .../testdata/notary-evil/notary-server.cert | 64 + .../testdata/notary-evil/notary-server.key | 28 + cli/e2e/testdata/notary-evil/root-ca.cert | 32 + cli/e2e/testdata/notary/notary-config.json | 19 + cli/e2e/testdata/notary/notary-server.cert | 64 + cli/e2e/testdata/notary/notary-server.key | 28 + cli/e2e/testdata/notary/root-ca.cert | 32 + cli/e2e/trust/main_test.go | 17 + cli/e2e/trust/revoke_test.go | 71 + cli/e2e/trust/sign_test.go | 62 + cli/experimental/README.md | 53 + cli/experimental/checkpoint-restore.md | 88 + cli/experimental/docker-stacks-and-bundles.md | 202 + cli/experimental/images/ipvlan-l3.gliffy | 1 + cli/experimental/images/ipvlan-l3.png | Bin 0 -> 18260 bytes cli/experimental/images/ipvlan-l3.svg | 1 + .../images/ipvlan_l2_simple.gliffy | 1 + cli/experimental/images/ipvlan_l2_simple.png | Bin 0 -> 20145 bytes cli/experimental/images/ipvlan_l2_simple.svg | 1 + .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 + .../images/macvlan-bridge-ipvlan-l2.png | Bin 0 -> 14527 bytes .../images/macvlan-bridge-ipvlan-l2.svg | 1 + .../images/multi_tenant_8021q_vlans.gliffy | 1 + .../images/multi_tenant_8021q_vlans.png | Bin 0 -> 17879 bytes .../images/multi_tenant_8021q_vlans.svg | 1 + .../images/vlans-deeper-look.gliffy | 1 + cli/experimental/images/vlans-deeper-look.png | Bin 0 -> 38837 bytes cli/experimental/images/vlans-deeper-look.svg | 1 + cli/experimental/vlan-networks.md | 475 + cli/gometalinter.json | 42 + .../containerizedengine/client_test.go | 234 + .../containerizedengine/containerd.go | 78 + .../containerizedengine/containerd_test.go | 45 + cli/internal/containerizedengine/progress.go | 215 + cli/internal/containerizedengine/types.go | 49 + cli/internal/containerizedengine/update.go | 183 + .../containerizedengine/update_test.go | 300 + cli/internal/licenseutils/client_test.go | 112 + cli/internal/licenseutils/types.go | 28 + 
cli/internal/licenseutils/utils.go | 202 + cli/internal/licenseutils/utils_test.go | 234 + cli/internal/pkg/containerized/hostpaths.go | 61 + .../pkg/containerized/hostpaths_test.go | 21 + cli/internal/pkg/containerized/pauseandrun.go | 74 + cli/internal/pkg/containerized/signal_unix.go | 12 + .../pkg/containerized/signal_windows.go | 12 + cli/internal/pkg/containerized/snapshot.go | 158 + cli/internal/test/builders/config.go | 68 + cli/internal/test/builders/container.go | 79 + cli/internal/test/builders/doc.go | 3 + cli/internal/test/builders/network.go | 45 + cli/internal/test/builders/node.go | 134 + cli/internal/test/builders/secret.go | 70 + cli/internal/test/builders/service.go | 74 + cli/internal/test/builders/swarm.go | 39 + cli/internal/test/builders/task.go | 160 + cli/internal/test/builders/volume.go | 43 + cli/internal/test/cli.go | 253 + cli/internal/test/doc.go | 5 + cli/internal/test/environment/testenv.go | 90 + cli/internal/test/network/client.go | 57 + cli/internal/test/notary/client.go | 543 + cli/internal/test/output/output.go | 65 + cli/internal/test/store.go | 79 + cli/internal/test/strings.go | 29 + cli/internal/versions/versions.go | 127 + cli/internal/versions/versions_test.go | 105 + cli/kubernetes/README.md | 4 + cli/kubernetes/check.go | 60 + cli/kubernetes/check_test.go | 54 + cli/kubernetes/client/clientset/clientset.go | 25 + .../client/clientset/scheme/register.go | 30 + .../typed/compose/v1beta1/compose_client.go | 24 + .../clientset/typed/compose/v1beta1/stack.go | 12 + .../typed/compose/v1beta2/compose_client.go | 24 + .../clientset/typed/compose/v1beta2/stack.go | 12 + .../client/informers/compose/interface.go | 11 + .../informers/compose/v1beta2/interface.go | 11 + .../client/informers/compose/v1beta2/stack.go | 8 + cli/kubernetes/client/informers/factory.go | 12 + cli/kubernetes/client/informers/generic.go | 8 + .../internalinterfaces/factory_interfaces.go | 11 + .../compose/v1beta2/expansion_generated.go | 13 + 
.../client/listers/compose/v1beta2/stack.go | 15 + cli/kubernetes/compose/clone/maps.go | 11 + cli/kubernetes/compose/clone/slices.go | 7 + cli/kubernetes/compose/doc.go | 5 + .../impersonation/impersonationconfig.go | 7 + cli/kubernetes/compose/v1beta1/doc.go | 11 + cli/kubernetes/compose/v1beta1/owner.go | 8 + cli/kubernetes/compose/v1beta1/parsing.go | 7 + cli/kubernetes/compose/v1beta1/register.go | 22 + cli/kubernetes/compose/v1beta1/stack.go | 37 + cli/kubernetes/compose/v1beta1/stack_test.go | 1 + .../v1beta2/composefile_stack_types.go | 7 + cli/kubernetes/compose/v1beta2/doc.go | 6 + cli/kubernetes/compose/v1beta2/owner.go | 7 + cli/kubernetes/compose/v1beta2/register.go | 23 + cli/kubernetes/compose/v1beta2/scale.go | 7 + cli/kubernetes/compose/v1beta2/stack.go | 115 + cli/kubernetes/doc.go | 4 + cli/kubernetes/labels/labels.go | 24 + cli/man/Dockerfile.5.md | 474 + cli/man/README.md | 15 + cli/man/docker-build.1.md | 359 + cli/man/docker-config-json.5.md | 72 + cli/man/docker-run.1.md | 1139 + cli/man/docker.1.md | 70 + cli/man/dockerd.8.md | 839 + cli/man/generate.go | 122 + cli/man/import.go | 7 + cli/man/md2man-all.sh | 22 + cli/man/src/attach.md | 2 + cli/man/src/commit.md | 1 + cli/man/src/container/attach.md | 66 + cli/man/src/container/commit.md | 30 + cli/man/src/container/cp.md | 145 + cli/man/src/container/create-example.md | 35 + cli/man/src/container/create.md | 88 + cli/man/src/container/diff.md | 39 + cli/man/src/container/exec.md | 51 + cli/man/src/container/export.md | 20 + cli/man/src/container/kill.md | 2 + cli/man/src/container/logs.md | 40 + cli/man/src/container/ls.md | 117 + cli/man/src/container/pause.md | 12 + cli/man/src/container/port.md | 26 + cli/man/src/container/rename.md | 1 + cli/man/src/container/restart.md | 1 + cli/man/src/container/rm.md | 37 + cli/man/src/container/run.md | 1 + cli/man/src/container/start.md | 1 + cli/man/src/container/stats.md | 43 + cli/man/src/container/stop.md | 1 + cli/man/src/container/top.md | 
11 + cli/man/src/container/unpause.md | 6 + cli/man/src/container/update.md | 102 + cli/man/src/container/wait.md | 8 + cli/man/src/cp.md | 1 + cli/man/src/create.md | 1 + cli/man/src/diff.md | 1 + cli/man/src/events.md | 1 + cli/man/src/exec.md | 1 + cli/man/src/export.md | 1 + cli/man/src/history.md | 1 + cli/man/src/image/build.md | 1 + cli/man/src/image/history.md | 54 + cli/man/src/image/import.md | 50 + cli/man/src/image/load.md | 25 + cli/man/src/image/ls.md | 118 + cli/man/src/image/pull.md | 189 + cli/man/src/image/push.md | 34 + cli/man/src/image/rm.md | 16 + cli/man/src/image/save.md | 19 + cli/man/src/image/tag.md | 54 + cli/man/src/images.md | 1 + cli/man/src/import.md | 1 + cli/man/src/info.md | 1 + cli/man/src/inspect.md | 286 + cli/man/src/kill.md | 1 + cli/man/src/load.md | 1 + cli/man/src/login.md | 22 + cli/man/src/logout.md | 13 + cli/man/src/logs.md | 1 + cli/man/src/network/connect.md | 39 + cli/man/src/network/create.md | 177 + cli/man/src/network/disconnect.md | 5 + cli/man/src/network/inspect.md | 183 + cli/man/src/network/ls.md | 182 + cli/man/src/network/rm.md | 20 + cli/man/src/pause.md | 1 + cli/man/src/plugin/ls.md | 43 + cli/man/src/port.md | 1 + cli/man/src/ps.md | 1 + cli/man/src/pull.md | 1 + cli/man/src/push.md | 1 + cli/man/src/rename.md | 1 + cli/man/src/restart.md | 1 + cli/man/src/rm.md | 1 + cli/man/src/rmi.md | 1 + cli/man/src/save.md | 1 + cli/man/src/search.md | 36 + cli/man/src/start.md | 1 + cli/man/src/stats.md | 1 + cli/man/src/stop.md | 1 + cli/man/src/system/events.md | 134 + cli/man/src/system/info.md | 169 + cli/man/src/tag.md | 1 + cli/man/src/top.md | 1 + cli/man/src/unpause.md | 1 + cli/man/src/update.md | 1 + cli/man/src/version.md | 37 + cli/man/src/volume.md | 14 + cli/man/src/volume/create.md | 35 + cli/man/src/volume/inspect.md | 4 + cli/man/src/volume/ls.md | 11 + cli/man/src/wait.md | 1 + cli/opts/config.go | 98 + cli/opts/duration.go | 64 + cli/opts/duration_test.go | 30 + cli/opts/env.go | 46 + 
cli/opts/env_test.go | 42 + cli/opts/envfile.go | 22 + cli/opts/envfile_test.go | 178 + cli/opts/file.go | 77 + cli/opts/gpus.go | 112 + cli/opts/gpus_test.go | 48 + cli/opts/hosts.go | 167 + cli/opts/hosts_test.go | 181 + cli/opts/hosts_unix.go | 8 + cli/opts/hosts_windows.go | 6 + cli/opts/ip.go | 47 + cli/opts/ip_test.go | 54 + cli/opts/mount.go | 182 + cli/opts/mount_test.go | 185 + cli/opts/network.go | 120 + cli/opts/network_test.go | 104 + cli/opts/opts.go | 523 + cli/opts/opts_test.go | 395 + cli/opts/opts_unix.go | 6 + cli/opts/opts_windows.go | 56 + cli/opts/parse.go | 99 + cli/opts/port.go | 172 + cli/opts/port_test.go | 330 + cli/opts/quotedstring.go | 37 + cli/opts/quotedstring_test.go | 30 + cli/opts/runtime.go | 79 + cli/opts/secret.go | 98 + cli/opts/secret_test.go | 80 + cli/opts/throttledevice.go | 108 + cli/opts/ulimit.go | 57 + cli/opts/ulimit_test.go | 42 + cli/opts/weightdevice.go | 84 + cli/poule.yml | 41 + cli/scripts/build/.variables | 39 + cli/scripts/build/binary | 14 + cli/scripts/build/cross | 33 + cli/scripts/build/dynbinary | 14 + cli/scripts/build/osx | 22 + cli/scripts/build/plugins | 21 + cli/scripts/build/plugins-osx | 18 + cli/scripts/build/plugins-windows | 14 + cli/scripts/build/windows | 23 + cli/scripts/docs/generate-authors.sh | 15 + cli/scripts/docs/generate-man.sh | 14 + cli/scripts/docs/generate-yaml.sh | 8 + cli/scripts/gen/windows-resources | 44 + cli/scripts/make.ps1 | 228 + cli/scripts/test/e2e/entry | 11 + cli/scripts/test/e2e/load-image | 50 + cli/scripts/test/e2e/run | 120 + cli/scripts/test/e2e/wait-on-daemon | 9 + cli/scripts/test/e2e/wrapper | 16 + cli/scripts/validate/check-git-diff | 17 + cli/scripts/validate/shellcheck | 5 + cli/scripts/warn-outside-container | 18 + cli/scripts/winresources/common.rc | 38 + cli/scripts/winresources/docker.exe.manifest | 18 + cli/scripts/winresources/docker.ico | Bin 0 -> 370070 bytes cli/scripts/winresources/docker.png | Bin 0 -> 658195 bytes 
cli/scripts/winresources/docker.rc | 3 + cli/service/logs/parse_logs.go | 39 + cli/service/logs/parse_logs_test.go | 34 + cli/templates/templates.go | 84 + cli/templates/templates_test.go | 89 + cli/types/types.go | 88 + cli/vendor.conf | 103 + .../github.com/containerd/ttrpc/LICENSE | 201 + .../github.com/containerd/ttrpc/README.md | 62 + .../github.com/containerd/ttrpc/channel.go | 154 + .../github.com/containerd/ttrpc/client.go | 290 + .../github.com/containerd/ttrpc/codec.go | 42 + .../github.com/containerd/ttrpc/config.go | 39 + .../github.com/containerd/ttrpc/handshake.go | 50 + .../github.com/containerd/ttrpc/server.go | 471 + .../github.com/containerd/ttrpc/services.go | 150 + .../github.com/containerd/ttrpc/types.go | 43 + .../containerd/ttrpc/unixcreds_linux.go | 108 + .../docker/compose-on-kubernetes/LICENSE | 201 + .../docker/compose-on-kubernetes/README.md | 190 + .../api/client/clientset/clientset.go | 130 + .../api/client/clientset/scheme/register.go | 47 + .../typed/compose/v1alpha3/compose_client.go | 74 + .../clientset/typed/compose/v1alpha3/stack.go | 172 + .../typed/compose/v1beta1/compose_client.go | 74 + .../clientset/typed/compose/v1beta1/stack.go | 175 + .../typed/compose/v1beta2/compose_client.go | 74 + .../clientset/typed/compose/v1beta2/stack.go | 173 + .../api/client/informers/compose/interface.go | 32 + .../informers/compose/v1alpha3/interface.go | 25 + .../informers/compose/v1alpha3/stack.go | 74 + .../informers/compose/v1beta2/interface.go | 25 + .../client/informers/compose/v1beta2/stack.go | 51 + .../api/client/informers/factory.go | 101 + .../api/client/informers/generic.go | 46 + .../internalinterfaces/factory_interfaces.go | 18 + .../compose/v1alpha3/expansion_generated.go | 9 + .../client/listers/compose/v1alpha3/stack.go | 78 + .../compose/v1beta2/expansion_generated.go | 9 + .../client/listers/compose/v1beta2/stack.go | 78 + .../api/compose/clone/maps.go | 25 + .../api/compose/clone/slices.go | 11 + 
.../api/compose/impersonation/doc.go | 3 + .../impersonation/impersonationconfig.go | 26 + .../v1alpha3/composefile_stack_types.go | 26 + .../api/compose/v1alpha3/conversion_custom.go | 12 + .../compose/v1alpha3/conversion_generated.go | 1039 + .../compose/v1alpha3/deepcopy_generated.go | 588 + .../api/compose/v1alpha3/doc.go | 8 + .../api/compose/v1alpha3/owner.go | 30 + .../api/compose/v1alpha3/register.go | 42 + .../api/compose/v1alpha3/scale.go | 29 + .../api/compose/v1alpha3/stack.go | 295 + .../api/compose/v1beta1/doc.go | 10 + .../api/compose/v1beta1/owner.go | 31 + .../api/compose/v1beta1/parsing.go | 4 + .../api/compose/v1beta1/register.go | 39 + .../api/compose/v1beta1/stack.go | 87 + .../v1beta2/composefile_stack_types.go | 26 + .../api/compose/v1beta2/deepcopy_generated.go | 660 + .../api/compose/v1beta2/doc.go | 7 + .../api/compose/v1beta2/owner.go | 30 + .../api/compose/v1beta2/register.go | 42 + .../api/compose/v1beta2/scale.go | 29 + .../api/compose/v1beta2/stack.go | 270 + .../compose-on-kubernetes/api/config.go | 26 + .../docker/compose-on-kubernetes/api/doc.go | 4 + .../api/labels/labels.go | 45 + .../github.com/docker/licensing/LICENSE | 191 + .../github.com/docker/licensing/README.md | 12 + .../github.com/docker/licensing/client.go | 276 + .../docker/licensing/lib/errors/error.go | 112 + .../docker/licensing/lib/errors/herror.go | 101 + .../docker/licensing/lib/errors/stack.go | 65 + .../docker/licensing/lib/errors/wrap.go | 106 + .../docker/licensing/lib/go-auth/README.md | 21 + .../lib/go-auth/identity/identity.go | 45 + .../licensing/lib/go-auth/jwt/context.go | 20 + .../docker/licensing/lib/go-auth/jwt/jwt.go | 282 + .../licensing/lib/go-clientlib/README.md | 5 + .../licensing/lib/go-clientlib/client.go | 305 + .../licensing/lib/go-validation/README.md | 5 + .../licensing/lib/go-validation/validation.go | 141 + .../github.com/docker/licensing/license.go | 158 + .../docker/licensing/model/license.go | 38 + 
.../docker/licensing/model/subscriptions.go | 189 + .../docker/licensing/model/users.go | 91 + .../github.com/docker/licensing/storage.go | 223 + .../docker/licensing/subscriptions.go | 74 + .../docker/licensing/types/types.go | 17 + .../github.com/docker/licensing/users.go | 90 + .../grpc-ecosystem/grpc-opentracing/LICENSE | 27 + .../grpc-ecosystem/grpc-opentracing/PATENTS | 23 + .../grpc-opentracing/README.rst | 25 + .../grpc-opentracing/go/otgrpc/README.md | 57 + .../grpc-opentracing/go/otgrpc/client.go | 239 + .../grpc-opentracing/go/otgrpc/errors.go | 69 + .../grpc-opentracing/go/otgrpc/options.go | 76 + .../grpc-opentracing/go/otgrpc/package.go | 5 + .../grpc-opentracing/go/otgrpc/server.go | 141 + .../grpc-opentracing/go/otgrpc/shared.go | 42 + .../grpc-opentracing/python/README.md | 4 + .../python/examples/protos/command_line.proto | 15 + .../python/examples/protos/store.proto | 37 + cli/vendor/github.com/jaguilar/vt100/LICENSE | 22 + .../github.com/jaguilar/vt100/README.md | 54 + .../github.com/jaguilar/vt100/command.go | 288 + cli/vendor/github.com/jaguilar/vt100/go.mod | 5 + .../github.com/jaguilar/vt100/scanner.go | 97 + cli/vendor/github.com/jaguilar/vt100/vt100.go | 435 + cli/vendor/github.com/moby/buildkit/LICENSE | 201 + cli/vendor/github.com/moby/buildkit/README.md | 373 + .../api/services/control/control.pb.go | 5970 ++ .../api/services/control/control.proto | 149 + .../buildkit/api/services/control/generate.go | 3 + .../moby/buildkit/api/types/generate.go | 3 + .../moby/buildkit/api/types/worker.pb.go | 911 + .../moby/buildkit/api/types/worker.proto | 24 + .../github.com/moby/buildkit/client/build.go | 105 + .../moby/buildkit/client/buildid/metadata.go | 29 + .../github.com/moby/buildkit/client/client.go | 169 + .../moby/buildkit/client/client_unix.go | 19 + .../moby/buildkit/client/client_windows.go | 24 + .../buildkit/client/connhelper/connhelper.go | 37 + .../moby/buildkit/client/diskusage.go | 84 + .../moby/buildkit/client/exporters.go | 9 + 
.../github.com/moby/buildkit/client/filter.go | 19 + .../github.com/moby/buildkit/client/graph.go | 46 + .../moby/buildkit/client/llb/exec.go | 654 + .../moby/buildkit/client/llb/fileop.go | 727 + .../moby/buildkit/client/llb/marshal.go | 112 + .../moby/buildkit/client/llb/meta.go | 233 + .../moby/buildkit/client/llb/resolver.go | 20 + .../moby/buildkit/client/llb/source.go | 429 + .../moby/buildkit/client/llb/state.go | 515 + .../moby/buildkit/client/ociindex/ociindex.go | 113 + .../github.com/moby/buildkit/client/prune.go | 83 + .../github.com/moby/buildkit/client/solve.go | 475 + .../moby/buildkit/client/workers.go | 70 + .../frontend/gateway/client/client.go | 76 + .../frontend/gateway/client/result.go | 54 + .../frontend/gateway/grpcclient/client.go | 501 + .../moby/buildkit/frontend/gateway/pb/caps.go | 95 + .../frontend/gateway/pb/gateway.pb.go | 5292 ++ .../frontend/gateway/pb/gateway.proto | 138 + .../buildkit/frontend/gateway/pb/generate.go | 3 + cli/vendor/github.com/moby/buildkit/go.mod | 77 + .../moby/buildkit/identity/randomid.go | 53 + .../moby/buildkit/session/auth/auth.go | 27 + .../moby/buildkit/session/auth/auth.pb.go | 728 + .../moby/buildkit/session/auth/auth.proto | 19 + .../session/auth/authprovider/authprovider.go | 53 + .../moby/buildkit/session/auth/generate.go | 3 + .../buildkit/session/content/attachable.go | 132 + .../moby/buildkit/session/content/caller.go | 91 + .../moby/buildkit/session/context.go | 22 + .../buildkit/session/filesync/diffcopy.go | 117 + .../buildkit/session/filesync/filesync.go | 321 + .../buildkit/session/filesync/filesync.pb.go | 669 + .../buildkit/session/filesync/filesync.proto | 20 + .../buildkit/session/filesync/generate.go | 3 + .../github.com/moby/buildkit/session/grpc.go | 81 + .../moby/buildkit/session/grpchijack/dial.go | 162 + .../buildkit/session/grpchijack/hijack.go | 15 + .../moby/buildkit/session/manager.go | 220 + .../moby/buildkit/session/secrets/generate.go | 3 + 
.../moby/buildkit/session/secrets/secrets.go | 30 + .../buildkit/session/secrets/secrets.pb.go | 868 + .../buildkit/session/secrets/secrets.proto | 19 + .../session/secrets/secretsprovider/file.go | 54 + .../secretsprovider/secretsprovider.go | 60 + .../moby/buildkit/session/session.go | 143 + .../moby/buildkit/session/sshforward/copy.go | 65 + .../buildkit/session/sshforward/generate.go | 3 + .../moby/buildkit/session/sshforward/ssh.go | 114 + .../buildkit/session/sshforward/ssh.pb.go | 900 + .../buildkit/session/sshforward/ssh.proto | 22 + .../sshforward/sshprovider/agentprovider.go | 198 + .../moby/buildkit/solver/pb/attr.go | 25 + .../moby/buildkit/solver/pb/caps.go | 285 + .../moby/buildkit/solver/pb/const.go | 25 + .../moby/buildkit/solver/pb/generate.go | 3 + .../moby/buildkit/solver/pb/ops.pb.go | 9361 +++ .../moby/buildkit/solver/pb/ops.proto | 305 + .../moby/buildkit/solver/pb/platform.go | 41 + .../moby/buildkit/util/apicaps/caps.go | 162 + .../moby/buildkit/util/apicaps/pb/caps.pb.go | 567 + .../moby/buildkit/util/apicaps/pb/caps.proto | 19 + .../moby/buildkit/util/apicaps/pb/generate.go | 3 + .../buildkit/util/appcontext/appcontext.go | 41 + .../util/appcontext/appcontext_unix.go | 11 + .../util/appcontext/appcontext_windows.go | 7 + .../util/appdefaults/appdefaults_unix.go | 69 + .../util/appdefaults/appdefaults_windows.go | 23 + .../util/entitlements/entitlements.go | 60 + .../util/entitlements/security_linux.go | 67 + .../util/progress/progressui/display.go | 577 + .../util/progress/progressui/printer.go | 293 + .../moby/buildkit/util/system/path_unix.go | 14 + .../moby/buildkit/util/system/path_windows.go | 37 + .../buildkit/util/system/seccomp_linux.go | 29 + .../buildkit/util/system/seccomp_nolinux.go | 7 + .../buildkit/util/system/seccomp_noseccomp.go | 7 + cli/vendor/github.com/spf13/cobra/LICENSE.txt | 174 + cli/vendor/github.com/spf13/cobra/README.md | 736 + cli/vendor/github.com/spf13/cobra/args.go | 89 + .../spf13/cobra/bash_completions.go 
| 584 + cli/vendor/github.com/spf13/cobra/cobra.go | 200 + cli/vendor/github.com/spf13/cobra/command.go | 1517 + .../github.com/spf13/cobra/command_notwin.go | 5 + .../github.com/spf13/cobra/command_win.go | 20 + .../github.com/spf13/cobra/doc/man_docs.go | 236 + .../github.com/spf13/cobra/doc/md_docs.go | 159 + .../github.com/spf13/cobra/doc/rest_docs.go | 185 + cli/vendor/github.com/spf13/cobra/doc/util.go | 51 + .../github.com/spf13/cobra/doc/yaml_docs.go | 169 + .../github.com/spf13/cobra/zsh_completions.go | 126 + cli/vendor/github.com/spf13/pflag/LICENSE | 28 + cli/vendor/github.com/spf13/pflag/README.md | 296 + cli/vendor/github.com/spf13/pflag/bool.go | 94 + .../github.com/spf13/pflag/bool_slice.go | 147 + cli/vendor/github.com/spf13/pflag/bytes.go | 209 + cli/vendor/github.com/spf13/pflag/count.go | 96 + cli/vendor/github.com/spf13/pflag/duration.go | 86 + .../github.com/spf13/pflag/duration_slice.go | 128 + cli/vendor/github.com/spf13/pflag/flag.go | 1224 + cli/vendor/github.com/spf13/pflag/float32.go | 88 + cli/vendor/github.com/spf13/pflag/float64.go | 84 + .../github.com/spf13/pflag/golangflag.go | 105 + cli/vendor/github.com/spf13/pflag/int.go | 84 + cli/vendor/github.com/spf13/pflag/int16.go | 88 + cli/vendor/github.com/spf13/pflag/int32.go | 88 + cli/vendor/github.com/spf13/pflag/int64.go | 84 + cli/vendor/github.com/spf13/pflag/int8.go | 88 + .../github.com/spf13/pflag/int_slice.go | 128 + cli/vendor/github.com/spf13/pflag/ip.go | 94 + cli/vendor/github.com/spf13/pflag/ip_slice.go | 148 + cli/vendor/github.com/spf13/pflag/ipmask.go | 122 + cli/vendor/github.com/spf13/pflag/ipnet.go | 98 + .../github.com/spf13/pflag/ipnet_slice.go | 147 + cli/vendor/github.com/spf13/pflag/string.go | 80 + .../github.com/spf13/pflag/string_array.go | 103 + .../github.com/spf13/pflag/string_slice.go | 149 + .../github.com/spf13/pflag/string_to_int.go | 149 + .../spf13/pflag/string_to_string.go | 160 + cli/vendor/github.com/spf13/pflag/uint.go | 88 + 
cli/vendor/github.com/spf13/pflag/uint16.go | 88 + cli/vendor/github.com/spf13/pflag/uint32.go | 88 + cli/vendor/github.com/spf13/pflag/uint64.go | 88 + cli/vendor/github.com/spf13/pflag/uint8.go | 88 + .../github.com/spf13/pflag/uint_slice.go | 126 + .../github.com/tonistiigi/fsutil/LICENSE | 22 + .../tonistiigi/fsutil/chtimes_linux.go | 20 + .../tonistiigi/fsutil/chtimes_nolinux.go | 20 + .../github.com/tonistiigi/fsutil/diff.go | 45 + .../tonistiigi/fsutil/diff_containerd.go | 200 + .../fsutil/diff_containerd_linux.go | 37 + .../tonistiigi/fsutil/diskwriter.go | 352 + .../tonistiigi/fsutil/diskwriter_unix.go | 52 + .../tonistiigi/fsutil/diskwriter_windows.go | 18 + .../tonistiigi/fsutil/followlinks.go | 150 + cli/vendor/github.com/tonistiigi/fsutil/fs.go | 118 + .../github.com/tonistiigi/fsutil/go.mod | 28 + .../github.com/tonistiigi/fsutil/hardlinks.go | 47 + .../github.com/tonistiigi/fsutil/readme.md | 45 + .../github.com/tonistiigi/fsutil/receive.go | 276 + .../github.com/tonistiigi/fsutil/send.go | 206 + .../github.com/tonistiigi/fsutil/stat.go | 64 + .../github.com/tonistiigi/fsutil/stat_unix.go | 71 + .../tonistiigi/fsutil/stat_windows.go | 16 + .../github.com/tonistiigi/fsutil/tarwriter.go | 72 + .../tonistiigi/fsutil/types/generate.go | 3 + .../tonistiigi/fsutil/types/stat.pb.go | 909 + .../tonistiigi/fsutil/types/stat.proto | 19 + .../tonistiigi/fsutil/types/wire.pb.go | 542 + .../tonistiigi/fsutil/types/wire.proto | 21 + .../github.com/tonistiigi/fsutil/validator.go | 92 + .../github.com/tonistiigi/fsutil/walker.go | 230 + .../github.com/tonistiigi/units/LICENSE | 21 + .../github.com/tonistiigi/units/bytes.go | 125 + .../github.com/tonistiigi/units/readme.md | 29 + cli/vendor/k8s.io/api/LICENSE | 202 + cli/vendor/k8s.io/api/README.md | 19 + .../api/admissionregistration/v1beta1/doc.go | 26 + .../v1beta1/generated.pb.go | 2297 + .../v1beta1/generated.proto | 300 + .../admissionregistration/v1beta1/register.go | 53 + 
.../admissionregistration/v1beta1/types.go | 349 + .../v1beta1/types_swagger_doc_generated.go | 129 + .../v1beta1/zz_generated.deepcopy.go | 317 + cli/vendor/k8s.io/api/apps/v1/doc.go | 21 + cli/vendor/k8s.io/api/apps/v1/generated.pb.go | 6924 ++ cli/vendor/k8s.io/api/apps/v1/generated.proto | 701 + cli/vendor/k8s.io/api/apps/v1/register.go | 60 + cli/vendor/k8s.io/api/apps/v1/types.go | 826 + .../apps/v1/types_swagger_doc_generated.go | 365 + .../api/apps/v1/zz_generated.deepcopy.go | 772 + cli/vendor/k8s.io/api/apps/v1beta1/doc.go | 21 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 5275 ++ .../k8s.io/api/apps/v1beta1/generated.proto | 484 + .../k8s.io/api/apps/v1beta1/register.go | 58 + cli/vendor/k8s.io/api/apps/v1beta1/types.go | 567 + .../v1beta1/types_swagger_doc_generated.go | 273 + .../api/apps/v1beta1/zz_generated.deepcopy.go | 594 + cli/vendor/k8s.io/api/apps/v1beta2/doc.go | 21 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 7567 +++ .../k8s.io/api/apps/v1beta2/generated.proto | 752 + .../k8s.io/api/apps/v1beta2/register.go | 61 + cli/vendor/k8s.io/api/apps/v1beta2/types.go | 876 + .../v1beta2/types_swagger_doc_generated.go | 396 + .../api/apps/v1beta2/zz_generated.deepcopy.go | 839 + .../api/auditregistration/v1alpha1/doc.go | 23 + .../v1alpha1/generated.pb.go | 1685 + .../v1alpha1/generated.proto | 158 + .../auditregistration/v1alpha1/register.go | 56 + .../api/auditregistration/v1alpha1/types.go | 194 + .../v1alpha1/types_swagger_doc_generated.go | 110 + .../v1alpha1/zz_generated.deepcopy.go | 224 + .../k8s.io/api/authentication/v1/doc.go | 22 + .../api/authentication/v1/generated.pb.go | 2233 + .../api/authentication/v1/generated.proto | 182 + .../k8s.io/api/authentication/v1/register.go | 52 + .../k8s.io/api/authentication/v1/types.go | 189 + .../v1/types_swagger_doc_generated.go | 115 + .../v1/zz_generated.deepcopy.go | 244 + .../k8s.io/api/authentication/v1beta1/doc.go | 22 + .../authentication/v1beta1/generated.pb.go | 1388 + 
.../authentication/v1beta1/generated.proto | 118 + .../api/authentication/v1beta1/register.go | 51 + .../api/authentication/v1beta1/types.go | 110 + .../v1beta1/types_swagger_doc_generated.go | 74 + .../v1beta1/zz_generated.deepcopy.go | 152 + cli/vendor/k8s.io/api/authorization/v1/doc.go | 23 + .../api/authorization/v1/generated.pb.go | 3511 + .../api/authorization/v1/generated.proto | 272 + .../k8s.io/api/authorization/v1/register.go | 55 + .../k8s.io/api/authorization/v1/types.go | 268 + .../v1/types_swagger_doc_generated.go | 173 + .../authorization/v1/zz_generated.deepcopy.go | 385 + .../k8s.io/api/authorization/v1beta1/doc.go | 23 + .../api/authorization/v1beta1/generated.pb.go | 3511 + .../api/authorization/v1beta1/generated.proto | 272 + .../api/authorization/v1beta1/register.go | 55 + .../k8s.io/api/authorization/v1beta1/types.go | 268 + .../v1beta1/types_swagger_doc_generated.go | 173 + .../v1beta1/zz_generated.deepcopy.go | 385 + cli/vendor/k8s.io/api/autoscaling/v1/doc.go | 21 + .../k8s.io/api/autoscaling/v1/generated.pb.go | 4691 ++ .../k8s.io/api/autoscaling/v1/generated.proto | 415 + .../k8s.io/api/autoscaling/v1/register.go | 53 + cli/vendor/k8s.io/api/autoscaling/v1/types.go | 428 + .../v1/types_swagger_doc_generated.go | 250 + .../autoscaling/v1/zz_generated.deepcopy.go | 515 + .../k8s.io/api/autoscaling/v2beta1/doc.go | 21 + .../api/autoscaling/v2beta1/generated.pb.go | 4307 ++ .../api/autoscaling/v2beta1/generated.proto | 397 + .../api/autoscaling/v2beta1/register.go | 52 + .../k8s.io/api/autoscaling/v2beta1/types.go | 405 + .../v2beta1/types_swagger_doc_generated.go | 221 + .../v2beta1/zz_generated.deepcopy.go | 466 + .../k8s.io/api/autoscaling/v2beta2/doc.go | 21 + .../api/autoscaling/v2beta2/generated.pb.go | 4419 ++ .../api/autoscaling/v2beta2/generated.proto | 369 + .../api/autoscaling/v2beta2/register.go | 50 + .../k8s.io/api/autoscaling/v2beta2/types.go | 393 + .../v2beta2/types_swagger_doc_generated.go | 240 + 
.../v2beta2/zz_generated.deepcopy.go | 487 + cli/vendor/k8s.io/api/batch/v1/doc.go | 21 + .../k8s.io/api/batch/v1/generated.pb.go | 1627 + .../k8s.io/api/batch/v1/generated.proto | 184 + cli/vendor/k8s.io/api/batch/v1/register.go | 52 + cli/vendor/k8s.io/api/batch/v1/types.go | 193 + .../batch/v1/types_swagger_doc_generated.go | 95 + .../api/batch/v1/zz_generated.deepcopy.go | 188 + cli/vendor/k8s.io/api/batch/v1beta1/doc.go | 21 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 1490 + .../k8s.io/api/batch/v1beta1/generated.proto | 137 + .../k8s.io/api/batch/v1beta1/register.go | 53 + cli/vendor/k8s.io/api/batch/v1beta1/types.go | 158 + .../v1beta1/types_swagger_doc_generated.go | 96 + .../batch/v1beta1/zz_generated.deepcopy.go | 194 + cli/vendor/k8s.io/api/batch/v2alpha1/doc.go | 21 + .../k8s.io/api/batch/v2alpha1/generated.pb.go | 1490 + .../k8s.io/api/batch/v2alpha1/generated.proto | 135 + .../k8s.io/api/batch/v2alpha1/register.go | 53 + cli/vendor/k8s.io/api/batch/v2alpha1/types.go | 156 + .../v2alpha1/types_swagger_doc_generated.go | 96 + .../batch/v2alpha1/zz_generated.deepcopy.go | 194 + .../k8s.io/api/certificates/v1beta1/doc.go | 23 + .../api/certificates/v1beta1/generated.pb.go | 1676 + .../api/certificates/v1beta1/generated.proto | 121 + .../api/certificates/v1beta1/register.go | 59 + .../k8s.io/api/certificates/v1beta1/types.go | 155 + .../v1beta1/types_swagger_doc_generated.go | 74 + .../v1beta1/zz_generated.deepcopy.go | 197 + cli/vendor/k8s.io/api/coordination/v1/doc.go | 23 + .../api/coordination/v1/generated.pb.go | 864 + .../api/coordination/v1/generated.proto | 80 + .../k8s.io/api/coordination/v1/register.go | 53 + .../k8s.io/api/coordination/v1/types.go | 74 + .../v1/types_swagger_doc_generated.go | 63 + .../coordination/v1/zz_generated.deepcopy.go | 124 + .../k8s.io/api/coordination/v1beta1/doc.go | 23 + .../api/coordination/v1beta1/generated.pb.go | 864 + .../api/coordination/v1beta1/generated.proto | 80 + 
.../api/coordination/v1beta1/register.go | 53 + .../k8s.io/api/coordination/v1beta1/types.go | 74 + .../v1beta1/types_swagger_doc_generated.go | 63 + .../v1beta1/zz_generated.deepcopy.go | 124 + .../api/core/v1/annotation_key_constants.go | 106 + cli/vendor/k8s.io/api/core/v1/doc.go | 22 + cli/vendor/k8s.io/api/core/v1/generated.pb.go | 52991 ++++++++++++++++ cli/vendor/k8s.io/api/core/v1/generated.proto | 4845 ++ .../k8s.io/api/core/v1/objectreference.go | 33 + cli/vendor/k8s.io/api/core/v1/register.go | 99 + cli/vendor/k8s.io/api/core/v1/resource.go | 56 + cli/vendor/k8s.io/api/core/v1/taint.go | 33 + cli/vendor/k8s.io/api/core/v1/toleration.go | 56 + cli/vendor/k8s.io/api/core/v1/types.go | 5414 ++ .../core/v1/types_swagger_doc_generated.go | 2362 + .../k8s.io/api/core/v1/well_known_labels.go | 36 + .../api/core/v1/zz_generated.deepcopy.go | 5473 ++ cli/vendor/k8s.io/api/events/v1beta1/doc.go | 23 + .../k8s.io/api/events/v1beta1/generated.pb.go | 1287 + .../k8s.io/api/events/v1beta1/generated.proto | 121 + .../k8s.io/api/events/v1beta1/register.go | 53 + cli/vendor/k8s.io/api/events/v1beta1/types.go | 122 + .../v1beta1/types_swagger_doc_generated.go | 73 + .../events/v1beta1/zz_generated.deepcopy.go | 117 + .../k8s.io/api/extensions/v1beta1/doc.go | 21 + .../api/extensions/v1beta1/generated.pb.go | 12477 ++++ .../api/extensions/v1beta1/generated.proto | 1172 + .../k8s.io/api/extensions/v1beta1/register.go | 66 + .../k8s.io/api/extensions/v1beta1/types.go | 1372 + .../v1beta1/types_swagger_doc_generated.go | 650 + .../v1beta1/zz_generated.deepcopy.go | 1466 + cli/vendor/k8s.io/api/networking/v1/doc.go | 22 + .../k8s.io/api/networking/v1/generated.pb.go | 1849 + .../k8s.io/api/networking/v1/generated.proto | 195 + .../k8s.io/api/networking/v1/register.go | 53 + cli/vendor/k8s.io/api/networking/v1/types.go | 203 + .../v1/types_swagger_doc_generated.go | 113 + .../networking/v1/zz_generated.deepcopy.go | 262 + .../k8s.io/api/networking/v1beta1/doc.go | 22 + 
.../api/networking/v1beta1/generated.pb.go | 1953 + .../api/networking/v1beta1/generated.proto | 186 + .../k8s.io/api/networking/v1beta1/register.go | 56 + .../k8s.io/api/networking/v1beta1/types.go | 192 + .../v1beta1/types_swagger_doc_generated.go | 127 + .../v1beta1/zz_generated.deepcopy.go | 252 + cli/vendor/k8s.io/api/node/v1alpha1/doc.go | 22 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 696 + .../k8s.io/api/node/v1alpha1/generated.proto | 76 + .../k8s.io/api/node/v1alpha1/register.go | 52 + cli/vendor/k8s.io/api/node/v1alpha1/types.go | 75 + .../v1alpha1/types_swagger_doc_generated.go | 59 + .../node/v1alpha1/zz_generated.deepcopy.go | 101 + cli/vendor/k8s.io/api/node/v1beta1/doc.go | 22 + .../k8s.io/api/node/v1beta1/generated.pb.go | 564 + .../k8s.io/api/node/v1beta1/generated.proto | 66 + .../k8s.io/api/node/v1beta1/register.go | 52 + cli/vendor/k8s.io/api/node/v1beta1/types.go | 65 + .../v1beta1/types_swagger_doc_generated.go | 50 + .../api/node/v1beta1/zz_generated.deepcopy.go | 84 + cli/vendor/k8s.io/api/policy/v1beta1/doc.go | 24 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 4505 ++ .../k8s.io/api/policy/v1beta1/generated.proto | 378 + .../k8s.io/api/policy/v1beta1/register.go | 56 + cli/vendor/k8s.io/api/policy/v1beta1/types.go | 464 + .../v1beta1/types_swagger_doc_generated.go | 232 + .../policy/v1beta1/zz_generated.deepcopy.go | 509 + cli/vendor/k8s.io/api/rbac/v1/doc.go | 23 + cli/vendor/k8s.io/api/rbac/v1/generated.pb.go | 2729 + cli/vendor/k8s.io/api/rbac/v1/generated.proto | 199 + cli/vendor/k8s.io/api/rbac/v1/register.go | 58 + cli/vendor/k8s.io/api/rbac/v1/types.go | 237 + .../rbac/v1/types_swagger_doc_generated.go | 158 + .../api/rbac/v1/zz_generated.deepcopy.go | 389 + cli/vendor/k8s.io/api/rbac/v1alpha1/doc.go | 23 + .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 2730 + .../k8s.io/api/rbac/v1alpha1/generated.proto | 201 + .../k8s.io/api/rbac/v1alpha1/register.go | 58 + cli/vendor/k8s.io/api/rbac/v1alpha1/types.go | 239 + 
.../v1alpha1/types_swagger_doc_generated.go | 158 + .../rbac/v1alpha1/zz_generated.deepcopy.go | 389 + cli/vendor/k8s.io/api/rbac/v1beta1/doc.go | 23 + .../k8s.io/api/rbac/v1beta1/generated.pb.go | 2729 + .../k8s.io/api/rbac/v1beta1/generated.proto | 200 + .../k8s.io/api/rbac/v1beta1/register.go | 58 + cli/vendor/k8s.io/api/rbac/v1beta1/types.go | 237 + .../v1beta1/types_swagger_doc_generated.go | 158 + .../api/rbac/v1beta1/zz_generated.deepcopy.go | 389 + cli/vendor/k8s.io/api/scheduling/v1/doc.go | 23 + .../k8s.io/api/scheduling/v1/generated.pb.go | 621 + .../k8s.io/api/scheduling/v1/generated.proto | 67 + .../k8s.io/api/scheduling/v1/register.go | 55 + cli/vendor/k8s.io/api/scheduling/v1/types.go | 66 + .../v1/types_swagger_doc_generated.go | 52 + .../scheduling/v1/zz_generated.deepcopy.go | 84 + .../k8s.io/api/scheduling/v1alpha1/doc.go | 23 + .../api/scheduling/v1alpha1/generated.pb.go | 621 + .../api/scheduling/v1alpha1/generated.proto | 68 + .../api/scheduling/v1alpha1/register.go | 52 + .../k8s.io/api/scheduling/v1alpha1/types.go | 67 + .../v1alpha1/types_swagger_doc_generated.go | 52 + .../v1alpha1/zz_generated.deepcopy.go | 84 + .../k8s.io/api/scheduling/v1beta1/doc.go | 23 + .../api/scheduling/v1beta1/generated.pb.go | 621 + .../api/scheduling/v1beta1/generated.proto | 68 + .../k8s.io/api/scheduling/v1beta1/register.go | 52 + .../k8s.io/api/scheduling/v1beta1/types.go | 67 + .../v1beta1/types_swagger_doc_generated.go | 52 + .../v1beta1/zz_generated.deepcopy.go | 84 + .../k8s.io/api/settings/v1alpha1/doc.go | 23 + .../api/settings/v1alpha1/generated.pb.go | 910 + .../api/settings/v1alpha1/generated.proto | 75 + .../k8s.io/api/settings/v1alpha1/register.go | 52 + .../k8s.io/api/settings/v1alpha1/types.go | 70 + .../v1alpha1/types_swagger_doc_generated.go | 61 + .../v1alpha1/zz_generated.deepcopy.go | 131 + cli/vendor/k8s.io/api/storage/v1/doc.go | 22 + .../k8s.io/api/storage/v1/generated.pb.go | 2246 + .../k8s.io/api/storage/v1/generated.proto | 186 + 
cli/vendor/k8s.io/api/storage/v1/register.go | 56 + cli/vendor/k8s.io/api/storage/v1/types.go | 211 + .../storage/v1/types_swagger_doc_generated.go | 119 + .../api/storage/v1/zz_generated.deepcopy.go | 268 + cli/vendor/k8s.io/api/storage/v1alpha1/doc.go | 22 + .../api/storage/v1alpha1/generated.pb.go | 1503 + .../api/storage/v1alpha1/generated.proto | 126 + .../k8s.io/api/storage/v1alpha1/register.go | 50 + .../k8s.io/api/storage/v1alpha1/types.go | 126 + .../v1alpha1/types_swagger_doc_generated.go | 93 + .../storage/v1alpha1/zz_generated.deepcopy.go | 174 + cli/vendor/k8s.io/api/storage/v1beta1/doc.go | 22 + .../api/storage/v1beta1/generated.pb.go | 3472 + .../api/storage/v1beta1/generated.proto | 323 + .../k8s.io/api/storage/v1beta1/register.go | 62 + .../k8s.io/api/storage/v1beta1/types.go | 368 + .../v1beta1/types_swagger_doc_generated.go | 189 + .../storage/v1beta1/zz_generated.deepcopy.go | 458 + cli/vendor/k8s.io/apimachinery/LICENSE | 202 + cli/vendor/k8s.io/apimachinery/README.md | 29 + .../k8s.io/apimachinery/pkg/api/errors/doc.go | 18 + .../apimachinery/pkg/api/errors/errors.go | 619 + .../k8s.io/apimachinery/pkg/api/meta/doc.go | 19 + .../apimachinery/pkg/api/meta/errors.go | 121 + .../pkg/api/meta/firsthit_restmapper.go | 97 + .../k8s.io/apimachinery/pkg/api/meta/help.go | 218 + .../apimachinery/pkg/api/meta/interfaces.go | 134 + .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 104 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 650 + .../pkg/api/meta/multirestmapper.go | 210 + .../apimachinery/pkg/api/meta/priority.go | 222 + .../apimachinery/pkg/api/meta/restmapper.go | 518 + .../apimachinery/pkg/api/resource/amount.go | 299 + .../pkg/api/resource/generated.pb.go | 75 + .../pkg/api/resource/generated.proto | 88 + .../apimachinery/pkg/api/resource/math.go | 314 + .../apimachinery/pkg/api/resource/quantity.go | 738 + .../pkg/api/resource/quantity_proto.go | 284 + .../pkg/api/resource/scale_int.go | 95 + .../apimachinery/pkg/api/resource/suffix.go | 198 
+ .../pkg/api/resource/zz_generated.deepcopy.go | 27 + .../apis/meta/internalversion/conversion.go | 52 + .../pkg/apis/meta/internalversion/doc.go | 20 + .../pkg/apis/meta/internalversion/register.go | 113 + .../pkg/apis/meta/internalversion/types.go | 67 + .../zz_generated.conversion.go | 141 + .../internalversion/zz_generated.deepcopy.go | 96 + .../pkg/apis/meta/v1/controller_ref.go | 54 + .../pkg/apis/meta/v1/conversion.go | 319 + .../apimachinery/pkg/apis/meta/v1/doc.go | 23 + .../apimachinery/pkg/apis/meta/v1/duration.go | 60 + .../pkg/apis/meta/v1/generated.pb.go | 9202 +++ .../pkg/apis/meta/v1/generated.proto | 984 + .../pkg/apis/meta/v1/group_version.go | 148 + .../apimachinery/pkg/apis/meta/v1/helpers.go | 267 + .../apimachinery/pkg/apis/meta/v1/labels.go | 55 + .../apimachinery/pkg/apis/meta/v1/meta.go | 176 + .../pkg/apis/meta/v1/micro_time.go | 183 + .../pkg/apis/meta/v1/micro_time_proto.go | 72 + .../apimachinery/pkg/apis/meta/v1/register.go | 99 + .../apimachinery/pkg/apis/meta/v1/time.go | 185 + .../pkg/apis/meta/v1/time_proto.go | 92 + .../apimachinery/pkg/apis/meta/v1/types.go | 1130 + .../meta/v1/types_swagger_doc_generated.go | 384 + .../pkg/apis/meta/v1/unstructured/helpers.go | 454 + .../apis/meta/v1/unstructured/unstructured.go | 498 + .../meta/v1/unstructured/unstructured_list.go | 188 + .../v1/unstructured/zz_generated.deepcopy.go | 55 + .../apimachinery/pkg/apis/meta/v1/watch.go | 89 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1056 + .../pkg/apis/meta/v1/zz_generated.defaults.go | 32 + .../pkg/apis/meta/v1beta1/conversion.go | 27 + .../pkg/apis/meta/v1beta1/deepcopy.go | 44 + .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 23 + .../pkg/apis/meta/v1beta1/generated.pb.go | 613 + .../pkg/apis/meta/v1beta1/generated.proto | 57 + .../pkg/apis/meta/v1beta1/register.go | 57 + .../pkg/apis/meta/v1beta1/types.go | 161 + .../v1beta1/types_swagger_doc_generated.go | 104 + .../meta/v1beta1/zz_generated.deepcopy.go | 189 + 
.../meta/v1beta1/zz_generated.defaults.go | 32 + .../apimachinery/pkg/conversion/converter.go | 898 + .../apimachinery/pkg/conversion/deep_equal.go | 36 + .../k8s.io/apimachinery/pkg/conversion/doc.go | 24 + .../apimachinery/pkg/conversion/helper.go | 39 + .../pkg/conversion/queryparams/convert.go | 198 + .../pkg/conversion/queryparams/doc.go | 19 + .../k8s.io/apimachinery/pkg/fields/doc.go | 19 + .../k8s.io/apimachinery/pkg/fields/fields.go | 62 + .../apimachinery/pkg/fields/requirements.go | 30 + .../apimachinery/pkg/fields/selector.go | 476 + .../k8s.io/apimachinery/pkg/labels/doc.go | 19 + .../k8s.io/apimachinery/pkg/labels/labels.go | 181 + .../apimachinery/pkg/labels/selector.go | 891 + .../pkg/labels/zz_generated.deepcopy.go | 42 + .../k8s.io/apimachinery/pkg/runtime/codec.go | 332 + .../apimachinery/pkg/runtime/codec_check.go | 48 + .../apimachinery/pkg/runtime/conversion.go | 113 + .../apimachinery/pkg/runtime/converter.go | 805 + .../k8s.io/apimachinery/pkg/runtime/doc.go | 51 + .../apimachinery/pkg/runtime/embedded.go | 142 + .../k8s.io/apimachinery/pkg/runtime/error.go | 122 + .../apimachinery/pkg/runtime/extension.go | 51 + .../apimachinery/pkg/runtime/generated.pb.go | 753 + .../apimachinery/pkg/runtime/generated.proto | 127 + .../k8s.io/apimachinery/pkg/runtime/helper.go | 212 + .../apimachinery/pkg/runtime/interfaces.go | 252 + .../apimachinery/pkg/runtime/register.go | 61 + .../pkg/runtime/schema/generated.pb.go | 63 + .../pkg/runtime/schema/generated.proto | 26 + .../pkg/runtime/schema/group_version.go | 300 + .../pkg/runtime/schema/interfaces.go | 40 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 754 + .../pkg/runtime/scheme_builder.go | 48 + .../pkg/runtime/serializer/codec_factory.go | 237 + .../pkg/runtime/serializer/json/json.go | 303 + .../pkg/runtime/serializer/json/meta.go | 63 + .../runtime/serializer/negotiated_codec.go | 43 + .../pkg/runtime/serializer/protobuf/doc.go | 18 + .../runtime/serializer/protobuf/protobuf.go | 459 + 
.../runtime/serializer/protobuf_extension.go | 48 + .../serializer/recognizer/recognizer.go | 127 + .../runtime/serializer/streaming/streaming.go | 137 + .../serializer/versioning/versioning.go | 282 + .../pkg/runtime/swagger_doc_generator.go | 262 + .../k8s.io/apimachinery/pkg/runtime/types.go | 138 + .../apimachinery/pkg/runtime/types_proto.go | 69 + .../pkg/runtime/zz_generated.deepcopy.go | 108 + .../apimachinery/pkg/selection/operator.go | 33 + .../k8s.io/apimachinery/pkg/types/doc.go | 18 + .../apimachinery/pkg/types/namespacedname.go | 43 + .../k8s.io/apimachinery/pkg/types/nodename.go | 43 + .../k8s.io/apimachinery/pkg/types/patch.go | 29 + .../k8s.io/apimachinery/pkg/types/uid.go | 22 + .../apimachinery/pkg/util/cache/cache.go | 83 + .../pkg/util/cache/lruexpirecache.go | 102 + .../apimachinery/pkg/util/clock/clock.go | 348 + .../k8s.io/apimachinery/pkg/util/diff/diff.go | 313 + .../apimachinery/pkg/util/errors/doc.go | 18 + .../apimachinery/pkg/util/errors/errors.go | 229 + .../apimachinery/pkg/util/framer/framer.go | 167 + .../pkg/util/intstr/generated.pb.go | 362 + .../pkg/util/intstr/generated.proto | 43 + .../apimachinery/pkg/util/intstr/intstr.go | 184 + .../k8s.io/apimachinery/pkg/util/json/json.go | 119 + .../pkg/util/mergepatch/errors.go | 102 + .../apimachinery/pkg/util/mergepatch/util.go | 133 + .../pkg/util/naming/from_stack.go | 93 + .../k8s.io/apimachinery/pkg/util/net/http.go | 445 + .../apimachinery/pkg/util/net/interface.go | 416 + .../apimachinery/pkg/util/net/port_range.go | 149 + .../apimachinery/pkg/util/net/port_split.go | 77 + .../k8s.io/apimachinery/pkg/util/net/util.go | 56 + .../apimachinery/pkg/util/runtime/runtime.go | 173 + .../k8s.io/apimachinery/pkg/util/sets/byte.go | 203 + .../k8s.io/apimachinery/pkg/util/sets/doc.go | 20 + .../apimachinery/pkg/util/sets/empty.go | 23 + .../k8s.io/apimachinery/pkg/util/sets/int.go | 203 + .../apimachinery/pkg/util/sets/int64.go | 203 + .../apimachinery/pkg/util/sets/string.go | 203 + 
.../pkg/util/strategicpatch/errors.go | 49 + .../pkg/util/strategicpatch/meta.go | 194 + .../pkg/util/strategicpatch/patch.go | 2174 + .../pkg/util/strategicpatch/types.go | 193 + .../pkg/util/validation/field/errors.go | 259 + .../pkg/util/validation/field/path.go | 91 + .../pkg/util/validation/validation.go | 416 + .../k8s.io/apimachinery/pkg/util/wait/doc.go | 19 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 481 + .../apimachinery/pkg/util/yaml/decoder.go | 344 + .../k8s.io/apimachinery/pkg/version/doc.go | 20 + .../apimachinery/pkg/version/helpers.go | 88 + .../k8s.io/apimachinery/pkg/version/types.go | 37 + .../k8s.io/apimachinery/pkg/watch/doc.go | 19 + .../k8s.io/apimachinery/pkg/watch/filter.go | 105 + .../k8s.io/apimachinery/pkg/watch/mux.go | 260 + .../apimachinery/pkg/watch/streamwatcher.go | 119 + .../k8s.io/apimachinery/pkg/watch/watch.go | 317 + .../pkg/watch/zz_generated.deepcopy.go | 40 + .../third_party/forked/golang/json/fields.go | 513 + .../forked/golang/reflect/deep_equal.go | 388 + cli/vendor/k8s.io/client-go/LICENSE | 202 + cli/vendor/k8s.io/client-go/README.md | 195 + .../client-go/discovery/discovery_client.go | 508 + cli/vendor/k8s.io/client-go/discovery/doc.go | 19 + .../client-go/discovery/fake/discovery.go | 160 + .../k8s.io/client-go/discovery/helper.go | 125 + .../k8s.io/client-go/kubernetes/clientset.go | 580 + cli/vendor/k8s.io/client-go/kubernetes/doc.go | 20 + .../kubernetes/fake/clientset_generated.go | 322 + .../k8s.io/client-go/kubernetes/fake/doc.go | 20 + .../client-go/kubernetes/fake/register.go | 126 + .../k8s.io/client-go/kubernetes/import.go | 19 + .../k8s.io/client-go/kubernetes/scheme/doc.go | 20 + .../client-go/kubernetes/scheme/register.go | 126 + .../v1beta1/admissionregistration_client.go | 95 + .../admissionregistration/v1beta1/doc.go | 20 + .../admissionregistration/v1beta1/fake/doc.go | 20 + .../fake/fake_admissionregistration_client.go | 44 + .../fake/fake_mutatingwebhookconfiguration.go | 120 + 
.../fake_validatingwebhookconfiguration.go | 120 + .../v1beta1/generated_expansion.go | 23 + .../v1beta1/mutatingwebhookconfiguration.go | 164 + .../v1beta1/validatingwebhookconfiguration.go | 164 + .../kubernetes/typed/apps/v1/apps_client.go | 110 + .../typed/apps/v1/controllerrevision.go | 174 + .../kubernetes/typed/apps/v1/daemonset.go | 191 + .../kubernetes/typed/apps/v1/deployment.go | 223 + .../client-go/kubernetes/typed/apps/v1/doc.go | 20 + .../kubernetes/typed/apps/v1/fake/doc.go | 20 + .../typed/apps/v1/fake/fake_apps_client.go | 56 + .../apps/v1/fake/fake_controllerrevision.go | 128 + .../typed/apps/v1/fake/fake_daemonset.go | 140 + .../typed/apps/v1/fake/fake_deployment.go | 163 + .../typed/apps/v1/fake/fake_replicaset.go | 163 + .../typed/apps/v1/fake/fake_statefulset.go | 163 + .../typed/apps/v1/generated_expansion.go | 29 + .../kubernetes/typed/apps/v1/replicaset.go | 223 + .../kubernetes/typed/apps/v1/statefulset.go | 223 + .../typed/apps/v1beta1/apps_client.go | 100 + .../typed/apps/v1beta1/controllerrevision.go | 174 + .../typed/apps/v1beta1/deployment.go | 191 + .../kubernetes/typed/apps/v1beta1/doc.go | 20 + .../kubernetes/typed/apps/v1beta1/fake/doc.go | 20 + .../apps/v1beta1/fake/fake_apps_client.go | 48 + .../v1beta1/fake/fake_controllerrevision.go | 128 + .../apps/v1beta1/fake/fake_deployment.go | 140 + .../apps/v1beta1/fake/fake_statefulset.go | 140 + .../typed/apps/v1beta1/generated_expansion.go | 25 + .../typed/apps/v1beta1/statefulset.go | 191 + .../typed/apps/v1beta2/apps_client.go | 110 + .../typed/apps/v1beta2/controllerrevision.go | 174 + .../typed/apps/v1beta2/daemonset.go | 191 + .../typed/apps/v1beta2/deployment.go | 191 + .../kubernetes/typed/apps/v1beta2/doc.go | 20 + .../kubernetes/typed/apps/v1beta2/fake/doc.go | 20 + .../apps/v1beta2/fake/fake_apps_client.go | 56 + .../v1beta2/fake/fake_controllerrevision.go | 128 + .../typed/apps/v1beta2/fake/fake_daemonset.go | 140 + .../apps/v1beta2/fake/fake_deployment.go | 140 + 
.../apps/v1beta2/fake/fake_replicaset.go | 140 + .../apps/v1beta2/fake/fake_statefulset.go | 162 + .../typed/apps/v1beta2/generated_expansion.go | 29 + .../typed/apps/v1beta2/replicaset.go | 191 + .../typed/apps/v1beta2/statefulset.go | 222 + .../v1alpha1/auditregistration_client.go | 90 + .../auditregistration/v1alpha1/auditsink.go | 164 + .../typed/auditregistration/v1alpha1/doc.go | 20 + .../auditregistration/v1alpha1/fake/doc.go | 20 + .../fake/fake_auditregistration_client.go | 40 + .../v1alpha1/fake/fake_auditsink.go | 120 + .../v1alpha1/generated_expansion.go | 21 + .../v1/authentication_client.go | 90 + .../kubernetes/typed/authentication/v1/doc.go | 20 + .../typed/authentication/v1/fake/doc.go | 20 + .../v1/fake/fake_authentication_client.go | 40 + .../v1/fake/fake_tokenreview.go | 24 + .../v1/fake/fake_tokenreview_expansion.go | 27 + .../authentication/v1/generated_expansion.go | 19 + .../typed/authentication/v1/tokenreview.go | 46 + .../v1/tokenreview_expansion.go | 35 + .../v1beta1/authentication_client.go | 90 + .../typed/authentication/v1beta1/doc.go | 20 + .../typed/authentication/v1beta1/fake/doc.go | 20 + .../fake/fake_authentication_client.go | 40 + .../v1beta1/fake/fake_tokenreview.go | 24 + .../fake/fake_tokenreview_expansion.go | 27 + .../v1beta1/generated_expansion.go | 19 + .../authentication/v1beta1/tokenreview.go | 46 + .../v1beta1/tokenreview_expansion.go | 35 + .../authorization/v1/authorization_client.go | 105 + .../kubernetes/typed/authorization/v1/doc.go | 20 + .../typed/authorization/v1/fake/doc.go | 20 + .../v1/fake/fake_authorization_client.go | 52 + .../v1/fake/fake_localsubjectaccessreview.go | 25 + ...fake_localsubjectaccessreview_expansion.go | 27 + .../v1/fake/fake_selfsubjectaccessreview.go | 24 + .../fake_selfsubjectaccessreview_expansion.go | 27 + .../v1/fake/fake_selfsubjectrulesreview.go | 24 + .../fake_selfsubjectrulesreview_expansion.go | 27 + .../v1/fake/fake_subjectaccessreview.go | 24 + 
.../fake_subjectaccessreview_expansion.go | 30 + .../authorization/v1/generated_expansion.go | 19 + .../v1/localsubjectaccessreview.go | 48 + .../v1/localsubjectaccessreview_expansion.go | 36 + .../v1/selfsubjectaccessreview.go | 46 + .../v1/selfsubjectaccessreview_expansion.go | 35 + .../v1/selfsubjectrulesreview.go | 46 + .../v1/selfsubjectrulesreview_expansion.go | 35 + .../authorization/v1/subjectaccessreview.go | 46 + .../v1/subjectaccessreview_expansion.go | 36 + .../v1beta1/authorization_client.go | 105 + .../typed/authorization/v1beta1/doc.go | 20 + .../typed/authorization/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_authorization_client.go | 52 + .../v1beta1/fake/fake_generated_expansion.go | 17 + .../fake/fake_localsubjectaccessreview.go | 25 + ...fake_localsubjectaccessreview_expansion.go | 27 + .../fake/fake_selfsubjectaccessreview.go | 24 + .../fake_selfsubjectaccessreview_expansion.go | 27 + .../fake/fake_selfsubjectrulesreview.go | 24 + .../fake_selfsubjectrulesreview_expansion.go | 27 + .../v1beta1/fake/fake_subjectaccessreview.go | 24 + .../fake_subjectaccessreview_expansion.go | 27 + .../v1beta1/generated_expansion.go | 19 + .../v1beta1/localsubjectaccessreview.go | 48 + .../localsubjectaccessreview_expansion.go | 36 + .../v1beta1/selfsubjectaccessreview.go | 46 + .../selfsubjectaccessreview_expansion.go | 35 + .../v1beta1/selfsubjectrulesreview.go | 46 + .../selfsubjectrulesreview_expansion.go | 35 + .../v1beta1/subjectaccessreview.go | 46 + .../v1beta1/subjectaccessreview_expansion.go | 36 + .../autoscaling/v1/autoscaling_client.go | 90 + .../kubernetes/typed/autoscaling/v1/doc.go | 20 + .../typed/autoscaling/v1/fake/doc.go | 20 + .../v1/fake/fake_autoscaling_client.go | 40 + .../v1/fake/fake_horizontalpodautoscaler.go | 140 + .../autoscaling/v1/generated_expansion.go | 21 + .../autoscaling/v1/horizontalpodautoscaler.go | 191 + .../autoscaling/v2beta1/autoscaling_client.go | 90 + .../typed/autoscaling/v2beta1/doc.go | 20 + 
.../typed/autoscaling/v2beta1/fake/doc.go | 20 + .../v2beta1/fake/fake_autoscaling_client.go | 40 + .../fake/fake_horizontalpodautoscaler.go | 140 + .../v2beta1/generated_expansion.go | 21 + .../v2beta1/horizontalpodautoscaler.go | 191 + .../autoscaling/v2beta2/autoscaling_client.go | 90 + .../typed/autoscaling/v2beta2/doc.go | 20 + .../typed/autoscaling/v2beta2/fake/doc.go | 20 + .../v2beta2/fake/fake_autoscaling_client.go | 40 + .../fake/fake_horizontalpodautoscaler.go | 140 + .../v2beta2/generated_expansion.go | 21 + .../v2beta2/horizontalpodautoscaler.go | 191 + .../kubernetes/typed/batch/v1/batch_client.go | 90 + .../kubernetes/typed/batch/v1/doc.go | 20 + .../kubernetes/typed/batch/v1/fake/doc.go | 20 + .../typed/batch/v1/fake/fake_batch_client.go | 40 + .../typed/batch/v1/fake/fake_job.go | 140 + .../typed/batch/v1/generated_expansion.go | 21 + .../kubernetes/typed/batch/v1/job.go | 191 + .../typed/batch/v1beta1/batch_client.go | 90 + .../kubernetes/typed/batch/v1beta1/cronjob.go | 191 + .../kubernetes/typed/batch/v1beta1/doc.go | 20 + .../typed/batch/v1beta1/fake/doc.go | 20 + .../batch/v1beta1/fake/fake_batch_client.go | 40 + .../typed/batch/v1beta1/fake/fake_cronjob.go | 140 + .../batch/v1beta1/generated_expansion.go | 21 + .../typed/batch/v2alpha1/batch_client.go | 90 + .../typed/batch/v2alpha1/cronjob.go | 191 + .../kubernetes/typed/batch/v2alpha1/doc.go | 20 + .../typed/batch/v2alpha1/fake/doc.go | 20 + .../batch/v2alpha1/fake/fake_batch_client.go | 40 + .../typed/batch/v2alpha1/fake/fake_cronjob.go | 140 + .../batch/v2alpha1/generated_expansion.go | 21 + .../v1beta1/certificates_client.go | 90 + .../v1beta1/certificatesigningrequest.go | 180 + .../certificatesigningrequest_expansion.go | 37 + .../typed/certificates/v1beta1/doc.go | 20 + .../typed/certificates/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_certificates_client.go | 40 + .../fake/fake_certificatesigningrequest.go | 131 + ...ake_certificatesigningrequest_expansion.go | 31 + 
.../v1beta1/generated_expansion.go | 19 + .../coordination/v1/coordination_client.go | 90 + .../kubernetes/typed/coordination/v1/doc.go | 20 + .../typed/coordination/v1/fake/doc.go | 20 + .../v1/fake/fake_coordination_client.go | 40 + .../typed/coordination/v1/fake/fake_lease.go | 128 + .../coordination/v1/generated_expansion.go | 21 + .../kubernetes/typed/coordination/v1/lease.go | 174 + .../v1beta1/coordination_client.go | 90 + .../typed/coordination/v1beta1/doc.go | 20 + .../typed/coordination/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_coordination_client.go | 40 + .../coordination/v1beta1/fake/fake_lease.go | 128 + .../v1beta1/generated_expansion.go | 21 + .../typed/coordination/v1beta1/lease.go | 174 + .../typed/core/v1/componentstatus.go | 164 + .../kubernetes/typed/core/v1/configmap.go | 174 + .../kubernetes/typed/core/v1/core_client.go | 165 + .../client-go/kubernetes/typed/core/v1/doc.go | 20 + .../kubernetes/typed/core/v1/endpoints.go | 174 + .../kubernetes/typed/core/v1/event.go | 174 + .../typed/core/v1/event_expansion.go | 164 + .../kubernetes/typed/core/v1/fake/doc.go | 20 + .../core/v1/fake/fake_componentstatus.go | 120 + .../typed/core/v1/fake/fake_configmap.go | 128 + .../typed/core/v1/fake/fake_core_client.go | 100 + .../typed/core/v1/fake/fake_endpoints.go | 128 + .../typed/core/v1/fake/fake_event.go | 128 + .../core/v1/fake/fake_event_expansion.go | 93 + .../typed/core/v1/fake/fake_limitrange.go | 128 + .../typed/core/v1/fake/fake_namespace.go | 123 + .../core/v1/fake/fake_namespace_expansion.go | 37 + .../typed/core/v1/fake/fake_node.go | 131 + .../typed/core/v1/fake/fake_node_expansion.go | 36 + .../core/v1/fake/fake_persistentvolume.go | 131 + .../v1/fake/fake_persistentvolumeclaim.go | 140 + .../kubernetes/typed/core/v1/fake/fake_pod.go | 140 + .../typed/core/v1/fake/fake_pod_expansion.go | 70 + .../typed/core/v1/fake/fake_podtemplate.go | 128 + .../v1/fake/fake_replicationcontroller.go | 163 + 
.../typed/core/v1/fake/fake_resourcequota.go | 140 + .../typed/core/v1/fake/fake_secret.go | 128 + .../typed/core/v1/fake/fake_service.go | 132 + .../core/v1/fake/fake_service_expansion.go | 26 + .../typed/core/v1/fake/fake_serviceaccount.go | 128 + .../v1/fake/fake_serviceaccount_expansion.go | 31 + .../typed/core/v1/generated_expansion.go | 39 + .../kubernetes/typed/core/v1/limitrange.go | 174 + .../kubernetes/typed/core/v1/namespace.go | 164 + .../typed/core/v1/namespace_expansion.go | 31 + .../kubernetes/typed/core/v1/node.go | 180 + .../typed/core/v1/node_expansion.go | 43 + .../typed/core/v1/persistentvolume.go | 180 + .../typed/core/v1/persistentvolumeclaim.go | 191 + .../client-go/kubernetes/typed/core/v1/pod.go | 191 + .../kubernetes/typed/core/v1/pod_expansion.go | 45 + .../kubernetes/typed/core/v1/podtemplate.go | 174 + .../typed/core/v1/replicationcontroller.go | 223 + .../kubernetes/typed/core/v1/resourcequota.go | 191 + .../kubernetes/typed/core/v1/secret.go | 174 + .../kubernetes/typed/core/v1/service.go | 174 + .../typed/core/v1/service_expansion.go | 41 + .../typed/core/v1/serviceaccount.go | 174 + .../typed/core/v1/serviceaccount_expansion.go | 41 + .../kubernetes/typed/events/v1beta1/doc.go | 20 + .../kubernetes/typed/events/v1beta1/event.go | 174 + .../typed/events/v1beta1/events_client.go | 90 + .../typed/events/v1beta1/fake/doc.go | 20 + .../typed/events/v1beta1/fake/fake_event.go | 128 + .../events/v1beta1/fake/fake_events_client.go | 40 + .../events/v1beta1/generated_expansion.go | 21 + .../typed/extensions/v1beta1/daemonset.go | 191 + .../typed/extensions/v1beta1/deployment.go | 222 + .../v1beta1/deployment_expansion.go | 29 + .../typed/extensions/v1beta1/doc.go | 20 + .../extensions/v1beta1/extensions_client.go | 110 + .../typed/extensions/v1beta1/fake/doc.go | 20 + .../extensions/v1beta1/fake/fake_daemonset.go | 140 + .../v1beta1/fake/fake_deployment.go | 162 + .../v1beta1/fake/fake_deployment_expansion.go | 33 + 
.../v1beta1/fake/fake_extensions_client.go | 56 + .../extensions/v1beta1/fake/fake_ingress.go | 140 + .../v1beta1/fake/fake_podsecuritypolicy.go | 120 + .../v1beta1/fake/fake_replicaset.go | 162 + .../extensions/v1beta1/generated_expansion.go | 27 + .../typed/extensions/v1beta1/ingress.go | 191 + .../extensions/v1beta1/podsecuritypolicy.go | 164 + .../typed/extensions/v1beta1/replicaset.go | 222 + .../kubernetes/typed/networking/v1/doc.go | 20 + .../typed/networking/v1/fake/doc.go | 20 + .../v1/fake/fake_networking_client.go | 40 + .../networking/v1/fake/fake_networkpolicy.go | 128 + .../networking/v1/generated_expansion.go | 21 + .../typed/networking/v1/networking_client.go | 90 + .../typed/networking/v1/networkpolicy.go | 174 + .../typed/networking/v1beta1/doc.go | 20 + .../typed/networking/v1beta1/fake/doc.go | 20 + .../networking/v1beta1/fake/fake_ingress.go | 140 + .../v1beta1/fake/fake_networking_client.go | 40 + .../networking/v1beta1/generated_expansion.go | 21 + .../typed/networking/v1beta1/ingress.go | 191 + .../networking/v1beta1/networking_client.go | 90 + .../kubernetes/typed/node/v1alpha1/doc.go | 20 + .../typed/node/v1alpha1/fake/doc.go | 20 + .../node/v1alpha1/fake/fake_node_client.go | 40 + .../node/v1alpha1/fake/fake_runtimeclass.go | 120 + .../node/v1alpha1/generated_expansion.go | 21 + .../typed/node/v1alpha1/node_client.go | 90 + .../typed/node/v1alpha1/runtimeclass.go | 164 + .../kubernetes/typed/node/v1beta1/doc.go | 20 + .../kubernetes/typed/node/v1beta1/fake/doc.go | 20 + .../node/v1beta1/fake/fake_node_client.go | 40 + .../node/v1beta1/fake/fake_runtimeclass.go | 120 + .../typed/node/v1beta1/generated_expansion.go | 21 + .../typed/node/v1beta1/node_client.go | 90 + .../typed/node/v1beta1/runtimeclass.go | 164 + .../kubernetes/typed/policy/v1beta1/doc.go | 20 + .../typed/policy/v1beta1/eviction.go | 48 + .../policy/v1beta1/eviction_expansion.go | 38 + .../typed/policy/v1beta1/fake/doc.go | 20 + .../policy/v1beta1/fake/fake_eviction.go | 25 
+ .../v1beta1/fake/fake_eviction_expansion.go | 35 + .../v1beta1/fake/fake_poddisruptionbudget.go | 140 + .../v1beta1/fake/fake_podsecuritypolicy.go | 120 + .../policy/v1beta1/fake/fake_policy_client.go | 48 + .../policy/v1beta1/generated_expansion.go | 23 + .../policy/v1beta1/poddisruptionbudget.go | 191 + .../typed/policy/v1beta1/podsecuritypolicy.go | 164 + .../typed/policy/v1beta1/policy_client.go | 100 + .../kubernetes/typed/rbac/v1/clusterrole.go | 164 + .../typed/rbac/v1/clusterrolebinding.go | 164 + .../client-go/kubernetes/typed/rbac/v1/doc.go | 20 + .../kubernetes/typed/rbac/v1/fake/doc.go | 20 + .../typed/rbac/v1/fake/fake_clusterrole.go | 120 + .../rbac/v1/fake/fake_clusterrolebinding.go | 120 + .../typed/rbac/v1/fake/fake_rbac_client.go | 52 + .../typed/rbac/v1/fake/fake_role.go | 128 + .../typed/rbac/v1/fake/fake_rolebinding.go | 128 + .../typed/rbac/v1/generated_expansion.go | 27 + .../kubernetes/typed/rbac/v1/rbac_client.go | 105 + .../kubernetes/typed/rbac/v1/role.go | 174 + .../kubernetes/typed/rbac/v1/rolebinding.go | 174 + .../typed/rbac/v1alpha1/clusterrole.go | 164 + .../typed/rbac/v1alpha1/clusterrolebinding.go | 164 + .../kubernetes/typed/rbac/v1alpha1/doc.go | 20 + .../typed/rbac/v1alpha1/fake/doc.go | 20 + .../rbac/v1alpha1/fake/fake_clusterrole.go | 120 + .../v1alpha1/fake/fake_clusterrolebinding.go | 120 + .../rbac/v1alpha1/fake/fake_rbac_client.go | 52 + .../typed/rbac/v1alpha1/fake/fake_role.go | 128 + .../rbac/v1alpha1/fake/fake_rolebinding.go | 128 + .../rbac/v1alpha1/generated_expansion.go | 27 + .../typed/rbac/v1alpha1/rbac_client.go | 105 + .../kubernetes/typed/rbac/v1alpha1/role.go | 174 + .../typed/rbac/v1alpha1/rolebinding.go | 174 + .../typed/rbac/v1beta1/clusterrole.go | 164 + .../typed/rbac/v1beta1/clusterrolebinding.go | 164 + .../kubernetes/typed/rbac/v1beta1/doc.go | 20 + .../kubernetes/typed/rbac/v1beta1/fake/doc.go | 20 + .../rbac/v1beta1/fake/fake_clusterrole.go | 120 + .../v1beta1/fake/fake_clusterrolebinding.go | 120 
+ .../rbac/v1beta1/fake/fake_rbac_client.go | 52 + .../typed/rbac/v1beta1/fake/fake_role.go | 128 + .../rbac/v1beta1/fake/fake_rolebinding.go | 128 + .../typed/rbac/v1beta1/generated_expansion.go | 27 + .../typed/rbac/v1beta1/rbac_client.go | 105 + .../kubernetes/typed/rbac/v1beta1/role.go | 174 + .../typed/rbac/v1beta1/rolebinding.go | 174 + .../kubernetes/typed/scheduling/v1/doc.go | 20 + .../typed/scheduling/v1/fake/doc.go | 20 + .../scheduling/v1/fake/fake_priorityclass.go | 120 + .../v1/fake/fake_scheduling_client.go | 40 + .../scheduling/v1/generated_expansion.go | 21 + .../typed/scheduling/v1/priorityclass.go | 164 + .../typed/scheduling/v1/scheduling_client.go | 90 + .../typed/scheduling/v1alpha1/doc.go | 20 + .../typed/scheduling/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_priorityclass.go | 120 + .../v1alpha1/fake/fake_scheduling_client.go | 40 + .../v1alpha1/generated_expansion.go | 21 + .../scheduling/v1alpha1/priorityclass.go | 164 + .../scheduling/v1alpha1/scheduling_client.go | 90 + .../typed/scheduling/v1beta1/doc.go | 20 + .../typed/scheduling/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_priorityclass.go | 120 + .../v1beta1/fake/fake_scheduling_client.go | 40 + .../scheduling/v1beta1/generated_expansion.go | 21 + .../typed/scheduling/v1beta1/priorityclass.go | 164 + .../scheduling/v1beta1/scheduling_client.go | 90 + .../kubernetes/typed/settings/v1alpha1/doc.go | 20 + .../typed/settings/v1alpha1/fake/doc.go | 20 + .../settings/v1alpha1/fake/fake_podpreset.go | 128 + .../v1alpha1/fake/fake_settings_client.go | 40 + .../settings/v1alpha1/generated_expansion.go | 21 + .../typed/settings/v1alpha1/podpreset.go | 174 + .../settings/v1alpha1/settings_client.go | 90 + .../kubernetes/typed/storage/v1/doc.go | 20 + .../kubernetes/typed/storage/v1/fake/doc.go | 20 + .../storage/v1/fake/fake_storage_client.go | 44 + .../storage/v1/fake/fake_storageclass.go | 120 + .../storage/v1/fake/fake_volumeattachment.go | 131 + 
.../typed/storage/v1/generated_expansion.go | 23 + .../typed/storage/v1/storage_client.go | 95 + .../typed/storage/v1/storageclass.go | 164 + .../typed/storage/v1/volumeattachment.go | 180 + .../kubernetes/typed/storage/v1alpha1/doc.go | 20 + .../typed/storage/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_storage_client.go | 40 + .../v1alpha1/fake/fake_volumeattachment.go | 131 + .../storage/v1alpha1/generated_expansion.go | 21 + .../typed/storage/v1alpha1/storage_client.go | 90 + .../storage/v1alpha1/volumeattachment.go | 180 + .../typed/storage/v1beta1/csidriver.go | 164 + .../typed/storage/v1beta1/csinode.go | 164 + .../kubernetes/typed/storage/v1beta1/doc.go | 20 + .../typed/storage/v1beta1/fake/doc.go | 20 + .../storage/v1beta1/fake/fake_csidriver.go | 120 + .../storage/v1beta1/fake/fake_csinode.go | 120 + .../v1beta1/fake/fake_storage_client.go | 52 + .../storage/v1beta1/fake/fake_storageclass.go | 120 + .../v1beta1/fake/fake_volumeattachment.go | 131 + .../storage/v1beta1/generated_expansion.go | 27 + .../typed/storage/v1beta1/storage_client.go | 105 + .../typed/storage/v1beta1/storageclass.go | 164 + .../typed/storage/v1beta1/volumeattachment.go | 180 + .../pkg/apis/clientauthentication/doc.go | 20 + .../pkg/apis/clientauthentication/register.go | 50 + .../pkg/apis/clientauthentication/types.go | 77 + .../apis/clientauthentication/v1alpha1/doc.go | 24 + .../clientauthentication/v1alpha1/register.go | 55 + .../clientauthentication/v1alpha1/types.go | 78 + .../v1alpha1/zz_generated.conversion.go | 176 + .../v1alpha1/zz_generated.deepcopy.go | 128 + .../v1alpha1/zz_generated.defaults.go | 32 + .../v1beta1/conversion.go | 26 + .../apis/clientauthentication/v1beta1/doc.go | 24 + .../clientauthentication/v1beta1/register.go | 55 + .../clientauthentication/v1beta1/types.go | 59 + .../v1beta1/zz_generated.conversion.go | 142 + .../v1beta1/zz_generated.deepcopy.go | 92 + .../v1beta1/zz_generated.defaults.go | 32 + .../zz_generated.deepcopy.go | 128 + 
.../k8s.io/client-go/pkg/version/base.go | 63 + .../k8s.io/client-go/pkg/version/doc.go | 21 + .../k8s.io/client-go/pkg/version/version.go | 42 + .../plugin/pkg/client/auth/exec/exec.go | 360 + .../plugin/pkg/client/auth/gcp/gcp.go | 383 + cli/vendor/k8s.io/client-go/rest/client.go | 258 + cli/vendor/k8s.io/client-go/rest/config.go | 551 + cli/vendor/k8s.io/client-go/rest/plugin.go | 73 + cli/vendor/k8s.io/client-go/rest/request.go | 1201 + cli/vendor/k8s.io/client-go/rest/transport.go | 117 + cli/vendor/k8s.io/client-go/rest/url_utils.go | 97 + .../k8s.io/client-go/rest/urlbackoff.go | 107 + .../k8s.io/client-go/rest/watch/decoder.go | 72 + .../k8s.io/client-go/rest/watch/encoder.go | 56 + .../client-go/rest/zz_generated.deepcopy.go | 52 + .../k8s.io/client-go/testing/actions.go | 671 + cli/vendor/k8s.io/client-go/testing/fake.go | 216 + .../k8s.io/client-go/testing/fixture.go | 557 + .../forked/golang/template/exec.go | 94 + .../forked/golang/template/funcs.go | 599 + .../k8s.io/client-go/tools/auth/clientauth.go | 125 + .../client-go/tools/cache/controller.go | 380 + .../client-go/tools/cache/delta_fifo.go | 655 + .../k8s.io/client-go/tools/cache/doc.go | 24 + .../client-go/tools/cache/expiration_cache.go | 208 + .../tools/cache/expiration_cache_fakes.go | 54 + .../tools/cache/fake_custom_store.go | 102 + .../k8s.io/client-go/tools/cache/fifo.go | 358 + .../k8s.io/client-go/tools/cache/heap.go | 323 + .../k8s.io/client-go/tools/cache/index.go | 87 + .../k8s.io/client-go/tools/cache/listers.go | 180 + .../k8s.io/client-go/tools/cache/listwatch.go | 114 + .../client-go/tools/cache/mutation_cache.go | 261 + .../tools/cache/mutation_detector.go | 130 + .../k8s.io/client-go/tools/cache/reflector.go | 386 + .../tools/cache/reflector_metrics.go | 119 + .../client-go/tools/cache/shared_informer.go | 597 + .../k8s.io/client-go/tools/cache/store.go | 244 + .../tools/cache/thread_safe_store.go | 311 + .../client-go/tools/cache/undelta_store.go | 83 + 
.../client-go/tools/clientcmd/api/doc.go | 19 + .../client-go/tools/clientcmd/api/helpers.go | 188 + .../tools/clientcmd/api/latest/latest.go | 61 + .../client-go/tools/clientcmd/api/register.go | 46 + .../client-go/tools/clientcmd/api/types.go | 262 + .../tools/clientcmd/api/v1/conversion.go | 244 + .../client-go/tools/clientcmd/api/v1/doc.go | 19 + .../tools/clientcmd/api/v1/register.go | 56 + .../client-go/tools/clientcmd/api/v1/types.go | 203 + .../clientcmd/api/v1/zz_generated.deepcopy.go | 348 + .../clientcmd/api/zz_generated.deepcopy.go | 324 + .../client-go/tools/clientcmd/auth_loaders.go | 111 + .../tools/clientcmd/client_config.go | 569 + .../client-go/tools/clientcmd/config.go | 490 + .../k8s.io/client-go/tools/clientcmd/doc.go | 37 + .../k8s.io/client-go/tools/clientcmd/flag.go | 49 + .../client-go/tools/clientcmd/helpers.go | 35 + .../client-go/tools/clientcmd/loader.go | 633 + .../tools/clientcmd/merged_client_builder.go | 173 + .../client-go/tools/clientcmd/overrides.go | 247 + .../client-go/tools/clientcmd/validation.go | 298 + .../k8s.io/client-go/tools/metrics/metrics.go | 61 + .../k8s.io/client-go/tools/pager/pager.go | 117 + .../k8s.io/client-go/tools/reference/ref.go | 126 + .../k8s.io/client-go/transport/cache.go | 117 + .../k8s.io/client-go/transport/config.go | 126 + .../client-go/transport/round_trippers.go | 564 + .../client-go/transport/token_source.go | 140 + .../k8s.io/client-go/transport/transport.go | 227 + cli/vendor/k8s.io/client-go/util/cert/cert.go | 206 + cli/vendor/k8s.io/client-go/util/cert/csr.go | 75 + cli/vendor/k8s.io/client-go/util/cert/io.go | 98 + cli/vendor/k8s.io/client-go/util/cert/pem.go | 61 + .../util/connrotation/connrotation.go | 105 + .../client-go/util/flowcontrol/backoff.go | 149 + .../client-go/util/flowcontrol/throttle.go | 143 + .../k8s.io/client-go/util/homedir/homedir.go | 47 + .../k8s.io/client-go/util/jsonpath/doc.go | 20 + .../client-go/util/jsonpath/jsonpath.go | 525 + 
.../k8s.io/client-go/util/jsonpath/node.go | 256 + .../k8s.io/client-go/util/jsonpath/parser.go | 526 + .../k8s.io/client-go/util/keyutil/key.go | 323 + .../k8s.io/client-go/util/retry/util.go | 79 + cli/vendor/k8s.io/klog/LICENSE | 191 + cli/vendor/k8s.io/klog/README.md | 77 + cli/vendor/k8s.io/klog/klog.go | 1241 + cli/vendor/k8s.io/klog/klog_file.go | 126 + cli/vendor/k8s.io/kube-openapi/LICENSE | 202 + cli/vendor/k8s.io/kube-openapi/README.md | 18 + .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 + .../kube-openapi/pkg/util/proto/document.go | 303 + .../kube-openapi/pkg/util/proto/openapi.go | 278 + cli/vendor/k8s.io/kubernetes/LICENSE | 202 + cli/vendor/k8s.io/kubernetes/README.md | 84 + cli/vendor/k8s.io/kubernetes/build/README.md | 130 + .../k8s.io/kubernetes/build/pause/orphan.c | 36 + .../k8s.io/kubernetes/build/pause/pause.c | 68 + .../k8s.io/kubernetes/pkg/api/v1/pod/util.go | 308 + .../google/protobuf/compiler/plugin.proto | 150 + .../protobuf/google/protobuf/descriptor.proto | 779 + cli/vendor/k8s.io/utils/LICENSE | 202 + cli/vendor/k8s.io/utils/README.md | 69 + .../k8s.io/utils/buffer/ring_growing.go | 72 + cli/vendor/k8s.io/utils/integer/integer.go | 73 + cli/vendor/k8s.io/utils/trace/trace.go | 96 + cli/vendor/sigs.k8s.io/yaml/LICENSE | 50 + cli/vendor/sigs.k8s.io/yaml/README.md | 121 + cli/vendor/sigs.k8s.io/yaml/fields.go | 502 + cli/vendor/sigs.k8s.io/yaml/yaml.go | 319 + cli/vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 + cli/vendor/vbom.ml/util/LICENSE | 17 + cli/vendor/vbom.ml/util/README.md | 5 + cli/vendor/vbom.ml/util/sortorder/README.md | 5 + cli/vendor/vbom.ml/util/sortorder/doc.go | 5 + cli/vendor/vbom.ml/util/sortorder/natsort.go | 76 + components.conf | 9 + engine/.DEREK.yml | 21 + engine/.dockerignore | 6 + engine/.mailmap | 539 + engine/AUTHORS | 2080 + engine/CHANGELOG.md | 3609 ++ engine/CONTRIBUTING.md | 458 + engine/Dockerfile | 291 + engine/Dockerfile.e2e | 84 + engine/Dockerfile.simple | 52 + engine/Dockerfile.windows | 266 + 
engine/Jenkinsfile | 879 + engine/LICENSE | 191 + engine/MAINTAINERS | 484 + engine/Makefile | 224 + engine/NOTICE | 19 + engine/README.md | 57 + engine/ROADMAP.md | 117 + engine/TESTING.md | 119 + engine/VENDORING.md | 46 + engine/api/README.md | 42 + engine/api/common.go | 11 + engine/api/common_unix.go | 6 + engine/api/common_windows.go | 8 + engine/api/server/backend/build/backend.go | 147 + engine/api/server/backend/build/tag.go | 77 + engine/api/server/httputils/decoder.go | 16 + .../api/server/httputils/errors_deprecated.go | 9 + engine/api/server/httputils/form.go | 76 + engine/api/server/httputils/form_test.go | 105 + engine/api/server/httputils/httputils.go | 124 + engine/api/server/httputils/httputils_test.go | 18 + .../server/httputils/httputils_write_json.go | 15 + .../api/server/httputils/write_log_stream.go | 84 + engine/api/server/middleware.go | 24 + engine/api/server/middleware/cors.go | 37 + engine/api/server/middleware/debug.go | 90 + engine/api/server/middleware/debug_test.go | 75 + engine/api/server/middleware/experimental.go | 28 + engine/api/server/middleware/middleware.go | 12 + engine/api/server/middleware/version.go | 65 + engine/api/server/middleware/version_test.go | 92 + engine/api/server/router/build/backend.go | 24 + engine/api/server/router/build/build.go | 53 + .../api/server/router/build/build_routes.go | 439 + .../api/server/router/checkpoint/backend.go | 10 + .../server/router/checkpoint/checkpoint.go | 36 + .../router/checkpoint/checkpoint_routes.go | 65 + engine/api/server/router/container/backend.go | 83 + .../api/server/router/container/container.go | 70 + .../router/container/container_routes.go | 695 + engine/api/server/router/container/copy.go | 145 + engine/api/server/router/container/exec.go | 156 + engine/api/server/router/container/inspect.go | 21 + engine/api/server/router/debug/debug.go | 53 + .../api/server/router/debug/debug_routes.go | 12 + .../api/server/router/distribution/backend.go | 15 + 
.../router/distribution/distribution.go | 31 + .../distribution/distribution_routes.go | 150 + engine/api/server/router/experimental.go | 68 + engine/api/server/router/grpc/backend.go | 8 + engine/api/server/router/grpc/grpc.go | 37 + engine/api/server/router/grpc/grpc_routes.go | 45 + engine/api/server/router/image/backend.go | 41 + engine/api/server/router/image/image.go | 44 + .../api/server/router/image/image_routes.go | 322 + engine/api/server/router/local.go | 71 + engine/api/server/router/network/backend.go | 32 + engine/api/server/router/network/filter.go | 1 + engine/api/server/router/network/network.go | 43 + .../server/router/network/network_routes.go | 414 + engine/api/server/router/plugin/backend.go | 27 + engine/api/server/router/plugin/plugin.go | 39 + .../api/server/router/plugin/plugin_routes.go | 315 + engine/api/server/router/router.go | 19 + engine/api/server/router/session/backend.go | 11 + engine/api/server/router/session/session.go | 29 + .../server/router/session/session_routes.go | 16 + engine/api/server/router/swarm/backend.go | 48 + engine/api/server/router/swarm/cluster.go | 63 + .../api/server/router/swarm/cluster_routes.go | 545 + engine/api/server/router/swarm/helpers.go | 98 + .../api/server/router/swarm/helpers_test.go | 87 + engine/api/server/router/system/backend.go | 28 + engine/api/server/router/system/system.go | 47 + .../api/server/router/system/system_routes.go | 252 + engine/api/server/router/volume/backend.go | 20 + engine/api/server/router/volume/volume.go | 36 + .../api/server/router/volume/volume_routes.go | 96 + engine/api/server/router_swapper.go | 30 + engine/api/server/server.go | 211 + engine/api/server/server_test.go | 45 + engine/api/swagger-gen.yaml | 12 + engine/api/swagger.yaml | 10415 +++ engine/api/templates/server/operation.gotmpl | 24 + engine/api/types/auth.go | 22 + engine/api/types/backend/backend.go | 128 + engine/api/types/backend/build.go | 45 + engine/api/types/blkiodev/blkio.go | 23 + 
engine/api/types/client.go | 415 + engine/api/types/configs.go | 64 + engine/api/types/container/config.go | 69 + .../api/types/container/container_changes.go | 21 + .../api/types/container/container_create.go | 21 + engine/api/types/container/container_top.go | 21 + .../api/types/container/container_update.go | 17 + engine/api/types/container/container_wait.go | 29 + engine/api/types/container/host_config.go | 424 + engine/api/types/container/hostconfig_unix.go | 41 + .../api/types/container/hostconfig_windows.go | 40 + engine/api/types/container/waitcondition.go | 22 + engine/api/types/error_response.go | 13 + engine/api/types/events/events.go | 52 + engine/api/types/filters/example_test.go | 24 + engine/api/types/filters/parse.go | 315 + engine/api/types/filters/parse_test.go | 386 + engine/api/types/graph_driver_data.go | 17 + engine/api/types/id_response.go | 13 + engine/api/types/image/image_history.go | 37 + .../api/types/image_delete_response_item.go | 15 + engine/api/types/image_summary.go | 49 + engine/api/types/mount/mount.go | 131 + engine/api/types/network/network.go | 127 + engine/api/types/plugin.go | 203 + engine/api/types/plugin_device.go | 25 + engine/api/types/plugin_env.go | 25 + engine/api/types/plugin_interface_type.go | 21 + engine/api/types/plugin_mount.go | 37 + engine/api/types/plugin_responses.go | 71 + .../api/types/plugins/logdriver/entry.pb.go | 716 + .../api/types/plugins/logdriver/entry.proto | 16 + engine/api/types/plugins/logdriver/gen.go | 3 + engine/api/types/plugins/logdriver/io.go | 87 + engine/api/types/port.go | 23 + engine/api/types/registry/authenticate.go | 21 + engine/api/types/registry/registry.go | 119 + engine/api/types/seccomp.go | 94 + engine/api/types/service_update_response.go | 12 + engine/api/types/stats.go | 181 + engine/api/types/strslice/strslice.go | 30 + engine/api/types/strslice/strslice_test.go | 86 + engine/api/types/swarm/common.go | 40 + engine/api/types/swarm/config.go | 40 + 
engine/api/types/swarm/container.go | 76 + engine/api/types/swarm/network.go | 121 + engine/api/types/swarm/node.go | 115 + engine/api/types/swarm/runtime.go | 27 + engine/api/types/swarm/runtime/gen.go | 3 + engine/api/types/swarm/runtime/plugin.pb.go | 712 + engine/api/types/swarm/runtime/plugin.proto | 20 + engine/api/types/swarm/secret.go | 36 + engine/api/types/swarm/service.go | 124 + engine/api/types/swarm/swarm.go | 227 + engine/api/types/swarm/task.go | 192 + engine/api/types/time/duration_convert.go | 12 + .../api/types/time/duration_convert_test.go | 26 + engine/api/types/time/timestamp.go | 129 + engine/api/types/time/timestamp_test.go | 93 + engine/api/types/types.go | 615 + engine/api/types/versions/README.md | 14 + engine/api/types/versions/compare.go | 62 + engine/api/types/versions/compare_test.go | 26 + engine/api/types/versions/v1p19/types.go | 35 + engine/api/types/versions/v1p20/types.go | 40 + engine/api/types/volume.go | 69 + engine/api/types/volume/volume_create.go | 29 + engine/api/types/volume/volume_list.go | 23 + .../adapters/containerimage/pull.go | 910 + .../adapters/localinlinecache/inlinecache.go | 163 + .../builder-next/adapters/snapshot/layer.go | 123 + .../adapters/snapshot/snapshot.go | 490 + engine/builder/builder-next/builder.go | 622 + engine/builder/builder-next/controller.go | 256 + engine/builder/builder-next/executor_unix.go | 133 + .../builder/builder-next/executor_windows.go | 29 + .../builder/builder-next/exporter/export.go | 179 + .../builder/builder-next/exporter/writer.go | 216 + .../builder-next/imagerefchecker/checker.go | 96 + engine/builder/builder-next/reqbodyhandler.go | 67 + engine/builder/builder-next/worker/gc.go | 51 + engine/builder/builder-next/worker/gc_unix.go | 17 + .../builder/builder-next/worker/gc_windows.go | 7 + engine/builder/builder-next/worker/worker.go | 466 + engine/builder/builder.go | 115 + engine/builder/dockerfile/buildargs.go | 172 + engine/builder/dockerfile/buildargs_test.go | 102 + 
engine/builder/dockerfile/builder.go | 437 + engine/builder/dockerfile/builder_unix.go | 7 + engine/builder/dockerfile/builder_windows.go | 8 + engine/builder/dockerfile/clientsession.go | 76 + engine/builder/dockerfile/containerbackend.go | 146 + engine/builder/dockerfile/copy.go | 597 + engine/builder/dockerfile/copy_test.go | 148 + engine/builder/dockerfile/copy_unix.go | 48 + engine/builder/dockerfile/copy_windows.go | 109 + engine/builder/dockerfile/dispatchers.go | 605 + engine/builder/dockerfile/dispatchers_test.go | 572 + engine/builder/dockerfile/dispatchers_unix.go | 36 + .../dockerfile/dispatchers_unix_test.go | 34 + .../builder/dockerfile/dispatchers_windows.go | 141 + .../dockerfile/dispatchers_windows_test.go | 46 + engine/builder/dockerfile/evaluator.go | 250 + engine/builder/dockerfile/evaluator_test.go | 140 + engine/builder/dockerfile/imagecontext.go | 122 + engine/builder/dockerfile/imageprobe.go | 63 + engine/builder/dockerfile/internals.go | 508 + engine/builder/dockerfile/internals_linux.go | 88 + .../dockerfile/internals_linux_test.go | 163 + engine/builder/dockerfile/internals_test.go | 178 + .../builder/dockerfile/internals_windows.go | 119 + .../dockerfile/internals_windows_test.go | 53 + engine/builder/dockerfile/metrics.go | 44 + engine/builder/dockerfile/mockbackend_test.go | 148 + engine/builder/dockerfile/utils_test.go | 50 + engine/builder/dockerignore/dockerignore.go | 64 + .../builder/dockerignore/dockerignore_test.go | 69 + engine/builder/fscache/fscache.go | 653 + engine/builder/fscache/fscache_test.go | 132 + engine/builder/fscache/naivedriver.go | 28 + engine/builder/remotecontext/archive.go | 125 + engine/builder/remotecontext/detect.go | 191 + engine/builder/remotecontext/detect_test.go | 123 + engine/builder/remotecontext/filehash.go | 45 + engine/builder/remotecontext/generate.go | 3 + engine/builder/remotecontext/git.go | 35 + engine/builder/remotecontext/git/gitutils.go | 209 + .../remotecontext/git/gitutils_test.go | 293 
+ engine/builder/remotecontext/lazycontext.go | 102 + engine/builder/remotecontext/mimetype.go | 27 + engine/builder/remotecontext/mimetype_test.go | 16 + engine/builder/remotecontext/remote.go | 127 + engine/builder/remotecontext/remote_test.go | 242 + engine/builder/remotecontext/tarsum.go | 157 + engine/builder/remotecontext/tarsum.pb.go | 525 + engine/builder/remotecontext/tarsum.proto | 7 + engine/builder/remotecontext/tarsum_test.go | 151 + engine/builder/remotecontext/utils_test.go | 55 + engine/cli/cobra.go | 131 + engine/cli/config/configdir.go | 27 + engine/cli/debug/debug.go | 26 + engine/cli/debug/debug_test.go | 43 + engine/cli/error.go | 33 + engine/cli/required.go | 27 + engine/client/README.md | 35 + engine/client/build_cancel.go | 16 + engine/client/build_prune.go | 45 + engine/client/checkpoint_create.go | 14 + engine/client/checkpoint_create_test.go | 77 + engine/client/checkpoint_delete.go | 20 + engine/client/checkpoint_delete_test.go | 58 + engine/client/checkpoint_list.go | 28 + engine/client/checkpoint_list_test.go | 72 + engine/client/client.go | 309 + engine/client/client_deprecated.go | 23 + engine/client/client_mock_test.go | 53 + engine/client/client_test.go | 374 + engine/client/client_unix.go | 9 + engine/client/client_windows.go | 7 + engine/client/config_create.go | 25 + engine/client/config_create_test.go | 74 + engine/client/config_inspect.go | 36 + engine/client/config_inspect_test.go | 107 + engine/client/config_list.go | 38 + engine/client/config_list_test.go | 111 + engine/client/config_remove.go | 13 + engine/client/config_remove_test.go | 64 + engine/client/config_update.go | 21 + engine/client/config_update_test.go | 65 + engine/client/container_attach.go | 57 + engine/client/container_commit.go | 55 + engine/client/container_commit_test.go | 100 + engine/client/container_copy.go | 103 + engine/client/container_copy_test.go | 285 + engine/client/container_create.go | 52 + engine/client/container_create_test.go | 125 + 
engine/client/container_diff.go | 23 + engine/client/container_diff_test.go | 64 + engine/client/container_exec.go | 54 + engine/client/container_exec_test.go | 166 + engine/client/container_export.go | 19 + engine/client/container_export_test.go | 54 + engine/client/container_inspect.go | 53 + engine/client/container_inspect_test.go | 142 + engine/client/container_kill.go | 16 + engine/client/container_kill_test.go | 50 + engine/client/container_list.go | 56 + engine/client/container_list_test.go | 100 + engine/client/container_logs.go | 80 + engine/client/container_logs_test.go | 166 + engine/client/container_pause.go | 10 + engine/client/container_pause_test.go | 45 + engine/client/container_prune.go | 36 + engine/client/container_prune_test.go | 125 + engine/client/container_remove.go | 27 + engine/client/container_remove_test.go | 66 + engine/client/container_rename.go | 15 + engine/client/container_rename_test.go | 50 + engine/client/container_resize.go | 29 + engine/client/container_resize_test.go | 89 + engine/client/container_restart.go | 22 + engine/client/container_restart_test.go | 52 + engine/client/container_start.go | 23 + engine/client/container_start_test.go | 61 + engine/client/container_stats.go | 26 + engine/client/container_stats_test.go | 74 + engine/client/container_stop.go | 26 + engine/client/container_stop_test.go | 52 + engine/client/container_top.go | 28 + engine/client/container_top_test.go | 78 + engine/client/container_unpause.go | 10 + engine/client/container_unpause_test.go | 45 + engine/client/container_update.go | 21 + engine/client/container_update_test.go | 62 + engine/client/container_wait.go | 83 + engine/client/container_wait_test.go | 77 + engine/client/disk_usage.go | 26 + engine/client/disk_usage_test.go | 59 + engine/client/distribution_inspect.go | 38 + engine/client/distribution_inspect_test.go | 32 + engine/client/errors.go | 138 + engine/client/events.go | 101 + engine/client/events_test.go | 168 + 
engine/client/hijack.go | 143 + engine/client/hijack_test.go | 103 + engine/client/image_build.go | 146 + engine/client/image_build_test.go | 236 + engine/client/image_create.go | 37 + engine/client/image_create_test.go | 79 + engine/client/image_history.go | 22 + engine/client/image_history_test.go | 64 + engine/client/image_import.go | 40 + engine/client/image_import_test.go | 85 + engine/client/image_inspect.go | 32 + engine/client/image_inspect_test.go | 88 + engine/client/image_list.go | 45 + engine/client/image_list_test.go | 163 + engine/client/image_load.go | 29 + engine/client/image_load_test.go | 99 + engine/client/image_prune.go | 36 + engine/client/image_prune_test.go | 120 + engine/client/image_pull.go | 64 + engine/client/image_pull_test.go | 202 + engine/client/image_push.go | 55 + engine/client/image_push_test.go | 183 + engine/client/image_remove.go | 31 + engine/client/image_remove_test.go | 105 + engine/client/image_save.go | 21 + engine/client/image_save_test.go | 61 + engine/client/image_search.go | 51 + engine/client/image_search_test.go | 168 + engine/client/image_tag.go | 37 + engine/client/image_tag_test.go | 147 + engine/client/info.go | 26 + engine/client/info_test.go | 80 + engine/client/interface.go | 199 + engine/client/interface_experimental.go | 18 + engine/client/interface_stable.go | 10 + engine/client/login.go | 25 + engine/client/network_connect.go | 19 + engine/client/network_connect_test.go | 114 + engine/client/network_create.go | 25 + engine/client/network_create_test.go | 76 + engine/client/network_disconnect.go | 15 + engine/client/network_disconnect_test.go | 68 + engine/client/network_inspect.go | 49 + engine/client/network_inspect_test.go | 118 + engine/client/network_list.go | 31 + engine/client/network_list_test.go | 112 + engine/client/network_prune.go | 36 + engine/client/network_prune_test.go | 117 + engine/client/network_remove.go | 10 + engine/client/network_remove_test.go | 51 + engine/client/node_inspect.go | 32 
+ engine/client/node_inspect_test.go | 82 + engine/client/node_list.go | 36 + engine/client/node_list_test.go | 98 + engine/client/node_remove.go | 20 + engine/client/node_remove_test.go | 72 + engine/client/node_update.go | 18 + engine/client/node_update_test.go | 52 + engine/client/options.go | 172 + engine/client/options_test.go | 16 + engine/client/ping.go | 66 + engine/client/ping_test.go | 129 + engine/client/plugin_create.go | 23 + engine/client/plugin_disable.go | 19 + engine/client/plugin_disable_test.go | 52 + engine/client/plugin_enable.go | 19 + engine/client/plugin_enable_test.go | 52 + engine/client/plugin_inspect.go | 31 + engine/client/plugin_inspect_test.go | 71 + engine/client/plugin_install.go | 113 + engine/client/plugin_list.go | 32 + engine/client/plugin_list_test.go | 111 + engine/client/plugin_push.go | 16 + engine/client/plugin_push_test.go | 55 + engine/client/plugin_remove.go | 20 + engine/client/plugin_remove_test.go | 52 + engine/client/plugin_set.go | 12 + engine/client/plugin_set_test.go | 51 + engine/client/plugin_upgrade.go | 39 + engine/client/request.go | 279 + engine/client/request_test.go | 110 + engine/client/secret_create.go | 25 + engine/client/secret_create_test.go | 74 + engine/client/secret_inspect.go | 36 + engine/client/secret_inspect_test.go | 96 + engine/client/secret_list.go | 38 + engine/client/secret_list_test.go | 111 + engine/client/secret_remove.go | 13 + engine/client/secret_remove_test.go | 64 + engine/client/secret_update.go | 21 + engine/client/secret_update_test.go | 65 + engine/client/service_create.go | 166 + engine/client/service_create_test.go | 215 + engine/client/service_inspect.go | 37 + engine/client/service_inspect_test.go | 83 + engine/client/service_list.go | 35 + engine/client/service_list_test.go | 98 + engine/client/service_logs.go | 52 + engine/client/service_logs_test.go | 135 + engine/client/service_remove.go | 10 + engine/client/service_remove_test.go | 57 + engine/client/service_update.go 
| 94 + engine/client/service_update_test.go | 80 + engine/client/swarm_get_unlock_key.go | 21 + engine/client/swarm_get_unlock_key_test.go | 59 + engine/client/swarm_init.go | 21 + engine/client/swarm_init_test.go | 57 + engine/client/swarm_inspect.go | 21 + engine/client/swarm_inspect_test.go | 60 + engine/client/swarm_join.go | 14 + engine/client/swarm_join_test.go | 54 + engine/client/swarm_leave.go | 17 + engine/client/swarm_leave_test.go | 70 + engine/client/swarm_unlock.go | 14 + engine/client/swarm_unlock_test.go | 52 + engine/client/swarm_update.go | 22 + engine/client/swarm_update_test.go | 52 + engine/client/task_inspect.go | 32 + engine/client/task_inspect_test.go | 71 + engine/client/task_list.go | 35 + engine/client/task_list_test.go | 98 + engine/client/task_logs.go | 51 + engine/client/testdata/ca.pem | 18 + engine/client/testdata/cert.pem | 18 + engine/client/testdata/key.pem | 27 + engine/client/transport.go | 17 + engine/client/utils.go | 34 + engine/client/version.go | 21 + engine/client/volume_create.go | 21 + engine/client/volume_create_test.go | 79 + engine/client/volume_inspect.go | 38 + engine/client/volume_inspect_test.go | 79 + engine/client/volume_list.go | 32 + engine/client/volume_list_test.go | 102 + engine/client/volume_prune.go | 36 + engine/client/volume_remove.go | 21 + engine/client/volume_remove_test.go | 51 + engine/cmd/dockerd/README.md | 3 + engine/cmd/dockerd/config.go | 107 + engine/cmd/dockerd/config_common_unix.go | 64 + engine/cmd/dockerd/config_unix.go | 68 + engine/cmd/dockerd/config_unix_test.go | 24 + engine/cmd/dockerd/config_windows.go | 35 + engine/cmd/dockerd/daemon.go | 720 + engine/cmd/dockerd/daemon_freebsd.go | 9 + engine/cmd/dockerd/daemon_linux.go | 13 + engine/cmd/dockerd/daemon_test.go | 204 + engine/cmd/dockerd/daemon_unix.go | 181 + engine/cmd/dockerd/daemon_unix_test.go | 99 + engine/cmd/dockerd/daemon_windows.go | 100 + engine/cmd/dockerd/docker.go | 100 + engine/cmd/dockerd/docker_unix.go | 18 + 
engine/cmd/dockerd/docker_windows.go | 54 + .../dockerd/hack/malformed_host_override.go | 121 + .../hack/malformed_host_override_test.go | 124 + engine/cmd/dockerd/metrics.go | 27 + engine/cmd/dockerd/options.go | 108 + engine/cmd/dockerd/options_test.go | 44 + engine/cmd/dockerd/service_unsupported.go | 10 + engine/cmd/dockerd/service_windows.go | 430 + engine/codecov.yml | 17 + engine/container/archive.go | 86 + engine/container/container.go | 736 + engine/container/container_unit_test.go | 126 + engine/container/container_unix.go | 473 + engine/container/container_windows.go | 207 + engine/container/env.go | 43 + engine/container/env_test.go | 24 + engine/container/health.go | 82 + engine/container/history.go | 30 + engine/container/memory_store.go | 95 + engine/container/memory_store_test.go | 106 + engine/container/monitor.go | 46 + engine/container/mounts_unix.go | 13 + engine/container/mounts_windows.go | 8 + engine/container/state.go | 409 + engine/container/state_test.go | 192 + engine/container/store.go | 28 + engine/container/stream/attach.go | 175 + engine/container/stream/streams.go | 169 + engine/container/view.go | 494 + engine/container/view_test.go | 186 + engine/contrib/README.md | 4 + engine/contrib/REVIEWERS | 1 + engine/contrib/apparmor/main.go | 56 + engine/contrib/apparmor/template.go | 268 + engine/contrib/check-config.sh | 363 + engine/contrib/desktop-integration/README.md | 11 + .../desktop-integration/chromium/Dockerfile | 36 + .../desktop-integration/gparted/Dockerfile | 31 + engine/contrib/docker-device-tool/README.md | 14 + .../contrib/docker-device-tool/device_tool.go | 167 + .../docker-device-tool/device_tool_windows.go | 4 + .../contrib/docker-machine-install-bundle.sh | 111 + engine/contrib/dockerd-rootless.sh | 90 + engine/contrib/dockerize-disk.sh | 118 + engine/contrib/download-frozen-image-v1.sh | 108 + engine/contrib/download-frozen-image-v2.sh | 345 + engine/contrib/editorconfig | 13 + engine/contrib/gitdm/aliases | 148 + 
engine/contrib/gitdm/domain-map | 47 + engine/contrib/gitdm/generate_aliases.sh | 16 + engine/contrib/gitdm/gitdm.config | 17 + engine/contrib/httpserver/Dockerfile | 4 + engine/contrib/httpserver/server.go | 12 + engine/contrib/init/openrc/docker.confd | 29 + engine/contrib/init/openrc/docker.initd | 28 + engine/contrib/init/systemd/REVIEWERS | 3 + engine/contrib/init/systemd/docker.service | 34 + .../contrib/init/systemd/docker.service.rpm | 33 + engine/contrib/init/systemd/docker.socket | 12 + engine/contrib/init/sysvinit-debian/docker | 156 + .../init/sysvinit-debian/docker.default | 20 + engine/contrib/init/sysvinit-redhat/docker | 153 + .../init/sysvinit-redhat/docker.sysconfig | 7 + engine/contrib/init/upstart/REVIEWERS | 2 + engine/contrib/init/upstart/docker.conf | 72 + engine/contrib/mac-install-bundle.sh | 45 + engine/contrib/mkimage-alpine.sh | 90 + engine/contrib/mkimage-arch-pacman.conf | 92 + engine/contrib/mkimage-arch.sh | 126 + engine/contrib/mkimage-archarm-pacman.conf | 98 + engine/contrib/mkimage-crux.sh | 75 + engine/contrib/mkimage-pld.sh | 73 + engine/contrib/mkimage-yum.sh | 150 + engine/contrib/mkimage.sh | 120 + engine/contrib/mkimage/.febootstrap-minimize | 28 + engine/contrib/mkimage/busybox-static | 34 + engine/contrib/mkimage/debootstrap | 251 + engine/contrib/mkimage/mageia-urpmi | 61 + engine/contrib/mkimage/rinse | 25 + engine/contrib/nnp-test/Dockerfile | 9 + engine/contrib/nnp-test/nnp-test.c | 10 + engine/contrib/nuke-graph-directory.sh | 64 + engine/contrib/report-issue.sh | 105 + engine/contrib/syntax/nano/Dockerfile.nanorc | 26 + engine/contrib/syntax/nano/README.md | 32 + .../Preferences/Dockerfile.tmPreferences | 24 + .../Syntaxes/Dockerfile.tmLanguage | 160 + .../textmate/Docker.tmbundle/info.plist | 16 + engine/contrib/syntax/textmate/README.md | 17 + engine/contrib/syntax/textmate/REVIEWERS | 1 + engine/contrib/syntax/vim/LICENSE | 22 + engine/contrib/syntax/vim/README.md | 30 + 
engine/contrib/syntax/vim/doc/dockerfile.txt | 18 + .../syntax/vim/ftdetect/dockerfile.vim | 1 + .../contrib/syntax/vim/syntax/dockerfile.vim | 31 + engine/contrib/syscall-test/Dockerfile | 15 + engine/contrib/syscall-test/acct.c | 16 + engine/contrib/syscall-test/exit32.s | 7 + engine/contrib/syscall-test/ns.c | 63 + engine/contrib/syscall-test/raw.c | 14 + engine/contrib/syscall-test/setgid.c | 11 + engine/contrib/syscall-test/setuid.c | 11 + engine/contrib/syscall-test/socket.c | 30 + engine/contrib/syscall-test/userns.c | 63 + engine/contrib/udev/80-docker.rules | 3 + engine/contrib/vagrant-docker/README.md | 50 + engine/daemon/apparmor_default.go | 36 + engine/daemon/apparmor_default_unsupported.go | 7 + engine/daemon/archive.go | 453 + engine/daemon/archive_tarcopyoptions.go | 15 + engine/daemon/archive_tarcopyoptions_unix.go | 27 + .../daemon/archive_tarcopyoptions_windows.go | 10 + engine/daemon/archive_unix.go | 31 + engine/daemon/archive_windows.go | 39 + engine/daemon/attach.go | 187 + engine/daemon/auth.go | 13 + engine/daemon/changes.go | 34 + engine/daemon/checkpoint.go | 130 + engine/daemon/cluster.go | 27 + engine/daemon/cluster/cluster.go | 456 + engine/daemon/cluster/configs.go | 120 + .../cluster/controllers/plugin/controller.go | 261 + .../controllers/plugin/controller_test.go | 390 + engine/daemon/cluster/convert/config.go | 78 + engine/daemon/cluster/convert/container.go | 466 + engine/daemon/cluster/convert/network.go | 240 + engine/daemon/cluster/convert/network_test.go | 34 + engine/daemon/cluster/convert/node.go | 94 + engine/daemon/cluster/convert/secret.go | 80 + engine/daemon/cluster/convert/service.go | 641 + engine/daemon/cluster/convert/service_test.go | 613 + engine/daemon/cluster/convert/swarm.go | 150 + engine/daemon/cluster/convert/task.go | 69 + engine/daemon/cluster/errors.go | 61 + engine/daemon/cluster/executor/backend.go | 77 + .../cluster/executor/container/adapter.go | 527 + .../executor/container/adapter_test.go | 139 + 
.../cluster/executor/container/attachment.go | 74 + .../cluster/executor/container/container.go | 678 + .../executor/container/container_test.go | 135 + .../cluster/executor/container/controller.go | 712 + .../cluster/executor/container/errors.go | 17 + .../cluster/executor/container/executor.go | 292 + .../cluster/executor/container/health_test.go | 100 + .../cluster/executor/container/validate.go | 45 + .../executor/container/validate_test.go | 142 + .../executor/container/validate_unix_test.go | 8 + .../container/validate_windows_test.go | 24 + engine/daemon/cluster/filters.go | 125 + engine/daemon/cluster/filters_test.go | 102 + engine/daemon/cluster/helpers.go | 246 + engine/daemon/cluster/listen_addr.go | 356 + engine/daemon/cluster/listen_addr_linux.go | 89 + engine/daemon/cluster/listen_addr_others.go | 9 + engine/daemon/cluster/networks.go | 334 + engine/daemon/cluster/noderunner.go | 405 + engine/daemon/cluster/nodes.go | 108 + engine/daemon/cluster/provider/network.go | 37 + engine/daemon/cluster/secrets.go | 121 + engine/daemon/cluster/services.go | 604 + engine/daemon/cluster/swarm.go | 600 + engine/daemon/cluster/tasks.go | 90 + engine/daemon/cluster/utils.go | 63 + engine/daemon/commit.go | 185 + engine/daemon/config/builder.go | 22 + engine/daemon/config/config.go | 603 + engine/daemon/config/config_common_unix.go | 77 + .../daemon/config/config_common_unix_test.go | 84 + engine/daemon/config/config_test.go | 530 + engine/daemon/config/config_unix.go | 95 + engine/daemon/config/config_unix_test.go | 134 + engine/daemon/config/config_windows.go | 62 + engine/daemon/config/config_windows_test.go | 60 + engine/daemon/config/opts.go | 22 + engine/daemon/configs.go | 21 + engine/daemon/configs_linux.go | 5 + engine/daemon/configs_unsupported.go | 7 + engine/daemon/configs_windows.go | 5 + engine/daemon/container.go | 405 + engine/daemon/container_linux.go | 30 + engine/daemon/container_operations.go | 1150 + engine/daemon/container_operations_unix.go | 
415 + engine/daemon/container_operations_windows.go | 205 + engine/daemon/container_unix_test.go | 44 + engine/daemon/container_windows.go | 9 + engine/daemon/create.go | 345 + engine/daemon/create_test.go | 21 + engine/daemon/create_unix.go | 94 + engine/daemon/create_windows.go | 93 + engine/daemon/daemon.go | 1488 + engine/daemon/daemon_linux.go | 147 + engine/daemon/daemon_linux_test.go | 271 + engine/daemon/daemon_test.go | 319 + engine/daemon/daemon_unix.go | 1591 + engine/daemon/daemon_unix_test.go | 439 + engine/daemon/daemon_unsupported.go | 9 + engine/daemon/daemon_windows.go | 673 + engine/daemon/daemon_windows_test.go | 72 + engine/daemon/debugtrap_unix.go | 27 + engine/daemon/debugtrap_unsupported.go | 7 + engine/daemon/debugtrap_windows.go | 46 + engine/daemon/delete.go | 152 + engine/daemon/delete_test.go | 95 + engine/daemon/dependency.go | 17 + engine/daemon/devices_linux.go | 38 + engine/daemon/discovery/discovery.go | 202 + engine/daemon/discovery/discovery_test.go | 96 + engine/daemon/disk_usage.go | 50 + engine/daemon/errors.go | 167 + engine/daemon/events.go | 308 + engine/daemon/events/events.go | 165 + engine/daemon/events/events_test.go | 282 + engine/daemon/events/filter.go | 138 + engine/daemon/events/metrics.go | 15 + engine/daemon/events/testutils/testutils.go | 76 + engine/daemon/events_test.go | 90 + engine/daemon/exec.go | 336 + engine/daemon/exec/exec.go | 147 + engine/daemon/exec_linux.go | 61 + engine/daemon/exec_linux_test.go | 53 + engine/daemon/exec_windows.go | 14 + engine/daemon/export.go | 86 + engine/daemon/graphdriver/aufs/aufs.go | 684 + engine/daemon/graphdriver/aufs/aufs_test.go | 805 + engine/daemon/graphdriver/aufs/dirs.go | 64 + engine/daemon/graphdriver/aufs/mount.go | 41 + engine/daemon/graphdriver/btrfs/btrfs.go | 686 + engine/daemon/graphdriver/btrfs/btrfs_test.go | 65 + .../graphdriver/btrfs/dummy_unsupported.go | 3 + engine/daemon/graphdriver/btrfs/version.go | 26 + .../daemon/graphdriver/btrfs/version_none.go 
| 14 + .../daemon/graphdriver/btrfs/version_test.go | 13 + engine/daemon/graphdriver/copy/copy.go | 269 + engine/daemon/graphdriver/copy/copy_cgo.go | 22 + engine/daemon/graphdriver/copy/copy_nocgo.go | 13 + engine/daemon/graphdriver/copy/copy_test.go | 159 + engine/daemon/graphdriver/counter.go | 62 + engine/daemon/graphdriver/devmapper/README.md | 98 + .../graphdriver/devmapper/device_setup.go | 231 + .../daemon/graphdriver/devmapper/deviceset.go | 2824 + .../graphdriver/devmapper/devmapper_doc.go | 106 + .../graphdriver/devmapper/devmapper_test.go | 205 + engine/daemon/graphdriver/devmapper/driver.go | 253 + engine/daemon/graphdriver/devmapper/mount.go | 66 + engine/daemon/graphdriver/driver.go | 333 + engine/daemon/graphdriver/driver_freebsd.go | 21 + engine/daemon/graphdriver/driver_linux.go | 124 + engine/daemon/graphdriver/driver_test.go | 36 + .../daemon/graphdriver/driver_unsupported.go | 13 + engine/daemon/graphdriver/driver_windows.go | 12 + engine/daemon/graphdriver/errors.go | 36 + engine/daemon/graphdriver/fsdiff.go | 175 + .../graphdriver/graphtest/graphbench_unix.go | 257 + .../graphdriver/graphtest/graphtest_unix.go | 352 + .../graphtest/graphtest_windows.go | 1 + .../daemon/graphdriver/graphtest/testutil.go | 337 + .../graphdriver/graphtest/testutil_unix.go | 69 + engine/daemon/graphdriver/lcow/lcow.go | 1163 + engine/daemon/graphdriver/lcow/lcow_svm.go | 421 + engine/daemon/graphdriver/lcow/remotefs.go | 139 + .../daemon/graphdriver/lcow/remotefs_file.go | 211 + .../graphdriver/lcow/remotefs_filedriver.go | 123 + .../graphdriver/lcow/remotefs_pathdriver.go | 212 + engine/daemon/graphdriver/overlay/overlay.go | 524 + .../graphdriver/overlay/overlay_test.go | 93 + .../overlay/overlay_unsupported.go | 3 + engine/daemon/graphdriver/overlay2/check.go | 133 + engine/daemon/graphdriver/overlay2/mount.go | 89 + engine/daemon/graphdriver/overlay2/overlay.go | 786 + .../graphdriver/overlay2/overlay_test.go | 109 + .../overlay2/overlay_unsupported.go | 3 + 
.../daemon/graphdriver/overlay2/randomid.go | 80 + .../graphdriver/overlayutils/overlayutils.go | 25 + engine/daemon/graphdriver/plugin.go | 55 + engine/daemon/graphdriver/proxy.go | 264 + engine/daemon/graphdriver/quota/errors.go | 19 + .../daemon/graphdriver/quota/projectquota.go | 377 + .../graphdriver/quota/projectquota_test.go | 152 + .../quota/projectquota_unsupported.go | 18 + engine/daemon/graphdriver/quota/types.go | 19 + .../graphdriver/register/register_aufs.go | 8 + .../graphdriver/register/register_btrfs.go | 8 + .../register/register_devicemapper.go | 8 + .../graphdriver/register/register_overlay.go | 8 + .../graphdriver/register/register_overlay2.go | 8 + .../graphdriver/register/register_vfs.go | 6 + .../graphdriver/register/register_windows.go | 7 + .../graphdriver/register/register_zfs.go | 8 + engine/daemon/graphdriver/vfs/copy_linux.go | 7 + .../graphdriver/vfs/copy_unsupported.go | 9 + engine/daemon/graphdriver/vfs/driver.go | 202 + engine/daemon/graphdriver/vfs/quota_linux.go | 36 + .../graphdriver/vfs/quota_unsupported.go | 28 + engine/daemon/graphdriver/vfs/vfs_test.go | 41 + engine/daemon/graphdriver/windows/windows.go | 957 + engine/daemon/graphdriver/zfs/MAINTAINERS | 2 + engine/daemon/graphdriver/zfs/zfs.go | 432 + engine/daemon/graphdriver/zfs/zfs_freebsd.go | 38 + engine/daemon/graphdriver/zfs/zfs_linux.go | 28 + engine/daemon/graphdriver/zfs/zfs_test.go | 35 + .../daemon/graphdriver/zfs/zfs_unsupported.go | 11 + engine/daemon/health.go | 381 + engine/daemon/health_test.go | 154 + engine/daemon/images/cache.go | 27 + engine/daemon/images/image.go | 64 + engine/daemon/images/image_builder.go | 225 + engine/daemon/images/image_commit.go | 127 + engine/daemon/images/image_delete.go | 414 + engine/daemon/images/image_events.go | 39 + engine/daemon/images/image_exporter.go | 25 + engine/daemon/images/image_history.go | 87 + engine/daemon/images/image_import.go | 138 + engine/daemon/images/image_inspect.go | 104 + 
engine/daemon/images/image_prune.go | 211 + engine/daemon/images/image_pull.go | 126 + engine/daemon/images/image_push.go | 66 + engine/daemon/images/image_search.go | 95 + engine/daemon/images/image_search_test.go | 357 + engine/daemon/images/image_tag.go | 41 + engine/daemon/images/image_unix.go | 45 + engine/daemon/images/image_windows.go | 41 + engine/daemon/images/images.go | 359 + engine/daemon/images/locals.go | 32 + engine/daemon/images/service.go | 249 + engine/daemon/info.go | 266 + engine/daemon/info_test.go | 53 + engine/daemon/info_unix.go | 253 + engine/daemon/info_unix_test.go | 103 + engine/daemon/info_windows.go | 20 + engine/daemon/initlayer/setup_unix.go | 73 + engine/daemon/initlayer/setup_windows.go | 16 + engine/daemon/inspect.go | 273 + engine/daemon/inspect_linux.go | 73 + engine/daemon/inspect_test.go | 33 + engine/daemon/inspect_windows.go | 26 + engine/daemon/keys.go | 59 + engine/daemon/keys_unsupported.go | 8 + engine/daemon/kill.go | 179 + engine/daemon/licensing.go | 10 + engine/daemon/licensing_test.go | 18 + engine/daemon/links.go | 91 + engine/daemon/links/links.go | 141 + engine/daemon/links/links_test.go | 213 + engine/daemon/list.go | 608 + engine/daemon/list_test.go | 145 + engine/daemon/list_unix.go | 11 + engine/daemon/list_windows.go | 20 + engine/daemon/listeners/group_unix.go | 24 + engine/daemon/listeners/listeners_linux.go | 107 + engine/daemon/listeners/listeners_windows.go | 54 + engine/daemon/logdrivers_linux.go | 16 + engine/daemon/logdrivers_windows.go | 15 + engine/daemon/logger/adapter.go | 136 + engine/daemon/logger/adapter_test.go | 216 + .../daemon/logger/awslogs/cloudwatchlogs.go | 865 + .../logger/awslogs/cloudwatchlogs_test.go | 1702 + .../logger/awslogs/cwlogsiface_mock_test.go | 119 + engine/daemon/logger/copier.go | 186 + engine/daemon/logger/copier_test.go | 484 + .../daemon/logger/etwlogs/etwlogs_windows.go | 168 + engine/daemon/logger/factory.go | 162 + engine/daemon/logger/fluentd/fluentd.go | 266 + 
engine/daemon/logger/gcplogs/gcplogging.go | 244 + .../daemon/logger/gcplogs/gcplogging_linux.go | 29 + .../logger/gcplogs/gcplogging_others.go | 7 + engine/daemon/logger/gelf/gelf.go | 268 + engine/daemon/logger/gelf/gelf_test.go | 260 + engine/daemon/logger/journald/journald.go | 123 + .../daemon/logger/journald/journald_test.go | 23 + .../logger/journald/journald_unsupported.go | 6 + engine/daemon/logger/journald/read.go | 387 + engine/daemon/logger/journald/read_native.go | 6 + .../logger/journald/read_native_compat.go | 6 + .../logger/journald/read_unsupported.go | 7 + .../daemon/logger/jsonfilelog/jsonfilelog.go | 186 + .../logger/jsonfilelog/jsonfilelog_test.go | 319 + .../logger/jsonfilelog/jsonlog/jsonlog.go | 25 + .../jsonfilelog/jsonlog/jsonlogbytes.go | 125 + .../jsonfilelog/jsonlog/jsonlogbytes_test.go | 51 + .../jsonfilelog/jsonlog/time_marshalling.go | 20 + .../jsonlog/time_marshalling_test.go | 34 + engine/daemon/logger/jsonfilelog/read.go | 97 + engine/daemon/logger/jsonfilelog/read_test.go | 93 + engine/daemon/logger/local/config.go | 36 + engine/daemon/logger/local/doc.go | 9 + engine/daemon/logger/local/local.go | 218 + engine/daemon/logger/local/local_test.go | 220 + engine/daemon/logger/local/read.go | 174 + engine/daemon/logger/logentries/logentries.go | 115 + engine/daemon/logger/logger.go | 162 + engine/daemon/logger/logger_test.go | 21 + engine/daemon/logger/loggerutils/log_tag.go | 31 + .../daemon/logger/loggerutils/log_tag_test.go | 47 + engine/daemon/logger/loggerutils/logfile.go | 728 + .../daemon/logger/loggerutils/logfile_test.go | 203 + engine/daemon/logger/loginfo.go | 129 + engine/daemon/logger/metrics.go | 21 + engine/daemon/logger/plugin.go | 116 + engine/daemon/logger/plugin_unix.go | 23 + engine/daemon/logger/plugin_unsupported.go | 12 + engine/daemon/logger/proxy.go | 107 + engine/daemon/logger/ring.go | 223 + engine/daemon/logger/ring_test.go | 299 + engine/daemon/logger/splunk/splunk.go | 649 + 
engine/daemon/logger/splunk/splunk_test.go | 1394 + .../logger/splunk/splunkhecmock_test.go | 182 + engine/daemon/logger/syslog/syslog.go | 266 + engine/daemon/logger/syslog/syslog_test.go | 159 + engine/daemon/logger/templates/templates.go | 50 + .../daemon/logger/templates/templates_test.go | 19 + engine/daemon/logs.go | 209 + engine/daemon/logs_test.go | 15 + engine/daemon/metrics.go | 192 + engine/daemon/metrics_unix.go | 60 + engine/daemon/metrics_unsupported.go | 12 + engine/daemon/monitor.go | 217 + engine/daemon/mounts.go | 55 + engine/daemon/names.go | 113 + engine/daemon/names/names.go | 9 + engine/daemon/network.go | 1095 + engine/daemon/network/filter.go | 130 + engine/daemon/network/filter_test.go | 206 + engine/daemon/network/settings.go | 81 + engine/daemon/nvidia_linux.go | 105 + engine/daemon/oci_linux.go | 982 + engine/daemon/oci_linux_test.go | 150 + engine/daemon/oci_utils.go | 16 + engine/daemon/oci_windows.go | 521 + engine/daemon/oci_windows_test.go | 314 + engine/daemon/pause.go | 55 + engine/daemon/prune.go | 250 + engine/daemon/reload.go | 336 + engine/daemon/reload_test.go | 573 + engine/daemon/reload_unix.go | 56 + engine/daemon/reload_windows.go | 9 + engine/daemon/rename.go | 124 + engine/daemon/resize.go | 50 + engine/daemon/resize_test.go | 103 + engine/daemon/restart.go | 83 + engine/daemon/seccomp_disabled.go | 24 + engine/daemon/seccomp_linux.go | 61 + engine/daemon/seccomp_unsupported.go | 20 + engine/daemon/secrets.go | 23 + engine/daemon/secrets_linux.go | 5 + engine/daemon/secrets_unsupported.go | 7 + engine/daemon/secrets_windows.go | 5 + engine/daemon/selinux_linux.go | 15 + engine/daemon/selinux_unsupported.go | 13 + engine/daemon/start.go | 267 + engine/daemon/start_unix.go | 57 + engine/daemon/start_windows.go | 44 + engine/daemon/stats.go | 155 + engine/daemon/stats/collector.go | 163 + engine/daemon/stats/collector_unix.go | 75 + engine/daemon/stats/collector_windows.go | 17 + engine/daemon/stats_collector.go | 26 + 
engine/daemon/stats_unix.go | 57 + engine/daemon/stats_windows.go | 11 + engine/daemon/stop.go | 89 + engine/daemon/testdata/keyfile | 7 + engine/daemon/top_unix.go | 189 + engine/daemon/top_unix_test.go | 79 + engine/daemon/top_windows.go | 63 + engine/daemon/trustkey.go | 57 + engine/daemon/trustkey_test.go | 71 + engine/daemon/unpause.go | 44 + engine/daemon/update.go | 95 + engine/daemon/update_linux.go | 55 + engine/daemon/update_windows.go | 11 + engine/daemon/util_test.go | 72 + engine/daemon/volumes.go | 423 + engine/daemon/volumes_linux.go | 36 + engine/daemon/volumes_linux_test.go | 56 + engine/daemon/volumes_unit_test.go | 42 + engine/daemon/volumes_unix.go | 161 + engine/daemon/volumes_unix_test.go | 256 + engine/daemon/volumes_windows.go | 51 + engine/daemon/wait.go | 23 + engine/daemon/workdir.go | 20 + engine/distribution/config.go | 266 + engine/distribution/errors.go | 207 + engine/distribution/errors_test.go | 85 + .../fixtures/validate_manifest/bad_manifest | 38 + .../validate_manifest/extra_data_manifest | 46 + .../fixtures/validate_manifest/good_manifest | 38 + engine/distribution/metadata/metadata.go | 75 + engine/distribution/metadata/v1_id_service.go | 51 + .../metadata/v1_id_service_test.go | 88 + .../metadata/v2_metadata_service.go | 241 + .../metadata/v2_metadata_service_test.go | 115 + engine/distribution/oci.go | 29 + engine/distribution/pull.go | 196 + engine/distribution/pull_v2.go | 989 + engine/distribution/pull_v2_test.go | 207 + engine/distribution/pull_v2_unix.go | 67 + engine/distribution/pull_v2_windows.go | 146 + engine/distribution/push.go | 180 + engine/distribution/push_v2.go | 712 + engine/distribution/push_v2_test.go | 740 + engine/distribution/registry.go | 158 + engine/distribution/registry_unit_test.go | 127 + engine/distribution/utils/progress.go | 44 + engine/distribution/xfer/download.go | 474 + engine/distribution/xfer/download_test.go | 362 + engine/distribution/xfer/transfer.go | 401 + 
engine/distribution/xfer/transfer_test.go | 410 + engine/distribution/xfer/upload.go | 174 + engine/distribution/xfer/upload_test.go | 134 + engine/dockerversion/useragent.go | 76 + engine/dockerversion/version_lib.go | 17 + engine/docs/api/v1.18.md | 2179 + engine/docs/api/v1.19.md | 2259 + engine/docs/api/v1.20.md | 2414 + engine/docs/api/v1.21.md | 3003 + engine/docs/api/v1.22.md | 3343 + engine/docs/api/v1.23.md | 3459 + engine/docs/api/v1.24.md | 5377 ++ engine/docs/api/version-history.md | 503 + engine/docs/contributing/README.md | 8 + .../docs/contributing/images/branch-sig.png | Bin 0 -> 56537 bytes .../contributing/images/contributor-edit.png | Bin 0 -> 17933 bytes engine/docs/contributing/images/copy_url.png | Bin 0 -> 69486 bytes .../docs/contributing/images/fork_docker.png | Bin 0 -> 52190 bytes engine/docs/contributing/images/git_bash.png | Bin 0 -> 26097 bytes .../docs/contributing/images/list_example.png | Bin 0 -> 51194 bytes engine/docs/contributing/set-up-dev-env.md | 372 + engine/docs/contributing/set-up-git.md | 280 + engine/docs/contributing/software-req-win.md | 177 + engine/docs/contributing/software-required.md | 94 + engine/docs/contributing/test.md | 244 + engine/docs/contributing/who-written-for.md | 49 + engine/docs/rootless.md | 104 + engine/docs/static_files/contributors.png | Bin 0 -> 23100 bytes .../docs/static_files/moby-project-logo.png | Bin 0 -> 20458 bytes engine/errdefs/defs.go | 69 + engine/errdefs/doc.go | 8 + engine/errdefs/helpers.go | 227 + engine/errdefs/helpers_test.go | 181 + engine/errdefs/http_helpers.go | 198 + engine/errdefs/http_helpers_test.go | 92 + engine/errdefs/is.go | 107 + engine/hack/README.md | 55 + engine/hack/ci/arm | 10 + engine/hack/ci/experimental | 9 + engine/hack/ci/janky | 14 + engine/hack/ci/master | 13 + engine/hack/ci/powerpc | 6 + engine/hack/ci/windows.ps1 | 1043 + engine/hack/ci/z | 6 + engine/hack/dind | 33 + .../dockerfile/install/containerd.installer | 36 + 
.../dockerfile/install/dockercli.installer | 30 + .../dockerfile/install/gometalinter.installer | 12 + .../dockerfile/install/gotestsum.installer | 11 + engine/hack/dockerfile/install/install.sh | 30 + .../hack/dockerfile/install/proxy.installer | 38 + .../dockerfile/install/rootlesskit.installer | 36 + engine/hack/dockerfile/install/runc.installer | 30 + engine/hack/dockerfile/install/tini.installer | 14 + .../hack/dockerfile/install/tomlv.installer | 12 + engine/hack/dockerfile/install/vndr.installer | 11 + engine/hack/generate-authors.sh | 15 + engine/hack/generate-swagger-api.sh | 27 + engine/hack/make.ps1 | 498 + engine/hack/make.sh | 176 + engine/hack/make/.binary | 98 + engine/hack/make/.binary-setup | 13 + engine/hack/make/.detect-daemon-osarch | 43 + engine/hack/make/.ensure-emptyfs | 23 + engine/hack/make/.go-autogen | 89 + engine/hack/make/.go-autogen.ps1 | 97 + engine/hack/make/.integration-daemon-setup | 7 + engine/hack/make/.integration-daemon-start | 126 + engine/hack/make/.integration-daemon-stop | 30 + engine/hack/make/.integration-test-helpers | 188 + engine/hack/make/.resources-windows/common.rc | 38 + .../.resources-windows/docker.exe.manifest | 18 + .../hack/make/.resources-windows/docker.ico | Bin 0 -> 370070 bytes .../hack/make/.resources-windows/docker.png | Bin 0 -> 658195 bytes engine/hack/make/.resources-windows/docker.rc | 3 + .../hack/make/.resources-windows/dockerd.rc | 4 + .../make/.resources-windows/event_messages.mc | 39 + .../hack/make/.resources-windows/resources.go | 18 + engine/hack/make/README.md | 16 + engine/hack/make/binary | 10 + engine/hack/make/binary-daemon | 35 + .../hack/make/build-integration-test-binary | 7 + engine/hack/make/containerutility | 20 + engine/hack/make/cross | 37 + engine/hack/make/cross-platform-dependent | 6 + engine/hack/make/dynbinary | 9 + engine/hack/make/dynbinary-daemon | 11 + engine/hack/make/install-binary | 35 + engine/hack/make/run | 44 + engine/hack/make/test-docker-py | 65 + 
engine/hack/make/test-integration | 27 + engine/hack/make/test-integration-cli | 6 + engine/hack/make/test-integration-flaky | 35 + engine/hack/make/test-integration-shell | 9 + engine/hack/test/e2e-run.sh | 84 + engine/hack/test/unit | 28 + engine/hack/validate/.swagger-yamllint | 4 + engine/hack/validate/.validate | 33 + engine/hack/validate/all | 8 + .../hack/validate/changelog-date-descending | 12 + engine/hack/validate/changelog-well-formed | 25 + engine/hack/validate/dco | 55 + engine/hack/validate/default | 16 + engine/hack/validate/default-seccomp | 28 + .../hack/validate/deprecate-integration-cli | 25 + engine/hack/validate/gometalinter | 14 + engine/hack/validate/gometalinter.json | 26 + engine/hack/validate/pkg-imports | 33 + engine/hack/validate/swagger | 13 + engine/hack/validate/swagger-gen | 29 + engine/hack/validate/toml | 31 + engine/hack/validate/vendor | 51 + engine/hack/vendor.sh | 15 + engine/image/cache/cache.go | 253 + engine/image/cache/compare.go | 63 + engine/image/cache/compare_test.go | 126 + engine/image/fs.go | 175 + engine/image/fs_test.go | 270 + engine/image/image.go | 232 + engine/image/image_test.go | 125 + engine/image/rootfs.go | 53 + engine/image/spec/README.md | 46 + engine/image/spec/v1.1.md | 621 + engine/image/spec/v1.2.md | 677 + engine/image/spec/v1.md | 562 + engine/image/store.go | 346 + engine/image/store_test.go | 197 + engine/image/tarexport/load.go | 432 + engine/image/tarexport/save.go | 431 + engine/image/tarexport/tarexport.go | 47 + engine/image/v1/imagev1.go | 150 + engine/image/v1/imagev1_test.go | 55 + engine/integration-cli/benchmark_test.go | 95 + engine/integration-cli/check_test.go | 429 + engine/integration-cli/checker/checker.go | 84 + engine/integration-cli/cli/build/build.go | 82 + engine/integration-cli/cli/cli.go | 226 + engine/integration-cli/daemon/daemon.go | 142 + engine/integration-cli/daemon/daemon_swarm.go | 213 + .../integration-cli/daemon_swarm_hack_test.go | 24 + 
.../integration-cli/docker_api_attach_test.go | 261 + .../integration-cli/docker_api_build_test.go | 559 + .../docker_api_build_windows_test.go | 38 + .../docker_api_containers_test.go | 2242 + .../docker_api_containers_windows_test.go | 76 + .../docker_api_exec_resize_test.go | 113 + .../integration-cli/docker_api_exec_test.go | 306 + .../integration-cli/docker_api_images_test.go | 208 + .../docker_api_inspect_test.go | 180 + .../integration-cli/docker_api_logs_test.go | 215 + .../docker_api_network_test.go | 378 + .../integration-cli/docker_api_stats_test.go | 311 + .../docker_api_swarm_node_test.go | 128 + .../docker_api_swarm_service_test.go | 599 + .../integration-cli/docker_api_swarm_test.go | 1046 + engine/integration-cli/docker_api_test.go | 110 + .../integration-cli/docker_cli_attach_test.go | 180 + .../docker_cli_attach_unix_test.go | 171 + .../integration-cli/docker_cli_build_test.go | 6220 ++ .../docker_cli_build_unix_test.go | 228 + .../docker_cli_by_digest_test.go | 690 + .../integration-cli/docker_cli_commit_test.go | 166 + .../docker_cli_cp_from_container_test.go | 399 + engine/integration-cli/docker_cli_cp_test.go | 607 + .../docker_cli_cp_to_container_test.go | 495 + .../docker_cli_cp_to_container_unix_test.go | 81 + .../docker_cli_cp_utils_test.go | 305 + .../integration-cli/docker_cli_create_test.go | 380 + .../docker_cli_daemon_plugins_test.go | 328 + .../integration-cli/docker_cli_daemon_test.go | 2973 + .../integration-cli/docker_cli_events_test.go | 753 + .../docker_cli_events_unix_test.go | 507 + .../integration-cli/docker_cli_exec_test.go | 531 + .../docker_cli_exec_unix_test.go | 97 + .../docker_cli_external_volume_driver_test.go | 619 + .../integration-cli/docker_cli_health_test.go | 167 + .../docker_cli_history_test.go | 121 + .../integration-cli/docker_cli_images_test.go | 364 + .../integration-cli/docker_cli_import_test.go | 142 + .../integration-cli/docker_cli_info_test.go | 237 + .../docker_cli_info_unix_test.go | 17 + 
.../docker_cli_inspect_test.go | 458 + .../integration-cli/docker_cli_links_test.go | 240 + .../integration-cli/docker_cli_login_test.go | 31 + .../integration-cli/docker_cli_logout_test.go | 107 + .../docker_cli_logs_bench_test.go | 31 + .../integration-cli/docker_cli_logs_test.go | 336 + .../docker_cli_netmode_test.go | 86 + .../docker_cli_network_test.go | 13 + .../docker_cli_network_unix_test.go | 1791 + .../docker_cli_plugins_logdriver_test.go | 48 + .../docker_cli_plugins_test.go | 464 + .../integration-cli/docker_cli_port_test.go | 345 + .../integration-cli/docker_cli_proxy_test.go | 50 + .../docker_cli_prune_unix_test.go | 289 + engine/integration-cli/docker_cli_ps_test.go | 864 + .../docker_cli_pull_local_test.go | 467 + .../integration-cli/docker_cli_pull_test.go | 277 + .../integration-cli/docker_cli_push_test.go | 383 + .../docker_cli_registry_user_agent_test.go | 104 + .../docker_cli_restart_test.go | 324 + engine/integration-cli/docker_cli_rmi_test.go | 335 + engine/integration-cli/docker_cli_run_test.go | 4541 ++ .../docker_cli_run_unix_test.go | 1582 + .../docker_cli_save_load_test.go | 406 + .../docker_cli_save_load_unix_test.go | 107 + .../integration-cli/docker_cli_search_test.go | 80 + .../docker_cli_service_create_test.go | 449 + .../docker_cli_service_health_test.go | 140 + .../docker_cli_service_logs_test.go | 389 + .../docker_cli_service_scale_test.go | 57 + engine/integration-cli/docker_cli_sni_test.go | 45 + .../integration-cli/docker_cli_start_test.go | 199 + .../integration-cli/docker_cli_stats_test.go | 182 + .../integration-cli/docker_cli_swarm_test.go | 1993 + .../docker_cli_swarm_unix_test.go | 107 + engine/integration-cli/docker_cli_top_test.go | 73 + .../docker_cli_update_unix_test.go | 337 + .../integration-cli/docker_cli_userns_test.go | 99 + .../docker_cli_v2_only_test.go | 59 + .../integration-cli/docker_cli_volume_test.go | 597 + .../docker_deprecated_api_v124_test.go | 251 + .../docker_deprecated_api_v124_unix_test.go | 31 + 
.../docker_hub_pull_suite_test.go | 81 + engine/integration-cli/docker_utils_test.go | 484 + .../environment/environment.go | 49 + engine/integration-cli/events_utils_test.go | 206 + .../auth/docker-credential-shell-test | 55 + .../fixtures/credentialspecs/valid.json | 25 + engine/integration-cli/fixtures/https/ca.pem | 1 + .../fixtures/https/client-cert.pem | 1 + .../fixtures/https/client-key.pem | 1 + .../fixtures/https/client-rogue-cert.pem | 73 + .../fixtures/https/client-rogue-key.pem | 16 + .../fixtures/https/server-cert.pem | 1 + .../fixtures/https/server-key.pem | 1 + .../fixtures/https/server-rogue-cert.pem | 76 + .../fixtures/https/server-rogue-key.pem | 16 + .../fixtures/registry/cert.pem | 21 + .../fixtures_linux_daemon_test.go | 142 + .../requirement/requirement.go | 34 + engine/integration-cli/requirements_test.go | 195 + .../integration-cli/requirements_unix_test.go | 105 + .../test_vars_noseccomp_test.go | 8 + .../integration-cli/test_vars_seccomp_test.go | 8 + engine/integration-cli/test_vars_test.go | 11 + engine/integration-cli/test_vars_unix_test.go | 10 + .../integration-cli/test_vars_windows_test.go | 11 + .../integration-cli/testdata/emptyLayer.tar | Bin 0 -> 30720 bytes engine/integration-cli/utils_test.go | 183 + .../integration/build/build_session_test.go | 135 + engine/integration/build/build_squash_test.go | 115 + engine/integration/build/build_test.go | 681 + engine/integration/build/main_test.go | 33 + .../Dockerfile.TestBuildMultiStageCopy | 20 + .../Dockerfile.testBuildPreserveOwnership | 57 + engine/integration/config/config_test.go | 450 + engine/integration/config/main_test.go | 33 + .../integration/container/checkpoint_test.go | 164 + .../integration/container/container_test.go | 42 + engine/integration/container/copy_test.go | 163 + engine/integration/container/create_test.go | 537 + .../container/daemon_linux_test.go | 122 + engine/integration/container/diff_test.go | 43 + engine/integration/container/exec_test.go | 134 + 
engine/integration/container/export_test.go | 76 + engine/integration/container/health_test.go | 78 + engine/integration/container/inspect_test.go | 48 + .../container/ipcmode_linux_test.go | 324 + engine/integration/container/kill_test.go | 182 + .../integration/container/links_linux_test.go | 56 + engine/integration/container/logs_test.go | 34 + engine/integration/container/main_test.go | 33 + .../container/mounts_linux_test.go | 267 + engine/integration/container/nat_test.go | 124 + engine/integration/container/pause_test.go | 97 + engine/integration/container/ps_test.go | 48 + engine/integration/container/remove_test.go | 111 + engine/integration/container/rename_test.go | 215 + engine/integration/container/resize_test.go | 67 + engine/integration/container/restart_test.go | 114 + .../integration/container/run_linux_test.go | 95 + engine/integration/container/stats_test.go | 42 + .../integration/container/stop_linux_test.go | 100 + engine/integration/container/stop_test.go | 39 + .../container/stop_windows_test.go | 68 + .../container/update_linux_test.go | 168 + engine/integration/container/update_test.go | 61 + engine/integration/container/wait_test.go | 102 + engine/integration/doc.go | 3 + engine/integration/image/commit_test.go | 48 + engine/integration/image/import_test.go | 47 + engine/integration/image/list_test.go | 52 + engine/integration/image/main_test.go | 33 + engine/integration/image/pull_test.go | 24 + engine/integration/image/remove_test.go | 60 + engine/integration/image/tag_test.go | 141 + .../internal/container/container.go | 59 + engine/integration/internal/container/exec.go | 86 + engine/integration/internal/container/ops.go | 162 + .../integration/internal/container/states.go | 59 + .../integration/internal/network/network.go | 35 + engine/integration/internal/network/ops.go | 94 + engine/integration/internal/network/states.go | 20 + .../internal/requirement/requirement.go | 27 + .../internal/requirement/requirement_linux.go | 31 + 
.../requirement/requirement_windows.go | 12 + engine/integration/internal/swarm/service.go | 224 + engine/integration/internal/swarm/states.go | 84 + engine/integration/network/delete_test.go | 91 + engine/integration/network/helpers.go | 94 + engine/integration/network/helpers_windows.go | 49 + engine/integration/network/inspect_test.go | 102 + .../integration/network/ipvlan/ipvlan_test.go | 448 + .../integration/network/ipvlan/main_test.go | 35 + .../network/ipvlan/main_windows_test.go | 1 + .../network/macvlan/macvlan_test.go | 271 + .../integration/network/macvlan/main_test.go | 35 + .../network/macvlan/main_windows_test.go | 1 + engine/integration/network/main_test.go | 33 + engine/integration/network/network_test.go | 91 + engine/integration/network/service_test.go | 431 + .../plugin/authz/authz_plugin_test.go | 500 + .../plugin/authz/authz_plugin_v2_test.go | 170 + engine/integration/plugin/authz/main_test.go | 181 + .../plugin/authz/main_windows_test.go | 1 + engine/integration/plugin/common/main_test.go | 27 + .../integration/plugin/common/plugin_test.go | 38 + .../plugin/graphdriver/external_test.go | 462 + .../plugin/graphdriver/main_test.go | 36 + .../plugin/logging/cmd/close_on_start/main.go | 48 + .../plugin/logging/cmd/dummy/main.go | 19 + .../plugin/logging/helpers_test.go | 67 + .../plugin/logging/logging_linux_test.go | 79 + .../integration/plugin/logging/main_test.go | 29 + .../plugin/logging/validation_test.go | 37 + engine/integration/plugin/pkg_test.go | 1 + .../plugin/volumes/cmd/dummy/main.go | 19 + .../plugin/volumes/helpers_test.go | 70 + .../integration/plugin/volumes/main_test.go | 32 + .../integration/plugin/volumes/mounts_test.go | 58 + engine/integration/secret/main_test.go | 33 + engine/integration/secret/secret_test.go | 417 + engine/integration/service/create_test.go | 442 + engine/integration/service/inspect_test.go | 134 + engine/integration/service/main_test.go | 33 + engine/integration/service/network_test.go | 117 + 
engine/integration/service/plugin_test.go | 121 + engine/integration/service/update_test.go | 287 + engine/integration/session/main_test.go | 33 + engine/integration/session/session_test.go | 61 + .../system/cgroupdriver_systemd_test.go | 55 + engine/integration/system/event_test.go | 124 + engine/integration/system/info_linux_test.go | 45 + engine/integration/system/info_test.go | 67 + engine/integration/system/login_test.go | 28 + engine/integration/system/main_test.go | 33 + engine/integration/system/ping_test.go | 59 + engine/integration/system/version_test.go | 23 + engine/integration/testdata/https/ca.pem | 23 + .../testdata/https/client-cert.pem | 73 + .../integration/testdata/https/client-key.pem | 16 + .../testdata/https/server-cert.pem | 76 + .../integration/testdata/https/server-key.pem | 16 + engine/integration/volume/main_test.go | 33 + engine/integration/volume/volume_test.go | 142 + engine/internal/test/daemon/config.go | 82 + engine/internal/test/daemon/container.go | 40 + engine/internal/test/daemon/daemon.go | 722 + engine/internal/test/daemon/daemon_unix.go | 39 + engine/internal/test/daemon/daemon_windows.go | 25 + engine/internal/test/daemon/node.go | 89 + engine/internal/test/daemon/ops.go | 65 + engine/internal/test/daemon/plugin.go | 75 + engine/internal/test/daemon/secret.go | 84 + engine/internal/test/daemon/service.go | 131 + engine/internal/test/daemon/swarm.go | 223 + engine/internal/test/environment/clean.go | 207 + .../internal/test/environment/environment.go | 190 + engine/internal/test/environment/protect.go | 254 + engine/internal/test/fakecontext/context.go | 131 + engine/internal/test/fakegit/fakegit.go | 136 + engine/internal/test/fakestorage/fixtures.go | 92 + engine/internal/test/fakestorage/storage.go | 200 + engine/internal/test/fixtures/load/frozen.go | 196 + .../test/fixtures/plugin/basic/basic.go | 34 + .../internal/test/fixtures/plugin/plugin.go | 216 + engine/internal/test/helper.go | 6 + 
engine/internal/test/registry/ops.go | 26 + engine/internal/test/registry/registry.go | 255 + .../internal/test/registry/registry_mock.go | 71 + engine/internal/test/request/npipe.go | 12 + engine/internal/test/request/npipe_windows.go | 12 + engine/internal/test/request/ops.go | 78 + engine/internal/test/request/request.go | 223 + engine/internal/test/suite/interfaces.go | 33 + engine/internal/test/suite/suite.go | 72 + engine/internal/test/suite/testify.LICENSE | 21 + engine/internal/testutil/helpers.go | 17 + engine/internal/testutil/stringutils.go | 14 + engine/internal/testutil/stringutils_test.go | 34 + engine/layer/empty.go | 61 + engine/layer/empty_test.go | 52 + engine/layer/filestore.go | 355 + engine/layer/filestore_test.go | 104 + engine/layer/filestore_unix.go | 15 + engine/layer/filestore_windows.go | 35 + engine/layer/layer.go | 237 + engine/layer/layer_store.go | 777 + engine/layer/layer_store_windows.go | 11 + engine/layer/layer_test.go | 768 + engine/layer/layer_unix.go | 9 + engine/layer/layer_unix_test.go | 73 + engine/layer/layer_windows.go | 46 + engine/layer/migration.go | 193 + engine/layer/migration_test.go | 269 + engine/layer/mount_test.go | 239 + engine/layer/mounted_layer.go | 112 + engine/layer/ro_layer.go | 182 + engine/layer/ro_layer_windows.go | 9 + engine/libcontainerd/libcontainerd_linux.go | 14 + engine/libcontainerd/libcontainerd_windows.go | 19 + engine/libcontainerd/local/local_windows.go | 1432 + engine/libcontainerd/local/process_windows.go | 44 + engine/libcontainerd/local/utils_windows.go | 43 + .../libcontainerd/local/utils_windows_test.go | 13 + engine/libcontainerd/queue/queue.go | 37 + engine/libcontainerd/queue/queue_test.go | 31 + engine/libcontainerd/remote/client.go | 848 + .../libcontainerd/remote/client_io_windows.go | 161 + engine/libcontainerd/remote/client_linux.go | 126 + engine/libcontainerd/remote/client_windows.go | 91 + .../libcontainerd/supervisor/remote_daemon.go | 327 + 
.../supervisor/remote_daemon_linux.go | 69 + .../supervisor/remote_daemon_options.go | 55 + .../supervisor/remote_daemon_options_linux.go | 9 + .../supervisor/remote_daemon_windows.go | 48 + .../libcontainerd/supervisor/utils_linux.go | 12 + .../libcontainerd/supervisor/utils_windows.go | 9 + engine/libcontainerd/types/types.go | 78 + engine/libcontainerd/types/types_linux.go | 31 + engine/libcontainerd/types/types_windows.go | 37 + engine/oci/caps/utils.go | 169 + engine/oci/defaults.go | 213 + engine/oci/devices_linux.go | 86 + engine/oci/devices_unsupported.go | 20 + engine/oci/namespaces.go | 13 + engine/oci/oci.go | 67 + engine/opts/address_pools.go | 84 + engine/opts/address_pools_test.go | 20 + engine/opts/env.go | 48 + engine/opts/env_test.go | 124 + engine/opts/hosts.go | 176 + engine/opts/hosts_test.go | 181 + engine/opts/hosts_unix.go | 8 + engine/opts/hosts_windows.go | 4 + engine/opts/ip.go | 47 + engine/opts/ip_test.go | 54 + engine/opts/opts.go | 337 + engine/opts/opts_test.go | 264 + engine/opts/opts_unix.go | 6 + engine/opts/opts_windows.go | 56 + engine/opts/quotedstring.go | 37 + engine/opts/quotedstring_test.go | 30 + engine/opts/runtime.go | 79 + engine/opts/ulimit.go | 81 + engine/opts/ulimit_test.go | 42 + engine/pkg/README.md | 11 + engine/pkg/aaparser/aaparser.go | 89 + engine/pkg/aaparser/aaparser_test.go | 73 + engine/pkg/archive/README.md | 1 + engine/pkg/archive/archive.go | 1284 + engine/pkg/archive/archive_linux.go | 261 + engine/pkg/archive/archive_linux_test.go | 294 + engine/pkg/archive/archive_other.go | 7 + engine/pkg/archive/archive_test.go | 1340 + engine/pkg/archive/archive_unix.go | 115 + engine/pkg/archive/archive_unix_test.go | 324 + engine/pkg/archive/archive_windows.go | 67 + engine/pkg/archive/archive_windows_test.go | 86 + engine/pkg/archive/changes.go | 445 + engine/pkg/archive/changes_linux.go | 286 + engine/pkg/archive/changes_other.go | 97 + engine/pkg/archive/changes_posix_test.go | 127 + 
engine/pkg/archive/changes_test.go | 522 + engine/pkg/archive/changes_unix.go | 43 + engine/pkg/archive/changes_windows.go | 34 + engine/pkg/archive/copy.go | 480 + engine/pkg/archive/copy_unix.go | 11 + engine/pkg/archive/copy_unix_test.go | 982 + engine/pkg/archive/copy_windows.go | 9 + engine/pkg/archive/diff.go | 260 + engine/pkg/archive/diff_test.go | 373 + engine/pkg/archive/example_changes.go | 97 + engine/pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes engine/pkg/archive/time_linux.go | 16 + engine/pkg/archive/time_unsupported.go | 16 + engine/pkg/archive/utils_test.go | 166 + engine/pkg/archive/whiteouts.go | 23 + engine/pkg/archive/wrap.go | 59 + engine/pkg/archive/wrap_test.go | 92 + engine/pkg/authorization/api.go | 88 + engine/pkg/authorization/api_test.go | 76 + engine/pkg/authorization/authz.go | 189 + engine/pkg/authorization/authz_unix_test.go | 342 + engine/pkg/authorization/middleware.go | 110 + engine/pkg/authorization/middleware_test.go | 53 + .../pkg/authorization/middleware_unix_test.go | 66 + engine/pkg/authorization/plugin.go | 118 + engine/pkg/authorization/response.go | 210 + engine/pkg/broadcaster/unbuffered.go | 49 + engine/pkg/broadcaster/unbuffered_test.go | 161 + engine/pkg/capabilities/caps.go | 24 + engine/pkg/capabilities/caps_test.go | 72 + engine/pkg/chrootarchive/archive.go | 106 + engine/pkg/chrootarchive/archive_test.go | 413 + engine/pkg/chrootarchive/archive_unix.go | 208 + engine/pkg/chrootarchive/archive_unix_test.go | 171 + engine/pkg/chrootarchive/archive_windows.go | 29 + engine/pkg/chrootarchive/chroot_linux.go | 113 + engine/pkg/chrootarchive/chroot_unix.go | 16 + engine/pkg/chrootarchive/diff.go | 23 + engine/pkg/chrootarchive/diff_unix.go | 130 + engine/pkg/chrootarchive/diff_windows.go | 45 + engine/pkg/chrootarchive/init_unix.go | 29 + engine/pkg/chrootarchive/init_windows.go | 4 + engine/pkg/containerfs/archiver.go | 205 + engine/pkg/containerfs/containerfs.go | 87 + 
engine/pkg/containerfs/containerfs_unix.go | 10 + engine/pkg/containerfs/containerfs_windows.go | 15 + engine/pkg/devicemapper/devmapper.go | 826 + engine/pkg/devicemapper/devmapper_log.go | 124 + engine/pkg/devicemapper/devmapper_wrapper.go | 252 + .../devicemapper/devmapper_wrapper_dynamic.go | 6 + ...vmapper_wrapper_dynamic_deferred_remove.go | 35 + ...r_wrapper_dynamic_dlsym_deferred_remove.go | 128 + .../devmapper_wrapper_no_deferred_remove.go | 17 + engine/pkg/devicemapper/ioctl.go | 28 + engine/pkg/devicemapper/log.go | 11 + engine/pkg/directory/directory.go | 26 + engine/pkg/directory/directory_test.go | 193 + engine/pkg/directory/directory_unix.go | 54 + engine/pkg/directory/directory_windows.go | 42 + engine/pkg/discovery/README.md | 41 + engine/pkg/discovery/backends.go | 107 + engine/pkg/discovery/discovery.go | 35 + engine/pkg/discovery/discovery_test.go | 135 + engine/pkg/discovery/entry.go | 94 + engine/pkg/discovery/file/file.go | 107 + engine/pkg/discovery/file/file_test.go | 114 + engine/pkg/discovery/generator.go | 35 + engine/pkg/discovery/generator_test.go | 54 + engine/pkg/discovery/kv/kv.go | 192 + engine/pkg/discovery/kv/kv_test.go | 323 + engine/pkg/discovery/memory/memory.go | 93 + engine/pkg/discovery/memory/memory_test.go | 49 + engine/pkg/discovery/nodes/nodes.go | 54 + engine/pkg/discovery/nodes/nodes_test.go | 51 + engine/pkg/dmesg/dmesg_linux.go | 18 + engine/pkg/dmesg/dmesg_linux_test.go | 9 + engine/pkg/filenotify/filenotify.go | 40 + engine/pkg/filenotify/fsnotify.go | 18 + engine/pkg/filenotify/poller.go | 204 + engine/pkg/filenotify/poller_test.go | 119 + engine/pkg/fileutils/fileutils.go | 298 + engine/pkg/fileutils/fileutils_darwin.go | 27 + engine/pkg/fileutils/fileutils_test.go | 591 + engine/pkg/fileutils/fileutils_unix.go | 22 + engine/pkg/fileutils/fileutils_windows.go | 7 + engine/pkg/fsutils/fsutils_linux.go | 86 + engine/pkg/fsutils/fsutils_linux_test.go | 92 + engine/pkg/homedir/homedir_linux.go | 109 + 
engine/pkg/homedir/homedir_others.go | 33 + engine/pkg/homedir/homedir_test.go | 24 + engine/pkg/homedir/homedir_unix.go | 34 + engine/pkg/homedir/homedir_windows.go | 24 + engine/pkg/idtools/idtools.go | 264 + engine/pkg/idtools/idtools_test.go | 28 + engine/pkg/idtools/idtools_unix.go | 231 + engine/pkg/idtools/idtools_unix_test.go | 397 + engine/pkg/idtools/idtools_windows.go | 25 + engine/pkg/idtools/usergroupadd_linux.go | 164 + .../pkg/idtools/usergroupadd_unsupported.go | 12 + engine/pkg/idtools/utils_unix.go | 32 + engine/pkg/ioutils/buffer.go | 51 + engine/pkg/ioutils/buffer_test.go | 153 + engine/pkg/ioutils/bytespipe.go | 186 + engine/pkg/ioutils/bytespipe_test.go | 159 + engine/pkg/ioutils/fswriters.go | 162 + engine/pkg/ioutils/fswriters_test.go | 132 + engine/pkg/ioutils/readers.go | 157 + engine/pkg/ioutils/readers_test.go | 95 + engine/pkg/ioutils/temp_unix.go | 10 + engine/pkg/ioutils/temp_windows.go | 16 + engine/pkg/ioutils/writeflusher.go | 92 + engine/pkg/ioutils/writers.go | 66 + engine/pkg/ioutils/writers_test.go | 65 + engine/pkg/jsonmessage/jsonmessage.go | 283 + engine/pkg/jsonmessage/jsonmessage_test.go | 298 + engine/pkg/locker/README.md | 65 + engine/pkg/locker/locker.go | 112 + engine/pkg/locker/locker_test.go | 161 + engine/pkg/longpath/longpath.go | 26 + engine/pkg/longpath/longpath_test.go | 22 + engine/pkg/loopback/attach_loopback.go | 137 + engine/pkg/loopback/ioctl.go | 52 + engine/pkg/loopback/loop_wrapper.go | 52 + engine/pkg/loopback/loopback.go | 64 + engine/pkg/mount/flags.go | 137 + engine/pkg/mount/flags_freebsd.go | 49 + engine/pkg/mount/flags_linux.go | 87 + engine/pkg/mount/flags_unsupported.go | 31 + engine/pkg/mount/mount.go | 159 + engine/pkg/mount/mount_unix_test.go | 170 + engine/pkg/mount/mounter_freebsd.go | 59 + engine/pkg/mount/mounter_linux.go | 73 + engine/pkg/mount/mounter_linux_test.go | 228 + engine/pkg/mount/mounter_unsupported.go | 7 + engine/pkg/mount/mountinfo.go | 40 + 
engine/pkg/mount/mountinfo_freebsd.go | 55 + engine/pkg/mount/mountinfo_linux.go | 144 + engine/pkg/mount/mountinfo_linux_test.go | 556 + engine/pkg/mount/mountinfo_unsupported.go | 12 + engine/pkg/mount/mountinfo_windows.go | 6 + engine/pkg/mount/sharedsubtree_linux.go | 71 + engine/pkg/mount/sharedsubtree_linux_test.go | 349 + engine/pkg/mount/unmount_unix.go | 22 + engine/pkg/mount/unmount_unsupported.go | 7 + .../cmd/names-generator/main.go | 14 + engine/pkg/namesgenerator/names-generator.go | 847 + .../namesgenerator/names-generator_test.go | 27 + engine/pkg/parsers/kernel/kernel.go | 74 + engine/pkg/parsers/kernel/kernel_darwin.go | 56 + engine/pkg/parsers/kernel/kernel_unix.go | 35 + engine/pkg/parsers/kernel/kernel_unix_test.go | 97 + engine/pkg/parsers/kernel/kernel_windows.go | 51 + engine/pkg/parsers/kernel/uname_linux.go | 17 + engine/pkg/parsers/kernel/uname_solaris.go | 14 + .../pkg/parsers/kernel/uname_unsupported.go | 18 + .../operatingsystem/operatingsystem_linux.go | 77 + .../operatingsystem/operatingsystem_unix.go | 25 + .../operatingsystem_unix_test.go | 247 + .../operatingsystem_windows.go | 51 + engine/pkg/parsers/parsers.go | 97 + engine/pkg/parsers/parsers_test.go | 83 + engine/pkg/pidfile/pidfile.go | 53 + engine/pkg/pidfile/pidfile_darwin.go | 14 + engine/pkg/pidfile/pidfile_test.go | 38 + engine/pkg/pidfile/pidfile_unix.go | 16 + engine/pkg/pidfile/pidfile_windows.go | 25 + engine/pkg/platform/architecture_linux.go | 18 + engine/pkg/platform/architecture_unix.go | 20 + engine/pkg/platform/architecture_windows.go | 60 + engine/pkg/platform/platform.go | 23 + engine/pkg/plugingetter/getter.go | 52 + engine/pkg/plugins/client.go | 242 + engine/pkg/plugins/client_test.go | 276 + engine/pkg/plugins/discovery.go | 154 + engine/pkg/plugins/discovery_test.go | 152 + engine/pkg/plugins/discovery_unix.go | 5 + engine/pkg/plugins/discovery_unix_test.go | 159 + engine/pkg/plugins/discovery_windows.go | 8 + engine/pkg/plugins/errors.go | 33 + 
engine/pkg/plugins/plugin_test.go | 154 + engine/pkg/plugins/pluginrpc-gen/README.md | 58 + .../pkg/plugins/pluginrpc-gen/fixtures/foo.go | 83 + .../fixtures/otherfixture/spaceship.go | 4 + engine/pkg/plugins/pluginrpc-gen/main.go | 91 + engine/pkg/plugins/pluginrpc-gen/parser.go | 263 + .../pkg/plugins/pluginrpc-gen/parser_test.go | 222 + engine/pkg/plugins/pluginrpc-gen/template.go | 118 + engine/pkg/plugins/plugins.go | 337 + engine/pkg/plugins/plugins_unix.go | 9 + engine/pkg/plugins/plugins_windows.go | 7 + engine/pkg/plugins/transport/http.go | 36 + engine/pkg/plugins/transport/http_test.go | 21 + engine/pkg/plugins/transport/transport.go | 36 + engine/pkg/pools/pools.go | 137 + engine/pkg/pools/pools_test.go | 163 + engine/pkg/progress/progress.go | 93 + engine/pkg/progress/progressreader.go | 66 + engine/pkg/progress/progressreader_test.go | 75 + engine/pkg/pubsub/publisher.go | 121 + engine/pkg/pubsub/publisher_test.go | 142 + engine/pkg/reexec/README.md | 5 + engine/pkg/reexec/command_linux.go | 28 + engine/pkg/reexec/command_unix.go | 23 + engine/pkg/reexec/command_unsupported.go | 16 + engine/pkg/reexec/command_windows.go | 21 + engine/pkg/reexec/reexec.go | 47 + engine/pkg/reexec/reexec_test.go | 52 + engine/pkg/signal/README.md | 1 + engine/pkg/signal/signal.go | 54 + engine/pkg/signal/signal_darwin.go | 41 + engine/pkg/signal/signal_freebsd.go | 43 + engine/pkg/signal/signal_linux.go | 83 + engine/pkg/signal/signal_linux_mipsx.go | 84 + engine/pkg/signal/signal_linux_test.go | 59 + engine/pkg/signal/signal_test.go | 34 + engine/pkg/signal/signal_unix.go | 21 + engine/pkg/signal/signal_unsupported.go | 10 + engine/pkg/signal/signal_windows.go | 26 + engine/pkg/signal/testfiles/main.go | 43 + engine/pkg/signal/trap.go | 104 + engine/pkg/signal/trap_linux_test.go | 83 + engine/pkg/stdcopy/stdcopy.go | 190 + engine/pkg/stdcopy/stdcopy_test.go | 289 + engine/pkg/streamformatter/streamformatter.go | 159 + .../streamformatter/streamformatter_test.go | 112 + 
engine/pkg/streamformatter/streamwriter.go | 47 + .../pkg/streamformatter/streamwriter_test.go | 35 + engine/pkg/stringid/README.md | 1 + engine/pkg/stringid/stringid.go | 63 + engine/pkg/stringid/stringid_test.go | 64 + engine/pkg/symlink/LICENSE.APACHE | 191 + engine/pkg/symlink/LICENSE.BSD | 27 + engine/pkg/symlink/README.md | 6 + engine/pkg/symlink/fs.go | 144 + engine/pkg/symlink/fs_unix.go | 15 + engine/pkg/symlink/fs_unix_test.go | 407 + engine/pkg/symlink/fs_windows.go | 169 + engine/pkg/sysinfo/README.md | 1 + engine/pkg/sysinfo/numcpu.go | 12 + engine/pkg/sysinfo/numcpu_linux.go | 42 + engine/pkg/sysinfo/numcpu_windows.go | 35 + engine/pkg/sysinfo/sysinfo.go | 155 + engine/pkg/sysinfo/sysinfo_linux.go | 277 + engine/pkg/sysinfo/sysinfo_linux_test.go | 104 + engine/pkg/sysinfo/sysinfo_test.go | 26 + engine/pkg/sysinfo/sysinfo_unix.go | 9 + engine/pkg/sysinfo/sysinfo_windows.go | 7 + engine/pkg/system/args_windows.go | 16 + engine/pkg/system/chtimes.go | 31 + engine/pkg/system/chtimes_test.go | 94 + engine/pkg/system/chtimes_unix.go | 14 + engine/pkg/system/chtimes_unix_test.go | 91 + engine/pkg/system/chtimes_windows.go | 26 + engine/pkg/system/chtimes_windows_test.go | 86 + engine/pkg/system/errors.go | 13 + engine/pkg/system/exitcode.go | 19 + engine/pkg/system/filesys.go | 67 + engine/pkg/system/filesys_windows.go | 294 + engine/pkg/system/init.go | 22 + engine/pkg/system/init_unix.go | 12 + engine/pkg/system/init_windows.go | 41 + engine/pkg/system/lcow.go | 32 + engine/pkg/system/lcow_unix.go | 8 + engine/pkg/system/lcow_windows.go | 6 + engine/pkg/system/lstat_unix.go | 20 + engine/pkg/system/lstat_unix_test.go | 30 + engine/pkg/system/lstat_windows.go | 14 + engine/pkg/system/meminfo.go | 17 + engine/pkg/system/meminfo_linux.go | 65 + engine/pkg/system/meminfo_unix_test.go | 40 + engine/pkg/system/meminfo_unsupported.go | 8 + engine/pkg/system/meminfo_windows.go | 45 + engine/pkg/system/mknod.go | 22 + engine/pkg/system/mknod_windows.go | 11 + 
engine/pkg/system/path.go | 60 + engine/pkg/system/path_unix.go | 10 + engine/pkg/system/path_windows.go | 24 + engine/pkg/system/path_windows_test.go | 83 + engine/pkg/system/process_unix.go | 24 + engine/pkg/system/process_windows.go | 18 + engine/pkg/system/rm.go | 80 + engine/pkg/system/rm_test.go | 84 + engine/pkg/system/stat_darwin.go | 13 + engine/pkg/system/stat_freebsd.go | 13 + engine/pkg/system/stat_linux.go | 19 + engine/pkg/system/stat_openbsd.go | 13 + engine/pkg/system/stat_solaris.go | 13 + engine/pkg/system/stat_unix.go | 66 + engine/pkg/system/stat_unix_test.go | 40 + engine/pkg/system/stat_windows.go | 49 + engine/pkg/system/syscall_unix.go | 17 + engine/pkg/system/syscall_windows.go | 193 + engine/pkg/system/syscall_windows_test.go | 9 + engine/pkg/system/umask.go | 13 + engine/pkg/system/umask_windows.go | 7 + engine/pkg/system/utimes_freebsd.go | 24 + engine/pkg/system/utimes_linux.go | 25 + engine/pkg/system/utimes_unix_test.go | 68 + engine/pkg/system/utimes_unsupported.go | 10 + engine/pkg/system/xattrs_linux.go | 29 + engine/pkg/system/xattrs_unsupported.go | 13 + engine/pkg/tailfile/tailfile.go | 222 + engine/pkg/tailfile/tailfile_test.go | 327 + engine/pkg/tarsum/builder_context.go | 21 + engine/pkg/tarsum/builder_context_test.go | 67 + engine/pkg/tarsum/fileinfosums.go | 133 + engine/pkg/tarsum/fileinfosums_test.go | 62 + engine/pkg/tarsum/tarsum.go | 301 + engine/pkg/tarsum/tarsum_spec.md | 230 + engine/pkg/tarsum/tarsum_test.go | 658 + .../json | 1 + .../layer.tar | Bin 0 -> 9216 bytes .../json | 1 + .../layer.tar | Bin 0 -> 1536 bytes .../tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes engine/pkg/tarsum/testdata/xattr/json | 1 + engine/pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes 
engine/pkg/tarsum/versioning.go | 158 + engine/pkg/tarsum/versioning_test.go | 98 + engine/pkg/tarsum/writercloser.go | 22 + engine/pkg/term/ascii.go | 66 + engine/pkg/term/ascii_test.go | 25 + engine/pkg/term/proxy.go | 78 + engine/pkg/term/proxy_test.go | 151 + engine/pkg/term/tc.go | 20 + engine/pkg/term/term.go | 124 + engine/pkg/term/term_linux_test.go | 117 + engine/pkg/term/term_windows.go | 221 + engine/pkg/term/termios_bsd.go | 42 + engine/pkg/term/termios_linux.go | 39 + engine/pkg/term/windows/ansi_reader.go | 263 + engine/pkg/term/windows/ansi_writer.go | 64 + engine/pkg/term/windows/console.go | 35 + engine/pkg/term/windows/windows.go | 33 + engine/pkg/term/windows/windows_test.go | 3 + engine/pkg/term/winsize.go | 20 + engine/pkg/truncindex/truncindex.go | 139 + engine/pkg/truncindex/truncindex_test.go | 453 + engine/pkg/urlutil/urlutil.go | 52 + engine/pkg/urlutil/urlutil_test.go | 56 + engine/pkg/useragent/README.md | 1 + engine/pkg/useragent/useragent.go | 55 + engine/pkg/useragent/useragent_test.go | 31 + engine/plugin/backend_linux.go | 876 + engine/plugin/backend_linux_test.go | 81 + engine/plugin/backend_unsupported.go | 72 + engine/plugin/blobstore.go | 190 + engine/plugin/defs.go | 50 + engine/plugin/errors.go | 66 + engine/plugin/events.go | 111 + .../plugin/executor/containerd/containerd.go | 168 + engine/plugin/manager.go | 384 + engine/plugin/manager_linux.go | 335 + engine/plugin/manager_linux_test.go | 279 + engine/plugin/manager_test.go | 55 + engine/plugin/manager_windows.go | 28 + engine/plugin/store.go | 291 + engine/plugin/store_test.go | 64 + engine/plugin/v2/plugin.go | 311 + engine/plugin/v2/plugin_linux.go | 141 + engine/plugin/v2/plugin_unsupported.go | 14 + engine/plugin/v2/settable.go | 102 + engine/plugin/v2/settable_test.go | 91 + engine/poule.yml | 134 + engine/profiles/apparmor/apparmor.go | 135 + engine/profiles/apparmor/template.go | 50 + engine/profiles/seccomp/default.json | 795 + 
engine/profiles/seccomp/fixtures/example.json | 27 + engine/profiles/seccomp/generate.go | 32 + engine/profiles/seccomp/seccomp.go | 191 + engine/profiles/seccomp/seccomp_default.go | 671 + engine/profiles/seccomp/seccomp_test.go | 32 + .../profiles/seccomp/seccomp_unsupported.go | 12 + engine/project/ARM.md | 45 + engine/project/BRANCHES-AND-TAGS.md | 35 + engine/project/GOVERNANCE.md | 120 + engine/project/IRC-ADMINISTRATION.md | 37 + engine/project/ISSUE-TRIAGE.md | 132 + engine/project/PACKAGE-REPO-MAINTENANCE.md | 74 + engine/project/PACKAGERS.md | 328 + engine/project/PATCH-RELEASES.md | 68 + engine/project/PRINCIPLES.md | 19 + engine/project/README.md | 24 + engine/project/RELEASE-PROCESS.md | 78 + engine/project/REVIEWING.md | 246 + engine/project/TOOLS.md | 63 + engine/reference/errors.go | 25 + engine/reference/store.go | 348 + engine/reference/store_test.go | 358 + engine/registry/auth.go | 295 + engine/registry/auth_test.go | 120 + engine/registry/config.go | 436 + engine/registry/config_test.go | 381 + engine/registry/config_unix.go | 16 + engine/registry/config_windows.go | 18 + engine/registry/endpoint_test.go | 78 + engine/registry/endpoint_v1.go | 198 + engine/registry/errors.go | 31 + engine/registry/registry.go | 191 + engine/registry/registry_mock_test.go | 476 + engine/registry/registry_test.go | 934 + .../resumable/resumablerequestreader.go | 96 + .../resumable/resumablerequestreader_test.go | 257 + engine/registry/service.go | 313 + engine/registry/service_v2.go | 82 + engine/registry/session.go | 779 + engine/registry/types.go | 70 + engine/reports/2017-05-01.md | 35 + engine/reports/2017-05-08.md | 34 + engine/reports/2017-05-15.md | 52 + engine/reports/2017-06-05.md | 36 + engine/reports/2017-06-12.md | 78 + engine/reports/2017-06-26.md | 120 + engine/reports/builder/2017-05-01.md | 47 + engine/reports/builder/2017-05-08.md | 57 + engine/reports/builder/2017-05-15.md | 64 + engine/reports/builder/2017-05-22.md | 47 + 
engine/reports/builder/2017-05-29.md | 52 + engine/reports/builder/2017-06-05.md | 58 + engine/reports/builder/2017-06-12.md | 58 + engine/reports/builder/2017-06-26.md | 78 + engine/reports/builder/2017-07-10.md | 65 + engine/reports/builder/2017-07-17.md | 79 + engine/restartmanager/restartmanager.go | 133 + engine/restartmanager/restartmanager_test.go | 36 + engine/rootless/rootless.go | 25 + engine/rootless/specconv/specconv_linux.go | 38 + engine/runconfig/config.go | 81 + engine/runconfig/config_test.go | 190 + engine/runconfig/config_unix.go | 59 + engine/runconfig/config_windows.go | 19 + engine/runconfig/errors.go | 42 + .../fixtures/unix/container_config_1_14.json | 30 + .../fixtures/unix/container_config_1_17.json | 50 + .../fixtures/unix/container_config_1_19.json | 58 + .../unix/container_hostconfig_1_14.json | 18 + .../unix/container_hostconfig_1_19.json | 30 + .../windows/container_config_1_19.json | 58 + engine/runconfig/hostconfig.go | 79 + engine/runconfig/hostconfig_test.go | 273 + engine/runconfig/hostconfig_unix.go | 110 + engine/runconfig/hostconfig_windows.go | 96 + engine/runconfig/hostconfig_windows_test.go | 17 + engine/runconfig/opts/parse.go | 20 + engine/vendor.conf | 166 + .../code.cloudfoundry.org/clock/LICENSE | 201 + .../vendor/code.cloudfoundry.org/clock/NOTICE | 20 + .../code.cloudfoundry.org/clock/README.md | 5 + .../code.cloudfoundry.org/clock/clock.go | 53 + .../code.cloudfoundry.org/clock/package.go | 1 + .../code.cloudfoundry.org/clock/ticker.go | 20 + .../code.cloudfoundry.org/clock/timer.go | 25 + .../github.com/containerd/go-runc/LICENSE | 201 + .../github.com/containerd/go-runc/README.md | 25 + .../containerd/go-runc/command_linux.go | 56 + .../containerd/go-runc/command_other.go | 35 + .../github.com/containerd/go-runc/console.go | 165 + .../containerd/go-runc/container.go | 30 + .../github.com/containerd/go-runc/events.go | 100 + .../github.com/containerd/go-runc/io.go | 218 + 
.../github.com/containerd/go-runc/io_unix.go | 76 + .../containerd/go-runc/io_windows.go | 62 + .../github.com/containerd/go-runc/monitor.go | 76 + .../github.com/containerd/go-runc/runc.go | 715 + .../github.com/containerd/go-runc/utils.go | 107 + .../github.com/containerd/ttrpc/LICENSE | 201 + .../github.com/containerd/ttrpc/README.md | 62 + .../github.com/containerd/ttrpc/channel.go | 153 + .../github.com/containerd/ttrpc/client.go | 350 + .../github.com/containerd/ttrpc/codec.go | 42 + .../github.com/containerd/ttrpc/config.go | 52 + .../github.com/containerd/ttrpc/handshake.go | 50 + .../containerd/ttrpc/interceptor.go | 50 + .../github.com/containerd/ttrpc/metadata.go | 107 + .../github.com/containerd/ttrpc/server.go | 485 + .../github.com/containerd/ttrpc/services.go | 156 + .../github.com/containerd/ttrpc/types.go | 63 + .../containerd/ttrpc/unixcreds_linux.go | 108 + engine/vendor/github.com/golang/gddo/LICENSE | 27 + .../github.com/golang/gddo/README.markdown | 44 + .../github.com/golang/gddo/httputil/buster.go | 95 + .../golang/gddo/httputil/header/header.go | 298 + .../golang/gddo/httputil/httputil.go | 25 + .../golang/gddo/httputil/negotiate.go | 79 + .../golang/gddo/httputil/respbuf.go | 58 + .../github.com/golang/gddo/httputil/static.go | 265 + .../golang/gddo/httputil/transport.go | 87 + .../grpc-ecosystem/grpc-opentracing/LICENSE | 27 + .../grpc-ecosystem/grpc-opentracing/PATENTS | 23 + .../grpc-opentracing/README.rst | 25 + .../grpc-opentracing/go/otgrpc/README.md | 57 + .../grpc-opentracing/go/otgrpc/client.go | 239 + .../grpc-opentracing/go/otgrpc/errors.go | 69 + .../grpc-opentracing/go/otgrpc/options.go | 76 + .../grpc-opentracing/go/otgrpc/package.go | 5 + .../grpc-opentracing/go/otgrpc/server.go | 141 + .../grpc-opentracing/go/otgrpc/shared.go | 42 + .../grpc-opentracing/python/README.md | 4 + .../python/examples/protos/command_line.proto | 15 + .../python/examples/protos/store.proto | 37 + .../hashicorp/go-immutable-radix/LICENSE | 363 + 
.../hashicorp/go-immutable-radix/README.md | 41 + .../hashicorp/go-immutable-radix/edges.go | 21 + .../hashicorp/go-immutable-radix/iradix.go | 657 + .../hashicorp/go-immutable-radix/iter.go | 91 + .../hashicorp/go-immutable-radix/node.go | 352 + .../hashicorp/go-immutable-radix/raw_iter.go | 78 + .../vendor/github.com/moby/buildkit/LICENSE | 201 + .../vendor/github.com/moby/buildkit/README.md | 373 + .../api/services/control/control.pb.go | 5970 ++ .../api/services/control/control.proto | 149 + .../buildkit/api/services/control/generate.go | 3 + .../moby/buildkit/api/types/generate.go | 3 + .../moby/buildkit/api/types/worker.pb.go | 911 + .../moby/buildkit/api/types/worker.proto | 24 + .../buildkit/cache/contenthash/checksum.go | 909 + .../buildkit/cache/contenthash/checksum.pb.go | 839 + .../buildkit/cache/contenthash/checksum.proto | 30 + .../buildkit/cache/contenthash/filehash.go | 98 + .../cache/contenthash/filehash_unix.go | 47 + .../cache/contenthash/filehash_windows.go | 23 + .../buildkit/cache/contenthash/generate.go | 3 + .../moby/buildkit/cache/contenthash/path.go | 107 + .../moby/buildkit/cache/contenthash/tarsum.go | 60 + .../github.com/moby/buildkit/cache/manager.go | 884 + .../moby/buildkit/cache/metadata.go | 262 + .../moby/buildkit/cache/metadata/metadata.go | 394 + .../github.com/moby/buildkit/cache/refs.go | 467 + .../moby/buildkit/cache/remotecache/export.go | 142 + .../moby/buildkit/cache/remotecache/import.go | 277 + .../cache/remotecache/inline/inline.go | 106 + .../buildkit/cache/remotecache/local/local.go | 83 + .../cache/remotecache/registry/registry.go | 90 + .../cache/remotecache/v1/cachestorage.go | 296 + .../buildkit/cache/remotecache/v1/chains.go | 154 + .../moby/buildkit/cache/remotecache/v1/doc.go | 50 + .../buildkit/cache/remotecache/v1/parse.go | 110 + .../buildkit/cache/remotecache/v1/spec.go | 35 + .../buildkit/cache/remotecache/v1/utils.go | 322 + .../moby/buildkit/cache/util/fsutil.go | 139 + 
.../github.com/moby/buildkit/client/build.go | 105 + .../moby/buildkit/client/buildid/metadata.go | 29 + .../github.com/moby/buildkit/client/client.go | 169 + .../moby/buildkit/client/client_unix.go | 19 + .../moby/buildkit/client/client_windows.go | 24 + .../buildkit/client/connhelper/connhelper.go | 37 + .../moby/buildkit/client/diskusage.go | 84 + .../moby/buildkit/client/exporters.go | 9 + .../github.com/moby/buildkit/client/filter.go | 19 + .../github.com/moby/buildkit/client/graph.go | 46 + .../moby/buildkit/client/llb/exec.go | 654 + .../moby/buildkit/client/llb/fileop.go | 727 + .../client/llb/imagemetaresolver/resolver.go | 110 + .../moby/buildkit/client/llb/marshal.go | 112 + .../moby/buildkit/client/llb/meta.go | 233 + .../moby/buildkit/client/llb/resolver.go | 20 + .../moby/buildkit/client/llb/source.go | 429 + .../moby/buildkit/client/llb/state.go | 515 + .../moby/buildkit/client/ociindex/ociindex.go | 113 + .../github.com/moby/buildkit/client/prune.go | 83 + .../github.com/moby/buildkit/client/solve.go | 475 + .../moby/buildkit/client/workers.go | 70 + .../moby/buildkit/control/control.go | 449 + .../moby/buildkit/control/gateway/gateway.go | 145 + .../moby/buildkit/executor/executor.go | 39 + .../moby/buildkit/executor/oci/hosts.go | 78 + .../moby/buildkit/executor/oci/mounts.go | 117 + .../moby/buildkit/executor/oci/resolvconf.go | 122 + .../moby/buildkit/executor/oci/spec.go | 13 + .../moby/buildkit/executor/oci/spec_unix.go | 254 + .../moby/buildkit/executor/oci/user.go | 99 + .../executor/runcexecutor/executor.go | 337 + .../exporter/containerimage/exptypes/types.go | 16 + .../moby/buildkit/exporter/exporter.go | 22 + .../moby/buildkit/exporter/local/export.go | 173 + .../moby/buildkit/exporter/tar/export.go | 177 + .../frontend/dockerfile/builder/build.go | 601 + .../frontend/dockerfile/command/command.go | 46 + .../dockerfile/dockerfile2llb/convert.go | 1329 + .../dockerfile2llb/convert_norunmount.go | 16 + 
.../dockerfile2llb/convert_norunsecurity.go | 11 + .../dockerfile2llb/convert_nosecrets.go | 13 + .../dockerfile2llb/convert_nossh.go | 13 + .../dockerfile2llb/convert_runmount.go | 156 + .../dockerfile2llb/convert_runsecurity.go | 27 + .../dockerfile2llb/convert_secrets.go | 54 + .../dockerfile/dockerfile2llb/convert_ssh.go | 42 + .../dockerfile2llb/defaultshell_unix.go | 7 + .../dockerfile2llb/defaultshell_windows.go | 7 + .../dockerfile/dockerfile2llb/directives.go | 38 + .../dockerfile/dockerfile2llb/image.go | 79 + .../dockerfile/dockerfile2llb/platform.go | 58 + .../frontend/dockerfile/instructions/bflag.go | 200 + .../dockerfile/instructions/commands.go | 451 + .../instructions/commands_nosecrets.go | 7 + .../dockerfile/instructions/commands_nossh.go | 7 + .../instructions/commands_runmount.go | 263 + .../instructions/commands_runsecurity.go | 83 + .../instructions/commands_secrets.go | 7 + .../dockerfile/instructions/commands_ssh.go | 7 + .../dockerfile/instructions/errors_unix.go | 9 + .../dockerfile/instructions/errors_windows.go | 27 + .../frontend/dockerfile/instructions/parse.go | 650 + .../dockerfile/instructions/support.go | 19 + .../dockerfile/parser/line_parsers.go | 368 + .../frontend/dockerfile/parser/parser.go | 332 + .../dockerfile/parser/split_command.go | 118 + .../dockerfile/shell/equal_env_unix.go | 10 + .../dockerfile/shell/equal_env_windows.go | 10 + .../buildkit/frontend/dockerfile/shell/lex.go | 426 + .../moby/buildkit/frontend/frontend.go | 30 + .../frontend/gateway/client/client.go | 76 + .../frontend/gateway/client/result.go | 54 + .../frontend/gateway/forwarder/forward.go | 179 + .../frontend/gateway/forwarder/frontend.go | 38 + .../moby/buildkit/frontend/gateway/gateway.go | 685 + .../frontend/gateway/grpcclient/client.go | 501 + .../moby/buildkit/frontend/gateway/pb/caps.go | 95 + .../frontend/gateway/pb/gateway.pb.go | 5292 ++ .../frontend/gateway/pb/gateway.proto | 138 + .../buildkit/frontend/gateway/pb/generate.go | 3 + 
.../moby/buildkit/frontend/result.go | 23 + engine/vendor/github.com/moby/buildkit/go.mod | 77 + .../moby/buildkit/identity/randomid.go | 53 + .../moby/buildkit/session/auth/auth.go | 27 + .../moby/buildkit/session/auth/auth.pb.go | 728 + .../moby/buildkit/session/auth/auth.proto | 19 + .../moby/buildkit/session/auth/generate.go | 3 + .../buildkit/session/content/attachable.go | 132 + .../moby/buildkit/session/content/caller.go | 91 + .../moby/buildkit/session/context.go | 22 + .../buildkit/session/filesync/diffcopy.go | 117 + .../buildkit/session/filesync/filesync.go | 321 + .../buildkit/session/filesync/filesync.pb.go | 669 + .../buildkit/session/filesync/filesync.proto | 20 + .../buildkit/session/filesync/generate.go | 3 + .../github.com/moby/buildkit/session/grpc.go | 81 + .../moby/buildkit/session/grpchijack/dial.go | 162 + .../buildkit/session/grpchijack/hijack.go | 15 + .../moby/buildkit/session/manager.go | 220 + .../moby/buildkit/session/secrets/generate.go | 3 + .../moby/buildkit/session/secrets/secrets.go | 30 + .../buildkit/session/secrets/secrets.pb.go | 868 + .../buildkit/session/secrets/secrets.proto | 19 + .../moby/buildkit/session/session.go | 143 + .../moby/buildkit/session/sshforward/copy.go | 65 + .../buildkit/session/sshforward/generate.go | 3 + .../moby/buildkit/session/sshforward/ssh.go | 114 + .../buildkit/session/sshforward/ssh.pb.go | 900 + .../buildkit/session/sshforward/ssh.proto | 22 + .../moby/buildkit/session/upload/generate.go | 3 + .../moby/buildkit/session/upload/upload.go | 56 + .../moby/buildkit/session/upload/upload.pb.go | 506 + .../moby/buildkit/session/upload/upload.proto | 14 + .../snapshot/blobmapping/snapshotter.go | 151 + .../moby/buildkit/snapshot/localmounter.go | 74 + .../buildkit/snapshot/localmounter_unix.go | 29 + .../buildkit/snapshot/localmounter_windows.go | 26 + .../moby/buildkit/snapshot/snapshotter.go | 162 + .../solver/bboltcachestorage/storage.go | 459 + .../moby/buildkit/solver/cachekey.go | 66 + 
.../moby/buildkit/solver/cachemanager.go | 354 + .../moby/buildkit/solver/cachestorage.go | 51 + .../moby/buildkit/solver/combinedcache.go | 140 + .../github.com/moby/buildkit/solver/edge.go | 938 + .../moby/buildkit/solver/exporter.go | 208 + .../github.com/moby/buildkit/solver/index.go | 245 + .../buildkit/solver/internal/pipe/pipe.go | 199 + .../github.com/moby/buildkit/solver/jobs.go | 806 + .../moby/buildkit/solver/llbsolver/bridge.go | 237 + .../buildkit/solver/llbsolver/file/backend.go | 296 + .../solver/llbsolver/file/refmanager.go | 70 + .../buildkit/solver/llbsolver/file/unpack.go | 61 + .../solver/llbsolver/file/user_linux.go | 119 + .../solver/llbsolver/file/user_nolinux.go | 14 + .../buildkit/solver/llbsolver/ops/build.go | 136 + .../buildkit/solver/llbsolver/ops/exec.go | 872 + .../buildkit/solver/llbsolver/ops/file.go | 583 + .../solver/llbsolver/ops/fileoptypes/types.go | 28 + .../buildkit/solver/llbsolver/ops/source.go | 92 + .../moby/buildkit/solver/llbsolver/result.go | 74 + .../moby/buildkit/solver/llbsolver/solver.go | 393 + .../moby/buildkit/solver/llbsolver/vertex.go | 348 + .../buildkit/solver/memorycachestorage.go | 307 + .../moby/buildkit/solver/pb/attr.go | 25 + .../moby/buildkit/solver/pb/caps.go | 285 + .../moby/buildkit/solver/pb/const.go | 25 + .../moby/buildkit/solver/pb/generate.go | 3 + .../moby/buildkit/solver/pb/ops.pb.go | 9361 +++ .../moby/buildkit/solver/pb/ops.proto | 305 + .../moby/buildkit/solver/pb/platform.go | 41 + .../moby/buildkit/solver/progress.go | 109 + .../github.com/moby/buildkit/solver/result.go | 105 + .../moby/buildkit/solver/scheduler.go | 410 + .../github.com/moby/buildkit/solver/types.go | 168 + .../moby/buildkit/source/git/gitsource.go | 440 + .../buildkit/source/git/gitsource_unix.go | 35 + .../buildkit/source/git/gitsource_windows.go | 23 + .../moby/buildkit/source/gitidentifier.go | 70 + .../moby/buildkit/source/http/httpsource.go | 500 + .../moby/buildkit/source/http/transport.go | 60 + 
.../moby/buildkit/source/identifier.go | 275 + .../moby/buildkit/source/local/local.go | 279 + .../moby/buildkit/source/manager.go | 49 + .../moby/buildkit/util/apicaps/caps.go | 162 + .../moby/buildkit/util/apicaps/pb/caps.pb.go | 567 + .../moby/buildkit/util/apicaps/pb/caps.proto | 19 + .../moby/buildkit/util/apicaps/pb/generate.go | 3 + .../util/appdefaults/appdefaults_unix.go | 69 + .../util/appdefaults/appdefaults_windows.go | 23 + .../buildkit/util/binfmt_misc/386_binary.go | 8 + .../buildkit/util/binfmt_misc/386_check.go | 7 + .../util/binfmt_misc/386_check_386.go | 7 + .../buildkit/util/binfmt_misc/amd64_binary.go | 8 + .../buildkit/util/binfmt_misc/amd64_check.go | 7 + .../util/binfmt_misc/amd64_check_amd64.go | 7 + .../buildkit/util/binfmt_misc/arm64_binary.go | 8 + .../buildkit/util/binfmt_misc/arm64_check.go | 7 + .../util/binfmt_misc/arm64_check_arm64.go | 7 + .../buildkit/util/binfmt_misc/arm_binary.go | 8 + .../buildkit/util/binfmt_misc/arm_check.go | 7 + .../util/binfmt_misc/arm_check_arm.go | 7 + .../moby/buildkit/util/binfmt_misc/check.go | 42 + .../buildkit/util/binfmt_misc/check_unix.go | 14 + .../util/binfmt_misc/check_windows.go | 10 + .../moby/buildkit/util/binfmt_misc/detect.go | 103 + .../util/binfmt_misc/ppc64le_binary.go | 8 + .../util/binfmt_misc/ppc64le_check.go | 7 + .../util/binfmt_misc/ppc64le_check_ppc64le.go | 7 + .../util/binfmt_misc/riscv64_binary.go | 8 + .../util/binfmt_misc/riscv64_check.go | 7 + .../util/binfmt_misc/riscv64_check_riscv64.go | 7 + .../buildkit/util/binfmt_misc/s390x_binary.go | 8 + .../buildkit/util/binfmt_misc/s390x_check.go | 7 + .../util/binfmt_misc/s390x_check_s390x.go | 7 + .../moby/buildkit/util/cond/cond.go | 40 + .../moby/buildkit/util/contentutil/buffer.go | 156 + .../moby/buildkit/util/contentutil/copy.go | 81 + .../moby/buildkit/util/contentutil/fetcher.go | 73 + .../util/contentutil/multiprovider.go | 48 + .../moby/buildkit/util/contentutil/pusher.go | 58 + 
.../moby/buildkit/util/contentutil/refs.go | 98 + .../util/entitlements/entitlements.go | 60 + .../util/entitlements/security_linux.go | 67 + .../util/flightcontrol/flightcontrol.go | 339 + .../moby/buildkit/util/imageutil/config.go | 245 + .../moby/buildkit/util/imageutil/schema1.go | 87 + .../moby/buildkit/util/leaseutil/manager.go | 104 + .../moby/buildkit/util/network/host.go | 28 + .../moby/buildkit/util/network/network.go | 19 + .../moby/buildkit/util/network/none.go | 26 + .../moby/buildkit/util/progress/logs/logs.go | 53 + .../buildkit/util/progress/multireader.go | 77 + .../buildkit/util/progress/multiwriter.go | 105 + .../moby/buildkit/util/progress/progress.go | 256 + .../moby/buildkit/util/resolver/resolver.go | 45 + .../util/rootless/specconv/specconv_linux.go | 40 + .../moby/buildkit/util/system/path_unix.go | 14 + .../moby/buildkit/util/system/path_windows.go | 37 + .../buildkit/util/system/seccomp_linux.go | 29 + .../buildkit/util/system/seccomp_nolinux.go | 7 + .../buildkit/util/system/seccomp_noseccomp.go | 7 + .../moby/buildkit/util/throttle/throttle.go | 58 + .../moby/buildkit/util/tracing/multispan.go | 22 + .../moby/buildkit/util/tracing/tracing.go | 109 + .../moby/buildkit/worker/cacheresult.go | 100 + .../github.com/moby/buildkit/worker/filter.go | 33 + .../github.com/moby/buildkit/worker/result.go | 40 + .../github.com/moby/buildkit/worker/worker.go | 45 + .../moby/buildkit/worker/workercontroller.go | 77 + .../vendor/github.com/spf13/cobra/LICENSE.txt | 174 + .../vendor/github.com/spf13/cobra/README.md | 736 + engine/vendor/github.com/spf13/cobra/args.go | 89 + .../spf13/cobra/bash_completions.go | 584 + engine/vendor/github.com/spf13/cobra/cobra.go | 200 + .../vendor/github.com/spf13/cobra/command.go | 1517 + .../github.com/spf13/cobra/command_notwin.go | 5 + .../github.com/spf13/cobra/command_win.go | 20 + .../github.com/spf13/cobra/zsh_completions.go | 126 + engine/vendor/github.com/spf13/pflag/LICENSE | 28 + 
.../vendor/github.com/spf13/pflag/README.md | 296 + engine/vendor/github.com/spf13/pflag/bool.go | 94 + .../github.com/spf13/pflag/bool_slice.go | 147 + engine/vendor/github.com/spf13/pflag/bytes.go | 105 + engine/vendor/github.com/spf13/pflag/count.go | 96 + .../vendor/github.com/spf13/pflag/duration.go | 86 + .../github.com/spf13/pflag/duration_slice.go | 128 + engine/vendor/github.com/spf13/pflag/flag.go | 1223 + .../vendor/github.com/spf13/pflag/float32.go | 88 + .../vendor/github.com/spf13/pflag/float64.go | 84 + .../github.com/spf13/pflag/golangflag.go | 105 + engine/vendor/github.com/spf13/pflag/int.go | 84 + engine/vendor/github.com/spf13/pflag/int16.go | 88 + engine/vendor/github.com/spf13/pflag/int32.go | 88 + engine/vendor/github.com/spf13/pflag/int64.go | 84 + engine/vendor/github.com/spf13/pflag/int8.go | 88 + .../github.com/spf13/pflag/int_slice.go | 128 + engine/vendor/github.com/spf13/pflag/ip.go | 94 + .../vendor/github.com/spf13/pflag/ip_slice.go | 148 + .../vendor/github.com/spf13/pflag/ipmask.go | 122 + engine/vendor/github.com/spf13/pflag/ipnet.go | 98 + .../vendor/github.com/spf13/pflag/string.go | 80 + .../github.com/spf13/pflag/string_array.go | 103 + .../github.com/spf13/pflag/string_slice.go | 149 + engine/vendor/github.com/spf13/pflag/uint.go | 88 + .../vendor/github.com/spf13/pflag/uint16.go | 88 + .../vendor/github.com/spf13/pflag/uint32.go | 88 + .../vendor/github.com/spf13/pflag/uint64.go | 88 + engine/vendor/github.com/spf13/pflag/uint8.go | 88 + .../github.com/spf13/pflag/uint_slice.go | 126 + .../github.com/tonistiigi/fsutil/LICENSE | 22 + .../tonistiigi/fsutil/chtimes_linux.go | 20 + .../tonistiigi/fsutil/chtimes_nolinux.go | 20 + .../github.com/tonistiigi/fsutil/copy/copy.go | 408 + .../tonistiigi/fsutil/copy/copy_darwin.go | 84 + .../tonistiigi/fsutil/copy/copy_linux.go | 112 + .../tonistiigi/fsutil/copy/copy_nowindows.go | 28 + .../tonistiigi/fsutil/copy/copy_unix.go | 59 + .../tonistiigi/fsutil/copy/copy_windows.go | 48 + 
.../tonistiigi/fsutil/copy/hardlink.go | 27 + .../tonistiigi/fsutil/copy/hardlink_unix.go | 17 + .../fsutil/copy/hardlink_windows.go | 7 + .../tonistiigi/fsutil/copy/mkdir.go | 74 + .../tonistiigi/fsutil/copy/mkdir_unix.go | 32 + .../tonistiigi/fsutil/copy/mkdir_windows.go | 21 + .../github.com/tonistiigi/fsutil/diff.go | 45 + .../tonistiigi/fsutil/diff_containerd.go | 200 + .../fsutil/diff_containerd_linux.go | 37 + .../tonistiigi/fsutil/diskwriter.go | 352 + .../tonistiigi/fsutil/diskwriter_unix.go | 52 + .../tonistiigi/fsutil/diskwriter_windows.go | 18 + .../tonistiigi/fsutil/followlinks.go | 150 + .../vendor/github.com/tonistiigi/fsutil/fs.go | 118 + .../github.com/tonistiigi/fsutil/go.mod | 28 + .../github.com/tonistiigi/fsutil/hardlinks.go | 47 + .../github.com/tonistiigi/fsutil/readme.md | 45 + .../github.com/tonistiigi/fsutil/receive.go | 276 + .../github.com/tonistiigi/fsutil/send.go | 206 + .../github.com/tonistiigi/fsutil/stat.go | 64 + .../github.com/tonistiigi/fsutil/stat_unix.go | 71 + .../tonistiigi/fsutil/stat_windows.go | 16 + .../github.com/tonistiigi/fsutil/tarwriter.go | 72 + .../tonistiigi/fsutil/types/generate.go | 3 + .../tonistiigi/fsutil/types/stat.pb.go | 909 + .../tonistiigi/fsutil/types/stat.proto | 19 + .../tonistiigi/fsutil/types/wire.pb.go | 542 + .../tonistiigi/fsutil/types/wire.proto | 21 + .../github.com/tonistiigi/fsutil/validator.go | 92 + .../github.com/tonistiigi/fsutil/walker.go | 230 + engine/volume/drivers/adapter.go | 176 + engine/volume/drivers/extpoint.go | 235 + engine/volume/drivers/extpoint_test.go | 24 + engine/volume/drivers/proxy.go | 255 + engine/volume/drivers/proxy_test.go | 132 + engine/volume/local/local.go | 361 + engine/volume/local/local_test.go | 335 + engine/volume/local/local_unix.go | 120 + engine/volume/local/local_windows.go | 46 + engine/volume/mounts/lcow_parser.go | 34 + engine/volume/mounts/linux_parser.go | 423 + engine/volume/mounts/mounts.go | 181 + engine/volume/mounts/parser.go | 47 + 
engine/volume/mounts/parser_test.go | 530 + engine/volume/mounts/validate.go | 28 + engine/volume/mounts/validate_test.go | 73 + engine/volume/mounts/validate_unix_test.go | 8 + engine/volume/mounts/validate_windows_test.go | 6 + engine/volume/mounts/volume_copy.go | 23 + engine/volume/mounts/volume_unix.go | 18 + engine/volume/mounts/volume_windows.go | 8 + engine/volume/mounts/windows_parser.go | 456 + engine/volume/service/by.go | 89 + engine/volume/service/convert.go | 132 + engine/volume/service/db.go | 95 + engine/volume/service/db_test.go | 52 + engine/volume/service/default_driver.go | 21 + engine/volume/service/default_driver_stubs.go | 10 + engine/volume/service/errors.go | 111 + engine/volume/service/opts/opts.go | 89 + engine/volume/service/restore.go | 85 + engine/volume/service/restore_test.go | 58 + engine/volume/service/service.go | 263 + engine/volume/service/service_linux_test.go | 66 + engine/volume/service/service_test.go | 253 + engine/volume/service/store.go | 858 + engine/volume/service/store_test.go | 421 + engine/volume/service/store_unix.go | 9 + engine/volume/service/store_windows.go | 12 + engine/volume/testutils/testutils.go | 230 + engine/volume/volume.go | 69 + 4861 files changed, 907295 insertions(+) create mode 100644 CHANGELOG.md create mode 100644 CONTRIBUTING.md create mode 100644 Makefile create mode 100644 README.md create mode 100644 VERSION create mode 100644 cli/.dockerignore create mode 100644 cli/.mailmap create mode 100644 cli/AUTHORS create mode 100644 cli/CONTRIBUTING.md create mode 100644 cli/Jenkinsfile create mode 100644 cli/LICENSE create mode 100644 cli/MAINTAINERS create mode 100644 cli/Makefile create mode 100644 cli/NOTICE create mode 100644 cli/README.md create mode 100644 cli/TESTING.md create mode 100644 cli/VERSION create mode 100644 cli/appveyor.yml create mode 100644 cli/circle.yml create mode 100644 cli/cli-plugins/examples/helloworld/main.go create mode 100644 cli/cli-plugins/manager/candidate.go create 
mode 100644 cli/cli-plugins/manager/candidate_test.go create mode 100644 cli/cli-plugins/manager/cobra.go create mode 100644 cli/cli-plugins/manager/error.go create mode 100644 cli/cli-plugins/manager/error_test.go create mode 100644 cli/cli-plugins/manager/manager.go create mode 100644 cli/cli-plugins/manager/manager_test.go create mode 100644 cli/cli-plugins/manager/manager_unix.go create mode 100644 cli/cli-plugins/manager/manager_windows.go create mode 100644 cli/cli-plugins/manager/metadata.go create mode 100644 cli/cli-plugins/manager/plugin.go create mode 100644 cli/cli-plugins/manager/suffix_unix.go create mode 100644 cli/cli-plugins/manager/suffix_windows.go create mode 100644 cli/cli-plugins/plugin/plugin.go create mode 100644 cli/cli/cobra.go create mode 100644 cli/cli/cobra_test.go create mode 100644 cli/cli/command/builder/cmd.go create mode 100644 cli/cli/command/builder/prune.go create mode 100644 cli/cli/command/bundlefile/bundlefile.go create mode 100644 cli/cli/command/bundlefile/bundlefile_test.go create mode 100644 cli/cli/command/checkpoint/client_test.go create mode 100644 cli/cli/command/checkpoint/cmd.go create mode 100644 cli/cli/command/checkpoint/create.go create mode 100644 cli/cli/command/checkpoint/create_test.go create mode 100644 cli/cli/command/checkpoint/formatter.go create mode 100644 cli/cli/command/checkpoint/formatter_test.go create mode 100644 cli/cli/command/checkpoint/list.go create mode 100644 cli/cli/command/checkpoint/list_test.go create mode 100644 cli/cli/command/checkpoint/remove.go create mode 100644 cli/cli/command/checkpoint/remove_test.go create mode 100644 cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden create mode 100644 cli/cli/command/cli.go create mode 100644 cli/cli/command/cli_options.go create mode 100644 cli/cli/command/cli_options_test.go create mode 100644 cli/cli/command/cli_test.go create mode 100644 cli/cli/command/commands/commands.go create mode 100644 
cli/cli/command/config/client_test.go create mode 100644 cli/cli/command/config/cmd.go create mode 100644 cli/cli/command/config/create.go create mode 100644 cli/cli/command/config/create_test.go create mode 100644 cli/cli/command/config/formatter.go create mode 100644 cli/cli/command/config/formatter_test.go create mode 100644 cli/cli/command/config/inspect.go create mode 100644 cli/cli/command/config/inspect_test.go create mode 100644 cli/cli/command/config/ls.go create mode 100644 cli/cli/command/config/ls_test.go create mode 100644 cli/cli/command/config/remove.go create mode 100644 cli/cli/command/config/remove_test.go create mode 100644 cli/cli/command/config/testdata/config-create-with-name.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-pretty.simple.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-with-format.json-template.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-with-format.simple-template.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-without-format.multiple-configs-with-labels.golden create mode 100644 cli/cli/command/config/testdata/config-inspect-without-format.single-config.golden create mode 100644 cli/cli/command/config/testdata/config-list-sort.golden create mode 100644 cli/cli/command/config/testdata/config-list-with-config-format.golden create mode 100644 cli/cli/command/config/testdata/config-list-with-filter.golden create mode 100644 cli/cli/command/config/testdata/config-list-with-format.golden create mode 100644 cli/cli/command/config/testdata/config-list-with-quiet-option.golden create mode 100644 cli/cli/command/container/attach.go create mode 100644 cli/cli/command/container/attach_test.go create mode 100644 cli/cli/command/container/client_test.go create mode 100644 cli/cli/command/container/cmd.go create mode 100644 cli/cli/command/container/commit.go create mode 100644 cli/cli/command/container/cp.go create mode 100644 
cli/cli/command/container/cp_test.go create mode 100644 cli/cli/command/container/create.go create mode 100644 cli/cli/command/container/create_test.go create mode 100644 cli/cli/command/container/diff.go create mode 100644 cli/cli/command/container/exec.go create mode 100644 cli/cli/command/container/exec_test.go create mode 100644 cli/cli/command/container/export.go create mode 100644 cli/cli/command/container/export_test.go create mode 100644 cli/cli/command/container/formatter_diff.go create mode 100644 cli/cli/command/container/formatter_diff_test.go create mode 100644 cli/cli/command/container/formatter_stats.go create mode 100644 cli/cli/command/container/formatter_stats_test.go create mode 100644 cli/cli/command/container/hijack.go create mode 100644 cli/cli/command/container/inspect.go create mode 100644 cli/cli/command/container/kill.go create mode 100644 cli/cli/command/container/list.go create mode 100644 cli/cli/command/container/list_test.go create mode 100644 cli/cli/command/container/logs.go create mode 100644 cli/cli/command/container/logs_test.go create mode 100644 cli/cli/command/container/opts.go create mode 100644 cli/cli/command/container/opts_test.go create mode 100644 cli/cli/command/container/pause.go create mode 100644 cli/cli/command/container/port.go create mode 100644 cli/cli/command/container/prune.go create mode 100644 cli/cli/command/container/ps_test.go create mode 100644 cli/cli/command/container/rename.go create mode 100644 cli/cli/command/container/restart.go create mode 100644 cli/cli/command/container/rm.go create mode 100644 cli/cli/command/container/run.go create mode 100644 cli/cli/command/container/run_test.go create mode 100644 cli/cli/command/container/start.go create mode 100644 cli/cli/command/container/stats.go create mode 100644 cli/cli/command/container/stats_helpers.go create mode 100644 cli/cli/command/container/stats_helpers_test.go create mode 100644 cli/cli/command/container/stats_unit_test.go create mode 100644 
cli/cli/command/container/stop.go create mode 100644 cli/cli/command/container/testdata/container-create-localhost-dns-ipv6.golden create mode 100644 cli/cli/command/container/testdata/container-create-localhost-dns.golden create mode 100644 cli/cli/command/container/testdata/container-create-oom-kill-true-without-memory-limit.golden create mode 100644 cli/cli/command/container/testdata/container-create-oom-kill-without-memory-limit.golden create mode 100644 cli/cli/command/container/testdata/container-list-format-name-name.golden create mode 100644 cli/cli/command/container/testdata/container-list-format-with-arg.golden create mode 100644 cli/cli/command/container/testdata/container-list-with-config-format.golden create mode 100644 cli/cli/command/container/testdata/container-list-with-format.golden create mode 100644 cli/cli/command/container/testdata/container-list-without-format-no-trunc.golden create mode 100644 cli/cli/command/container/testdata/container-list-without-format.golden create mode 100755 cli/cli/command/container/testdata/utf16.env create mode 100755 cli/cli/command/container/testdata/utf16be.env create mode 100755 cli/cli/command/container/testdata/utf8.env create mode 100644 cli/cli/command/container/testdata/valid.env create mode 100644 cli/cli/command/container/testdata/valid.label create mode 100644 cli/cli/command/container/top.go create mode 100644 cli/cli/command/container/tty.go create mode 100644 cli/cli/command/container/tty_test.go create mode 100644 cli/cli/command/container/unpause.go create mode 100644 cli/cli/command/container/update.go create mode 100644 cli/cli/command/container/utils.go create mode 100644 cli/cli/command/container/utils_test.go create mode 100644 cli/cli/command/container/wait.go create mode 100644 cli/cli/command/context.go create mode 100644 cli/cli/command/context/cmd.go create mode 100644 cli/cli/command/context/create.go create mode 100644 cli/cli/command/context/create_test.go create mode 100644 
cli/cli/command/context/export-import_test.go create mode 100644 cli/cli/command/context/export.go create mode 100644 cli/cli/command/context/import.go create mode 100644 cli/cli/command/context/inspect.go create mode 100644 cli/cli/command/context/inspect_test.go create mode 100644 cli/cli/command/context/list.go create mode 100644 cli/cli/command/context/list_test.go create mode 100644 cli/cli/command/context/options.go create mode 100644 cli/cli/command/context/remove.go create mode 100644 cli/cli/command/context/remove_test.go create mode 100644 cli/cli/command/context/testdata/inspect.golden create mode 100644 cli/cli/command/context/testdata/list.golden create mode 100644 cli/cli/command/context/testdata/quiet-list.golden create mode 100644 cli/cli/command/context/testdata/test-kubeconfig create mode 100644 cli/cli/command/context/update.go create mode 100644 cli/cli/command/context/update_test.go create mode 100644 cli/cli/command/context/use.go create mode 100644 cli/cli/command/context/use_test.go create mode 100644 cli/cli/command/defaultcontextstore.go create mode 100644 cli/cli/command/defaultcontextstore_test.go create mode 100644 cli/cli/command/engine/activate.go create mode 100644 cli/cli/command/engine/activate_test.go create mode 100644 cli/cli/command/engine/activate_unix.go create mode 100644 cli/cli/command/engine/activate_windows.go create mode 100644 cli/cli/command/engine/auth.go create mode 100644 cli/cli/command/engine/check.go create mode 100644 cli/cli/command/engine/check_test.go create mode 100644 cli/cli/command/engine/client_test.go create mode 100644 cli/cli/command/engine/cmd.go create mode 100644 cli/cli/command/engine/cmd_test.go create mode 100644 cli/cli/command/engine/init.go create mode 100644 cli/cli/command/engine/licenses.go create mode 100644 cli/cli/command/engine/licenses_test.go create mode 100644 cli/cli/command/engine/testdata/check-all.golden create mode 100644 
cli/cli/command/engine/testdata/check-no-downgrades.golden create mode 100644 cli/cli/command/engine/testdata/check-no-prerelease.golden create mode 100644 cli/cli/command/engine/testdata/check-patches-only.golden create mode 100644 cli/cli/command/engine/testdata/expired-hub-license-display-only.golden create mode 100644 cli/cli/command/engine/testdata/expired-license-display-only.golden create mode 100644 cli/cli/command/engine/update.go create mode 100644 cli/cli/command/engine/update_test.go create mode 100644 cli/cli/command/engine/updates.go create mode 100644 cli/cli/command/engine/updates_test.go create mode 100644 cli/cli/command/events_utils.go create mode 100644 cli/cli/command/formatter/buildcache.go create mode 100644 cli/cli/command/formatter/container.go create mode 100644 cli/cli/command/formatter/container_test.go create mode 100644 cli/cli/command/formatter/context.go create mode 100644 cli/cli/command/formatter/custom.go create mode 100644 cli/cli/command/formatter/custom_test.go create mode 100644 cli/cli/command/formatter/disk_usage.go create mode 100644 cli/cli/command/formatter/disk_usage_test.go create mode 100644 cli/cli/command/formatter/displayutils.go create mode 100644 cli/cli/command/formatter/displayutils_test.go create mode 100644 cli/cli/command/formatter/formatter.go create mode 100644 cli/cli/command/formatter/image.go create mode 100644 cli/cli/command/formatter/image_test.go create mode 100644 cli/cli/command/formatter/reflect.go create mode 100644 cli/cli/command/formatter/reflect_test.go create mode 100644 cli/cli/command/formatter/testdata/container-context-write-special-headers.golden create mode 100644 cli/cli/command/formatter/testdata/disk-usage-context-write-custom.golden create mode 100644 cli/cli/command/formatter/testdata/disk-usage-raw-format.golden create mode 100644 cli/cli/command/formatter/volume.go create mode 100644 cli/cli/command/formatter/volume_test.go create mode 100644 
cli/cli/command/idresolver/client_test.go create mode 100644 cli/cli/command/idresolver/idresolver.go create mode 100644 cli/cli/command/idresolver/idresolver_test.go create mode 100644 cli/cli/command/image/build.go create mode 100644 cli/cli/command/image/build/context.go create mode 100644 cli/cli/command/image/build/context_test.go create mode 100644 cli/cli/command/image/build/context_unix.go create mode 100644 cli/cli/command/image/build/context_windows.go create mode 100644 cli/cli/command/image/build/dockerignore.go create mode 100644 cli/cli/command/image/build_buildkit.go create mode 100644 cli/cli/command/image/build_session.go create mode 100644 cli/cli/command/image/build_test.go create mode 100644 cli/cli/command/image/client_test.go create mode 100644 cli/cli/command/image/cmd.go create mode 100644 cli/cli/command/image/formatter_history.go create mode 100644 cli/cli/command/image/formatter_history_test.go create mode 100644 cli/cli/command/image/history.go create mode 100644 cli/cli/command/image/history_test.go create mode 100644 cli/cli/command/image/import.go create mode 100644 cli/cli/command/image/import_test.go create mode 100644 cli/cli/command/image/inspect.go create mode 100644 cli/cli/command/image/inspect_test.go create mode 100644 cli/cli/command/image/list.go create mode 100644 cli/cli/command/image/list_test.go create mode 100644 cli/cli/command/image/load.go create mode 100644 cli/cli/command/image/load_test.go create mode 100644 cli/cli/command/image/prune.go create mode 100644 cli/cli/command/image/prune_test.go create mode 100644 cli/cli/command/image/pull.go create mode 100644 cli/cli/command/image/pull_test.go create mode 100644 cli/cli/command/image/push.go create mode 100644 cli/cli/command/image/push_test.go create mode 100644 cli/cli/command/image/remove.go create mode 100644 cli/cli/command/image/remove_test.go create mode 100644 cli/cli/command/image/save.go create mode 100644 cli/cli/command/image/save_test.go create mode 
100644 cli/cli/command/image/tag.go create mode 100644 cli/cli/command/image/tag_test.go create mode 100644 cli/cli/command/image/testdata/Dockerfile.test create mode 100644 cli/cli/command/image/testdata/gittar.test create mode 100644 cli/cli/command/image/testdata/history-command-success.non-human.golden create mode 100644 cli/cli/command/image/testdata/history-command-success.quiet-no-trunc.golden create mode 100644 cli/cli/command/image/testdata/history-command-success.quiet.golden create mode 100644 cli/cli/command/image/testdata/history-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/import-command-success.input.txt create mode 100644 cli/cli/command/image/testdata/inspect-command-success.format.golden create mode 100644 cli/cli/command/image/testdata/inspect-command-success.simple-many.golden create mode 100644 cli/cli/command/image/testdata/inspect-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.filters.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.format.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.match-name.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.quiet-format.golden create mode 100644 cli/cli/command/image/testdata/list-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/load-command-success.input-file.golden create mode 100644 cli/cli/command/image/testdata/load-command-success.input.txt create mode 100644 cli/cli/command/image/testdata/load-command-success.json.golden create mode 100644 cli/cli/command/image/testdata/load-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/prune-command-success.all.golden create mode 100644 cli/cli/command/image/testdata/prune-command-success.force-deleted.golden create mode 100644 cli/cli/command/image/testdata/prune-command-success.force-untagged.golden create mode 100644 
cli/cli/command/image/testdata/prune-command-success.label-filter.golden create mode 100644 cli/cli/command/image/testdata/pull-command-success.simple-no-tag.golden create mode 100644 cli/cli/command/image/testdata/pull-command-success.simple-quiet.golden create mode 100644 cli/cli/command/image/testdata/pull-command-success.simple.golden create mode 100644 cli/cli/command/image/testdata/remove-command-success.Image Deleted and Untagged.golden create mode 100644 cli/cli/command/image/testdata/remove-command-success.Image Deleted.golden create mode 100644 cli/cli/command/image/testdata/remove-command-success.Image Untagged.golden create mode 100644 cli/cli/command/image/testdata/remove-command-success.Image not found with force option.golden create mode 100644 cli/cli/command/image/testdata/tar.test create mode 100644 cli/cli/command/image/trust.go create mode 100644 cli/cli/command/image/trust_test.go create mode 100644 cli/cli/command/inspect/inspector.go create mode 100644 cli/cli/command/inspect/inspector_test.go create mode 100644 cli/cli/command/manifest/annotate.go create mode 100644 cli/cli/command/manifest/annotate_test.go create mode 100644 cli/cli/command/manifest/client_test.go create mode 100644 cli/cli/command/manifest/cmd.go create mode 100644 cli/cli/command/manifest/create_list.go create mode 100644 cli/cli/command/manifest/create_test.go create mode 100644 cli/cli/command/manifest/inspect.go create mode 100644 cli/cli/command/manifest/inspect_test.go create mode 100644 cli/cli/command/manifest/push.go create mode 100644 cli/cli/command/manifest/push_test.go create mode 100644 cli/cli/command/manifest/testdata/inspect-annotate.golden create mode 100644 cli/cli/command/manifest/testdata/inspect-manifest-list.golden create mode 100644 cli/cli/command/manifest/testdata/inspect-manifest.golden create mode 100644 cli/cli/command/manifest/util.go create mode 100644 cli/cli/command/network/client_test.go create mode 100644 cli/cli/command/network/cmd.go 
create mode 100644 cli/cli/command/network/connect.go create mode 100644 cli/cli/command/network/connect_test.go create mode 100644 cli/cli/command/network/create.go create mode 100644 cli/cli/command/network/create_test.go create mode 100644 cli/cli/command/network/disconnect.go create mode 100644 cli/cli/command/network/disconnect_test.go create mode 100644 cli/cli/command/network/formatter.go create mode 100644 cli/cli/command/network/formatter_test.go create mode 100644 cli/cli/command/network/inspect.go create mode 100644 cli/cli/command/network/list.go create mode 100644 cli/cli/command/network/list_test.go create mode 100644 cli/cli/command/network/prune.go create mode 100644 cli/cli/command/network/remove.go create mode 100644 cli/cli/command/network/testdata/network-list-sort.golden create mode 100644 cli/cli/command/network/testdata/network-list.golden create mode 100644 cli/cli/command/node/client_test.go create mode 100644 cli/cli/command/node/cmd.go create mode 100644 cli/cli/command/node/demote.go create mode 100644 cli/cli/command/node/demote_test.go create mode 100644 cli/cli/command/node/formatter.go create mode 100644 cli/cli/command/node/formatter_test.go create mode 100644 cli/cli/command/node/inspect.go create mode 100644 cli/cli/command/node/inspect_test.go create mode 100644 cli/cli/command/node/list.go create mode 100644 cli/cli/command/node/list_test.go create mode 100644 cli/cli/command/node/opts.go create mode 100644 cli/cli/command/node/promote.go create mode 100644 cli/cli/command/node/promote_test.go create mode 100644 cli/cli/command/node/ps.go create mode 100644 cli/cli/command/node/ps_test.go create mode 100644 cli/cli/command/node/remove.go create mode 100644 cli/cli/command/node/remove_test.go create mode 100644 cli/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden create mode 100644 cli/cli/command/node/testdata/node-inspect-pretty.manager.golden create mode 100644 
cli/cli/command/node/testdata/node-inspect-pretty.simple.golden create mode 100644 cli/cli/command/node/testdata/node-list-format-flag.golden create mode 100644 cli/cli/command/node/testdata/node-list-format-from-config.golden create mode 100644 cli/cli/command/node/testdata/node-list-sort.golden create mode 100644 cli/cli/command/node/testdata/node-ps.simple.golden create mode 100644 cli/cli/command/node/testdata/node-ps.with-errors.golden create mode 100644 cli/cli/command/node/update.go create mode 100644 cli/cli/command/node/update_test.go create mode 100644 cli/cli/command/orchestrator.go create mode 100644 cli/cli/command/orchestrator_test.go create mode 100644 cli/cli/command/plugin/client_test.go create mode 100644 cli/cli/command/plugin/cmd.go create mode 100644 cli/cli/command/plugin/create.go create mode 100644 cli/cli/command/plugin/create_test.go create mode 100644 cli/cli/command/plugin/disable.go create mode 100644 cli/cli/command/plugin/disable_test.go create mode 100644 cli/cli/command/plugin/enable.go create mode 100644 cli/cli/command/plugin/enable_test.go create mode 100644 cli/cli/command/plugin/formatter.go create mode 100644 cli/cli/command/plugin/formatter_test.go create mode 100644 cli/cli/command/plugin/inspect.go create mode 100644 cli/cli/command/plugin/inspect_test.go create mode 100644 cli/cli/command/plugin/install.go create mode 100644 cli/cli/command/plugin/install_test.go create mode 100644 cli/cli/command/plugin/list.go create mode 100644 cli/cli/command/plugin/list_test.go create mode 100644 cli/cli/command/plugin/push.go create mode 100644 cli/cli/command/plugin/remove.go create mode 100644 cli/cli/command/plugin/remove_test.go create mode 100644 cli/cli/command/plugin/set.go create mode 100644 cli/cli/command/plugin/testdata/plugin-inspect-multiple-with-format.golden create mode 100644 cli/cli/command/plugin/testdata/plugin-inspect-single-with-format.golden create mode 100644 
cli/cli/command/plugin/testdata/plugin-inspect-single-without-format.golden create mode 100644 cli/cli/command/plugin/testdata/plugin-list-sort.golden create mode 100644 cli/cli/command/plugin/testdata/plugin-list-with-format.golden create mode 100644 cli/cli/command/plugin/testdata/plugin-list-with-no-trunc-option.golden create mode 100644 cli/cli/command/plugin/testdata/plugin-list-with-quiet-option.golden create mode 100644 cli/cli/command/plugin/testdata/plugin-list-without-format.golden create mode 100644 cli/cli/command/plugin/upgrade.go create mode 100644 cli/cli/command/registry.go create mode 100644 cli/cli/command/registry/formatter_search.go create mode 100644 cli/cli/command/registry/formatter_search_test.go create mode 100644 cli/cli/command/registry/login.go create mode 100644 cli/cli/command/registry/login_test.go create mode 100644 cli/cli/command/registry/logout.go create mode 100644 cli/cli/command/registry/search.go create mode 100644 cli/cli/command/registry/testdata/search-context-write-stars-table.golden create mode 100644 cli/cli/command/registry/testdata/search-context-write-table.golden create mode 100644 cli/cli/command/registry_test.go create mode 100644 cli/cli/command/secret/client_test.go create mode 100644 cli/cli/command/secret/cmd.go create mode 100644 cli/cli/command/secret/create.go create mode 100644 cli/cli/command/secret/create_test.go create mode 100644 cli/cli/command/secret/formatter.go create mode 100644 cli/cli/command/secret/formatter_test.go create mode 100644 cli/cli/command/secret/inspect.go create mode 100644 cli/cli/command/secret/inspect_test.go create mode 100644 cli/cli/command/secret/ls.go create mode 100644 cli/cli/command/secret/ls_test.go create mode 100644 cli/cli/command/secret/remove.go create mode 100644 cli/cli/command/secret/remove_test.go create mode 100644 cli/cli/command/secret/testdata/secret-create-with-name.golden create mode 100644 
cli/cli/command/secret/testdata/secret-inspect-pretty.simple.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden create mode 100644 cli/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-sort.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-with-config-format.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-with-filter.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-with-format.golden create mode 100644 cli/cli/command/secret/testdata/secret-list-with-quiet-option.golden create mode 100644 cli/cli/command/service/client_test.go create mode 100644 cli/cli/command/service/cmd.go create mode 100644 cli/cli/command/service/create.go create mode 100644 cli/cli/command/service/create_test.go create mode 100644 cli/cli/command/service/formatter.go create mode 100644 cli/cli/command/service/formatter_test.go create mode 100644 cli/cli/command/service/generic_resource_opts.go create mode 100644 cli/cli/command/service/generic_resource_opts_test.go create mode 100644 cli/cli/command/service/helpers.go create mode 100644 cli/cli/command/service/inspect.go create mode 100644 cli/cli/command/service/inspect_test.go create mode 100644 cli/cli/command/service/list.go create mode 100644 cli/cli/command/service/list_test.go create mode 100644 cli/cli/command/service/logs.go create mode 100644 cli/cli/command/service/opts.go create mode 100644 cli/cli/command/service/opts_test.go create mode 100644 cli/cli/command/service/parse.go create mode 100644 cli/cli/command/service/progress/progress.go create mode 100644 cli/cli/command/service/progress/progress_test.go 
create mode 100644 cli/cli/command/service/ps.go create mode 100644 cli/cli/command/service/ps_test.go create mode 100644 cli/cli/command/service/remove.go create mode 100644 cli/cli/command/service/rollback.go create mode 100644 cli/cli/command/service/rollback_test.go create mode 100644 cli/cli/command/service/scale.go create mode 100644 cli/cli/command/service/testdata/service-context-write-raw.golden create mode 100644 cli/cli/command/service/testdata/service-list-sort.golden create mode 100644 cli/cli/command/service/trust.go create mode 100644 cli/cli/command/service/update.go create mode 100644 cli/cli/command/service/update_test.go create mode 100644 cli/cli/command/stack/client_test.go create mode 100644 cli/cli/command/stack/cmd.go create mode 100644 cli/cli/command/stack/common.go create mode 100644 cli/cli/command/stack/deploy.go create mode 100644 cli/cli/command/stack/deploy_test.go create mode 100644 cli/cli/command/stack/formatter/formatter.go create mode 100644 cli/cli/command/stack/formatter/formatter_test.go create mode 100644 cli/cli/command/stack/kubernetes/cli.go create mode 100644 cli/cli/command/stack/kubernetes/client.go create mode 100644 cli/cli/command/stack/kubernetes/conversion.go create mode 100644 cli/cli/command/stack/kubernetes/conversion_test.go create mode 100644 cli/cli/command/stack/kubernetes/convert.go create mode 100644 cli/cli/command/stack/kubernetes/convert_test.go create mode 100644 cli/cli/command/stack/kubernetes/deploy.go create mode 100644 cli/cli/command/stack/kubernetes/deploy_test.go create mode 100644 cli/cli/command/stack/kubernetes/list.go create mode 100644 cli/cli/command/stack/kubernetes/ps.go create mode 100644 cli/cli/command/stack/kubernetes/remove.go create mode 100644 cli/cli/command/stack/kubernetes/services.go create mode 100644 cli/cli/command/stack/kubernetes/services_test.go create mode 100644 cli/cli/command/stack/kubernetes/stack.go create mode 100644 
cli/cli/command/stack/kubernetes/stackclient.go create mode 100644 cli/cli/command/stack/kubernetes/stackclient_test.go create mode 100644 cli/cli/command/stack/kubernetes/testdata/compose-with-expose.yml create mode 100644 cli/cli/command/stack/kubernetes/testdata/compose-with-pull-policy.yml create mode 100644 cli/cli/command/stack/kubernetes/testdata/compose-with-pull-secret.yml create mode 100644 cli/cli/command/stack/kubernetes/testdata/config create mode 100644 cli/cli/command/stack/kubernetes/testdata/secret create mode 100644 cli/cli/command/stack/kubernetes/testdata/warnings.golden create mode 100644 cli/cli/command/stack/kubernetes/warnings.go create mode 100644 cli/cli/command/stack/kubernetes/warnings_test.go create mode 100644 cli/cli/command/stack/kubernetes/watcher.go create mode 100644 cli/cli/command/stack/kubernetes/watcher_test.go create mode 100644 cli/cli/command/stack/list.go create mode 100644 cli/cli/command/stack/list_test.go create mode 100644 cli/cli/command/stack/loader/loader.go create mode 100644 cli/cli/command/stack/loader/loader_test.go create mode 100644 cli/cli/command/stack/options/opts.go create mode 100644 cli/cli/command/stack/ps.go create mode 100644 cli/cli/command/stack/ps_test.go create mode 100644 cli/cli/command/stack/remove.go create mode 100644 cli/cli/command/stack/remove_test.go create mode 100644 cli/cli/command/stack/services.go create mode 100644 cli/cli/command/stack/services_test.go create mode 100644 cli/cli/command/stack/swarm/client_test.go create mode 100644 cli/cli/command/stack/swarm/common.go create mode 100644 cli/cli/command/stack/swarm/deploy.go create mode 100644 cli/cli/command/stack/swarm/deploy_bundlefile.go create mode 100644 cli/cli/command/stack/swarm/deploy_bundlefile_test.go create mode 100644 cli/cli/command/stack/swarm/deploy_composefile.go create mode 100644 cli/cli/command/stack/swarm/deploy_composefile_test.go create mode 100644 cli/cli/command/stack/swarm/deploy_test.go create mode 
100644 cli/cli/command/stack/swarm/list.go create mode 100644 cli/cli/command/stack/swarm/ps.go create mode 100644 cli/cli/command/stack/swarm/remove.go create mode 100644 cli/cli/command/stack/swarm/services.go create mode 100644 cli/cli/command/stack/swarm/testdata/bundlefile_with_two_services.dab create mode 100644 cli/cli/command/stack/testdata/stack-list-sort-natural.golden create mode 100644 cli/cli/command/stack/testdata/stack-list-sort.golden create mode 100644 cli/cli/command/stack/testdata/stack-list-with-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-list-without-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-config-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-no-resolve-option.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-no-trunc-option.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-with-quiet-option.golden create mode 100644 cli/cli/command/stack/testdata/stack-ps-without-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-services-with-config-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-services-with-format.golden create mode 100644 cli/cli/command/stack/testdata/stack-services-with-quiet-option.golden create mode 100644 cli/cli/command/stack/testdata/stack-services-without-format.golden create mode 100644 cli/cli/command/streams.go create mode 100644 cli/cli/command/swarm/ca.go create mode 100644 cli/cli/command/swarm/ca_test.go create mode 100644 cli/cli/command/swarm/client_test.go create mode 100644 cli/cli/command/swarm/cmd.go create mode 100644 cli/cli/command/swarm/init.go create mode 100644 cli/cli/command/swarm/init_test.go create mode 100644 cli/cli/command/swarm/join.go create mode 100644 cli/cli/command/swarm/join_test.go create mode 100644 cli/cli/command/swarm/join_token.go create mode 
100644 cli/cli/command/swarm/join_token_test.go create mode 100644 cli/cli/command/swarm/leave.go create mode 100644 cli/cli/command/swarm/leave_test.go create mode 100644 cli/cli/command/swarm/opts.go create mode 100644 cli/cli/command/swarm/opts_test.go create mode 100644 cli/cli/command/swarm/progress/root_rotation.go create mode 100644 cli/cli/command/swarm/testdata/init-init-autolock.golden create mode 100644 cli/cli/command/swarm/testdata/init-init.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-manager-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-manager-rotate.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-manager.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-worker-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/jointoken-worker.golden create mode 100644 cli/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden create mode 100644 cli/cli/command/swarm/testdata/unlockkeys-unlock-key.golden create mode 100644 cli/cli/command/swarm/testdata/update-all-flags-quiet.golden create mode 100644 cli/cli/command/swarm/testdata/update-autolock-unlock-key.golden create mode 100644 cli/cli/command/swarm/testdata/update-noargs.golden create mode 100644 cli/cli/command/swarm/unlock.go create mode 100644 cli/cli/command/swarm/unlock_key.go create mode 100644 cli/cli/command/swarm/unlock_key_test.go create mode 100644 cli/cli/command/swarm/unlock_test.go create mode 100644 cli/cli/command/swarm/update.go create mode 100644 cli/cli/command/swarm/update_test.go create mode 100644 cli/cli/command/system/client_test.go create mode 100644 cli/cli/command/system/cmd.go create mode 100644 cli/cli/command/system/df.go create mode 100644 cli/cli/command/system/dial_stdio.go create mode 100644 
cli/cli/command/system/events.go create mode 100644 cli/cli/command/system/info.go create mode 100644 cli/cli/command/system/info_test.go create mode 100644 cli/cli/command/system/inspect.go create mode 100644 cli/cli/command/system/prune.go create mode 100644 cli/cli/command/system/prune_test.go create mode 100644 cli/cli/command/system/testdata/docker-client-version.golden create mode 100644 cli/cli/command/system/testdata/docker-info-badsec.golden create mode 100644 cli/cli/command/system/testdata/docker-info-badsec.json.golden create mode 100644 cli/cli/command/system/testdata/docker-info-daemon-warnings.json.golden create mode 100644 cli/cli/command/system/testdata/docker-info-errors.golden create mode 100644 cli/cli/command/system/testdata/docker-info-errors.json.golden create mode 100644 cli/cli/command/system/testdata/docker-info-legacy-warnings.json.golden create mode 100644 cli/cli/command/system/testdata/docker-info-no-swarm.golden create mode 100644 cli/cli/command/system/testdata/docker-info-no-swarm.json.golden create mode 100644 cli/cli/command/system/testdata/docker-info-plugins-warnings.golden create mode 100644 cli/cli/command/system/testdata/docker-info-plugins.golden create mode 100644 cli/cli/command/system/testdata/docker-info-plugins.json.golden create mode 100644 cli/cli/command/system/testdata/docker-info-warnings.golden create mode 100644 cli/cli/command/system/testdata/docker-info-with-swarm.golden create mode 100644 cli/cli/command/system/testdata/docker-info-with-swarm.json.golden create mode 100644 cli/cli/command/system/version.go create mode 100644 cli/cli/command/system/version_test.go create mode 100644 cli/cli/command/task/client_test.go create mode 100644 cli/cli/command/task/formatter.go create mode 100644 cli/cli/command/task/formatter_test.go create mode 100644 cli/cli/command/task/print.go create mode 100644 cli/cli/command/task/print_test.go create mode 100644 
cli/cli/command/task/testdata/task-context-write-table-custom.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-global-service.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-indentation.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-no-trunc-option.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-quiet-option.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-replicated-service.golden create mode 100644 cli/cli/command/task/testdata/task-print-with-resolution.golden create mode 100644 cli/cli/command/testdata/ca.pem create mode 100644 cli/cli/command/trust.go create mode 100644 cli/cli/command/trust/cmd.go create mode 100644 cli/cli/command/trust/common.go create mode 100644 cli/cli/command/trust/common_test.go create mode 100644 cli/cli/command/trust/formatter.go create mode 100644 cli/cli/command/trust/formatter_test.go create mode 100644 cli/cli/command/trust/helpers.go create mode 100644 cli/cli/command/trust/helpers_test.go create mode 100644 cli/cli/command/trust/inspect.go create mode 100644 cli/cli/command/trust/inspect_pretty.go create mode 100644 cli/cli/command/trust/inspect_pretty_test.go create mode 100644 cli/cli/command/trust/inspect_test.go create mode 100644 cli/cli/command/trust/key.go create mode 100644 cli/cli/command/trust/key_generate.go create mode 100644 cli/cli/command/trust/key_generate_test.go create mode 100644 cli/cli/command/trust/key_load.go create mode 100644 cli/cli/command/trust/key_load_test.go create mode 100644 cli/cli/command/trust/revoke.go create mode 100644 cli/cli/command/trust/revoke_test.go create mode 100644 cli/cli/command/trust/sign.go create mode 100644 cli/cli/command/trust/sign_test.go create mode 100644 cli/cli/command/trust/signer.go create mode 100644 cli/cli/command/trust/signer_add.go create mode 100644 cli/cli/command/trust/signer_add_test.go create mode 100644 
cli/cli/command/trust/signer_remove.go create mode 100644 cli/cli/command/trust/signer_remove_test.go create mode 100644 cli/cli/command/trust/testdata/trust-inspect-empty-repo.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-full-repo-no-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-full-repo-with-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-multiple-repos-with-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-one-tag-no-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-no-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-with-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-pretty-one-tag-no-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-pretty-unsigned-tag-with-signers.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-uninitialized.golden create mode 100644 cli/cli/command/trust/testdata/trust-inspect-unsigned-tag-with-signers.golden create mode 100644 cli/cli/command/utils.go create mode 100644 cli/cli/command/utils_test.go create mode 100644 cli/cli/command/volume/client_test.go create mode 100644 cli/cli/command/volume/cmd.go create mode 100644 cli/cli/command/volume/create.go create mode 100644 cli/cli/command/volume/create_test.go create mode 100644 cli/cli/command/volume/inspect.go create mode 100644 cli/cli/command/volume/inspect_test.go create mode 100644 cli/cli/command/volume/list.go create mode 100644 cli/cli/command/volume/list_test.go create mode 100644 cli/cli/command/volume/prune.go create mode 100644 cli/cli/command/volume/prune_test.go create mode 100644 cli/cli/command/volume/remove.go create mode 100644 cli/cli/command/volume/remove_test.go create mode 100644 cli/cli/command/volume/testdata/volume-inspect-with-format.json-template.golden create mode 100644 
cli/cli/command/volume/testdata/volume-inspect-with-format.simple-template.golden create mode 100644 cli/cli/command/volume/testdata/volume-inspect-without-format.multiple-volume-with-labels.golden create mode 100644 cli/cli/command/volume/testdata/volume-inspect-without-format.single-volume.golden create mode 100644 cli/cli/command/volume/testdata/volume-list-sort.golden create mode 100644 cli/cli/command/volume/testdata/volume-list-with-config-format.golden create mode 100644 cli/cli/command/volume/testdata/volume-list-with-format.golden create mode 100644 cli/cli/command/volume/testdata/volume-list-without-format.golden create mode 100644 cli/cli/command/volume/testdata/volume-prune-no.golden create mode 100644 cli/cli/command/volume/testdata/volume-prune-yes.golden create mode 100644 cli/cli/command/volume/testdata/volume-prune.deletedVolumes.golden create mode 100644 cli/cli/command/volume/testdata/volume-prune.empty.golden create mode 100644 cli/cli/compose/convert/compose.go create mode 100644 cli/cli/compose/convert/compose_test.go create mode 100644 cli/cli/compose/convert/service.go create mode 100644 cli/cli/compose/convert/service_test.go create mode 100644 cli/cli/compose/convert/volume.go create mode 100644 cli/cli/compose/convert/volume_test.go create mode 100644 cli/cli/compose/interpolation/interpolation.go create mode 100644 cli/cli/compose/interpolation/interpolation_test.go create mode 100644 cli/cli/compose/loader/example1.env create mode 100644 cli/cli/compose/loader/example2.env create mode 100644 cli/cli/compose/loader/full-example.yml create mode 100644 cli/cli/compose/loader/full-struct_test.go create mode 100644 cli/cli/compose/loader/interpolate.go create mode 100644 cli/cli/compose/loader/loader.go create mode 100644 cli/cli/compose/loader/loader_test.go create mode 100644 cli/cli/compose/loader/merge.go create mode 100644 cli/cli/compose/loader/merge_test.go create mode 100644 cli/cli/compose/loader/types_test.go create mode 100644 
cli/cli/compose/loader/volume.go create mode 100644 cli/cli/compose/loader/volume_test.go create mode 100644 cli/cli/compose/loader/windows_path.go create mode 100644 cli/cli/compose/loader/windows_path_test.go create mode 100644 cli/cli/compose/schema/bindata.go create mode 100644 cli/cli/compose/schema/data/config_schema_v3.0.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.1.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.2.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.3.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.4.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.5.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.6.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.7.json create mode 100644 cli/cli/compose/schema/data/config_schema_v3.8.json create mode 100644 cli/cli/compose/schema/data/doc.go create mode 100644 cli/cli/compose/schema/schema.go create mode 100644 cli/cli/compose/schema/schema_test.go create mode 100644 cli/cli/compose/template/template.go create mode 100644 cli/cli/compose/template/template_test.go create mode 100644 cli/cli/compose/types/types.go create mode 100644 cli/cli/config/config.go create mode 100644 cli/cli/config/config_test.go create mode 100644 cli/cli/config/configfile/file.go create mode 100644 cli/cli/config/configfile/file_test.go create mode 100644 cli/cli/config/configfile/testdata/plugin-config-2.golden create mode 100644 cli/cli/config/configfile/testdata/plugin-config.golden create mode 100644 cli/cli/config/credentials/credentials.go create mode 100644 cli/cli/config/credentials/default_store.go create mode 100644 cli/cli/config/credentials/default_store_darwin.go create mode 100644 cli/cli/config/credentials/default_store_linux.go create mode 100644 cli/cli/config/credentials/default_store_unsupported.go create mode 100644 cli/cli/config/credentials/default_store_windows.go 
create mode 100644 cli/cli/config/credentials/file_store.go create mode 100644 cli/cli/config/credentials/file_store_test.go create mode 100644 cli/cli/config/credentials/native_store.go create mode 100644 cli/cli/config/credentials/native_store_test.go create mode 100644 cli/cli/config/types/authconfig.go create mode 100644 cli/cli/connhelper/commandconn/commandconn.go create mode 100644 cli/cli/connhelper/commandconn/commandconn_unix_test.go create mode 100644 cli/cli/connhelper/commandconn/pdeathsig_linux.go create mode 100644 cli/cli/connhelper/commandconn/pdeathsig_nolinux.go create mode 100644 cli/cli/connhelper/commandconn/session_unix.go create mode 100644 cli/cli/connhelper/commandconn/session_windows.go create mode 100644 cli/cli/connhelper/connhelper.go create mode 100644 cli/cli/connhelper/ssh/ssh.go create mode 100644 cli/cli/connhelper/ssh/ssh_test.go create mode 100644 cli/cli/context/docker/constants.go create mode 100644 cli/cli/context/docker/load.go create mode 100644 cli/cli/context/endpoint.go create mode 100644 cli/cli/context/kubernetes/constants.go create mode 100644 cli/cli/context/kubernetes/endpoint_test.go create mode 100644 cli/cli/context/kubernetes/load.go create mode 100644 cli/cli/context/kubernetes/load_test.go create mode 100644 cli/cli/context/kubernetes/save.go create mode 100644 cli/cli/context/kubernetes/testdata/eks-kubeconfig create mode 100644 cli/cli/context/kubernetes/testdata/gke-kubeconfig create mode 100644 cli/cli/context/kubernetes/testdata/test-kubeconfig create mode 100644 cli/cli/context/store/doc.go create mode 100644 cli/cli/context/store/io_utils.go create mode 100644 cli/cli/context/store/io_utils_test.go create mode 100644 cli/cli/context/store/metadata_test.go create mode 100644 cli/cli/context/store/metadatastore.go create mode 100644 cli/cli/context/store/store.go create mode 100644 cli/cli/context/store/store_test.go create mode 100644 cli/cli/context/store/storeconfig.go create mode 100644 
cli/cli/context/store/storeconfig_test.go create mode 100644 cli/cli/context/store/tlsstore.go create mode 100644 cli/cli/context/store/tlsstore_test.go create mode 100644 cli/cli/context/tlsdata.go create mode 100644 cli/cli/debug/debug.go create mode 100644 cli/cli/debug/debug_test.go create mode 100644 cli/cli/error.go create mode 100644 cli/cli/flags/client.go create mode 100644 cli/cli/flags/common.go create mode 100644 cli/cli/flags/common_test.go create mode 100644 cli/cli/manifest/store/store.go create mode 100644 cli/cli/manifest/store/store_test.go create mode 100644 cli/cli/manifest/types/types.go create mode 100644 cli/cli/registry/client/client.go create mode 100644 cli/cli/registry/client/endpoint.go create mode 100644 cli/cli/registry/client/fetcher.go create mode 100644 cli/cli/required.go create mode 100644 cli/cli/required_test.go create mode 100644 cli/cli/streams/in.go create mode 100644 cli/cli/streams/out.go create mode 100644 cli/cli/streams/stream.go create mode 100644 cli/cli/trust/trust.go create mode 100644 cli/cli/trust/trust_test.go create mode 100644 cli/cli/version/version.go create mode 100644 cli/cli/winresources/res_windows.go create mode 100644 cli/cmd/docker/docker.go create mode 100644 cli/cmd/docker/docker_test.go create mode 100644 cli/cmd/docker/docker_windows.go create mode 100644 cli/codecov.yml create mode 100644 cli/contrib/completion/bash/docker create mode 100644 cli/contrib/completion/fish/docker.fish create mode 100644 cli/contrib/completion/powershell/readme.txt create mode 100644 cli/contrib/completion/zsh/REVIEWERS create mode 100644 cli/contrib/completion/zsh/_docker create mode 100644 cli/docker.Makefile create mode 100644 cli/dockerfiles/Dockerfile.binary-native create mode 100644 cli/dockerfiles/Dockerfile.cross create mode 100644 cli/dockerfiles/Dockerfile.dev create mode 100644 cli/dockerfiles/Dockerfile.e2e create mode 100644 cli/dockerfiles/Dockerfile.lint create mode 100644 
cli/dockerfiles/Dockerfile.shellcheck create mode 100644 cli/docs/README.md create mode 100644 cli/docs/deprecated.md create mode 100644 cli/docs/extend/EBS_volume.md create mode 100644 cli/docs/extend/cli_plugins.md create mode 100644 cli/docs/extend/config.md create mode 100644 cli/docs/extend/images/authz_additional_info.png create mode 100644 cli/docs/extend/images/authz_allow.png create mode 100644 cli/docs/extend/images/authz_chunked.png create mode 100644 cli/docs/extend/images/authz_connection_hijack.png create mode 100644 cli/docs/extend/images/authz_deny.png create mode 100644 cli/docs/extend/index.md create mode 100644 cli/docs/extend/legacy_plugins.md create mode 100644 cli/docs/extend/plugin_api.md create mode 100644 cli/docs/extend/plugins_authorization.md create mode 100644 cli/docs/extend/plugins_graphdriver.md create mode 100644 cli/docs/extend/plugins_logging.md create mode 100644 cli/docs/extend/plugins_metrics.md create mode 100644 cli/docs/extend/plugins_network.md create mode 100644 cli/docs/extend/plugins_services.md create mode 100644 cli/docs/extend/plugins_volume.md create mode 100644 cli/docs/reference/builder.md create mode 100644 cli/docs/reference/commandline/attach.md create mode 100644 cli/docs/reference/commandline/build.md create mode 100644 cli/docs/reference/commandline/cli.md create mode 100644 cli/docs/reference/commandline/commit.md create mode 100644 cli/docs/reference/commandline/container.md create mode 100644 cli/docs/reference/commandline/container_prune.md create mode 100644 cli/docs/reference/commandline/context_create.md create mode 100644 cli/docs/reference/commandline/context_export.md create mode 100644 cli/docs/reference/commandline/context_import.md create mode 100644 cli/docs/reference/commandline/context_inspect.md create mode 100644 cli/docs/reference/commandline/context_ls.md create mode 100644 cli/docs/reference/commandline/context_rm.md create mode 100644 cli/docs/reference/commandline/context_update.md 
create mode 100644 cli/docs/reference/commandline/context_use.md create mode 100644 cli/docs/reference/commandline/cp.md create mode 100644 cli/docs/reference/commandline/create.md create mode 100644 cli/docs/reference/commandline/deploy.md create mode 100644 cli/docs/reference/commandline/diff.md create mode 100644 cli/docs/reference/commandline/dockerd.md create mode 100644 cli/docs/reference/commandline/events.md create mode 100644 cli/docs/reference/commandline/exec.md create mode 100644 cli/docs/reference/commandline/export.md create mode 100644 cli/docs/reference/commandline/history.md create mode 100644 cli/docs/reference/commandline/image.md create mode 100644 cli/docs/reference/commandline/image_prune.md create mode 100644 cli/docs/reference/commandline/images.md create mode 100644 cli/docs/reference/commandline/import.md create mode 100644 cli/docs/reference/commandline/index.md create mode 100644 cli/docs/reference/commandline/info.md create mode 100644 cli/docs/reference/commandline/inspect.md create mode 100644 cli/docs/reference/commandline/kill.md create mode 100644 cli/docs/reference/commandline/load.md create mode 100644 cli/docs/reference/commandline/login.md create mode 100644 cli/docs/reference/commandline/logout.md create mode 100644 cli/docs/reference/commandline/logs.md create mode 100644 cli/docs/reference/commandline/manifest.md create mode 100644 cli/docs/reference/commandline/network.md create mode 100644 cli/docs/reference/commandline/network_connect.md create mode 100644 cli/docs/reference/commandline/network_create.md create mode 100644 cli/docs/reference/commandline/network_disconnect.md create mode 100644 cli/docs/reference/commandline/network_inspect.md create mode 100644 cli/docs/reference/commandline/network_ls.md create mode 100644 cli/docs/reference/commandline/network_prune.md create mode 100644 cli/docs/reference/commandline/network_rm.md create mode 100644 cli/docs/reference/commandline/node.md create mode 100644 
cli/docs/reference/commandline/node_demote.md create mode 100644 cli/docs/reference/commandline/node_inspect.md create mode 100644 cli/docs/reference/commandline/node_ls.md create mode 100644 cli/docs/reference/commandline/node_promote.md create mode 100644 cli/docs/reference/commandline/node_ps.md create mode 100644 cli/docs/reference/commandline/node_rm.md create mode 100644 cli/docs/reference/commandline/node_update.md create mode 100644 cli/docs/reference/commandline/pause.md create mode 100644 cli/docs/reference/commandline/plugin.md create mode 100644 cli/docs/reference/commandline/plugin_create.md create mode 100644 cli/docs/reference/commandline/plugin_disable.md create mode 100644 cli/docs/reference/commandline/plugin_enable.md create mode 100644 cli/docs/reference/commandline/plugin_inspect.md create mode 100644 cli/docs/reference/commandline/plugin_install.md create mode 100644 cli/docs/reference/commandline/plugin_ls.md create mode 100644 cli/docs/reference/commandline/plugin_push.md create mode 100644 cli/docs/reference/commandline/plugin_rm.md create mode 100644 cli/docs/reference/commandline/plugin_set.md create mode 100644 cli/docs/reference/commandline/plugin_upgrade.md create mode 100644 cli/docs/reference/commandline/port.md create mode 100644 cli/docs/reference/commandline/ps.md create mode 100644 cli/docs/reference/commandline/pull.md create mode 100644 cli/docs/reference/commandline/push.md create mode 100644 cli/docs/reference/commandline/rename.md create mode 100644 cli/docs/reference/commandline/restart.md create mode 100644 cli/docs/reference/commandline/rm.md create mode 100644 cli/docs/reference/commandline/rmi.md create mode 100644 cli/docs/reference/commandline/run.md create mode 100644 cli/docs/reference/commandline/save.md create mode 100644 cli/docs/reference/commandline/search.md create mode 100644 cli/docs/reference/commandline/secret.md create mode 100644 cli/docs/reference/commandline/secret_create.md create mode 100644 
cli/docs/reference/commandline/secret_inspect.md create mode 100644 cli/docs/reference/commandline/secret_ls.md create mode 100644 cli/docs/reference/commandline/secret_rm.md create mode 100644 cli/docs/reference/commandline/service.md create mode 100644 cli/docs/reference/commandline/service_create.md create mode 100644 cli/docs/reference/commandline/service_inspect.md create mode 100644 cli/docs/reference/commandline/service_logs.md create mode 100644 cli/docs/reference/commandline/service_ls.md create mode 100644 cli/docs/reference/commandline/service_ps.md create mode 100644 cli/docs/reference/commandline/service_rm.md create mode 100644 cli/docs/reference/commandline/service_rollback.md create mode 100644 cli/docs/reference/commandline/service_scale.md create mode 100644 cli/docs/reference/commandline/service_update.md create mode 100644 cli/docs/reference/commandline/stack.md create mode 100644 cli/docs/reference/commandline/stack_deploy.md create mode 100644 cli/docs/reference/commandline/stack_ls.md create mode 100644 cli/docs/reference/commandline/stack_ps.md create mode 100644 cli/docs/reference/commandline/stack_rm.md create mode 100644 cli/docs/reference/commandline/stack_services.md create mode 100644 cli/docs/reference/commandline/start.md create mode 100644 cli/docs/reference/commandline/stats.md create mode 100644 cli/docs/reference/commandline/stop.md create mode 100644 cli/docs/reference/commandline/swarm.md create mode 100644 cli/docs/reference/commandline/swarm_ca.md create mode 100644 cli/docs/reference/commandline/swarm_init.md create mode 100644 cli/docs/reference/commandline/swarm_join.md create mode 100644 cli/docs/reference/commandline/swarm_join_token.md create mode 100644 cli/docs/reference/commandline/swarm_leave.md create mode 100644 cli/docs/reference/commandline/swarm_unlock.md create mode 100644 cli/docs/reference/commandline/swarm_unlock_key.md create mode 100644 cli/docs/reference/commandline/swarm_update.md create mode 100644 
cli/docs/reference/commandline/system.md create mode 100644 cli/docs/reference/commandline/system_df.md create mode 100644 cli/docs/reference/commandline/system_events.md create mode 100644 cli/docs/reference/commandline/system_prune.md create mode 100644 cli/docs/reference/commandline/tag.md create mode 100644 cli/docs/reference/commandline/top.md create mode 100644 cli/docs/reference/commandline/trust_inspect.md create mode 100644 cli/docs/reference/commandline/trust_key_generate.md create mode 100644 cli/docs/reference/commandline/trust_key_load.md create mode 100644 cli/docs/reference/commandline/trust_revoke.md create mode 100644 cli/docs/reference/commandline/trust_sign.md create mode 100644 cli/docs/reference/commandline/trust_signer_add.md create mode 100644 cli/docs/reference/commandline/trust_signer_remove.md create mode 100644 cli/docs/reference/commandline/unpause.md create mode 100644 cli/docs/reference/commandline/update.md create mode 100644 cli/docs/reference/commandline/version.md create mode 100644 cli/docs/reference/commandline/volume.md create mode 100644 cli/docs/reference/commandline/volume_create.md create mode 100644 cli/docs/reference/commandline/volume_inspect.md create mode 100644 cli/docs/reference/commandline/volume_ls.md create mode 100644 cli/docs/reference/commandline/volume_prune.md create mode 100644 cli/docs/reference/commandline/volume_rm.md create mode 100644 cli/docs/reference/commandline/wait.md create mode 100644 cli/docs/reference/glossary.md create mode 100644 cli/docs/reference/index.md create mode 100644 cli/docs/reference/run.md create mode 100644 cli/docs/yaml/Dockerfile create mode 100644 cli/docs/yaml/generate.go create mode 100644 cli/docs/yaml/yaml.go create mode 100644 cli/e2e/cli-plugins/config_test.go create mode 100644 cli/e2e/cli-plugins/dial_test.go create mode 100644 cli/e2e/cli-plugins/flags_test.go create mode 100644 cli/e2e/cli-plugins/help_test.go create mode 100644 cli/e2e/cli-plugins/main_test.go create 
mode 100644 cli/e2e/cli-plugins/plugins/badmeta/main.go create mode 100644 cli/e2e/cli-plugins/plugins/nopersistentprerun/main.go create mode 100644 cli/e2e/cli-plugins/run_test.go create mode 100644 cli/e2e/cli-plugins/testdata/docker-badmeta-err.golden create mode 100644 cli/e2e/cli-plugins/testdata/docker-help-badmeta-err.golden create mode 100644 cli/e2e/cli-plugins/testdata/docker-help-helloworld-goodbye.golden create mode 100644 cli/e2e/cli-plugins/testdata/docker-help-helloworld.golden create mode 100644 cli/e2e/cli-plugins/testdata/docker-help-nonexistent-err.golden create mode 100644 cli/e2e/cli-plugins/testdata/docker-nonexistent-err.golden create mode 100644 cli/e2e/cli-plugins/util_test.go create mode 100644 cli/e2e/compose-env.connhelper-ssh.yaml create mode 100644 cli/e2e/compose-env.experimental.yaml create mode 100644 cli/e2e/compose-env.yaml create mode 100644 cli/e2e/container/attach_test.go create mode 100644 cli/e2e/container/create_test.go create mode 100644 cli/e2e/container/kill_test.go create mode 100644 cli/e2e/container/main_test.go create mode 100644 cli/e2e/container/run_test.go create mode 100644 cli/e2e/container/testdata/run-attached-from-remote-and-remove.golden create mode 100644 cli/e2e/context/context_test.go create mode 100644 cli/e2e/context/main_test.go create mode 100644 cli/e2e/context/testdata/context-ls.golden create mode 100644 cli/e2e/context/testdata/test-dockerconfig/config.json create mode 100644 cli/e2e/context/testdata/test-dockerconfig/contexts/meta/b71199ebd070b36beab7317920c2c2f1d777df8d05e5527d8458fda57cb17a7a/meta.json create mode 100644 cli/e2e/context/testdata/test-kubeconfig create mode 100644 cli/e2e/image/build_test.go create mode 100644 cli/e2e/image/main_test.go create mode 100644 cli/e2e/image/pull_test.go create mode 100644 cli/e2e/image/push_test.go create mode 100644 cli/e2e/image/testdata/notary/delgkey1.crt create mode 100644 cli/e2e/image/testdata/notary/delgkey1.key create mode 100644 
cli/e2e/image/testdata/notary/delgkey2.crt create mode 100644 cli/e2e/image/testdata/notary/delgkey2.key create mode 100644 cli/e2e/image/testdata/notary/delgkey3.crt create mode 100644 cli/e2e/image/testdata/notary/delgkey3.key create mode 100644 cli/e2e/image/testdata/notary/delgkey4.crt create mode 100644 cli/e2e/image/testdata/notary/delgkey4.key create mode 100755 cli/e2e/image/testdata/notary/gen.sh create mode 100644 cli/e2e/image/testdata/notary/localhost.cert create mode 100644 cli/e2e/image/testdata/notary/localhost.key create mode 100644 cli/e2e/image/testdata/pull-with-content-trust-err.golden create mode 100644 cli/e2e/image/testdata/pull-with-content-trust.golden create mode 100644 cli/e2e/image/testdata/push-with-content-trust-err.golden create mode 100644 cli/e2e/internal/fixtures/fixtures.go create mode 100644 cli/e2e/plugin/basic/basic.go create mode 100644 cli/e2e/plugin/main_test.go create mode 100644 cli/e2e/plugin/trust_test.go create mode 100644 cli/e2e/stack/deploy_test.go create mode 100644 cli/e2e/stack/help_test.go create mode 100644 cli/e2e/stack/main_test.go create mode 100644 cli/e2e/stack/remove_test.go create mode 100644 cli/e2e/stack/testdata/data create mode 100644 cli/e2e/stack/testdata/full-stack.yml create mode 100644 cli/e2e/stack/testdata/stack-deploy-help-kubernetes.golden create mode 100644 cli/e2e/stack/testdata/stack-deploy-help-swarm.golden create mode 100644 cli/e2e/stack/testdata/stack-deploy-with-names-kubernetes.golden create mode 100644 cli/e2e/stack/testdata/stack-deploy-with-names-swarm.golden create mode 100644 cli/e2e/stack/testdata/stack-deploy-with-names.golden create mode 100644 cli/e2e/stack/testdata/stack-remove-kubernetes-success.golden create mode 100644 cli/e2e/stack/testdata/stack-remove-swarm-success.golden create mode 100644 cli/e2e/stack/testdata/stack-with-named-resources.yml create mode 100644 cli/e2e/system/inspect_test.go create mode 100644 cli/e2e/system/main_test.go create mode 100644 
cli/e2e/testdata/Dockerfile.connhelper-ssh create mode 100644 cli/e2e/testdata/Dockerfile.evil-notary-server create mode 100644 cli/e2e/testdata/Dockerfile.notary-server create mode 100755 cli/e2e/testdata/connhelper-ssh/entrypoint.sh create mode 100644 cli/e2e/testdata/notary-evil/notary-config.json create mode 100644 cli/e2e/testdata/notary-evil/notary-server.cert create mode 100644 cli/e2e/testdata/notary-evil/notary-server.key create mode 100644 cli/e2e/testdata/notary-evil/root-ca.cert create mode 100644 cli/e2e/testdata/notary/notary-config.json create mode 100644 cli/e2e/testdata/notary/notary-server.cert create mode 100644 cli/e2e/testdata/notary/notary-server.key create mode 100644 cli/e2e/testdata/notary/root-ca.cert create mode 100644 cli/e2e/trust/main_test.go create mode 100644 cli/e2e/trust/revoke_test.go create mode 100644 cli/e2e/trust/sign_test.go create mode 100644 cli/experimental/README.md create mode 100644 cli/experimental/checkpoint-restore.md create mode 100644 cli/experimental/docker-stacks-and-bundles.md create mode 100644 cli/experimental/images/ipvlan-l3.gliffy create mode 100644 cli/experimental/images/ipvlan-l3.png create mode 100644 cli/experimental/images/ipvlan-l3.svg create mode 100644 cli/experimental/images/ipvlan_l2_simple.gliffy create mode 100644 cli/experimental/images/ipvlan_l2_simple.png create mode 100644 cli/experimental/images/ipvlan_l2_simple.svg create mode 100644 cli/experimental/images/macvlan-bridge-ipvlan-l2.gliffy create mode 100644 cli/experimental/images/macvlan-bridge-ipvlan-l2.png create mode 100644 cli/experimental/images/macvlan-bridge-ipvlan-l2.svg create mode 100644 cli/experimental/images/multi_tenant_8021q_vlans.gliffy create mode 100644 cli/experimental/images/multi_tenant_8021q_vlans.png create mode 100644 cli/experimental/images/multi_tenant_8021q_vlans.svg create mode 100644 cli/experimental/images/vlans-deeper-look.gliffy create mode 100644 cli/experimental/images/vlans-deeper-look.png create mode 
100644 cli/experimental/images/vlans-deeper-look.svg create mode 100644 cli/experimental/vlan-networks.md create mode 100644 cli/gometalinter.json create mode 100644 cli/internal/containerizedengine/client_test.go create mode 100644 cli/internal/containerizedengine/containerd.go create mode 100644 cli/internal/containerizedengine/containerd_test.go create mode 100644 cli/internal/containerizedengine/progress.go create mode 100644 cli/internal/containerizedengine/types.go create mode 100644 cli/internal/containerizedengine/update.go create mode 100644 cli/internal/containerizedengine/update_test.go create mode 100644 cli/internal/licenseutils/client_test.go create mode 100644 cli/internal/licenseutils/types.go create mode 100644 cli/internal/licenseutils/utils.go create mode 100644 cli/internal/licenseutils/utils_test.go create mode 100644 cli/internal/pkg/containerized/hostpaths.go create mode 100644 cli/internal/pkg/containerized/hostpaths_test.go create mode 100644 cli/internal/pkg/containerized/pauseandrun.go create mode 100644 cli/internal/pkg/containerized/signal_unix.go create mode 100644 cli/internal/pkg/containerized/signal_windows.go create mode 100644 cli/internal/pkg/containerized/snapshot.go create mode 100644 cli/internal/test/builders/config.go create mode 100644 cli/internal/test/builders/container.go create mode 100644 cli/internal/test/builders/doc.go create mode 100644 cli/internal/test/builders/network.go create mode 100644 cli/internal/test/builders/node.go create mode 100644 cli/internal/test/builders/secret.go create mode 100644 cli/internal/test/builders/service.go create mode 100644 cli/internal/test/builders/swarm.go create mode 100644 cli/internal/test/builders/task.go create mode 100644 cli/internal/test/builders/volume.go create mode 100644 cli/internal/test/cli.go create mode 100644 cli/internal/test/doc.go create mode 100644 cli/internal/test/environment/testenv.go create mode 100644 cli/internal/test/network/client.go create mode 
100644 cli/internal/test/notary/client.go create mode 100644 cli/internal/test/output/output.go create mode 100644 cli/internal/test/store.go create mode 100644 cli/internal/test/strings.go create mode 100644 cli/internal/versions/versions.go create mode 100644 cli/internal/versions/versions_test.go create mode 100644 cli/kubernetes/README.md create mode 100644 cli/kubernetes/check.go create mode 100644 cli/kubernetes/check_test.go create mode 100644 cli/kubernetes/client/clientset/clientset.go create mode 100644 cli/kubernetes/client/clientset/scheme/register.go create mode 100644 cli/kubernetes/client/clientset/typed/compose/v1beta1/compose_client.go create mode 100644 cli/kubernetes/client/clientset/typed/compose/v1beta1/stack.go create mode 100644 cli/kubernetes/client/clientset/typed/compose/v1beta2/compose_client.go create mode 100644 cli/kubernetes/client/clientset/typed/compose/v1beta2/stack.go create mode 100644 cli/kubernetes/client/informers/compose/interface.go create mode 100644 cli/kubernetes/client/informers/compose/v1beta2/interface.go create mode 100644 cli/kubernetes/client/informers/compose/v1beta2/stack.go create mode 100644 cli/kubernetes/client/informers/factory.go create mode 100644 cli/kubernetes/client/informers/generic.go create mode 100644 cli/kubernetes/client/informers/internalinterfaces/factory_interfaces.go create mode 100644 cli/kubernetes/client/listers/compose/v1beta2/expansion_generated.go create mode 100644 cli/kubernetes/client/listers/compose/v1beta2/stack.go create mode 100644 cli/kubernetes/compose/clone/maps.go create mode 100644 cli/kubernetes/compose/clone/slices.go create mode 100644 cli/kubernetes/compose/doc.go create mode 100644 cli/kubernetes/compose/impersonation/impersonationconfig.go create mode 100644 cli/kubernetes/compose/v1beta1/doc.go create mode 100644 cli/kubernetes/compose/v1beta1/owner.go create mode 100644 cli/kubernetes/compose/v1beta1/parsing.go create mode 100644 
cli/kubernetes/compose/v1beta1/register.go create mode 100644 cli/kubernetes/compose/v1beta1/stack.go create mode 100644 cli/kubernetes/compose/v1beta1/stack_test.go create mode 100644 cli/kubernetes/compose/v1beta2/composefile_stack_types.go create mode 100644 cli/kubernetes/compose/v1beta2/doc.go create mode 100644 cli/kubernetes/compose/v1beta2/owner.go create mode 100644 cli/kubernetes/compose/v1beta2/register.go create mode 100644 cli/kubernetes/compose/v1beta2/scale.go create mode 100644 cli/kubernetes/compose/v1beta2/stack.go create mode 100644 cli/kubernetes/doc.go create mode 100644 cli/kubernetes/labels/labels.go create mode 100644 cli/man/Dockerfile.5.md create mode 100644 cli/man/README.md create mode 100644 cli/man/docker-build.1.md create mode 100644 cli/man/docker-config-json.5.md create mode 100644 cli/man/docker-run.1.md create mode 100644 cli/man/docker.1.md create mode 100644 cli/man/dockerd.8.md create mode 100644 cli/man/generate.go create mode 100644 cli/man/import.go create mode 100755 cli/man/md2man-all.sh create mode 100644 cli/man/src/attach.md create mode 100644 cli/man/src/commit.md create mode 100644 cli/man/src/container/attach.md create mode 100644 cli/man/src/container/commit.md create mode 100644 cli/man/src/container/cp.md create mode 100644 cli/man/src/container/create-example.md create mode 100644 cli/man/src/container/create.md create mode 100644 cli/man/src/container/diff.md create mode 100644 cli/man/src/container/exec.md create mode 100644 cli/man/src/container/export.md create mode 100644 cli/man/src/container/kill.md create mode 100644 cli/man/src/container/logs.md create mode 100644 cli/man/src/container/ls.md create mode 100644 cli/man/src/container/pause.md create mode 100644 cli/man/src/container/port.md create mode 100644 cli/man/src/container/rename.md create mode 100644 cli/man/src/container/restart.md create mode 100644 cli/man/src/container/rm.md create mode 100644 cli/man/src/container/run.md create mode 100644 
cli/man/src/container/start.md create mode 100644 cli/man/src/container/stats.md create mode 100644 cli/man/src/container/stop.md create mode 100644 cli/man/src/container/top.md create mode 100644 cli/man/src/container/unpause.md create mode 100644 cli/man/src/container/update.md create mode 100644 cli/man/src/container/wait.md create mode 100644 cli/man/src/cp.md create mode 100644 cli/man/src/create.md create mode 100644 cli/man/src/diff.md create mode 100644 cli/man/src/events.md create mode 100644 cli/man/src/exec.md create mode 100644 cli/man/src/export.md create mode 100644 cli/man/src/history.md create mode 100644 cli/man/src/image/build.md create mode 100644 cli/man/src/image/history.md create mode 100644 cli/man/src/image/import.md create mode 100644 cli/man/src/image/load.md create mode 100644 cli/man/src/image/ls.md create mode 100644 cli/man/src/image/pull.md create mode 100644 cli/man/src/image/push.md create mode 100644 cli/man/src/image/rm.md create mode 100644 cli/man/src/image/save.md create mode 100644 cli/man/src/image/tag.md create mode 100644 cli/man/src/images.md create mode 100644 cli/man/src/import.md create mode 100644 cli/man/src/info.md create mode 100644 cli/man/src/inspect.md create mode 100644 cli/man/src/kill.md create mode 100644 cli/man/src/load.md create mode 100644 cli/man/src/login.md create mode 100644 cli/man/src/logout.md create mode 100644 cli/man/src/logs.md create mode 100644 cli/man/src/network/connect.md create mode 100644 cli/man/src/network/create.md create mode 100644 cli/man/src/network/disconnect.md create mode 100644 cli/man/src/network/inspect.md create mode 100644 cli/man/src/network/ls.md create mode 100644 cli/man/src/network/rm.md create mode 100644 cli/man/src/pause.md create mode 100644 cli/man/src/plugin/ls.md create mode 100644 cli/man/src/port.md create mode 100644 cli/man/src/ps.md create mode 100644 cli/man/src/pull.md create mode 100644 cli/man/src/push.md create mode 100644 cli/man/src/rename.md create 
mode 100644 cli/man/src/restart.md create mode 100644 cli/man/src/rm.md create mode 100644 cli/man/src/rmi.md create mode 100644 cli/man/src/save.md create mode 100644 cli/man/src/search.md create mode 100644 cli/man/src/start.md create mode 100644 cli/man/src/stats.md create mode 100644 cli/man/src/stop.md create mode 100644 cli/man/src/system/events.md create mode 100644 cli/man/src/system/info.md create mode 100644 cli/man/src/tag.md create mode 100644 cli/man/src/top.md create mode 100644 cli/man/src/unpause.md create mode 100644 cli/man/src/update.md create mode 100644 cli/man/src/version.md create mode 100644 cli/man/src/volume.md create mode 100644 cli/man/src/volume/create.md create mode 100644 cli/man/src/volume/inspect.md create mode 100644 cli/man/src/volume/ls.md create mode 100644 cli/man/src/wait.md create mode 100644 cli/opts/config.go create mode 100644 cli/opts/duration.go create mode 100644 cli/opts/duration_test.go create mode 100644 cli/opts/env.go create mode 100644 cli/opts/env_test.go create mode 100644 cli/opts/envfile.go create mode 100644 cli/opts/envfile_test.go create mode 100644 cli/opts/file.go create mode 100644 cli/opts/gpus.go create mode 100644 cli/opts/gpus_test.go create mode 100644 cli/opts/hosts.go create mode 100644 cli/opts/hosts_test.go create mode 100644 cli/opts/hosts_unix.go create mode 100644 cli/opts/hosts_windows.go create mode 100644 cli/opts/ip.go create mode 100644 cli/opts/ip_test.go create mode 100644 cli/opts/mount.go create mode 100644 cli/opts/mount_test.go create mode 100644 cli/opts/network.go create mode 100644 cli/opts/network_test.go create mode 100644 cli/opts/opts.go create mode 100644 cli/opts/opts_test.go create mode 100644 cli/opts/opts_unix.go create mode 100644 cli/opts/opts_windows.go create mode 100644 cli/opts/parse.go create mode 100644 cli/opts/port.go create mode 100644 cli/opts/port_test.go create mode 100644 cli/opts/quotedstring.go create mode 100644 cli/opts/quotedstring_test.go create 
mode 100644 cli/opts/runtime.go create mode 100644 cli/opts/secret.go create mode 100644 cli/opts/secret_test.go create mode 100644 cli/opts/throttledevice.go create mode 100644 cli/opts/ulimit.go create mode 100644 cli/opts/ulimit_test.go create mode 100644 cli/opts/weightdevice.go create mode 100644 cli/poule.yml create mode 100755 cli/scripts/build/.variables create mode 100755 cli/scripts/build/binary create mode 100755 cli/scripts/build/cross create mode 100755 cli/scripts/build/dynbinary create mode 100755 cli/scripts/build/osx create mode 100755 cli/scripts/build/plugins create mode 100755 cli/scripts/build/plugins-osx create mode 100755 cli/scripts/build/plugins-windows create mode 100755 cli/scripts/build/windows create mode 100755 cli/scripts/docs/generate-authors.sh create mode 100755 cli/scripts/docs/generate-man.sh create mode 100755 cli/scripts/docs/generate-yaml.sh create mode 100755 cli/scripts/gen/windows-resources create mode 100644 cli/scripts/make.ps1 create mode 100755 cli/scripts/test/e2e/entry create mode 100755 cli/scripts/test/e2e/load-image create mode 100755 cli/scripts/test/e2e/run create mode 100755 cli/scripts/test/e2e/wait-on-daemon create mode 100755 cli/scripts/test/e2e/wrapper create mode 100755 cli/scripts/validate/check-git-diff create mode 100755 cli/scripts/validate/shellcheck create mode 100755 cli/scripts/warn-outside-container create mode 100644 cli/scripts/winresources/common.rc create mode 100644 cli/scripts/winresources/docker.exe.manifest create mode 100644 cli/scripts/winresources/docker.ico create mode 100644 cli/scripts/winresources/docker.png create mode 100644 cli/scripts/winresources/docker.rc create mode 100644 cli/service/logs/parse_logs.go create mode 100644 cli/service/logs/parse_logs_test.go create mode 100644 cli/templates/templates.go create mode 100644 cli/templates/templates_test.go create mode 100644 cli/types/types.go create mode 100755 cli/vendor.conf create mode 100644 
cli/vendor/github.com/containerd/ttrpc/LICENSE create mode 100644 cli/vendor/github.com/containerd/ttrpc/README.md create mode 100644 cli/vendor/github.com/containerd/ttrpc/channel.go create mode 100644 cli/vendor/github.com/containerd/ttrpc/client.go create mode 100644 cli/vendor/github.com/containerd/ttrpc/codec.go create mode 100644 cli/vendor/github.com/containerd/ttrpc/config.go create mode 100644 cli/vendor/github.com/containerd/ttrpc/handshake.go create mode 100644 cli/vendor/github.com/containerd/ttrpc/server.go create mode 100644 cli/vendor/github.com/containerd/ttrpc/services.go create mode 100644 cli/vendor/github.com/containerd/ttrpc/types.go create mode 100644 cli/vendor/github.com/containerd/ttrpc/unixcreds_linux.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/LICENSE create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/README.md create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/clientset/clientset.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/clientset/scheme/register.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1alpha3/compose_client.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1alpha3/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta1/compose_client.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta1/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta2/compose_client.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta2/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/informers/compose/interface.go create mode 
100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/informers/compose/v1alpha3/interface.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/informers/compose/v1alpha3/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/informers/compose/v1beta2/interface.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/informers/compose/v1beta2/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/informers/factory.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/informers/generic.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/informers/internalinterfaces/factory_interfaces.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/listers/compose/v1alpha3/expansion_generated.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/listers/compose/v1alpha3/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/listers/compose/v1beta2/expansion_generated.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/client/listers/compose/v1beta2/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/clone/maps.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/clone/slices.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/impersonation/doc.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/impersonation/impersonationconfig.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/composefile_stack_types.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/conversion_custom.go create mode 100644 
cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/conversion_generated.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/deepcopy_generated.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/doc.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/owner.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/register.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/scale.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1alpha3/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta1/doc.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta1/owner.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta1/parsing.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta1/register.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta1/stack.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta2/composefile_stack_types.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta2/deepcopy_generated.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta2/doc.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta2/owner.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta2/register.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta2/scale.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/compose/v1beta2/stack.go create mode 100644 
cli/vendor/github.com/docker/compose-on-kubernetes/api/config.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/doc.go create mode 100644 cli/vendor/github.com/docker/compose-on-kubernetes/api/labels/labels.go create mode 100644 cli/vendor/github.com/docker/licensing/LICENSE create mode 100644 cli/vendor/github.com/docker/licensing/README.md create mode 100644 cli/vendor/github.com/docker/licensing/client.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/errors/error.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/errors/herror.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/errors/stack.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/errors/wrap.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/go-auth/README.md create mode 100644 cli/vendor/github.com/docker/licensing/lib/go-auth/identity/identity.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/go-auth/jwt/context.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/go-auth/jwt/jwt.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/go-clientlib/README.md create mode 100644 cli/vendor/github.com/docker/licensing/lib/go-clientlib/client.go create mode 100644 cli/vendor/github.com/docker/licensing/lib/go-validation/README.md create mode 100644 cli/vendor/github.com/docker/licensing/lib/go-validation/validation.go create mode 100644 cli/vendor/github.com/docker/licensing/license.go create mode 100644 cli/vendor/github.com/docker/licensing/model/license.go create mode 100644 cli/vendor/github.com/docker/licensing/model/subscriptions.go create mode 100644 cli/vendor/github.com/docker/licensing/model/users.go create mode 100644 cli/vendor/github.com/docker/licensing/storage.go create mode 100644 cli/vendor/github.com/docker/licensing/subscriptions.go create mode 100644 cli/vendor/github.com/docker/licensing/types/types.go create mode 100644 
cli/vendor/github.com/docker/licensing/users.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/README.rst create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/README.md create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/protos/command_line.proto create mode 100644 cli/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/protos/store.proto create mode 100644 cli/vendor/github.com/jaguilar/vt100/LICENSE create mode 100644 cli/vendor/github.com/jaguilar/vt100/README.md create mode 100644 cli/vendor/github.com/jaguilar/vt100/command.go create mode 100644 cli/vendor/github.com/jaguilar/vt100/go.mod create mode 100644 cli/vendor/github.com/jaguilar/vt100/scanner.go create mode 100644 cli/vendor/github.com/jaguilar/vt100/vt100.go create mode 100644 cli/vendor/github.com/moby/buildkit/LICENSE create mode 100644 cli/vendor/github.com/moby/buildkit/README.md create mode 100644 cli/vendor/github.com/moby/buildkit/api/services/control/control.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/api/services/control/control.proto create mode 100644 
cli/vendor/github.com/moby/buildkit/api/services/control/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/api/types/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/api/types/worker.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/api/types/worker.proto create mode 100644 cli/vendor/github.com/moby/buildkit/client/build.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/buildid/metadata.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/client.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/client_unix.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/client_windows.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/connhelper/connhelper.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/diskusage.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/exporters.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/filter.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/graph.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/exec.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/fileop.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/marshal.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/meta.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/resolver.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/source.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/llb/state.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/prune.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/solve.go create mode 100644 cli/vendor/github.com/moby/buildkit/client/workers.go create mode 100644 cli/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go create mode 100644 
cli/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go create mode 100644 cli/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go create mode 100644 cli/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go create mode 100644 cli/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto create mode 100644 cli/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/go.mod create mode 100644 cli/vendor/github.com/moby/buildkit/identity/randomid.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/auth.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/auth.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/auth.proto create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/auth/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/content/attachable.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/content/caller.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/context.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/filesync.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/filesync.proto create mode 100644 cli/vendor/github.com/moby/buildkit/session/filesync/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/grpc.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/grpchijack/dial.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go create mode 100644 
cli/vendor/github.com/moby/buildkit/session/manager.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/secrets/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/secrets/secrets.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/secrets/secrets.proto create mode 100644 cli/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/file.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/session.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/sshforward/copy.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/sshforward/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/sshforward/ssh.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto create mode 100644 cli/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/attr.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/caps.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/const.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/ops.proto create mode 100644 cli/vendor/github.com/moby/buildkit/solver/pb/platform.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/apicaps/caps.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto create mode 100644 
cli/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/entitlements/security_linux.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/progress/progressui/display.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/path_unix.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/path_windows.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go create mode 100644 cli/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go create mode 100644 cli/vendor/github.com/spf13/cobra/LICENSE.txt create mode 100644 cli/vendor/github.com/spf13/cobra/README.md create mode 100644 cli/vendor/github.com/spf13/cobra/args.go create mode 100644 cli/vendor/github.com/spf13/cobra/bash_completions.go create mode 100644 cli/vendor/github.com/spf13/cobra/cobra.go create mode 100644 cli/vendor/github.com/spf13/cobra/command.go create mode 100644 cli/vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 cli/vendor/github.com/spf13/cobra/command_win.go create mode 100644 cli/vendor/github.com/spf13/cobra/doc/man_docs.go create mode 100644 cli/vendor/github.com/spf13/cobra/doc/md_docs.go create mode 100644 
cli/vendor/github.com/spf13/cobra/doc/rest_docs.go create mode 100644 cli/vendor/github.com/spf13/cobra/doc/util.go create mode 100644 cli/vendor/github.com/spf13/cobra/doc/yaml_docs.go create mode 100644 cli/vendor/github.com/spf13/cobra/zsh_completions.go create mode 100644 cli/vendor/github.com/spf13/pflag/LICENSE create mode 100644 cli/vendor/github.com/spf13/pflag/README.md create mode 100644 cli/vendor/github.com/spf13/pflag/bool.go create mode 100644 cli/vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 cli/vendor/github.com/spf13/pflag/bytes.go create mode 100644 cli/vendor/github.com/spf13/pflag/count.go create mode 100644 cli/vendor/github.com/spf13/pflag/duration.go create mode 100644 cli/vendor/github.com/spf13/pflag/duration_slice.go create mode 100644 cli/vendor/github.com/spf13/pflag/flag.go create mode 100644 cli/vendor/github.com/spf13/pflag/float32.go create mode 100644 cli/vendor/github.com/spf13/pflag/float64.go create mode 100644 cli/vendor/github.com/spf13/pflag/golangflag.go create mode 100644 cli/vendor/github.com/spf13/pflag/int.go create mode 100644 cli/vendor/github.com/spf13/pflag/int16.go create mode 100644 cli/vendor/github.com/spf13/pflag/int32.go create mode 100644 cli/vendor/github.com/spf13/pflag/int64.go create mode 100644 cli/vendor/github.com/spf13/pflag/int8.go create mode 100644 cli/vendor/github.com/spf13/pflag/int_slice.go create mode 100644 cli/vendor/github.com/spf13/pflag/ip.go create mode 100644 cli/vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 cli/vendor/github.com/spf13/pflag/ipmask.go create mode 100644 cli/vendor/github.com/spf13/pflag/ipnet.go create mode 100644 cli/vendor/github.com/spf13/pflag/ipnet_slice.go create mode 100644 cli/vendor/github.com/spf13/pflag/string.go create mode 100644 cli/vendor/github.com/spf13/pflag/string_array.go create mode 100644 cli/vendor/github.com/spf13/pflag/string_slice.go create mode 100644 cli/vendor/github.com/spf13/pflag/string_to_int.go create mode 
100644 cli/vendor/github.com/spf13/pflag/string_to_string.go create mode 100644 cli/vendor/github.com/spf13/pflag/uint.go create mode 100644 cli/vendor/github.com/spf13/pflag/uint16.go create mode 100644 cli/vendor/github.com/spf13/pflag/uint32.go create mode 100644 cli/vendor/github.com/spf13/pflag/uint64.go create mode 100644 cli/vendor/github.com/spf13/pflag/uint8.go create mode 100644 cli/vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/LICENSE create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diff.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diff_containerd.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diskwriter.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/followlinks.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/fs.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/go.mod create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/hardlinks.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/readme.md create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/receive.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/send.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/stat.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/stat_unix.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/stat_windows.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/tarwriter.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/types/generate.go create mode 100644 
cli/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/types/stat.proto create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/types/wire.proto create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/validator.go create mode 100644 cli/vendor/github.com/tonistiigi/fsutil/walker.go create mode 100644 cli/vendor/github.com/tonistiigi/units/LICENSE create mode 100644 cli/vendor/github.com/tonistiigi/units/bytes.go create mode 100644 cli/vendor/github.com/tonistiigi/units/readme.md create mode 100644 cli/vendor/k8s.io/api/LICENSE create mode 100644 cli/vendor/k8s.io/api/README.md create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/apps/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/apps/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/apps/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/apps/v1/register.go create mode 100644 cli/vendor/k8s.io/api/apps/v1/types.go create mode 100644 cli/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/generated.proto 
create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/doc.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/generated.proto create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/register.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/types.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/auditregistration/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/auditregistration/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/authentication/v1/register.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/types.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/doc.go create mode 100644 
cli/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/authorization/v1/register.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/types.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/register.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/types.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go create mode 100644 
cli/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/register.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/types.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta2/doc.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta2/register.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta2/types.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/batch/v1/register.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/types.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/types.go create mode 100644 
cli/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/coordination/v1/register.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1/types.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1beta1/generated.proto create mode 100644 
cli/vendor/k8s.io/api/coordination/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/core/v1/annotation_key_constants.go create mode 100644 cli/vendor/k8s.io/api/core/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/core/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/core/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/core/v1/objectreference.go create mode 100644 cli/vendor/k8s.io/api/core/v1/register.go create mode 100644 cli/vendor/k8s.io/api/core/v1/resource.go create mode 100644 cli/vendor/k8s.io/api/core/v1/taint.go create mode 100644 cli/vendor/k8s.io/api/core/v1/toleration.go create mode 100644 cli/vendor/k8s.io/api/core/v1/types.go create mode 100644 cli/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/core/v1/well_known_labels.go create mode 100644 cli/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/events/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/events/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/events/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/events/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/events/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/register.go create mode 100644 
cli/vendor/k8s.io/api/extensions/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/networking/v1/register.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/types.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/networking/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/networking/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/networking/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/networking/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/networking/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/node/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/node/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/node/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/node/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/node/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/node/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/node/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/node/v1beta1/generated.proto create mode 100644 
cli/vendor/k8s.io/api/node/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/node/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/rbac/v1/register.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/types.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/generated.proto create mode 100644 
cli/vendor/k8s.io/api/rbac/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/scheduling/v1/register.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1/types.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/scheduling/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go 
create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/doc.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/generated.proto create mode 100644 cli/vendor/k8s.io/api/storage/v1/register.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/types.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/storage/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/storage/v1alpha1/generated.proto create mode 100644 cli/vendor/k8s.io/api/storage/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/api/storage/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/LICENSE create mode 100644 
cli/vendor/k8s.io/apimachinery/README.md create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/help.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/math.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go create mode 100644 
cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go create mode 100644 
cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/converter.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/helper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/fields/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/fields/fields.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/fields/requirements.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/fields/selector.go 
create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/labels/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/labels/labels.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/labels/selector.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/codec.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/converter.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/error.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/extension.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/helper.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/register.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go create mode 100644 
cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/selection/operator.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/nodename.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/patch.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/types/uid.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go create mode 100644 
cli/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/json/json.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/http.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/interface.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/net/util.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/int.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/sets/string.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go create 
mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/version/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/version/helpers.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/version/types.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/doc.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/filter.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/mux.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/watch.go create mode 100644 cli/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go create mode 100644 cli/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go create mode 100644 cli/vendor/k8s.io/client-go/LICENSE create mode 100644 cli/vendor/k8s.io/client-go/README.md create mode 100644 cli/vendor/k8s.io/client-go/discovery/discovery_client.go create mode 100644 cli/vendor/k8s.io/client-go/discovery/doc.go create mode 100644 cli/vendor/k8s.io/client-go/discovery/fake/discovery.go create mode 100644 cli/vendor/k8s.io/client-go/discovery/helper.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/clientset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/fake/register.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/import.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/scheme/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/scheme/register.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go create 
mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go create mode 
100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go create mode 100644 
cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go create mode 100644 cli/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go create mode 100644 
cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/version/base.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/version/doc.go create mode 100644 cli/vendor/k8s.io/client-go/pkg/version/version.go create mode 100644 cli/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go create mode 100644 cli/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go create mode 100644 cli/vendor/k8s.io/client-go/rest/client.go create mode 100644 cli/vendor/k8s.io/client-go/rest/config.go create mode 100644 cli/vendor/k8s.io/client-go/rest/plugin.go create mode 100644 cli/vendor/k8s.io/client-go/rest/request.go create mode 100644 cli/vendor/k8s.io/client-go/rest/transport.go create mode 100644 cli/vendor/k8s.io/client-go/rest/url_utils.go create mode 100644 cli/vendor/k8s.io/client-go/rest/urlbackoff.go create mode 100644 cli/vendor/k8s.io/client-go/rest/watch/decoder.go create mode 100644 cli/vendor/k8s.io/client-go/rest/watch/encoder.go create mode 100644 cli/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/client-go/testing/actions.go create mode 100644 cli/vendor/k8s.io/client-go/testing/fake.go create mode 100644 cli/vendor/k8s.io/client-go/testing/fixture.go create mode 100644 cli/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go create mode 100644 cli/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go create mode 100644 cli/vendor/k8s.io/client-go/tools/auth/clientauth.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/controller.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/delta_fifo.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/doc.go create 
mode 100644 cli/vendor/k8s.io/client-go/tools/cache/expiration_cache.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/fifo.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/heap.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/index.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/listers.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/listwatch.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/mutation_cache.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/mutation_detector.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/reflector.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/shared_informer.go create mode 100755 cli/vendor/k8s.io/client-go/tools/cache/store.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go create mode 100644 cli/vendor/k8s.io/client-go/tools/cache/undelta_store.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/register.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/types.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go 
create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/client_config.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/config.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/doc.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/flag.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/helpers.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/loader.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/overrides.go create mode 100644 cli/vendor/k8s.io/client-go/tools/clientcmd/validation.go create mode 100644 cli/vendor/k8s.io/client-go/tools/metrics/metrics.go create mode 100644 cli/vendor/k8s.io/client-go/tools/pager/pager.go create mode 100644 cli/vendor/k8s.io/client-go/tools/reference/ref.go create mode 100644 cli/vendor/k8s.io/client-go/transport/cache.go create mode 100644 cli/vendor/k8s.io/client-go/transport/config.go create mode 100644 cli/vendor/k8s.io/client-go/transport/round_trippers.go create mode 100644 cli/vendor/k8s.io/client-go/transport/token_source.go create mode 100644 cli/vendor/k8s.io/client-go/transport/transport.go create mode 100644 cli/vendor/k8s.io/client-go/util/cert/cert.go create mode 100644 cli/vendor/k8s.io/client-go/util/cert/csr.go create mode 100644 cli/vendor/k8s.io/client-go/util/cert/io.go create mode 100644 cli/vendor/k8s.io/client-go/util/cert/pem.go create mode 100644 cli/vendor/k8s.io/client-go/util/connrotation/connrotation.go create mode 100644 cli/vendor/k8s.io/client-go/util/flowcontrol/backoff.go create mode 100644 cli/vendor/k8s.io/client-go/util/flowcontrol/throttle.go create mode 100644 cli/vendor/k8s.io/client-go/util/homedir/homedir.go create mode 100644 
cli/vendor/k8s.io/client-go/util/jsonpath/doc.go create mode 100644 cli/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go create mode 100644 cli/vendor/k8s.io/client-go/util/jsonpath/node.go create mode 100644 cli/vendor/k8s.io/client-go/util/jsonpath/parser.go create mode 100644 cli/vendor/k8s.io/client-go/util/keyutil/key.go create mode 100644 cli/vendor/k8s.io/client-go/util/retry/util.go create mode 100644 cli/vendor/k8s.io/klog/LICENSE create mode 100644 cli/vendor/k8s.io/klog/README.md create mode 100644 cli/vendor/k8s.io/klog/klog.go create mode 100644 cli/vendor/k8s.io/klog/klog_file.go create mode 100644 cli/vendor/k8s.io/kube-openapi/LICENSE create mode 100644 cli/vendor/k8s.io/kube-openapi/README.md create mode 100644 cli/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go create mode 100644 cli/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go create mode 100644 cli/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go create mode 100644 cli/vendor/k8s.io/kubernetes/LICENSE create mode 100644 cli/vendor/k8s.io/kubernetes/README.md create mode 100644 cli/vendor/k8s.io/kubernetes/build/README.md create mode 100644 cli/vendor/k8s.io/kubernetes/build/pause/orphan.c create mode 100644 cli/vendor/k8s.io/kubernetes/build/pause/pause.c create mode 100644 cli/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go create mode 100644 cli/vendor/k8s.io/kubernetes/third_party/protobuf/google/protobuf/compiler/plugin.proto create mode 100644 cli/vendor/k8s.io/kubernetes/third_party/protobuf/google/protobuf/descriptor.proto create mode 100644 cli/vendor/k8s.io/utils/LICENSE create mode 100644 cli/vendor/k8s.io/utils/README.md create mode 100644 cli/vendor/k8s.io/utils/buffer/ring_growing.go create mode 100644 cli/vendor/k8s.io/utils/integer/integer.go create mode 100644 cli/vendor/k8s.io/utils/trace/trace.go create mode 100644 cli/vendor/sigs.k8s.io/yaml/LICENSE create mode 100644 cli/vendor/sigs.k8s.io/yaml/README.md create mode 100644 cli/vendor/sigs.k8s.io/yaml/fields.go 
create mode 100644 cli/vendor/sigs.k8s.io/yaml/yaml.go create mode 100644 cli/vendor/sigs.k8s.io/yaml/yaml_go110.go create mode 100644 cli/vendor/vbom.ml/util/LICENSE create mode 100644 cli/vendor/vbom.ml/util/README.md create mode 100644 cli/vendor/vbom.ml/util/sortorder/README.md create mode 100644 cli/vendor/vbom.ml/util/sortorder/doc.go create mode 100644 cli/vendor/vbom.ml/util/sortorder/natsort.go create mode 100644 components.conf create mode 100644 engine/.DEREK.yml create mode 100644 engine/.dockerignore create mode 100644 engine/.mailmap create mode 100644 engine/AUTHORS create mode 100644 engine/CHANGELOG.md create mode 100644 engine/CONTRIBUTING.md create mode 100644 engine/Dockerfile create mode 100644 engine/Dockerfile.e2e create mode 100644 engine/Dockerfile.simple create mode 100644 engine/Dockerfile.windows create mode 100644 engine/Jenkinsfile create mode 100644 engine/LICENSE create mode 100644 engine/MAINTAINERS create mode 100644 engine/Makefile create mode 100644 engine/NOTICE create mode 100644 engine/README.md create mode 100644 engine/ROADMAP.md create mode 100644 engine/TESTING.md create mode 100644 engine/VENDORING.md create mode 100644 engine/api/README.md create mode 100644 engine/api/common.go create mode 100644 engine/api/common_unix.go create mode 100644 engine/api/common_windows.go create mode 100644 engine/api/server/backend/build/backend.go create mode 100644 engine/api/server/backend/build/tag.go create mode 100644 engine/api/server/httputils/decoder.go create mode 100644 engine/api/server/httputils/errors_deprecated.go create mode 100644 engine/api/server/httputils/form.go create mode 100644 engine/api/server/httputils/form_test.go create mode 100644 engine/api/server/httputils/httputils.go create mode 100644 engine/api/server/httputils/httputils_test.go create mode 100644 engine/api/server/httputils/httputils_write_json.go create mode 100644 engine/api/server/httputils/write_log_stream.go create mode 100644 
engine/api/server/middleware.go create mode 100644 engine/api/server/middleware/cors.go create mode 100644 engine/api/server/middleware/debug.go create mode 100644 engine/api/server/middleware/debug_test.go create mode 100644 engine/api/server/middleware/experimental.go create mode 100644 engine/api/server/middleware/middleware.go create mode 100644 engine/api/server/middleware/version.go create mode 100644 engine/api/server/middleware/version_test.go create mode 100644 engine/api/server/router/build/backend.go create mode 100644 engine/api/server/router/build/build.go create mode 100644 engine/api/server/router/build/build_routes.go create mode 100644 engine/api/server/router/checkpoint/backend.go create mode 100644 engine/api/server/router/checkpoint/checkpoint.go create mode 100644 engine/api/server/router/checkpoint/checkpoint_routes.go create mode 100644 engine/api/server/router/container/backend.go create mode 100644 engine/api/server/router/container/container.go create mode 100644 engine/api/server/router/container/container_routes.go create mode 100644 engine/api/server/router/container/copy.go create mode 100644 engine/api/server/router/container/exec.go create mode 100644 engine/api/server/router/container/inspect.go create mode 100644 engine/api/server/router/debug/debug.go create mode 100644 engine/api/server/router/debug/debug_routes.go create mode 100644 engine/api/server/router/distribution/backend.go create mode 100644 engine/api/server/router/distribution/distribution.go create mode 100644 engine/api/server/router/distribution/distribution_routes.go create mode 100644 engine/api/server/router/experimental.go create mode 100644 engine/api/server/router/grpc/backend.go create mode 100644 engine/api/server/router/grpc/grpc.go create mode 100644 engine/api/server/router/grpc/grpc_routes.go create mode 100644 engine/api/server/router/image/backend.go create mode 100644 engine/api/server/router/image/image.go create mode 100644 
engine/api/server/router/image/image_routes.go create mode 100644 engine/api/server/router/local.go create mode 100644 engine/api/server/router/network/backend.go create mode 100644 engine/api/server/router/network/filter.go create mode 100644 engine/api/server/router/network/network.go create mode 100644 engine/api/server/router/network/network_routes.go create mode 100644 engine/api/server/router/plugin/backend.go create mode 100644 engine/api/server/router/plugin/plugin.go create mode 100644 engine/api/server/router/plugin/plugin_routes.go create mode 100644 engine/api/server/router/router.go create mode 100644 engine/api/server/router/session/backend.go create mode 100644 engine/api/server/router/session/session.go create mode 100644 engine/api/server/router/session/session_routes.go create mode 100644 engine/api/server/router/swarm/backend.go create mode 100644 engine/api/server/router/swarm/cluster.go create mode 100644 engine/api/server/router/swarm/cluster_routes.go create mode 100644 engine/api/server/router/swarm/helpers.go create mode 100644 engine/api/server/router/swarm/helpers_test.go create mode 100644 engine/api/server/router/system/backend.go create mode 100644 engine/api/server/router/system/system.go create mode 100644 engine/api/server/router/system/system_routes.go create mode 100644 engine/api/server/router/volume/backend.go create mode 100644 engine/api/server/router/volume/volume.go create mode 100644 engine/api/server/router/volume/volume_routes.go create mode 100644 engine/api/server/router_swapper.go create mode 100644 engine/api/server/server.go create mode 100644 engine/api/server/server_test.go create mode 100644 engine/api/swagger-gen.yaml create mode 100644 engine/api/swagger.yaml create mode 100644 engine/api/templates/server/operation.gotmpl create mode 100644 engine/api/types/auth.go create mode 100644 engine/api/types/backend/backend.go create mode 100644 engine/api/types/backend/build.go create mode 100644 
engine/api/types/blkiodev/blkio.go create mode 100644 engine/api/types/client.go create mode 100644 engine/api/types/configs.go create mode 100644 engine/api/types/container/config.go create mode 100644 engine/api/types/container/container_changes.go create mode 100644 engine/api/types/container/container_create.go create mode 100644 engine/api/types/container/container_top.go create mode 100644 engine/api/types/container/container_update.go create mode 100644 engine/api/types/container/container_wait.go create mode 100644 engine/api/types/container/host_config.go create mode 100644 engine/api/types/container/hostconfig_unix.go create mode 100644 engine/api/types/container/hostconfig_windows.go create mode 100644 engine/api/types/container/waitcondition.go create mode 100644 engine/api/types/error_response.go create mode 100644 engine/api/types/events/events.go create mode 100644 engine/api/types/filters/example_test.go create mode 100644 engine/api/types/filters/parse.go create mode 100644 engine/api/types/filters/parse_test.go create mode 100644 engine/api/types/graph_driver_data.go create mode 100644 engine/api/types/id_response.go create mode 100644 engine/api/types/image/image_history.go create mode 100644 engine/api/types/image_delete_response_item.go create mode 100644 engine/api/types/image_summary.go create mode 100644 engine/api/types/mount/mount.go create mode 100644 engine/api/types/network/network.go create mode 100644 engine/api/types/plugin.go create mode 100644 engine/api/types/plugin_device.go create mode 100644 engine/api/types/plugin_env.go create mode 100644 engine/api/types/plugin_interface_type.go create mode 100644 engine/api/types/plugin_mount.go create mode 100644 engine/api/types/plugin_responses.go create mode 100644 engine/api/types/plugins/logdriver/entry.pb.go create mode 100644 engine/api/types/plugins/logdriver/entry.proto create mode 100644 engine/api/types/plugins/logdriver/gen.go create mode 100644 
engine/api/types/plugins/logdriver/io.go create mode 100644 engine/api/types/port.go create mode 100644 engine/api/types/registry/authenticate.go create mode 100644 engine/api/types/registry/registry.go create mode 100644 engine/api/types/seccomp.go create mode 100644 engine/api/types/service_update_response.go create mode 100644 engine/api/types/stats.go create mode 100644 engine/api/types/strslice/strslice.go create mode 100644 engine/api/types/strslice/strslice_test.go create mode 100644 engine/api/types/swarm/common.go create mode 100644 engine/api/types/swarm/config.go create mode 100644 engine/api/types/swarm/container.go create mode 100644 engine/api/types/swarm/network.go create mode 100644 engine/api/types/swarm/node.go create mode 100644 engine/api/types/swarm/runtime.go create mode 100644 engine/api/types/swarm/runtime/gen.go create mode 100644 engine/api/types/swarm/runtime/plugin.pb.go create mode 100644 engine/api/types/swarm/runtime/plugin.proto create mode 100644 engine/api/types/swarm/secret.go create mode 100644 engine/api/types/swarm/service.go create mode 100644 engine/api/types/swarm/swarm.go create mode 100644 engine/api/types/swarm/task.go create mode 100644 engine/api/types/time/duration_convert.go create mode 100644 engine/api/types/time/duration_convert_test.go create mode 100644 engine/api/types/time/timestamp.go create mode 100644 engine/api/types/time/timestamp_test.go create mode 100644 engine/api/types/types.go create mode 100644 engine/api/types/versions/README.md create mode 100644 engine/api/types/versions/compare.go create mode 100644 engine/api/types/versions/compare_test.go create mode 100644 engine/api/types/versions/v1p19/types.go create mode 100644 engine/api/types/versions/v1p20/types.go create mode 100644 engine/api/types/volume.go create mode 100644 engine/api/types/volume/volume_create.go create mode 100644 engine/api/types/volume/volume_list.go create mode 100644 
engine/builder/builder-next/adapters/containerimage/pull.go create mode 100644 engine/builder/builder-next/adapters/localinlinecache/inlinecache.go create mode 100644 engine/builder/builder-next/adapters/snapshot/layer.go create mode 100644 engine/builder/builder-next/adapters/snapshot/snapshot.go create mode 100644 engine/builder/builder-next/builder.go create mode 100644 engine/builder/builder-next/controller.go create mode 100644 engine/builder/builder-next/executor_unix.go create mode 100644 engine/builder/builder-next/executor_windows.go create mode 100644 engine/builder/builder-next/exporter/export.go create mode 100644 engine/builder/builder-next/exporter/writer.go create mode 100644 engine/builder/builder-next/imagerefchecker/checker.go create mode 100644 engine/builder/builder-next/reqbodyhandler.go create mode 100644 engine/builder/builder-next/worker/gc.go create mode 100644 engine/builder/builder-next/worker/gc_unix.go create mode 100644 engine/builder/builder-next/worker/gc_windows.go create mode 100644 engine/builder/builder-next/worker/worker.go create mode 100644 engine/builder/builder.go create mode 100644 engine/builder/dockerfile/buildargs.go create mode 100644 engine/builder/dockerfile/buildargs_test.go create mode 100644 engine/builder/dockerfile/builder.go create mode 100644 engine/builder/dockerfile/builder_unix.go create mode 100644 engine/builder/dockerfile/builder_windows.go create mode 100644 engine/builder/dockerfile/clientsession.go create mode 100644 engine/builder/dockerfile/containerbackend.go create mode 100644 engine/builder/dockerfile/copy.go create mode 100644 engine/builder/dockerfile/copy_test.go create mode 100644 engine/builder/dockerfile/copy_unix.go create mode 100644 engine/builder/dockerfile/copy_windows.go create mode 100644 engine/builder/dockerfile/dispatchers.go create mode 100644 engine/builder/dockerfile/dispatchers_test.go create mode 100644 engine/builder/dockerfile/dispatchers_unix.go create mode 100644 
engine/builder/dockerfile/dispatchers_unix_test.go create mode 100644 engine/builder/dockerfile/dispatchers_windows.go create mode 100644 engine/builder/dockerfile/dispatchers_windows_test.go create mode 100644 engine/builder/dockerfile/evaluator.go create mode 100644 engine/builder/dockerfile/evaluator_test.go create mode 100644 engine/builder/dockerfile/imagecontext.go create mode 100644 engine/builder/dockerfile/imageprobe.go create mode 100644 engine/builder/dockerfile/internals.go create mode 100644 engine/builder/dockerfile/internals_linux.go create mode 100644 engine/builder/dockerfile/internals_linux_test.go create mode 100644 engine/builder/dockerfile/internals_test.go create mode 100644 engine/builder/dockerfile/internals_windows.go create mode 100644 engine/builder/dockerfile/internals_windows_test.go create mode 100644 engine/builder/dockerfile/metrics.go create mode 100644 engine/builder/dockerfile/mockbackend_test.go create mode 100644 engine/builder/dockerfile/utils_test.go create mode 100644 engine/builder/dockerignore/dockerignore.go create mode 100644 engine/builder/dockerignore/dockerignore_test.go create mode 100644 engine/builder/fscache/fscache.go create mode 100644 engine/builder/fscache/fscache_test.go create mode 100644 engine/builder/fscache/naivedriver.go create mode 100644 engine/builder/remotecontext/archive.go create mode 100644 engine/builder/remotecontext/detect.go create mode 100644 engine/builder/remotecontext/detect_test.go create mode 100644 engine/builder/remotecontext/filehash.go create mode 100644 engine/builder/remotecontext/generate.go create mode 100644 engine/builder/remotecontext/git.go create mode 100644 engine/builder/remotecontext/git/gitutils.go create mode 100644 engine/builder/remotecontext/git/gitutils_test.go create mode 100644 engine/builder/remotecontext/lazycontext.go create mode 100644 engine/builder/remotecontext/mimetype.go create mode 100644 engine/builder/remotecontext/mimetype_test.go create mode 100644 
engine/builder/remotecontext/remote.go create mode 100644 engine/builder/remotecontext/remote_test.go create mode 100644 engine/builder/remotecontext/tarsum.go create mode 100644 engine/builder/remotecontext/tarsum.pb.go create mode 100644 engine/builder/remotecontext/tarsum.proto create mode 100644 engine/builder/remotecontext/tarsum_test.go create mode 100644 engine/builder/remotecontext/utils_test.go create mode 100644 engine/cli/cobra.go create mode 100644 engine/cli/config/configdir.go create mode 100644 engine/cli/debug/debug.go create mode 100644 engine/cli/debug/debug_test.go create mode 100644 engine/cli/error.go create mode 100644 engine/cli/required.go create mode 100644 engine/client/README.md create mode 100644 engine/client/build_cancel.go create mode 100644 engine/client/build_prune.go create mode 100644 engine/client/checkpoint_create.go create mode 100644 engine/client/checkpoint_create_test.go create mode 100644 engine/client/checkpoint_delete.go create mode 100644 engine/client/checkpoint_delete_test.go create mode 100644 engine/client/checkpoint_list.go create mode 100644 engine/client/checkpoint_list_test.go create mode 100644 engine/client/client.go create mode 100644 engine/client/client_deprecated.go create mode 100644 engine/client/client_mock_test.go create mode 100644 engine/client/client_test.go create mode 100644 engine/client/client_unix.go create mode 100644 engine/client/client_windows.go create mode 100644 engine/client/config_create.go create mode 100644 engine/client/config_create_test.go create mode 100644 engine/client/config_inspect.go create mode 100644 engine/client/config_inspect_test.go create mode 100644 engine/client/config_list.go create mode 100644 engine/client/config_list_test.go create mode 100644 engine/client/config_remove.go create mode 100644 engine/client/config_remove_test.go create mode 100644 engine/client/config_update.go create mode 100644 engine/client/config_update_test.go create mode 100644 
engine/client/container_attach.go create mode 100644 engine/client/container_commit.go create mode 100644 engine/client/container_commit_test.go create mode 100644 engine/client/container_copy.go create mode 100644 engine/client/container_copy_test.go create mode 100644 engine/client/container_create.go create mode 100644 engine/client/container_create_test.go create mode 100644 engine/client/container_diff.go create mode 100644 engine/client/container_diff_test.go create mode 100644 engine/client/container_exec.go create mode 100644 engine/client/container_exec_test.go create mode 100644 engine/client/container_export.go create mode 100644 engine/client/container_export_test.go create mode 100644 engine/client/container_inspect.go create mode 100644 engine/client/container_inspect_test.go create mode 100644 engine/client/container_kill.go create mode 100644 engine/client/container_kill_test.go create mode 100644 engine/client/container_list.go create mode 100644 engine/client/container_list_test.go create mode 100644 engine/client/container_logs.go create mode 100644 engine/client/container_logs_test.go create mode 100644 engine/client/container_pause.go create mode 100644 engine/client/container_pause_test.go create mode 100644 engine/client/container_prune.go create mode 100644 engine/client/container_prune_test.go create mode 100644 engine/client/container_remove.go create mode 100644 engine/client/container_remove_test.go create mode 100644 engine/client/container_rename.go create mode 100644 engine/client/container_rename_test.go create mode 100644 engine/client/container_resize.go create mode 100644 engine/client/container_resize_test.go create mode 100644 engine/client/container_restart.go create mode 100644 engine/client/container_restart_test.go create mode 100644 engine/client/container_start.go create mode 100644 engine/client/container_start_test.go create mode 100644 engine/client/container_stats.go create mode 100644 
engine/client/container_stats_test.go create mode 100644 engine/client/container_stop.go create mode 100644 engine/client/container_stop_test.go create mode 100644 engine/client/container_top.go create mode 100644 engine/client/container_top_test.go create mode 100644 engine/client/container_unpause.go create mode 100644 engine/client/container_unpause_test.go create mode 100644 engine/client/container_update.go create mode 100644 engine/client/container_update_test.go create mode 100644 engine/client/container_wait.go create mode 100644 engine/client/container_wait_test.go create mode 100644 engine/client/disk_usage.go create mode 100644 engine/client/disk_usage_test.go create mode 100644 engine/client/distribution_inspect.go create mode 100644 engine/client/distribution_inspect_test.go create mode 100644 engine/client/errors.go create mode 100644 engine/client/events.go create mode 100644 engine/client/events_test.go create mode 100644 engine/client/hijack.go create mode 100644 engine/client/hijack_test.go create mode 100644 engine/client/image_build.go create mode 100644 engine/client/image_build_test.go create mode 100644 engine/client/image_create.go create mode 100644 engine/client/image_create_test.go create mode 100644 engine/client/image_history.go create mode 100644 engine/client/image_history_test.go create mode 100644 engine/client/image_import.go create mode 100644 engine/client/image_import_test.go create mode 100644 engine/client/image_inspect.go create mode 100644 engine/client/image_inspect_test.go create mode 100644 engine/client/image_list.go create mode 100644 engine/client/image_list_test.go create mode 100644 engine/client/image_load.go create mode 100644 engine/client/image_load_test.go create mode 100644 engine/client/image_prune.go create mode 100644 engine/client/image_prune_test.go create mode 100644 engine/client/image_pull.go create mode 100644 engine/client/image_pull_test.go create mode 100644 engine/client/image_push.go create mode 
100644 engine/client/image_push_test.go create mode 100644 engine/client/image_remove.go create mode 100644 engine/client/image_remove_test.go create mode 100644 engine/client/image_save.go create mode 100644 engine/client/image_save_test.go create mode 100644 engine/client/image_search.go create mode 100644 engine/client/image_search_test.go create mode 100644 engine/client/image_tag.go create mode 100644 engine/client/image_tag_test.go create mode 100644 engine/client/info.go create mode 100644 engine/client/info_test.go create mode 100644 engine/client/interface.go create mode 100644 engine/client/interface_experimental.go create mode 100644 engine/client/interface_stable.go create mode 100644 engine/client/login.go create mode 100644 engine/client/network_connect.go create mode 100644 engine/client/network_connect_test.go create mode 100644 engine/client/network_create.go create mode 100644 engine/client/network_create_test.go create mode 100644 engine/client/network_disconnect.go create mode 100644 engine/client/network_disconnect_test.go create mode 100644 engine/client/network_inspect.go create mode 100644 engine/client/network_inspect_test.go create mode 100644 engine/client/network_list.go create mode 100644 engine/client/network_list_test.go create mode 100644 engine/client/network_prune.go create mode 100644 engine/client/network_prune_test.go create mode 100644 engine/client/network_remove.go create mode 100644 engine/client/network_remove_test.go create mode 100644 engine/client/node_inspect.go create mode 100644 engine/client/node_inspect_test.go create mode 100644 engine/client/node_list.go create mode 100644 engine/client/node_list_test.go create mode 100644 engine/client/node_remove.go create mode 100644 engine/client/node_remove_test.go create mode 100644 engine/client/node_update.go create mode 100644 engine/client/node_update_test.go create mode 100644 engine/client/options.go create mode 100644 engine/client/options_test.go create mode 100644 
engine/client/ping.go create mode 100644 engine/client/ping_test.go create mode 100644 engine/client/plugin_create.go create mode 100644 engine/client/plugin_disable.go create mode 100644 engine/client/plugin_disable_test.go create mode 100644 engine/client/plugin_enable.go create mode 100644 engine/client/plugin_enable_test.go create mode 100644 engine/client/plugin_inspect.go create mode 100644 engine/client/plugin_inspect_test.go create mode 100644 engine/client/plugin_install.go create mode 100644 engine/client/plugin_list.go create mode 100644 engine/client/plugin_list_test.go create mode 100644 engine/client/plugin_push.go create mode 100644 engine/client/plugin_push_test.go create mode 100644 engine/client/plugin_remove.go create mode 100644 engine/client/plugin_remove_test.go create mode 100644 engine/client/plugin_set.go create mode 100644 engine/client/plugin_set_test.go create mode 100644 engine/client/plugin_upgrade.go create mode 100644 engine/client/request.go create mode 100644 engine/client/request_test.go create mode 100644 engine/client/secret_create.go create mode 100644 engine/client/secret_create_test.go create mode 100644 engine/client/secret_inspect.go create mode 100644 engine/client/secret_inspect_test.go create mode 100644 engine/client/secret_list.go create mode 100644 engine/client/secret_list_test.go create mode 100644 engine/client/secret_remove.go create mode 100644 engine/client/secret_remove_test.go create mode 100644 engine/client/secret_update.go create mode 100644 engine/client/secret_update_test.go create mode 100644 engine/client/service_create.go create mode 100644 engine/client/service_create_test.go create mode 100644 engine/client/service_inspect.go create mode 100644 engine/client/service_inspect_test.go create mode 100644 engine/client/service_list.go create mode 100644 engine/client/service_list_test.go create mode 100644 engine/client/service_logs.go create mode 100644 engine/client/service_logs_test.go create mode 
100644 engine/client/service_remove.go create mode 100644 engine/client/service_remove_test.go create mode 100644 engine/client/service_update.go create mode 100644 engine/client/service_update_test.go create mode 100644 engine/client/swarm_get_unlock_key.go create mode 100644 engine/client/swarm_get_unlock_key_test.go create mode 100644 engine/client/swarm_init.go create mode 100644 engine/client/swarm_init_test.go create mode 100644 engine/client/swarm_inspect.go create mode 100644 engine/client/swarm_inspect_test.go create mode 100644 engine/client/swarm_join.go create mode 100644 engine/client/swarm_join_test.go create mode 100644 engine/client/swarm_leave.go create mode 100644 engine/client/swarm_leave_test.go create mode 100644 engine/client/swarm_unlock.go create mode 100644 engine/client/swarm_unlock_test.go create mode 100644 engine/client/swarm_update.go create mode 100644 engine/client/swarm_update_test.go create mode 100644 engine/client/task_inspect.go create mode 100644 engine/client/task_inspect_test.go create mode 100644 engine/client/task_list.go create mode 100644 engine/client/task_list_test.go create mode 100644 engine/client/task_logs.go create mode 100644 engine/client/testdata/ca.pem create mode 100644 engine/client/testdata/cert.pem create mode 100644 engine/client/testdata/key.pem create mode 100644 engine/client/transport.go create mode 100644 engine/client/utils.go create mode 100644 engine/client/version.go create mode 100644 engine/client/volume_create.go create mode 100644 engine/client/volume_create_test.go create mode 100644 engine/client/volume_inspect.go create mode 100644 engine/client/volume_inspect_test.go create mode 100644 engine/client/volume_list.go create mode 100644 engine/client/volume_list_test.go create mode 100644 engine/client/volume_prune.go create mode 100644 engine/client/volume_remove.go create mode 100644 engine/client/volume_remove_test.go create mode 100644 engine/cmd/dockerd/README.md create mode 100644 
engine/cmd/dockerd/config.go create mode 100644 engine/cmd/dockerd/config_common_unix.go create mode 100644 engine/cmd/dockerd/config_unix.go create mode 100644 engine/cmd/dockerd/config_unix_test.go create mode 100644 engine/cmd/dockerd/config_windows.go create mode 100644 engine/cmd/dockerd/daemon.go create mode 100644 engine/cmd/dockerd/daemon_freebsd.go create mode 100644 engine/cmd/dockerd/daemon_linux.go create mode 100644 engine/cmd/dockerd/daemon_test.go create mode 100644 engine/cmd/dockerd/daemon_unix.go create mode 100644 engine/cmd/dockerd/daemon_unix_test.go create mode 100644 engine/cmd/dockerd/daemon_windows.go create mode 100644 engine/cmd/dockerd/docker.go create mode 100644 engine/cmd/dockerd/docker_unix.go create mode 100644 engine/cmd/dockerd/docker_windows.go create mode 100644 engine/cmd/dockerd/hack/malformed_host_override.go create mode 100644 engine/cmd/dockerd/hack/malformed_host_override_test.go create mode 100644 engine/cmd/dockerd/metrics.go create mode 100644 engine/cmd/dockerd/options.go create mode 100644 engine/cmd/dockerd/options_test.go create mode 100644 engine/cmd/dockerd/service_unsupported.go create mode 100644 engine/cmd/dockerd/service_windows.go create mode 100644 engine/codecov.yml create mode 100644 engine/container/archive.go create mode 100644 engine/container/container.go create mode 100644 engine/container/container_unit_test.go create mode 100644 engine/container/container_unix.go create mode 100644 engine/container/container_windows.go create mode 100644 engine/container/env.go create mode 100644 engine/container/env_test.go create mode 100644 engine/container/health.go create mode 100644 engine/container/history.go create mode 100644 engine/container/memory_store.go create mode 100644 engine/container/memory_store_test.go create mode 100644 engine/container/monitor.go create mode 100644 engine/container/mounts_unix.go create mode 100644 engine/container/mounts_windows.go create mode 100644 engine/container/state.go 
create mode 100644 engine/container/state_test.go create mode 100644 engine/container/store.go create mode 100644 engine/container/stream/attach.go create mode 100644 engine/container/stream/streams.go create mode 100644 engine/container/view.go create mode 100644 engine/container/view_test.go create mode 100644 engine/contrib/README.md create mode 100644 engine/contrib/REVIEWERS create mode 100644 engine/contrib/apparmor/main.go create mode 100644 engine/contrib/apparmor/template.go create mode 100755 engine/contrib/check-config.sh create mode 100644 engine/contrib/desktop-integration/README.md create mode 100644 engine/contrib/desktop-integration/chromium/Dockerfile create mode 100644 engine/contrib/desktop-integration/gparted/Dockerfile create mode 100644 engine/contrib/docker-device-tool/README.md create mode 100644 engine/contrib/docker-device-tool/device_tool.go create mode 100644 engine/contrib/docker-device-tool/device_tool_windows.go create mode 100755 engine/contrib/docker-machine-install-bundle.sh create mode 100755 engine/contrib/dockerd-rootless.sh create mode 100755 engine/contrib/dockerize-disk.sh create mode 100755 engine/contrib/download-frozen-image-v1.sh create mode 100755 engine/contrib/download-frozen-image-v2.sh create mode 100644 engine/contrib/editorconfig create mode 100644 engine/contrib/gitdm/aliases create mode 100644 engine/contrib/gitdm/domain-map create mode 100755 engine/contrib/gitdm/generate_aliases.sh create mode 100644 engine/contrib/gitdm/gitdm.config create mode 100644 engine/contrib/httpserver/Dockerfile create mode 100644 engine/contrib/httpserver/server.go create mode 100644 engine/contrib/init/openrc/docker.confd create mode 100644 engine/contrib/init/openrc/docker.initd create mode 100644 engine/contrib/init/systemd/REVIEWERS create mode 100644 engine/contrib/init/systemd/docker.service create mode 100644 engine/contrib/init/systemd/docker.service.rpm create mode 100644 engine/contrib/init/systemd/docker.socket create mode 
100755 engine/contrib/init/sysvinit-debian/docker create mode 100644 engine/contrib/init/sysvinit-debian/docker.default create mode 100755 engine/contrib/init/sysvinit-redhat/docker create mode 100644 engine/contrib/init/sysvinit-redhat/docker.sysconfig create mode 100644 engine/contrib/init/upstart/REVIEWERS create mode 100644 engine/contrib/init/upstart/docker.conf create mode 100755 engine/contrib/mac-install-bundle.sh create mode 100755 engine/contrib/mkimage-alpine.sh create mode 100644 engine/contrib/mkimage-arch-pacman.conf create mode 100755 engine/contrib/mkimage-arch.sh create mode 100644 engine/contrib/mkimage-archarm-pacman.conf create mode 100755 engine/contrib/mkimage-crux.sh create mode 100755 engine/contrib/mkimage-pld.sh create mode 100755 engine/contrib/mkimage-yum.sh create mode 100755 engine/contrib/mkimage.sh create mode 100755 engine/contrib/mkimage/.febootstrap-minimize create mode 100755 engine/contrib/mkimage/busybox-static create mode 100755 engine/contrib/mkimage/debootstrap create mode 100755 engine/contrib/mkimage/mageia-urpmi create mode 100755 engine/contrib/mkimage/rinse create mode 100644 engine/contrib/nnp-test/Dockerfile create mode 100644 engine/contrib/nnp-test/nnp-test.c create mode 100755 engine/contrib/nuke-graph-directory.sh create mode 100755 engine/contrib/report-issue.sh create mode 100644 engine/contrib/syntax/nano/Dockerfile.nanorc create mode 100644 engine/contrib/syntax/nano/README.md create mode 100644 engine/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences create mode 100644 engine/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage create mode 100644 engine/contrib/syntax/textmate/Docker.tmbundle/info.plist create mode 100644 engine/contrib/syntax/textmate/README.md create mode 100644 engine/contrib/syntax/textmate/REVIEWERS create mode 100644 engine/contrib/syntax/vim/LICENSE create mode 100644 engine/contrib/syntax/vim/README.md create mode 100644 
engine/contrib/syntax/vim/doc/dockerfile.txt create mode 100644 engine/contrib/syntax/vim/ftdetect/dockerfile.vim create mode 100644 engine/contrib/syntax/vim/syntax/dockerfile.vim create mode 100644 engine/contrib/syscall-test/Dockerfile create mode 100644 engine/contrib/syscall-test/acct.c create mode 100644 engine/contrib/syscall-test/exit32.s create mode 100644 engine/contrib/syscall-test/ns.c create mode 100644 engine/contrib/syscall-test/raw.c create mode 100644 engine/contrib/syscall-test/setgid.c create mode 100644 engine/contrib/syscall-test/setuid.c create mode 100644 engine/contrib/syscall-test/socket.c create mode 100644 engine/contrib/syscall-test/userns.c create mode 100644 engine/contrib/udev/80-docker.rules create mode 100644 engine/contrib/vagrant-docker/README.md create mode 100644 engine/daemon/apparmor_default.go create mode 100644 engine/daemon/apparmor_default_unsupported.go create mode 100644 engine/daemon/archive.go create mode 100644 engine/daemon/archive_tarcopyoptions.go create mode 100644 engine/daemon/archive_tarcopyoptions_unix.go create mode 100644 engine/daemon/archive_tarcopyoptions_windows.go create mode 100644 engine/daemon/archive_unix.go create mode 100644 engine/daemon/archive_windows.go create mode 100644 engine/daemon/attach.go create mode 100644 engine/daemon/auth.go create mode 100644 engine/daemon/changes.go create mode 100644 engine/daemon/checkpoint.go create mode 100644 engine/daemon/cluster.go create mode 100644 engine/daemon/cluster/cluster.go create mode 100644 engine/daemon/cluster/configs.go create mode 100644 engine/daemon/cluster/controllers/plugin/controller.go create mode 100644 engine/daemon/cluster/controllers/plugin/controller_test.go create mode 100644 engine/daemon/cluster/convert/config.go create mode 100644 engine/daemon/cluster/convert/container.go create mode 100644 engine/daemon/cluster/convert/network.go create mode 100644 engine/daemon/cluster/convert/network_test.go create mode 100644 
engine/daemon/cluster/convert/node.go create mode 100644 engine/daemon/cluster/convert/secret.go create mode 100644 engine/daemon/cluster/convert/service.go create mode 100644 engine/daemon/cluster/convert/service_test.go create mode 100644 engine/daemon/cluster/convert/swarm.go create mode 100644 engine/daemon/cluster/convert/task.go create mode 100644 engine/daemon/cluster/errors.go create mode 100644 engine/daemon/cluster/executor/backend.go create mode 100644 engine/daemon/cluster/executor/container/adapter.go create mode 100644 engine/daemon/cluster/executor/container/adapter_test.go create mode 100644 engine/daemon/cluster/executor/container/attachment.go create mode 100644 engine/daemon/cluster/executor/container/container.go create mode 100644 engine/daemon/cluster/executor/container/container_test.go create mode 100644 engine/daemon/cluster/executor/container/controller.go create mode 100644 engine/daemon/cluster/executor/container/errors.go create mode 100644 engine/daemon/cluster/executor/container/executor.go create mode 100644 engine/daemon/cluster/executor/container/health_test.go create mode 100644 engine/daemon/cluster/executor/container/validate.go create mode 100644 engine/daemon/cluster/executor/container/validate_test.go create mode 100644 engine/daemon/cluster/executor/container/validate_unix_test.go create mode 100644 engine/daemon/cluster/executor/container/validate_windows_test.go create mode 100644 engine/daemon/cluster/filters.go create mode 100644 engine/daemon/cluster/filters_test.go create mode 100644 engine/daemon/cluster/helpers.go create mode 100644 engine/daemon/cluster/listen_addr.go create mode 100644 engine/daemon/cluster/listen_addr_linux.go create mode 100644 engine/daemon/cluster/listen_addr_others.go create mode 100644 engine/daemon/cluster/networks.go create mode 100644 engine/daemon/cluster/noderunner.go create mode 100644 engine/daemon/cluster/nodes.go create mode 100644 engine/daemon/cluster/provider/network.go create 
mode 100644 engine/daemon/cluster/secrets.go create mode 100644 engine/daemon/cluster/services.go create mode 100644 engine/daemon/cluster/swarm.go create mode 100644 engine/daemon/cluster/tasks.go create mode 100644 engine/daemon/cluster/utils.go create mode 100644 engine/daemon/commit.go create mode 100644 engine/daemon/config/builder.go create mode 100644 engine/daemon/config/config.go create mode 100644 engine/daemon/config/config_common_unix.go create mode 100644 engine/daemon/config/config_common_unix_test.go create mode 100644 engine/daemon/config/config_test.go create mode 100644 engine/daemon/config/config_unix.go create mode 100644 engine/daemon/config/config_unix_test.go create mode 100644 engine/daemon/config/config_windows.go create mode 100644 engine/daemon/config/config_windows_test.go create mode 100644 engine/daemon/config/opts.go create mode 100644 engine/daemon/configs.go create mode 100644 engine/daemon/configs_linux.go create mode 100644 engine/daemon/configs_unsupported.go create mode 100644 engine/daemon/configs_windows.go create mode 100644 engine/daemon/container.go create mode 100644 engine/daemon/container_linux.go create mode 100644 engine/daemon/container_operations.go create mode 100644 engine/daemon/container_operations_unix.go create mode 100644 engine/daemon/container_operations_windows.go create mode 100644 engine/daemon/container_unix_test.go create mode 100644 engine/daemon/container_windows.go create mode 100644 engine/daemon/create.go create mode 100644 engine/daemon/create_test.go create mode 100644 engine/daemon/create_unix.go create mode 100644 engine/daemon/create_windows.go create mode 100644 engine/daemon/daemon.go create mode 100644 engine/daemon/daemon_linux.go create mode 100644 engine/daemon/daemon_linux_test.go create mode 100644 engine/daemon/daemon_test.go create mode 100644 engine/daemon/daemon_unix.go create mode 100644 engine/daemon/daemon_unix_test.go create mode 100644 engine/daemon/daemon_unsupported.go 
create mode 100644 engine/daemon/daemon_windows.go create mode 100644 engine/daemon/daemon_windows_test.go create mode 100644 engine/daemon/debugtrap_unix.go create mode 100644 engine/daemon/debugtrap_unsupported.go create mode 100644 engine/daemon/debugtrap_windows.go create mode 100644 engine/daemon/delete.go create mode 100644 engine/daemon/delete_test.go create mode 100644 engine/daemon/dependency.go create mode 100644 engine/daemon/devices_linux.go create mode 100644 engine/daemon/discovery/discovery.go create mode 100644 engine/daemon/discovery/discovery_test.go create mode 100644 engine/daemon/disk_usage.go create mode 100644 engine/daemon/errors.go create mode 100644 engine/daemon/events.go create mode 100644 engine/daemon/events/events.go create mode 100644 engine/daemon/events/events_test.go create mode 100644 engine/daemon/events/filter.go create mode 100644 engine/daemon/events/metrics.go create mode 100644 engine/daemon/events/testutils/testutils.go create mode 100644 engine/daemon/events_test.go create mode 100644 engine/daemon/exec.go create mode 100644 engine/daemon/exec/exec.go create mode 100644 engine/daemon/exec_linux.go create mode 100644 engine/daemon/exec_linux_test.go create mode 100644 engine/daemon/exec_windows.go create mode 100644 engine/daemon/export.go create mode 100644 engine/daemon/graphdriver/aufs/aufs.go create mode 100644 engine/daemon/graphdriver/aufs/aufs_test.go create mode 100644 engine/daemon/graphdriver/aufs/dirs.go create mode 100644 engine/daemon/graphdriver/aufs/mount.go create mode 100644 engine/daemon/graphdriver/btrfs/btrfs.go create mode 100644 engine/daemon/graphdriver/btrfs/btrfs_test.go create mode 100644 engine/daemon/graphdriver/btrfs/dummy_unsupported.go create mode 100644 engine/daemon/graphdriver/btrfs/version.go create mode 100644 engine/daemon/graphdriver/btrfs/version_none.go create mode 100644 engine/daemon/graphdriver/btrfs/version_test.go create mode 100644 engine/daemon/graphdriver/copy/copy.go create 
mode 100644 engine/daemon/graphdriver/copy/copy_cgo.go create mode 100644 engine/daemon/graphdriver/copy/copy_nocgo.go create mode 100644 engine/daemon/graphdriver/copy/copy_test.go create mode 100644 engine/daemon/graphdriver/counter.go create mode 100644 engine/daemon/graphdriver/devmapper/README.md create mode 100644 engine/daemon/graphdriver/devmapper/device_setup.go create mode 100644 engine/daemon/graphdriver/devmapper/deviceset.go create mode 100644 engine/daemon/graphdriver/devmapper/devmapper_doc.go create mode 100644 engine/daemon/graphdriver/devmapper/devmapper_test.go create mode 100644 engine/daemon/graphdriver/devmapper/driver.go create mode 100644 engine/daemon/graphdriver/devmapper/mount.go create mode 100644 engine/daemon/graphdriver/driver.go create mode 100644 engine/daemon/graphdriver/driver_freebsd.go create mode 100644 engine/daemon/graphdriver/driver_linux.go create mode 100644 engine/daemon/graphdriver/driver_test.go create mode 100644 engine/daemon/graphdriver/driver_unsupported.go create mode 100644 engine/daemon/graphdriver/driver_windows.go create mode 100644 engine/daemon/graphdriver/errors.go create mode 100644 engine/daemon/graphdriver/fsdiff.go create mode 100644 engine/daemon/graphdriver/graphtest/graphbench_unix.go create mode 100644 engine/daemon/graphdriver/graphtest/graphtest_unix.go create mode 100644 engine/daemon/graphdriver/graphtest/graphtest_windows.go create mode 100644 engine/daemon/graphdriver/graphtest/testutil.go create mode 100644 engine/daemon/graphdriver/graphtest/testutil_unix.go create mode 100644 engine/daemon/graphdriver/lcow/lcow.go create mode 100644 engine/daemon/graphdriver/lcow/lcow_svm.go create mode 100644 engine/daemon/graphdriver/lcow/remotefs.go create mode 100644 engine/daemon/graphdriver/lcow/remotefs_file.go create mode 100644 engine/daemon/graphdriver/lcow/remotefs_filedriver.go create mode 100644 engine/daemon/graphdriver/lcow/remotefs_pathdriver.go create mode 100644 
engine/daemon/graphdriver/overlay/overlay.go create mode 100644 engine/daemon/graphdriver/overlay/overlay_test.go create mode 100644 engine/daemon/graphdriver/overlay/overlay_unsupported.go create mode 100644 engine/daemon/graphdriver/overlay2/check.go create mode 100644 engine/daemon/graphdriver/overlay2/mount.go create mode 100644 engine/daemon/graphdriver/overlay2/overlay.go create mode 100644 engine/daemon/graphdriver/overlay2/overlay_test.go create mode 100644 engine/daemon/graphdriver/overlay2/overlay_unsupported.go create mode 100644 engine/daemon/graphdriver/overlay2/randomid.go create mode 100644 engine/daemon/graphdriver/overlayutils/overlayutils.go create mode 100644 engine/daemon/graphdriver/plugin.go create mode 100644 engine/daemon/graphdriver/proxy.go create mode 100644 engine/daemon/graphdriver/quota/errors.go create mode 100644 engine/daemon/graphdriver/quota/projectquota.go create mode 100644 engine/daemon/graphdriver/quota/projectquota_test.go create mode 100644 engine/daemon/graphdriver/quota/projectquota_unsupported.go create mode 100644 engine/daemon/graphdriver/quota/types.go create mode 100644 engine/daemon/graphdriver/register/register_aufs.go create mode 100644 engine/daemon/graphdriver/register/register_btrfs.go create mode 100644 engine/daemon/graphdriver/register/register_devicemapper.go create mode 100644 engine/daemon/graphdriver/register/register_overlay.go create mode 100644 engine/daemon/graphdriver/register/register_overlay2.go create mode 100644 engine/daemon/graphdriver/register/register_vfs.go create mode 100644 engine/daemon/graphdriver/register/register_windows.go create mode 100644 engine/daemon/graphdriver/register/register_zfs.go create mode 100644 engine/daemon/graphdriver/vfs/copy_linux.go create mode 100644 engine/daemon/graphdriver/vfs/copy_unsupported.go create mode 100644 engine/daemon/graphdriver/vfs/driver.go create mode 100644 engine/daemon/graphdriver/vfs/quota_linux.go create mode 100644 
engine/daemon/graphdriver/vfs/quota_unsupported.go create mode 100644 engine/daemon/graphdriver/vfs/vfs_test.go create mode 100644 engine/daemon/graphdriver/windows/windows.go create mode 100644 engine/daemon/graphdriver/zfs/MAINTAINERS create mode 100644 engine/daemon/graphdriver/zfs/zfs.go create mode 100644 engine/daemon/graphdriver/zfs/zfs_freebsd.go create mode 100644 engine/daemon/graphdriver/zfs/zfs_linux.go create mode 100644 engine/daemon/graphdriver/zfs/zfs_test.go create mode 100644 engine/daemon/graphdriver/zfs/zfs_unsupported.go create mode 100644 engine/daemon/health.go create mode 100644 engine/daemon/health_test.go create mode 100644 engine/daemon/images/cache.go create mode 100644 engine/daemon/images/image.go create mode 100644 engine/daemon/images/image_builder.go create mode 100644 engine/daemon/images/image_commit.go create mode 100644 engine/daemon/images/image_delete.go create mode 100644 engine/daemon/images/image_events.go create mode 100644 engine/daemon/images/image_exporter.go create mode 100644 engine/daemon/images/image_history.go create mode 100644 engine/daemon/images/image_import.go create mode 100644 engine/daemon/images/image_inspect.go create mode 100644 engine/daemon/images/image_prune.go create mode 100644 engine/daemon/images/image_pull.go create mode 100644 engine/daemon/images/image_push.go create mode 100644 engine/daemon/images/image_search.go create mode 100644 engine/daemon/images/image_search_test.go create mode 100644 engine/daemon/images/image_tag.go create mode 100644 engine/daemon/images/image_unix.go create mode 100644 engine/daemon/images/image_windows.go create mode 100644 engine/daemon/images/images.go create mode 100644 engine/daemon/images/locals.go create mode 100644 engine/daemon/images/service.go create mode 100644 engine/daemon/info.go create mode 100644 engine/daemon/info_test.go create mode 100644 engine/daemon/info_unix.go create mode 100644 engine/daemon/info_unix_test.go create mode 100644 
engine/daemon/info_windows.go create mode 100644 engine/daemon/initlayer/setup_unix.go create mode 100644 engine/daemon/initlayer/setup_windows.go create mode 100644 engine/daemon/inspect.go create mode 100644 engine/daemon/inspect_linux.go create mode 100644 engine/daemon/inspect_test.go create mode 100644 engine/daemon/inspect_windows.go create mode 100644 engine/daemon/keys.go create mode 100644 engine/daemon/keys_unsupported.go create mode 100644 engine/daemon/kill.go create mode 100644 engine/daemon/licensing.go create mode 100644 engine/daemon/licensing_test.go create mode 100644 engine/daemon/links.go create mode 100644 engine/daemon/links/links.go create mode 100644 engine/daemon/links/links_test.go create mode 100644 engine/daemon/list.go create mode 100644 engine/daemon/list_test.go create mode 100644 engine/daemon/list_unix.go create mode 100644 engine/daemon/list_windows.go create mode 100644 engine/daemon/listeners/group_unix.go create mode 100644 engine/daemon/listeners/listeners_linux.go create mode 100644 engine/daemon/listeners/listeners_windows.go create mode 100644 engine/daemon/logdrivers_linux.go create mode 100644 engine/daemon/logdrivers_windows.go create mode 100644 engine/daemon/logger/adapter.go create mode 100644 engine/daemon/logger/adapter_test.go create mode 100644 engine/daemon/logger/awslogs/cloudwatchlogs.go create mode 100644 engine/daemon/logger/awslogs/cloudwatchlogs_test.go create mode 100644 engine/daemon/logger/awslogs/cwlogsiface_mock_test.go create mode 100644 engine/daemon/logger/copier.go create mode 100644 engine/daemon/logger/copier_test.go create mode 100644 engine/daemon/logger/etwlogs/etwlogs_windows.go create mode 100644 engine/daemon/logger/factory.go create mode 100644 engine/daemon/logger/fluentd/fluentd.go create mode 100644 engine/daemon/logger/gcplogs/gcplogging.go create mode 100644 engine/daemon/logger/gcplogs/gcplogging_linux.go create mode 100644 engine/daemon/logger/gcplogs/gcplogging_others.go create mode 
100644 engine/daemon/logger/gelf/gelf.go create mode 100644 engine/daemon/logger/gelf/gelf_test.go create mode 100644 engine/daemon/logger/journald/journald.go create mode 100644 engine/daemon/logger/journald/journald_test.go create mode 100644 engine/daemon/logger/journald/journald_unsupported.go create mode 100644 engine/daemon/logger/journald/read.go create mode 100644 engine/daemon/logger/journald/read_native.go create mode 100644 engine/daemon/logger/journald/read_native_compat.go create mode 100644 engine/daemon/logger/journald/read_unsupported.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonfilelog.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonfilelog_test.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/jsonlog.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go create mode 100644 engine/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go create mode 100644 engine/daemon/logger/jsonfilelog/read.go create mode 100644 engine/daemon/logger/jsonfilelog/read_test.go create mode 100644 engine/daemon/logger/local/config.go create mode 100644 engine/daemon/logger/local/doc.go create mode 100644 engine/daemon/logger/local/local.go create mode 100644 engine/daemon/logger/local/local_test.go create mode 100644 engine/daemon/logger/local/read.go create mode 100644 engine/daemon/logger/logentries/logentries.go create mode 100644 engine/daemon/logger/logger.go create mode 100644 engine/daemon/logger/logger_test.go create mode 100644 engine/daemon/logger/loggerutils/log_tag.go create mode 100644 engine/daemon/logger/loggerutils/log_tag_test.go create mode 100644 engine/daemon/logger/loggerutils/logfile.go create mode 100644 engine/daemon/logger/loggerutils/logfile_test.go create mode 100644 engine/daemon/logger/loginfo.go create mode 100644 
engine/daemon/logger/metrics.go create mode 100644 engine/daemon/logger/plugin.go create mode 100644 engine/daemon/logger/plugin_unix.go create mode 100644 engine/daemon/logger/plugin_unsupported.go create mode 100644 engine/daemon/logger/proxy.go create mode 100644 engine/daemon/logger/ring.go create mode 100644 engine/daemon/logger/ring_test.go create mode 100644 engine/daemon/logger/splunk/splunk.go create mode 100644 engine/daemon/logger/splunk/splunk_test.go create mode 100644 engine/daemon/logger/splunk/splunkhecmock_test.go create mode 100644 engine/daemon/logger/syslog/syslog.go create mode 100644 engine/daemon/logger/syslog/syslog_test.go create mode 100644 engine/daemon/logger/templates/templates.go create mode 100644 engine/daemon/logger/templates/templates_test.go create mode 100644 engine/daemon/logs.go create mode 100644 engine/daemon/logs_test.go create mode 100644 engine/daemon/metrics.go create mode 100644 engine/daemon/metrics_unix.go create mode 100644 engine/daemon/metrics_unsupported.go create mode 100644 engine/daemon/monitor.go create mode 100644 engine/daemon/mounts.go create mode 100644 engine/daemon/names.go create mode 100644 engine/daemon/names/names.go create mode 100644 engine/daemon/network.go create mode 100644 engine/daemon/network/filter.go create mode 100644 engine/daemon/network/filter_test.go create mode 100644 engine/daemon/network/settings.go create mode 100644 engine/daemon/nvidia_linux.go create mode 100644 engine/daemon/oci_linux.go create mode 100644 engine/daemon/oci_linux_test.go create mode 100644 engine/daemon/oci_utils.go create mode 100644 engine/daemon/oci_windows.go create mode 100644 engine/daemon/oci_windows_test.go create mode 100644 engine/daemon/pause.go create mode 100644 engine/daemon/prune.go create mode 100644 engine/daemon/reload.go create mode 100644 engine/daemon/reload_test.go create mode 100644 engine/daemon/reload_unix.go create mode 100644 engine/daemon/reload_windows.go create mode 100644 
engine/daemon/rename.go create mode 100644 engine/daemon/resize.go create mode 100644 engine/daemon/resize_test.go create mode 100644 engine/daemon/restart.go create mode 100644 engine/daemon/seccomp_disabled.go create mode 100644 engine/daemon/seccomp_linux.go create mode 100644 engine/daemon/seccomp_unsupported.go create mode 100644 engine/daemon/secrets.go create mode 100644 engine/daemon/secrets_linux.go create mode 100644 engine/daemon/secrets_unsupported.go create mode 100644 engine/daemon/secrets_windows.go create mode 100644 engine/daemon/selinux_linux.go create mode 100644 engine/daemon/selinux_unsupported.go create mode 100644 engine/daemon/start.go create mode 100644 engine/daemon/start_unix.go create mode 100644 engine/daemon/start_windows.go create mode 100644 engine/daemon/stats.go create mode 100644 engine/daemon/stats/collector.go create mode 100644 engine/daemon/stats/collector_unix.go create mode 100644 engine/daemon/stats/collector_windows.go create mode 100644 engine/daemon/stats_collector.go create mode 100644 engine/daemon/stats_unix.go create mode 100644 engine/daemon/stats_windows.go create mode 100644 engine/daemon/stop.go create mode 100644 engine/daemon/testdata/keyfile create mode 100644 engine/daemon/top_unix.go create mode 100644 engine/daemon/top_unix_test.go create mode 100644 engine/daemon/top_windows.go create mode 100644 engine/daemon/trustkey.go create mode 100644 engine/daemon/trustkey_test.go create mode 100644 engine/daemon/unpause.go create mode 100644 engine/daemon/update.go create mode 100644 engine/daemon/update_linux.go create mode 100644 engine/daemon/update_windows.go create mode 100644 engine/daemon/util_test.go create mode 100644 engine/daemon/volumes.go create mode 100644 engine/daemon/volumes_linux.go create mode 100644 engine/daemon/volumes_linux_test.go create mode 100644 engine/daemon/volumes_unit_test.go create mode 100644 engine/daemon/volumes_unix.go create mode 100644 engine/daemon/volumes_unix_test.go create 
mode 100644 engine/daemon/volumes_windows.go create mode 100644 engine/daemon/wait.go create mode 100644 engine/daemon/workdir.go create mode 100644 engine/distribution/config.go create mode 100644 engine/distribution/errors.go create mode 100644 engine/distribution/errors_test.go create mode 100644 engine/distribution/fixtures/validate_manifest/bad_manifest create mode 100644 engine/distribution/fixtures/validate_manifest/extra_data_manifest create mode 100644 engine/distribution/fixtures/validate_manifest/good_manifest create mode 100644 engine/distribution/metadata/metadata.go create mode 100644 engine/distribution/metadata/v1_id_service.go create mode 100644 engine/distribution/metadata/v1_id_service_test.go create mode 100644 engine/distribution/metadata/v2_metadata_service.go create mode 100644 engine/distribution/metadata/v2_metadata_service_test.go create mode 100644 engine/distribution/oci.go create mode 100644 engine/distribution/pull.go create mode 100644 engine/distribution/pull_v2.go create mode 100644 engine/distribution/pull_v2_test.go create mode 100644 engine/distribution/pull_v2_unix.go create mode 100644 engine/distribution/pull_v2_windows.go create mode 100644 engine/distribution/push.go create mode 100644 engine/distribution/push_v2.go create mode 100644 engine/distribution/push_v2_test.go create mode 100644 engine/distribution/registry.go create mode 100644 engine/distribution/registry_unit_test.go create mode 100644 engine/distribution/utils/progress.go create mode 100644 engine/distribution/xfer/download.go create mode 100644 engine/distribution/xfer/download_test.go create mode 100644 engine/distribution/xfer/transfer.go create mode 100644 engine/distribution/xfer/transfer_test.go create mode 100644 engine/distribution/xfer/upload.go create mode 100644 engine/distribution/xfer/upload_test.go create mode 100644 engine/dockerversion/useragent.go create mode 100644 engine/dockerversion/version_lib.go create mode 100644 engine/docs/api/v1.18.md 
create mode 100644 engine/docs/api/v1.19.md create mode 100644 engine/docs/api/v1.20.md create mode 100644 engine/docs/api/v1.21.md create mode 100644 engine/docs/api/v1.22.md create mode 100644 engine/docs/api/v1.23.md create mode 100644 engine/docs/api/v1.24.md create mode 100644 engine/docs/api/version-history.md create mode 100644 engine/docs/contributing/README.md create mode 100644 engine/docs/contributing/images/branch-sig.png create mode 100644 engine/docs/contributing/images/contributor-edit.png create mode 100644 engine/docs/contributing/images/copy_url.png create mode 100644 engine/docs/contributing/images/fork_docker.png create mode 100644 engine/docs/contributing/images/git_bash.png create mode 100644 engine/docs/contributing/images/list_example.png create mode 100644 engine/docs/contributing/set-up-dev-env.md create mode 100644 engine/docs/contributing/set-up-git.md create mode 100644 engine/docs/contributing/software-req-win.md create mode 100644 engine/docs/contributing/software-required.md create mode 100644 engine/docs/contributing/test.md create mode 100644 engine/docs/contributing/who-written-for.md create mode 100644 engine/docs/rootless.md create mode 100644 engine/docs/static_files/contributors.png create mode 100644 engine/docs/static_files/moby-project-logo.png create mode 100644 engine/errdefs/defs.go create mode 100644 engine/errdefs/doc.go create mode 100644 engine/errdefs/helpers.go create mode 100644 engine/errdefs/helpers_test.go create mode 100644 engine/errdefs/http_helpers.go create mode 100644 engine/errdefs/http_helpers_test.go create mode 100644 engine/errdefs/is.go create mode 100644 engine/hack/README.md create mode 100755 engine/hack/ci/arm create mode 100755 engine/hack/ci/experimental create mode 100755 engine/hack/ci/janky create mode 100755 engine/hack/ci/master create mode 100755 engine/hack/ci/powerpc create mode 100644 engine/hack/ci/windows.ps1 create mode 100755 engine/hack/ci/z create mode 100755 engine/hack/dind 
create mode 100755 engine/hack/dockerfile/install/containerd.installer create mode 100755 engine/hack/dockerfile/install/dockercli.installer create mode 100755 engine/hack/dockerfile/install/gometalinter.installer create mode 100755 engine/hack/dockerfile/install/gotestsum.installer create mode 100755 engine/hack/dockerfile/install/install.sh create mode 100755 engine/hack/dockerfile/install/proxy.installer create mode 100755 engine/hack/dockerfile/install/rootlesskit.installer create mode 100755 engine/hack/dockerfile/install/runc.installer create mode 100755 engine/hack/dockerfile/install/tini.installer create mode 100755 engine/hack/dockerfile/install/tomlv.installer create mode 100755 engine/hack/dockerfile/install/vndr.installer create mode 100755 engine/hack/generate-authors.sh create mode 100755 engine/hack/generate-swagger-api.sh create mode 100644 engine/hack/make.ps1 create mode 100755 engine/hack/make.sh create mode 100644 engine/hack/make/.binary create mode 100644 engine/hack/make/.binary-setup create mode 100644 engine/hack/make/.detect-daemon-osarch create mode 100644 engine/hack/make/.ensure-emptyfs create mode 100644 engine/hack/make/.go-autogen create mode 100644 engine/hack/make/.go-autogen.ps1 create mode 100644 engine/hack/make/.integration-daemon-setup create mode 100644 engine/hack/make/.integration-daemon-start create mode 100644 engine/hack/make/.integration-daemon-stop create mode 100644 engine/hack/make/.integration-test-helpers create mode 100644 engine/hack/make/.resources-windows/common.rc create mode 100644 engine/hack/make/.resources-windows/docker.exe.manifest create mode 100644 engine/hack/make/.resources-windows/docker.ico create mode 100644 engine/hack/make/.resources-windows/docker.png create mode 100644 engine/hack/make/.resources-windows/docker.rc create mode 100644 engine/hack/make/.resources-windows/dockerd.rc create mode 100644 engine/hack/make/.resources-windows/event_messages.mc create mode 100644 
engine/hack/make/.resources-windows/resources.go create mode 100644 engine/hack/make/README.md create mode 100644 engine/hack/make/binary create mode 100644 engine/hack/make/binary-daemon create mode 100755 engine/hack/make/build-integration-test-binary create mode 100644 engine/hack/make/containerutility create mode 100644 engine/hack/make/cross create mode 100644 engine/hack/make/cross-platform-dependent create mode 100644 engine/hack/make/dynbinary create mode 100644 engine/hack/make/dynbinary-daemon create mode 100644 engine/hack/make/install-binary create mode 100644 engine/hack/make/run create mode 100644 engine/hack/make/test-docker-py create mode 100755 engine/hack/make/test-integration create mode 100755 engine/hack/make/test-integration-cli create mode 100644 engine/hack/make/test-integration-flaky create mode 100644 engine/hack/make/test-integration-shell create mode 100755 engine/hack/test/e2e-run.sh create mode 100755 engine/hack/test/unit create mode 100644 engine/hack/validate/.swagger-yamllint create mode 100644 engine/hack/validate/.validate create mode 100755 engine/hack/validate/all create mode 100755 engine/hack/validate/changelog-date-descending create mode 100755 engine/hack/validate/changelog-well-formed create mode 100755 engine/hack/validate/dco create mode 100755 engine/hack/validate/default create mode 100755 engine/hack/validate/default-seccomp create mode 100755 engine/hack/validate/deprecate-integration-cli create mode 100755 engine/hack/validate/gometalinter create mode 100644 engine/hack/validate/gometalinter.json create mode 100755 engine/hack/validate/pkg-imports create mode 100755 engine/hack/validate/swagger create mode 100755 engine/hack/validate/swagger-gen create mode 100755 engine/hack/validate/toml create mode 100755 engine/hack/validate/vendor create mode 100755 engine/hack/vendor.sh create mode 100644 engine/image/cache/cache.go create mode 100644 engine/image/cache/compare.go create mode 100644 
engine/image/cache/compare_test.go create mode 100644 engine/image/fs.go create mode 100644 engine/image/fs_test.go create mode 100644 engine/image/image.go create mode 100644 engine/image/image_test.go create mode 100644 engine/image/rootfs.go create mode 100644 engine/image/spec/README.md create mode 100644 engine/image/spec/v1.1.md create mode 100644 engine/image/spec/v1.2.md create mode 100644 engine/image/spec/v1.md create mode 100644 engine/image/store.go create mode 100644 engine/image/store_test.go create mode 100644 engine/image/tarexport/load.go create mode 100644 engine/image/tarexport/save.go create mode 100644 engine/image/tarexport/tarexport.go create mode 100644 engine/image/v1/imagev1.go create mode 100644 engine/image/v1/imagev1_test.go create mode 100644 engine/integration-cli/benchmark_test.go create mode 100644 engine/integration-cli/check_test.go create mode 100644 engine/integration-cli/checker/checker.go create mode 100644 engine/integration-cli/cli/build/build.go create mode 100644 engine/integration-cli/cli/cli.go create mode 100644 engine/integration-cli/daemon/daemon.go create mode 100644 engine/integration-cli/daemon/daemon_swarm.go create mode 100644 engine/integration-cli/daemon_swarm_hack_test.go create mode 100644 engine/integration-cli/docker_api_attach_test.go create mode 100644 engine/integration-cli/docker_api_build_test.go create mode 100644 engine/integration-cli/docker_api_build_windows_test.go create mode 100644 engine/integration-cli/docker_api_containers_test.go create mode 100644 engine/integration-cli/docker_api_containers_windows_test.go create mode 100644 engine/integration-cli/docker_api_exec_resize_test.go create mode 100644 engine/integration-cli/docker_api_exec_test.go create mode 100644 engine/integration-cli/docker_api_images_test.go create mode 100644 engine/integration-cli/docker_api_inspect_test.go create mode 100644 engine/integration-cli/docker_api_logs_test.go create mode 100644 
engine/integration-cli/docker_api_network_test.go create mode 100644 engine/integration-cli/docker_api_stats_test.go create mode 100644 engine/integration-cli/docker_api_swarm_node_test.go create mode 100644 engine/integration-cli/docker_api_swarm_service_test.go create mode 100644 engine/integration-cli/docker_api_swarm_test.go create mode 100644 engine/integration-cli/docker_api_test.go create mode 100644 engine/integration-cli/docker_cli_attach_test.go create mode 100644 engine/integration-cli/docker_cli_attach_unix_test.go create mode 100644 engine/integration-cli/docker_cli_build_test.go create mode 100644 engine/integration-cli/docker_cli_build_unix_test.go create mode 100644 engine/integration-cli/docker_cli_by_digest_test.go create mode 100644 engine/integration-cli/docker_cli_commit_test.go create mode 100644 engine/integration-cli/docker_cli_cp_from_container_test.go create mode 100644 engine/integration-cli/docker_cli_cp_test.go create mode 100644 engine/integration-cli/docker_cli_cp_to_container_test.go create mode 100644 engine/integration-cli/docker_cli_cp_to_container_unix_test.go create mode 100644 engine/integration-cli/docker_cli_cp_utils_test.go create mode 100644 engine/integration-cli/docker_cli_create_test.go create mode 100644 engine/integration-cli/docker_cli_daemon_plugins_test.go create mode 100644 engine/integration-cli/docker_cli_daemon_test.go create mode 100644 engine/integration-cli/docker_cli_events_test.go create mode 100644 engine/integration-cli/docker_cli_events_unix_test.go create mode 100644 engine/integration-cli/docker_cli_exec_test.go create mode 100644 engine/integration-cli/docker_cli_exec_unix_test.go create mode 100644 engine/integration-cli/docker_cli_external_volume_driver_test.go create mode 100644 engine/integration-cli/docker_cli_health_test.go create mode 100644 engine/integration-cli/docker_cli_history_test.go create mode 100644 engine/integration-cli/docker_cli_images_test.go create mode 100644 
engine/integration-cli/docker_cli_import_test.go create mode 100644 engine/integration-cli/docker_cli_info_test.go create mode 100644 engine/integration-cli/docker_cli_info_unix_test.go create mode 100644 engine/integration-cli/docker_cli_inspect_test.go create mode 100644 engine/integration-cli/docker_cli_links_test.go create mode 100644 engine/integration-cli/docker_cli_login_test.go create mode 100644 engine/integration-cli/docker_cli_logout_test.go create mode 100644 engine/integration-cli/docker_cli_logs_bench_test.go create mode 100644 engine/integration-cli/docker_cli_logs_test.go create mode 100644 engine/integration-cli/docker_cli_netmode_test.go create mode 100644 engine/integration-cli/docker_cli_network_test.go create mode 100644 engine/integration-cli/docker_cli_network_unix_test.go create mode 100644 engine/integration-cli/docker_cli_plugins_logdriver_test.go create mode 100644 engine/integration-cli/docker_cli_plugins_test.go create mode 100644 engine/integration-cli/docker_cli_port_test.go create mode 100644 engine/integration-cli/docker_cli_proxy_test.go create mode 100644 engine/integration-cli/docker_cli_prune_unix_test.go create mode 100644 engine/integration-cli/docker_cli_ps_test.go create mode 100644 engine/integration-cli/docker_cli_pull_local_test.go create mode 100644 engine/integration-cli/docker_cli_pull_test.go create mode 100644 engine/integration-cli/docker_cli_push_test.go create mode 100644 engine/integration-cli/docker_cli_registry_user_agent_test.go create mode 100644 engine/integration-cli/docker_cli_restart_test.go create mode 100644 engine/integration-cli/docker_cli_rmi_test.go create mode 100644 engine/integration-cli/docker_cli_run_test.go create mode 100644 engine/integration-cli/docker_cli_run_unix_test.go create mode 100644 engine/integration-cli/docker_cli_save_load_test.go create mode 100644 engine/integration-cli/docker_cli_save_load_unix_test.go create mode 100644 engine/integration-cli/docker_cli_search_test.go create 
mode 100644 engine/integration-cli/docker_cli_service_create_test.go create mode 100644 engine/integration-cli/docker_cli_service_health_test.go create mode 100644 engine/integration-cli/docker_cli_service_logs_test.go create mode 100644 engine/integration-cli/docker_cli_service_scale_test.go create mode 100644 engine/integration-cli/docker_cli_sni_test.go create mode 100644 engine/integration-cli/docker_cli_start_test.go create mode 100644 engine/integration-cli/docker_cli_stats_test.go create mode 100644 engine/integration-cli/docker_cli_swarm_test.go create mode 100644 engine/integration-cli/docker_cli_swarm_unix_test.go create mode 100644 engine/integration-cli/docker_cli_top_test.go create mode 100644 engine/integration-cli/docker_cli_update_unix_test.go create mode 100644 engine/integration-cli/docker_cli_userns_test.go create mode 100644 engine/integration-cli/docker_cli_v2_only_test.go create mode 100644 engine/integration-cli/docker_cli_volume_test.go create mode 100644 engine/integration-cli/docker_deprecated_api_v124_test.go create mode 100644 engine/integration-cli/docker_deprecated_api_v124_unix_test.go create mode 100644 engine/integration-cli/docker_hub_pull_suite_test.go create mode 100644 engine/integration-cli/docker_utils_test.go create mode 100644 engine/integration-cli/environment/environment.go create mode 100644 engine/integration-cli/events_utils_test.go create mode 100755 engine/integration-cli/fixtures/auth/docker-credential-shell-test create mode 100644 engine/integration-cli/fixtures/credentialspecs/valid.json create mode 120000 engine/integration-cli/fixtures/https/ca.pem create mode 120000 engine/integration-cli/fixtures/https/client-cert.pem create mode 120000 engine/integration-cli/fixtures/https/client-key.pem create mode 100644 engine/integration-cli/fixtures/https/client-rogue-cert.pem create mode 100644 engine/integration-cli/fixtures/https/client-rogue-key.pem create mode 120000 
engine/integration-cli/fixtures/https/server-cert.pem create mode 120000 engine/integration-cli/fixtures/https/server-key.pem create mode 100644 engine/integration-cli/fixtures/https/server-rogue-cert.pem create mode 100644 engine/integration-cli/fixtures/https/server-rogue-key.pem create mode 100644 engine/integration-cli/fixtures/registry/cert.pem create mode 100644 engine/integration-cli/fixtures_linux_daemon_test.go create mode 100644 engine/integration-cli/requirement/requirement.go create mode 100644 engine/integration-cli/requirements_test.go create mode 100644 engine/integration-cli/requirements_unix_test.go create mode 100644 engine/integration-cli/test_vars_noseccomp_test.go create mode 100644 engine/integration-cli/test_vars_seccomp_test.go create mode 100644 engine/integration-cli/test_vars_test.go create mode 100644 engine/integration-cli/test_vars_unix_test.go create mode 100644 engine/integration-cli/test_vars_windows_test.go create mode 100644 engine/integration-cli/testdata/emptyLayer.tar create mode 100644 engine/integration-cli/utils_test.go create mode 100644 engine/integration/build/build_session_test.go create mode 100644 engine/integration/build/build_squash_test.go create mode 100644 engine/integration/build/build_test.go create mode 100644 engine/integration/build/main_test.go create mode 100644 engine/integration/build/testdata/Dockerfile.TestBuildMultiStageCopy create mode 100644 engine/integration/build/testdata/Dockerfile.testBuildPreserveOwnership create mode 100644 engine/integration/config/config_test.go create mode 100644 engine/integration/config/main_test.go create mode 100644 engine/integration/container/checkpoint_test.go create mode 100644 engine/integration/container/container_test.go create mode 100644 engine/integration/container/copy_test.go create mode 100644 engine/integration/container/create_test.go create mode 100644 engine/integration/container/daemon_linux_test.go create mode 100644 
engine/integration/container/diff_test.go create mode 100644 engine/integration/container/exec_test.go create mode 100644 engine/integration/container/export_test.go create mode 100644 engine/integration/container/health_test.go create mode 100644 engine/integration/container/inspect_test.go create mode 100644 engine/integration/container/ipcmode_linux_test.go create mode 100644 engine/integration/container/kill_test.go create mode 100644 engine/integration/container/links_linux_test.go create mode 100644 engine/integration/container/logs_test.go create mode 100644 engine/integration/container/main_test.go create mode 100644 engine/integration/container/mounts_linux_test.go create mode 100644 engine/integration/container/nat_test.go create mode 100644 engine/integration/container/pause_test.go create mode 100644 engine/integration/container/ps_test.go create mode 100644 engine/integration/container/remove_test.go create mode 100644 engine/integration/container/rename_test.go create mode 100644 engine/integration/container/resize_test.go create mode 100644 engine/integration/container/restart_test.go create mode 100644 engine/integration/container/run_linux_test.go create mode 100644 engine/integration/container/stats_test.go create mode 100644 engine/integration/container/stop_linux_test.go create mode 100644 engine/integration/container/stop_test.go create mode 100644 engine/integration/container/stop_windows_test.go create mode 100644 engine/integration/container/update_linux_test.go create mode 100644 engine/integration/container/update_test.go create mode 100644 engine/integration/container/wait_test.go create mode 100644 engine/integration/doc.go create mode 100644 engine/integration/image/commit_test.go create mode 100644 engine/integration/image/import_test.go create mode 100644 engine/integration/image/list_test.go create mode 100644 engine/integration/image/main_test.go create mode 100644 engine/integration/image/pull_test.go create mode 100644 
engine/integration/image/remove_test.go create mode 100644 engine/integration/image/tag_test.go create mode 100644 engine/integration/internal/container/container.go create mode 100644 engine/integration/internal/container/exec.go create mode 100644 engine/integration/internal/container/ops.go create mode 100644 engine/integration/internal/container/states.go create mode 100644 engine/integration/internal/network/network.go create mode 100644 engine/integration/internal/network/ops.go create mode 100644 engine/integration/internal/network/states.go create mode 100644 engine/integration/internal/requirement/requirement.go create mode 100644 engine/integration/internal/requirement/requirement_linux.go create mode 100644 engine/integration/internal/requirement/requirement_windows.go create mode 100644 engine/integration/internal/swarm/service.go create mode 100644 engine/integration/internal/swarm/states.go create mode 100644 engine/integration/network/delete_test.go create mode 100644 engine/integration/network/helpers.go create mode 100644 engine/integration/network/helpers_windows.go create mode 100644 engine/integration/network/inspect_test.go create mode 100644 engine/integration/network/ipvlan/ipvlan_test.go create mode 100644 engine/integration/network/ipvlan/main_test.go create mode 100644 engine/integration/network/ipvlan/main_windows_test.go create mode 100644 engine/integration/network/macvlan/macvlan_test.go create mode 100644 engine/integration/network/macvlan/main_test.go create mode 100644 engine/integration/network/macvlan/main_windows_test.go create mode 100644 engine/integration/network/main_test.go create mode 100644 engine/integration/network/network_test.go create mode 100644 engine/integration/network/service_test.go create mode 100644 engine/integration/plugin/authz/authz_plugin_test.go create mode 100644 engine/integration/plugin/authz/authz_plugin_v2_test.go create mode 100644 engine/integration/plugin/authz/main_test.go create mode 100644 
engine/integration/plugin/authz/main_windows_test.go create mode 100644 engine/integration/plugin/common/main_test.go create mode 100644 engine/integration/plugin/common/plugin_test.go create mode 100644 engine/integration/plugin/graphdriver/external_test.go create mode 100644 engine/integration/plugin/graphdriver/main_test.go create mode 100644 engine/integration/plugin/logging/cmd/close_on_start/main.go create mode 100644 engine/integration/plugin/logging/cmd/dummy/main.go create mode 100644 engine/integration/plugin/logging/helpers_test.go create mode 100644 engine/integration/plugin/logging/logging_linux_test.go create mode 100644 engine/integration/plugin/logging/main_test.go create mode 100644 engine/integration/plugin/logging/validation_test.go create mode 100644 engine/integration/plugin/pkg_test.go create mode 100644 engine/integration/plugin/volumes/cmd/dummy/main.go create mode 100644 engine/integration/plugin/volumes/helpers_test.go create mode 100644 engine/integration/plugin/volumes/main_test.go create mode 100644 engine/integration/plugin/volumes/mounts_test.go create mode 100644 engine/integration/secret/main_test.go create mode 100644 engine/integration/secret/secret_test.go create mode 100644 engine/integration/service/create_test.go create mode 100644 engine/integration/service/inspect_test.go create mode 100644 engine/integration/service/main_test.go create mode 100644 engine/integration/service/network_test.go create mode 100644 engine/integration/service/plugin_test.go create mode 100644 engine/integration/service/update_test.go create mode 100644 engine/integration/session/main_test.go create mode 100644 engine/integration/session/session_test.go create mode 100644 engine/integration/system/cgroupdriver_systemd_test.go create mode 100644 engine/integration/system/event_test.go create mode 100644 engine/integration/system/info_linux_test.go create mode 100644 engine/integration/system/info_test.go create mode 100644 
engine/integration/system/login_test.go create mode 100644 engine/integration/system/main_test.go create mode 100644 engine/integration/system/ping_test.go create mode 100644 engine/integration/system/version_test.go create mode 100644 engine/integration/testdata/https/ca.pem create mode 100644 engine/integration/testdata/https/client-cert.pem create mode 100644 engine/integration/testdata/https/client-key.pem create mode 100644 engine/integration/testdata/https/server-cert.pem create mode 100644 engine/integration/testdata/https/server-key.pem create mode 100644 engine/integration/volume/main_test.go create mode 100644 engine/integration/volume/volume_test.go create mode 100644 engine/internal/test/daemon/config.go create mode 100644 engine/internal/test/daemon/container.go create mode 100644 engine/internal/test/daemon/daemon.go create mode 100644 engine/internal/test/daemon/daemon_unix.go create mode 100644 engine/internal/test/daemon/daemon_windows.go create mode 100644 engine/internal/test/daemon/node.go create mode 100644 engine/internal/test/daemon/ops.go create mode 100644 engine/internal/test/daemon/plugin.go create mode 100644 engine/internal/test/daemon/secret.go create mode 100644 engine/internal/test/daemon/service.go create mode 100644 engine/internal/test/daemon/swarm.go create mode 100644 engine/internal/test/environment/clean.go create mode 100644 engine/internal/test/environment/environment.go create mode 100644 engine/internal/test/environment/protect.go create mode 100644 engine/internal/test/fakecontext/context.go create mode 100644 engine/internal/test/fakegit/fakegit.go create mode 100644 engine/internal/test/fakestorage/fixtures.go create mode 100644 engine/internal/test/fakestorage/storage.go create mode 100644 engine/internal/test/fixtures/load/frozen.go create mode 100644 engine/internal/test/fixtures/plugin/basic/basic.go create mode 100644 engine/internal/test/fixtures/plugin/plugin.go create mode 100644 engine/internal/test/helper.go 
create mode 100644 engine/internal/test/registry/ops.go create mode 100644 engine/internal/test/registry/registry.go create mode 100644 engine/internal/test/registry/registry_mock.go create mode 100644 engine/internal/test/request/npipe.go create mode 100644 engine/internal/test/request/npipe_windows.go create mode 100644 engine/internal/test/request/ops.go create mode 100644 engine/internal/test/request/request.go create mode 100644 engine/internal/test/suite/interfaces.go create mode 100644 engine/internal/test/suite/suite.go create mode 100644 engine/internal/test/suite/testify.LICENSE create mode 100644 engine/internal/testutil/helpers.go create mode 100644 engine/internal/testutil/stringutils.go create mode 100644 engine/internal/testutil/stringutils_test.go create mode 100644 engine/layer/empty.go create mode 100644 engine/layer/empty_test.go create mode 100644 engine/layer/filestore.go create mode 100644 engine/layer/filestore_test.go create mode 100644 engine/layer/filestore_unix.go create mode 100644 engine/layer/filestore_windows.go create mode 100644 engine/layer/layer.go create mode 100644 engine/layer/layer_store.go create mode 100644 engine/layer/layer_store_windows.go create mode 100644 engine/layer/layer_test.go create mode 100644 engine/layer/layer_unix.go create mode 100644 engine/layer/layer_unix_test.go create mode 100644 engine/layer/layer_windows.go create mode 100644 engine/layer/migration.go create mode 100644 engine/layer/migration_test.go create mode 100644 engine/layer/mount_test.go create mode 100644 engine/layer/mounted_layer.go create mode 100644 engine/layer/ro_layer.go create mode 100644 engine/layer/ro_layer_windows.go create mode 100644 engine/libcontainerd/libcontainerd_linux.go create mode 100644 engine/libcontainerd/libcontainerd_windows.go create mode 100644 engine/libcontainerd/local/local_windows.go create mode 100644 engine/libcontainerd/local/process_windows.go create mode 100644 engine/libcontainerd/local/utils_windows.go 
create mode 100644 engine/libcontainerd/local/utils_windows_test.go create mode 100644 engine/libcontainerd/queue/queue.go create mode 100644 engine/libcontainerd/queue/queue_test.go create mode 100644 engine/libcontainerd/remote/client.go create mode 100644 engine/libcontainerd/remote/client_io_windows.go create mode 100644 engine/libcontainerd/remote/client_linux.go create mode 100644 engine/libcontainerd/remote/client_windows.go create mode 100644 engine/libcontainerd/supervisor/remote_daemon.go create mode 100644 engine/libcontainerd/supervisor/remote_daemon_linux.go create mode 100644 engine/libcontainerd/supervisor/remote_daemon_options.go create mode 100644 engine/libcontainerd/supervisor/remote_daemon_options_linux.go create mode 100644 engine/libcontainerd/supervisor/remote_daemon_windows.go create mode 100644 engine/libcontainerd/supervisor/utils_linux.go create mode 100644 engine/libcontainerd/supervisor/utils_windows.go create mode 100644 engine/libcontainerd/types/types.go create mode 100644 engine/libcontainerd/types/types_linux.go create mode 100644 engine/libcontainerd/types/types_windows.go create mode 100644 engine/oci/caps/utils.go create mode 100644 engine/oci/defaults.go create mode 100644 engine/oci/devices_linux.go create mode 100644 engine/oci/devices_unsupported.go create mode 100644 engine/oci/namespaces.go create mode 100644 engine/oci/oci.go create mode 100644 engine/opts/address_pools.go create mode 100644 engine/opts/address_pools_test.go create mode 100644 engine/opts/env.go create mode 100644 engine/opts/env_test.go create mode 100644 engine/opts/hosts.go create mode 100644 engine/opts/hosts_test.go create mode 100644 engine/opts/hosts_unix.go create mode 100644 engine/opts/hosts_windows.go create mode 100644 engine/opts/ip.go create mode 100644 engine/opts/ip_test.go create mode 100644 engine/opts/opts.go create mode 100644 engine/opts/opts_test.go create mode 100644 engine/opts/opts_unix.go create mode 100644 
engine/opts/opts_windows.go create mode 100644 engine/opts/quotedstring.go create mode 100644 engine/opts/quotedstring_test.go create mode 100644 engine/opts/runtime.go create mode 100644 engine/opts/ulimit.go create mode 100644 engine/opts/ulimit_test.go create mode 100644 engine/pkg/README.md create mode 100644 engine/pkg/aaparser/aaparser.go create mode 100644 engine/pkg/aaparser/aaparser_test.go create mode 100644 engine/pkg/archive/README.md create mode 100644 engine/pkg/archive/archive.go create mode 100644 engine/pkg/archive/archive_linux.go create mode 100644 engine/pkg/archive/archive_linux_test.go create mode 100644 engine/pkg/archive/archive_other.go create mode 100644 engine/pkg/archive/archive_test.go create mode 100644 engine/pkg/archive/archive_unix.go create mode 100644 engine/pkg/archive/archive_unix_test.go create mode 100644 engine/pkg/archive/archive_windows.go create mode 100644 engine/pkg/archive/archive_windows_test.go create mode 100644 engine/pkg/archive/changes.go create mode 100644 engine/pkg/archive/changes_linux.go create mode 100644 engine/pkg/archive/changes_other.go create mode 100644 engine/pkg/archive/changes_posix_test.go create mode 100644 engine/pkg/archive/changes_test.go create mode 100644 engine/pkg/archive/changes_unix.go create mode 100644 engine/pkg/archive/changes_windows.go create mode 100644 engine/pkg/archive/copy.go create mode 100644 engine/pkg/archive/copy_unix.go create mode 100644 engine/pkg/archive/copy_unix_test.go create mode 100644 engine/pkg/archive/copy_windows.go create mode 100644 engine/pkg/archive/diff.go create mode 100644 engine/pkg/archive/diff_test.go create mode 100644 engine/pkg/archive/example_changes.go create mode 100644 engine/pkg/archive/testdata/broken.tar create mode 100644 engine/pkg/archive/time_linux.go create mode 100644 engine/pkg/archive/time_unsupported.go create mode 100644 engine/pkg/archive/utils_test.go create mode 100644 engine/pkg/archive/whiteouts.go create mode 100644 
engine/pkg/archive/wrap.go create mode 100644 engine/pkg/archive/wrap_test.go create mode 100644 engine/pkg/authorization/api.go create mode 100644 engine/pkg/authorization/api_test.go create mode 100644 engine/pkg/authorization/authz.go create mode 100644 engine/pkg/authorization/authz_unix_test.go create mode 100644 engine/pkg/authorization/middleware.go create mode 100644 engine/pkg/authorization/middleware_test.go create mode 100644 engine/pkg/authorization/middleware_unix_test.go create mode 100644 engine/pkg/authorization/plugin.go create mode 100644 engine/pkg/authorization/response.go create mode 100644 engine/pkg/broadcaster/unbuffered.go create mode 100644 engine/pkg/broadcaster/unbuffered_test.go create mode 100644 engine/pkg/capabilities/caps.go create mode 100644 engine/pkg/capabilities/caps_test.go create mode 100644 engine/pkg/chrootarchive/archive.go create mode 100644 engine/pkg/chrootarchive/archive_test.go create mode 100644 engine/pkg/chrootarchive/archive_unix.go create mode 100644 engine/pkg/chrootarchive/archive_unix_test.go create mode 100644 engine/pkg/chrootarchive/archive_windows.go create mode 100644 engine/pkg/chrootarchive/chroot_linux.go create mode 100644 engine/pkg/chrootarchive/chroot_unix.go create mode 100644 engine/pkg/chrootarchive/diff.go create mode 100644 engine/pkg/chrootarchive/diff_unix.go create mode 100644 engine/pkg/chrootarchive/diff_windows.go create mode 100644 engine/pkg/chrootarchive/init_unix.go create mode 100644 engine/pkg/chrootarchive/init_windows.go create mode 100644 engine/pkg/containerfs/archiver.go create mode 100644 engine/pkg/containerfs/containerfs.go create mode 100644 engine/pkg/containerfs/containerfs_unix.go create mode 100644 engine/pkg/containerfs/containerfs_windows.go create mode 100644 engine/pkg/devicemapper/devmapper.go create mode 100644 engine/pkg/devicemapper/devmapper_log.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper.go create mode 100644 
engine/pkg/devicemapper/devmapper_wrapper_dynamic.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go create mode 100644 engine/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go create mode 100644 engine/pkg/devicemapper/ioctl.go create mode 100644 engine/pkg/devicemapper/log.go create mode 100644 engine/pkg/directory/directory.go create mode 100644 engine/pkg/directory/directory_test.go create mode 100644 engine/pkg/directory/directory_unix.go create mode 100644 engine/pkg/directory/directory_windows.go create mode 100644 engine/pkg/discovery/README.md create mode 100644 engine/pkg/discovery/backends.go create mode 100644 engine/pkg/discovery/discovery.go create mode 100644 engine/pkg/discovery/discovery_test.go create mode 100644 engine/pkg/discovery/entry.go create mode 100644 engine/pkg/discovery/file/file.go create mode 100644 engine/pkg/discovery/file/file_test.go create mode 100644 engine/pkg/discovery/generator.go create mode 100644 engine/pkg/discovery/generator_test.go create mode 100644 engine/pkg/discovery/kv/kv.go create mode 100644 engine/pkg/discovery/kv/kv_test.go create mode 100644 engine/pkg/discovery/memory/memory.go create mode 100644 engine/pkg/discovery/memory/memory_test.go create mode 100644 engine/pkg/discovery/nodes/nodes.go create mode 100644 engine/pkg/discovery/nodes/nodes_test.go create mode 100644 engine/pkg/dmesg/dmesg_linux.go create mode 100644 engine/pkg/dmesg/dmesg_linux_test.go create mode 100644 engine/pkg/filenotify/filenotify.go create mode 100644 engine/pkg/filenotify/fsnotify.go create mode 100644 engine/pkg/filenotify/poller.go create mode 100644 engine/pkg/filenotify/poller_test.go create mode 100644 engine/pkg/fileutils/fileutils.go create mode 100644 engine/pkg/fileutils/fileutils_darwin.go create mode 100644 engine/pkg/fileutils/fileutils_test.go create mode 100644 
engine/pkg/fileutils/fileutils_unix.go create mode 100644 engine/pkg/fileutils/fileutils_windows.go create mode 100644 engine/pkg/fsutils/fsutils_linux.go create mode 100644 engine/pkg/fsutils/fsutils_linux_test.go create mode 100644 engine/pkg/homedir/homedir_linux.go create mode 100644 engine/pkg/homedir/homedir_others.go create mode 100644 engine/pkg/homedir/homedir_test.go create mode 100644 engine/pkg/homedir/homedir_unix.go create mode 100644 engine/pkg/homedir/homedir_windows.go create mode 100644 engine/pkg/idtools/idtools.go create mode 100644 engine/pkg/idtools/idtools_test.go create mode 100644 engine/pkg/idtools/idtools_unix.go create mode 100644 engine/pkg/idtools/idtools_unix_test.go create mode 100644 engine/pkg/idtools/idtools_windows.go create mode 100644 engine/pkg/idtools/usergroupadd_linux.go create mode 100644 engine/pkg/idtools/usergroupadd_unsupported.go create mode 100644 engine/pkg/idtools/utils_unix.go create mode 100644 engine/pkg/ioutils/buffer.go create mode 100644 engine/pkg/ioutils/buffer_test.go create mode 100644 engine/pkg/ioutils/bytespipe.go create mode 100644 engine/pkg/ioutils/bytespipe_test.go create mode 100644 engine/pkg/ioutils/fswriters.go create mode 100644 engine/pkg/ioutils/fswriters_test.go create mode 100644 engine/pkg/ioutils/readers.go create mode 100644 engine/pkg/ioutils/readers_test.go create mode 100644 engine/pkg/ioutils/temp_unix.go create mode 100644 engine/pkg/ioutils/temp_windows.go create mode 100644 engine/pkg/ioutils/writeflusher.go create mode 100644 engine/pkg/ioutils/writers.go create mode 100644 engine/pkg/ioutils/writers_test.go create mode 100644 engine/pkg/jsonmessage/jsonmessage.go create mode 100644 engine/pkg/jsonmessage/jsonmessage_test.go create mode 100644 engine/pkg/locker/README.md create mode 100644 engine/pkg/locker/locker.go create mode 100644 engine/pkg/locker/locker_test.go create mode 100644 engine/pkg/longpath/longpath.go create mode 100644 engine/pkg/longpath/longpath_test.go 
create mode 100644 engine/pkg/loopback/attach_loopback.go create mode 100644 engine/pkg/loopback/ioctl.go create mode 100644 engine/pkg/loopback/loop_wrapper.go create mode 100644 engine/pkg/loopback/loopback.go create mode 100644 engine/pkg/mount/flags.go create mode 100644 engine/pkg/mount/flags_freebsd.go create mode 100644 engine/pkg/mount/flags_linux.go create mode 100644 engine/pkg/mount/flags_unsupported.go create mode 100644 engine/pkg/mount/mount.go create mode 100644 engine/pkg/mount/mount_unix_test.go create mode 100644 engine/pkg/mount/mounter_freebsd.go create mode 100644 engine/pkg/mount/mounter_linux.go create mode 100644 engine/pkg/mount/mounter_linux_test.go create mode 100644 engine/pkg/mount/mounter_unsupported.go create mode 100644 engine/pkg/mount/mountinfo.go create mode 100644 engine/pkg/mount/mountinfo_freebsd.go create mode 100644 engine/pkg/mount/mountinfo_linux.go create mode 100644 engine/pkg/mount/mountinfo_linux_test.go create mode 100644 engine/pkg/mount/mountinfo_unsupported.go create mode 100644 engine/pkg/mount/mountinfo_windows.go create mode 100644 engine/pkg/mount/sharedsubtree_linux.go create mode 100644 engine/pkg/mount/sharedsubtree_linux_test.go create mode 100644 engine/pkg/mount/unmount_unix.go create mode 100644 engine/pkg/mount/unmount_unsupported.go create mode 100644 engine/pkg/namesgenerator/cmd/names-generator/main.go create mode 100644 engine/pkg/namesgenerator/names-generator.go create mode 100644 engine/pkg/namesgenerator/names-generator_test.go create mode 100644 engine/pkg/parsers/kernel/kernel.go create mode 100644 engine/pkg/parsers/kernel/kernel_darwin.go create mode 100644 engine/pkg/parsers/kernel/kernel_unix.go create mode 100644 engine/pkg/parsers/kernel/kernel_unix_test.go create mode 100644 engine/pkg/parsers/kernel/kernel_windows.go create mode 100644 engine/pkg/parsers/kernel/uname_linux.go create mode 100644 engine/pkg/parsers/kernel/uname_solaris.go create mode 100644 
engine/pkg/parsers/kernel/uname_unsupported.go create mode 100644 engine/pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 engine/pkg/parsers/operatingsystem/operatingsystem_unix.go create mode 100644 engine/pkg/parsers/operatingsystem/operatingsystem_unix_test.go create mode 100644 engine/pkg/parsers/operatingsystem/operatingsystem_windows.go create mode 100644 engine/pkg/parsers/parsers.go create mode 100644 engine/pkg/parsers/parsers_test.go create mode 100644 engine/pkg/pidfile/pidfile.go create mode 100644 engine/pkg/pidfile/pidfile_darwin.go create mode 100644 engine/pkg/pidfile/pidfile_test.go create mode 100644 engine/pkg/pidfile/pidfile_unix.go create mode 100644 engine/pkg/pidfile/pidfile_windows.go create mode 100644 engine/pkg/platform/architecture_linux.go create mode 100644 engine/pkg/platform/architecture_unix.go create mode 100644 engine/pkg/platform/architecture_windows.go create mode 100644 engine/pkg/platform/platform.go create mode 100644 engine/pkg/plugingetter/getter.go create mode 100644 engine/pkg/plugins/client.go create mode 100644 engine/pkg/plugins/client_test.go create mode 100644 engine/pkg/plugins/discovery.go create mode 100644 engine/pkg/plugins/discovery_test.go create mode 100644 engine/pkg/plugins/discovery_unix.go create mode 100644 engine/pkg/plugins/discovery_unix_test.go create mode 100644 engine/pkg/plugins/discovery_windows.go create mode 100644 engine/pkg/plugins/errors.go create mode 100644 engine/pkg/plugins/plugin_test.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/README.md create mode 100644 engine/pkg/plugins/pluginrpc-gen/fixtures/foo.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/main.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/parser.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/parser_test.go create mode 100644 engine/pkg/plugins/pluginrpc-gen/template.go create mode 
100644 engine/pkg/plugins/plugins.go create mode 100644 engine/pkg/plugins/plugins_unix.go create mode 100644 engine/pkg/plugins/plugins_windows.go create mode 100644 engine/pkg/plugins/transport/http.go create mode 100644 engine/pkg/plugins/transport/http_test.go create mode 100644 engine/pkg/plugins/transport/transport.go create mode 100644 engine/pkg/pools/pools.go create mode 100644 engine/pkg/pools/pools_test.go create mode 100644 engine/pkg/progress/progress.go create mode 100644 engine/pkg/progress/progressreader.go create mode 100644 engine/pkg/progress/progressreader_test.go create mode 100644 engine/pkg/pubsub/publisher.go create mode 100644 engine/pkg/pubsub/publisher_test.go create mode 100644 engine/pkg/reexec/README.md create mode 100644 engine/pkg/reexec/command_linux.go create mode 100644 engine/pkg/reexec/command_unix.go create mode 100644 engine/pkg/reexec/command_unsupported.go create mode 100644 engine/pkg/reexec/command_windows.go create mode 100644 engine/pkg/reexec/reexec.go create mode 100644 engine/pkg/reexec/reexec_test.go create mode 100644 engine/pkg/signal/README.md create mode 100644 engine/pkg/signal/signal.go create mode 100644 engine/pkg/signal/signal_darwin.go create mode 100644 engine/pkg/signal/signal_freebsd.go create mode 100644 engine/pkg/signal/signal_linux.go create mode 100644 engine/pkg/signal/signal_linux_mipsx.go create mode 100644 engine/pkg/signal/signal_linux_test.go create mode 100644 engine/pkg/signal/signal_test.go create mode 100644 engine/pkg/signal/signal_unix.go create mode 100644 engine/pkg/signal/signal_unsupported.go create mode 100644 engine/pkg/signal/signal_windows.go create mode 100644 engine/pkg/signal/testfiles/main.go create mode 100644 engine/pkg/signal/trap.go create mode 100644 engine/pkg/signal/trap_linux_test.go create mode 100644 engine/pkg/stdcopy/stdcopy.go create mode 100644 engine/pkg/stdcopy/stdcopy_test.go create mode 100644 engine/pkg/streamformatter/streamformatter.go create mode 100644 
engine/pkg/streamformatter/streamformatter_test.go create mode 100644 engine/pkg/streamformatter/streamwriter.go create mode 100644 engine/pkg/streamformatter/streamwriter_test.go create mode 100644 engine/pkg/stringid/README.md create mode 100644 engine/pkg/stringid/stringid.go create mode 100644 engine/pkg/stringid/stringid_test.go create mode 100644 engine/pkg/symlink/LICENSE.APACHE create mode 100644 engine/pkg/symlink/LICENSE.BSD create mode 100644 engine/pkg/symlink/README.md create mode 100644 engine/pkg/symlink/fs.go create mode 100644 engine/pkg/symlink/fs_unix.go create mode 100644 engine/pkg/symlink/fs_unix_test.go create mode 100644 engine/pkg/symlink/fs_windows.go create mode 100644 engine/pkg/sysinfo/README.md create mode 100644 engine/pkg/sysinfo/numcpu.go create mode 100644 engine/pkg/sysinfo/numcpu_linux.go create mode 100644 engine/pkg/sysinfo/numcpu_windows.go create mode 100644 engine/pkg/sysinfo/sysinfo.go create mode 100644 engine/pkg/sysinfo/sysinfo_linux.go create mode 100644 engine/pkg/sysinfo/sysinfo_linux_test.go create mode 100644 engine/pkg/sysinfo/sysinfo_test.go create mode 100644 engine/pkg/sysinfo/sysinfo_unix.go create mode 100644 engine/pkg/sysinfo/sysinfo_windows.go create mode 100644 engine/pkg/system/args_windows.go create mode 100644 engine/pkg/system/chtimes.go create mode 100644 engine/pkg/system/chtimes_test.go create mode 100644 engine/pkg/system/chtimes_unix.go create mode 100644 engine/pkg/system/chtimes_unix_test.go create mode 100644 engine/pkg/system/chtimes_windows.go create mode 100644 engine/pkg/system/chtimes_windows_test.go create mode 100644 engine/pkg/system/errors.go create mode 100644 engine/pkg/system/exitcode.go create mode 100644 engine/pkg/system/filesys.go create mode 100644 engine/pkg/system/filesys_windows.go create mode 100644 engine/pkg/system/init.go create mode 100644 engine/pkg/system/init_unix.go create mode 100644 engine/pkg/system/init_windows.go create mode 100644 engine/pkg/system/lcow.go 
create mode 100644 engine/pkg/system/lcow_unix.go create mode 100644 engine/pkg/system/lcow_windows.go create mode 100644 engine/pkg/system/lstat_unix.go create mode 100644 engine/pkg/system/lstat_unix_test.go create mode 100644 engine/pkg/system/lstat_windows.go create mode 100644 engine/pkg/system/meminfo.go create mode 100644 engine/pkg/system/meminfo_linux.go create mode 100644 engine/pkg/system/meminfo_unix_test.go create mode 100644 engine/pkg/system/meminfo_unsupported.go create mode 100644 engine/pkg/system/meminfo_windows.go create mode 100644 engine/pkg/system/mknod.go create mode 100644 engine/pkg/system/mknod_windows.go create mode 100644 engine/pkg/system/path.go create mode 100644 engine/pkg/system/path_unix.go create mode 100644 engine/pkg/system/path_windows.go create mode 100644 engine/pkg/system/path_windows_test.go create mode 100644 engine/pkg/system/process_unix.go create mode 100644 engine/pkg/system/process_windows.go create mode 100644 engine/pkg/system/rm.go create mode 100644 engine/pkg/system/rm_test.go create mode 100644 engine/pkg/system/stat_darwin.go create mode 100644 engine/pkg/system/stat_freebsd.go create mode 100644 engine/pkg/system/stat_linux.go create mode 100644 engine/pkg/system/stat_openbsd.go create mode 100644 engine/pkg/system/stat_solaris.go create mode 100644 engine/pkg/system/stat_unix.go create mode 100644 engine/pkg/system/stat_unix_test.go create mode 100644 engine/pkg/system/stat_windows.go create mode 100644 engine/pkg/system/syscall_unix.go create mode 100644 engine/pkg/system/syscall_windows.go create mode 100644 engine/pkg/system/syscall_windows_test.go create mode 100644 engine/pkg/system/umask.go create mode 100644 engine/pkg/system/umask_windows.go create mode 100644 engine/pkg/system/utimes_freebsd.go create mode 100644 engine/pkg/system/utimes_linux.go create mode 100644 engine/pkg/system/utimes_unix_test.go create mode 100644 engine/pkg/system/utimes_unsupported.go create mode 100644 
engine/pkg/system/xattrs_linux.go create mode 100644 engine/pkg/system/xattrs_unsupported.go create mode 100644 engine/pkg/tailfile/tailfile.go create mode 100644 engine/pkg/tailfile/tailfile_test.go create mode 100644 engine/pkg/tarsum/builder_context.go create mode 100644 engine/pkg/tarsum/builder_context_test.go create mode 100644 engine/pkg/tarsum/fileinfosums.go create mode 100644 engine/pkg/tarsum/fileinfosums_test.go create mode 100644 engine/pkg/tarsum/tarsum.go create mode 100644 engine/pkg/tarsum/tarsum_spec.md create mode 100644 engine/pkg/tarsum/tarsum_test.go create mode 100644 engine/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 engine/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 engine/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 engine/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 engine/pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 engine/pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 engine/pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 engine/pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 engine/pkg/tarsum/testdata/xattr/json create mode 100644 engine/pkg/tarsum/testdata/xattr/layer.tar create mode 100644 engine/pkg/tarsum/versioning.go create mode 100644 engine/pkg/tarsum/versioning_test.go create mode 100644 engine/pkg/tarsum/writercloser.go create mode 100644 engine/pkg/term/ascii.go create mode 100644 engine/pkg/term/ascii_test.go create mode 100644 engine/pkg/term/proxy.go create mode 100644 engine/pkg/term/proxy_test.go create mode 100644 engine/pkg/term/tc.go create mode 100644 engine/pkg/term/term.go create mode 100644 engine/pkg/term/term_linux_test.go create mode 100644 engine/pkg/term/term_windows.go 
create mode 100644 engine/pkg/term/termios_bsd.go create mode 100644 engine/pkg/term/termios_linux.go create mode 100644 engine/pkg/term/windows/ansi_reader.go create mode 100644 engine/pkg/term/windows/ansi_writer.go create mode 100644 engine/pkg/term/windows/console.go create mode 100644 engine/pkg/term/windows/windows.go create mode 100644 engine/pkg/term/windows/windows_test.go create mode 100644 engine/pkg/term/winsize.go create mode 100644 engine/pkg/truncindex/truncindex.go create mode 100644 engine/pkg/truncindex/truncindex_test.go create mode 100644 engine/pkg/urlutil/urlutil.go create mode 100644 engine/pkg/urlutil/urlutil_test.go create mode 100644 engine/pkg/useragent/README.md create mode 100644 engine/pkg/useragent/useragent.go create mode 100644 engine/pkg/useragent/useragent_test.go create mode 100644 engine/plugin/backend_linux.go create mode 100644 engine/plugin/backend_linux_test.go create mode 100644 engine/plugin/backend_unsupported.go create mode 100644 engine/plugin/blobstore.go create mode 100644 engine/plugin/defs.go create mode 100644 engine/plugin/errors.go create mode 100644 engine/plugin/events.go create mode 100644 engine/plugin/executor/containerd/containerd.go create mode 100644 engine/plugin/manager.go create mode 100644 engine/plugin/manager_linux.go create mode 100644 engine/plugin/manager_linux_test.go create mode 100644 engine/plugin/manager_test.go create mode 100644 engine/plugin/manager_windows.go create mode 100644 engine/plugin/store.go create mode 100644 engine/plugin/store_test.go create mode 100644 engine/plugin/v2/plugin.go create mode 100644 engine/plugin/v2/plugin_linux.go create mode 100644 engine/plugin/v2/plugin_unsupported.go create mode 100644 engine/plugin/v2/settable.go create mode 100644 engine/plugin/v2/settable_test.go create mode 100644 engine/poule.yml create mode 100644 engine/profiles/apparmor/apparmor.go create mode 100644 engine/profiles/apparmor/template.go create mode 100755 
engine/profiles/seccomp/default.json create mode 100755 engine/profiles/seccomp/fixtures/example.json create mode 100644 engine/profiles/seccomp/generate.go create mode 100644 engine/profiles/seccomp/seccomp.go create mode 100644 engine/profiles/seccomp/seccomp_default.go create mode 100644 engine/profiles/seccomp/seccomp_test.go create mode 100644 engine/profiles/seccomp/seccomp_unsupported.go create mode 100644 engine/project/ARM.md create mode 100644 engine/project/BRANCHES-AND-TAGS.md create mode 100644 engine/project/GOVERNANCE.md create mode 100644 engine/project/IRC-ADMINISTRATION.md create mode 100644 engine/project/ISSUE-TRIAGE.md create mode 100644 engine/project/PACKAGE-REPO-MAINTENANCE.md create mode 100644 engine/project/PACKAGERS.md create mode 100644 engine/project/PATCH-RELEASES.md create mode 100644 engine/project/PRINCIPLES.md create mode 100644 engine/project/README.md create mode 100644 engine/project/RELEASE-PROCESS.md create mode 100644 engine/project/REVIEWING.md create mode 100644 engine/project/TOOLS.md create mode 100644 engine/reference/errors.go create mode 100644 engine/reference/store.go create mode 100644 engine/reference/store_test.go create mode 100644 engine/registry/auth.go create mode 100644 engine/registry/auth_test.go create mode 100644 engine/registry/config.go create mode 100644 engine/registry/config_test.go create mode 100644 engine/registry/config_unix.go create mode 100644 engine/registry/config_windows.go create mode 100644 engine/registry/endpoint_test.go create mode 100644 engine/registry/endpoint_v1.go create mode 100644 engine/registry/errors.go create mode 100644 engine/registry/registry.go create mode 100644 engine/registry/registry_mock_test.go create mode 100644 engine/registry/registry_test.go create mode 100644 engine/registry/resumable/resumablerequestreader.go create mode 100644 engine/registry/resumable/resumablerequestreader_test.go create mode 100644 engine/registry/service.go create mode 100644 
engine/registry/service_v2.go create mode 100644 engine/registry/session.go create mode 100644 engine/registry/types.go create mode 100644 engine/reports/2017-05-01.md create mode 100644 engine/reports/2017-05-08.md create mode 100644 engine/reports/2017-05-15.md create mode 100644 engine/reports/2017-06-05.md create mode 100644 engine/reports/2017-06-12.md create mode 100644 engine/reports/2017-06-26.md create mode 100644 engine/reports/builder/2017-05-01.md create mode 100644 engine/reports/builder/2017-05-08.md create mode 100644 engine/reports/builder/2017-05-15.md create mode 100644 engine/reports/builder/2017-05-22.md create mode 100644 engine/reports/builder/2017-05-29.md create mode 100644 engine/reports/builder/2017-06-05.md create mode 100644 engine/reports/builder/2017-06-12.md create mode 100644 engine/reports/builder/2017-06-26.md create mode 100644 engine/reports/builder/2017-07-10.md create mode 100644 engine/reports/builder/2017-07-17.md create mode 100644 engine/restartmanager/restartmanager.go create mode 100644 engine/restartmanager/restartmanager_test.go create mode 100644 engine/rootless/rootless.go create mode 100644 engine/rootless/specconv/specconv_linux.go create mode 100644 engine/runconfig/config.go create mode 100644 engine/runconfig/config_test.go create mode 100644 engine/runconfig/config_unix.go create mode 100644 engine/runconfig/config_windows.go create mode 100644 engine/runconfig/errors.go create mode 100644 engine/runconfig/fixtures/unix/container_config_1_14.json create mode 100644 engine/runconfig/fixtures/unix/container_config_1_17.json create mode 100644 engine/runconfig/fixtures/unix/container_config_1_19.json create mode 100644 engine/runconfig/fixtures/unix/container_hostconfig_1_14.json create mode 100644 engine/runconfig/fixtures/unix/container_hostconfig_1_19.json create mode 100644 engine/runconfig/fixtures/windows/container_config_1_19.json create mode 100644 engine/runconfig/hostconfig.go create mode 100644 
engine/runconfig/hostconfig_test.go create mode 100644 engine/runconfig/hostconfig_unix.go create mode 100644 engine/runconfig/hostconfig_windows.go create mode 100644 engine/runconfig/hostconfig_windows_test.go create mode 100644 engine/runconfig/opts/parse.go create mode 100644 engine/vendor.conf create mode 100644 engine/vendor/code.cloudfoundry.org/clock/LICENSE create mode 100644 engine/vendor/code.cloudfoundry.org/clock/NOTICE create mode 100644 engine/vendor/code.cloudfoundry.org/clock/README.md create mode 100644 engine/vendor/code.cloudfoundry.org/clock/clock.go create mode 100644 engine/vendor/code.cloudfoundry.org/clock/package.go create mode 100644 engine/vendor/code.cloudfoundry.org/clock/ticker.go create mode 100644 engine/vendor/code.cloudfoundry.org/clock/timer.go create mode 100644 engine/vendor/github.com/containerd/go-runc/LICENSE create mode 100644 engine/vendor/github.com/containerd/go-runc/README.md create mode 100644 engine/vendor/github.com/containerd/go-runc/command_linux.go create mode 100644 engine/vendor/github.com/containerd/go-runc/command_other.go create mode 100644 engine/vendor/github.com/containerd/go-runc/console.go create mode 100644 engine/vendor/github.com/containerd/go-runc/container.go create mode 100644 engine/vendor/github.com/containerd/go-runc/events.go create mode 100644 engine/vendor/github.com/containerd/go-runc/io.go create mode 100644 engine/vendor/github.com/containerd/go-runc/io_unix.go create mode 100644 engine/vendor/github.com/containerd/go-runc/io_windows.go create mode 100644 engine/vendor/github.com/containerd/go-runc/monitor.go create mode 100644 engine/vendor/github.com/containerd/go-runc/runc.go create mode 100644 engine/vendor/github.com/containerd/go-runc/utils.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/LICENSE create mode 100644 engine/vendor/github.com/containerd/ttrpc/README.md create mode 100644 engine/vendor/github.com/containerd/ttrpc/channel.go create mode 100644 
engine/vendor/github.com/containerd/ttrpc/client.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/codec.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/config.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/handshake.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/interceptor.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/metadata.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/server.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/services.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/types.go create mode 100644 engine/vendor/github.com/containerd/ttrpc/unixcreds_linux.go create mode 100644 engine/vendor/github.com/golang/gddo/LICENSE create mode 100644 engine/vendor/github.com/golang/gddo/README.markdown create mode 100644 engine/vendor/github.com/golang/gddo/httputil/buster.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/header/header.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/httputil.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/negotiate.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/respbuf.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/static.go create mode 100644 engine/vendor/github.com/golang/gddo/httputil/transport.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/README.rst create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go create mode 100644 
engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/README.md create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/protos/command_line.proto create mode 100644 engine/vendor/github.com/grpc-ecosystem/grpc-opentracing/python/examples/protos/store.proto create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/LICENSE create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/README.md create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/edges.go create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/iradix.go create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/iter.go create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/node.go create mode 100644 engine/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go create mode 100644 engine/vendor/github.com/moby/buildkit/LICENSE create mode 100644 engine/vendor/github.com/moby/buildkit/README.md create mode 100644 engine/vendor/github.com/moby/buildkit/api/services/control/control.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/api/services/control/control.proto create mode 100644 engine/vendor/github.com/moby/buildkit/api/services/control/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/api/types/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/api/types/worker.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/api/types/worker.proto create mode 100644 
engine/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/path.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/manager.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/metadata.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/metadata/metadata.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/refs.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/export.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/import.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go create 
mode 100644 engine/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go create mode 100644 engine/vendor/github.com/moby/buildkit/cache/util/fsutil.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/build.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/buildid/metadata.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/client.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/client_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/client_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/connhelper/connhelper.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/diskusage.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/exporters.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/filter.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/graph.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/exec.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/fileop.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/marshal.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/meta.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/resolver.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/source.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/llb/state.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/prune.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/solve.go create mode 100644 engine/vendor/github.com/moby/buildkit/client/workers.go create mode 100644 engine/vendor/github.com/moby/buildkit/control/control.go create 
mode 100644 engine/vendor/github.com/moby/buildkit/control/gateway/gateway.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/executor.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/hosts.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/mounts.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/spec.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/oci/user.go create mode 100644 engine/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go create mode 100644 engine/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go create mode 100644 engine/vendor/github.com/moby/buildkit/exporter/exporter.go create mode 100644 engine/vendor/github.com/moby/buildkit/exporter/local/export.go create mode 100644 engine/vendor/github.com/moby/buildkit/exporter/tar/export.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go create mode 100644 
engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go create mode 100644 
engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/frontend.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/frontend/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/go.mod create mode 100644 engine/vendor/github.com/moby/buildkit/identity/randomid.go 
create mode 100644 engine/vendor/github.com/moby/buildkit/session/auth/auth.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/auth/auth.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/auth/auth.proto create mode 100644 engine/vendor/github.com/moby/buildkit/session/auth/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/content/attachable.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/content/caller.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/context.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/filesync.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/filesync.proto create mode 100644 engine/vendor/github.com/moby/buildkit/session/filesync/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/grpc.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/grpchijack/dial.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/manager.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/secrets/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/secrets/secrets.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/secrets/secrets.proto create mode 100644 engine/vendor/github.com/moby/buildkit/session/session.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/sshforward/copy.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/sshforward/generate.go create mode 100644 
engine/vendor/github.com/moby/buildkit/session/sshforward/ssh.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto create mode 100644 engine/vendor/github.com/moby/buildkit/session/upload/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/upload/upload.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/upload/upload.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/session/upload/upload.proto create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/blobmapping/snapshotter.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/localmounter.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/snapshot/snapshotter.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/cachekey.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/cachemanager.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/cachestorage.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/combinedcache.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/edge.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/exporter.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/index.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/jobs.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go create 
mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/memorycachestorage.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/attr.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/caps.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/const.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/ops.proto create mode 100644 engine/vendor/github.com/moby/buildkit/solver/pb/platform.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/progress.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/solver/scheduler.go create mode 100644 
engine/vendor/github.com/moby/buildkit/solver/types.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/git/gitsource.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/gitidentifier.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/http/httpsource.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/http/transport.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/identifier.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/local/local.go create mode 100644 engine/vendor/github.com/moby/buildkit/source/manager.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/apicaps/caps.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto create mode 100644 engine/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_binary.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_binary.go create mode 100644 
engine/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_binary.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/check.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_binary.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check_riscv64.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/cond/cond.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/buffer.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/copy.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go create mode 100644 
engine/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/pusher.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/contentutil/refs.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/entitlements/security_linux.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/imageutil/config.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/imageutil/schema1.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/leaseutil/manager.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/network/host.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/network/network.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/network/none.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/progress/logs/logs.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/progress/multireader.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/progress/multiwriter.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/progress/progress.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/resolver/resolver.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/path_unix.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/path_windows.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go create mode 100644 
engine/vendor/github.com/moby/buildkit/util/throttle/throttle.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/tracing/multispan.go create mode 100644 engine/vendor/github.com/moby/buildkit/util/tracing/tracing.go create mode 100644 engine/vendor/github.com/moby/buildkit/worker/cacheresult.go create mode 100644 engine/vendor/github.com/moby/buildkit/worker/filter.go create mode 100644 engine/vendor/github.com/moby/buildkit/worker/result.go create mode 100644 engine/vendor/github.com/moby/buildkit/worker/worker.go create mode 100644 engine/vendor/github.com/moby/buildkit/worker/workercontroller.go create mode 100644 engine/vendor/github.com/spf13/cobra/LICENSE.txt create mode 100644 engine/vendor/github.com/spf13/cobra/README.md create mode 100644 engine/vendor/github.com/spf13/cobra/args.go create mode 100644 engine/vendor/github.com/spf13/cobra/bash_completions.go create mode 100644 engine/vendor/github.com/spf13/cobra/cobra.go create mode 100644 engine/vendor/github.com/spf13/cobra/command.go create mode 100644 engine/vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 engine/vendor/github.com/spf13/cobra/command_win.go create mode 100644 engine/vendor/github.com/spf13/cobra/zsh_completions.go create mode 100644 engine/vendor/github.com/spf13/pflag/LICENSE create mode 100644 engine/vendor/github.com/spf13/pflag/README.md create mode 100644 engine/vendor/github.com/spf13/pflag/bool.go create mode 100644 engine/vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 engine/vendor/github.com/spf13/pflag/bytes.go create mode 100644 engine/vendor/github.com/spf13/pflag/count.go create mode 100644 engine/vendor/github.com/spf13/pflag/duration.go create mode 100644 engine/vendor/github.com/spf13/pflag/duration_slice.go create mode 100644 engine/vendor/github.com/spf13/pflag/flag.go create mode 100644 engine/vendor/github.com/spf13/pflag/float32.go create mode 100644 engine/vendor/github.com/spf13/pflag/float64.go create mode 100644 
engine/vendor/github.com/spf13/pflag/golangflag.go create mode 100644 engine/vendor/github.com/spf13/pflag/int.go create mode 100644 engine/vendor/github.com/spf13/pflag/int16.go create mode 100644 engine/vendor/github.com/spf13/pflag/int32.go create mode 100644 engine/vendor/github.com/spf13/pflag/int64.go create mode 100644 engine/vendor/github.com/spf13/pflag/int8.go create mode 100644 engine/vendor/github.com/spf13/pflag/int_slice.go create mode 100644 engine/vendor/github.com/spf13/pflag/ip.go create mode 100644 engine/vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 engine/vendor/github.com/spf13/pflag/ipmask.go create mode 100644 engine/vendor/github.com/spf13/pflag/ipnet.go create mode 100644 engine/vendor/github.com/spf13/pflag/string.go create mode 100644 engine/vendor/github.com/spf13/pflag/string_array.go create mode 100644 engine/vendor/github.com/spf13/pflag/string_slice.go create mode 100644 engine/vendor/github.com/spf13/pflag/uint.go create mode 100644 engine/vendor/github.com/spf13/pflag/uint16.go create mode 100644 engine/vendor/github.com/spf13/pflag/uint32.go create mode 100644 engine/vendor/github.com/spf13/pflag/uint64.go create mode 100644 engine/vendor/github.com/spf13/pflag/uint8.go create mode 100644 engine/vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/LICENSE create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/copy.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go create mode 100644 
engine/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/hardlink.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diff.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diff_containerd.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diskwriter.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/followlinks.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/fs.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/go.mod create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/hardlinks.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/readme.md create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/receive.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/send.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/stat.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/stat_unix.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/stat_windows.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/tarwriter.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/types/generate.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go 
create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/types/stat.proto create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/types/wire.proto create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/validator.go create mode 100644 engine/vendor/github.com/tonistiigi/fsutil/walker.go create mode 100644 engine/volume/drivers/adapter.go create mode 100644 engine/volume/drivers/extpoint.go create mode 100644 engine/volume/drivers/extpoint_test.go create mode 100644 engine/volume/drivers/proxy.go create mode 100644 engine/volume/drivers/proxy_test.go create mode 100644 engine/volume/local/local.go create mode 100644 engine/volume/local/local_test.go create mode 100644 engine/volume/local/local_unix.go create mode 100644 engine/volume/local/local_windows.go create mode 100644 engine/volume/mounts/lcow_parser.go create mode 100644 engine/volume/mounts/linux_parser.go create mode 100644 engine/volume/mounts/mounts.go create mode 100644 engine/volume/mounts/parser.go create mode 100644 engine/volume/mounts/parser_test.go create mode 100644 engine/volume/mounts/validate.go create mode 100644 engine/volume/mounts/validate_test.go create mode 100644 engine/volume/mounts/validate_unix_test.go create mode 100644 engine/volume/mounts/validate_windows_test.go create mode 100644 engine/volume/mounts/volume_copy.go create mode 100644 engine/volume/mounts/volume_unix.go create mode 100644 engine/volume/mounts/volume_windows.go create mode 100644 engine/volume/mounts/windows_parser.go create mode 100644 engine/volume/service/by.go create mode 100644 engine/volume/service/convert.go create mode 100644 engine/volume/service/db.go create mode 100644 engine/volume/service/db_test.go create mode 100644 engine/volume/service/default_driver.go create mode 100644 engine/volume/service/default_driver_stubs.go create mode 100644 engine/volume/service/errors.go create mode 100644 
engine/volume/service/opts/opts.go create mode 100644 engine/volume/service/restore.go create mode 100644 engine/volume/service/restore_test.go create mode 100644 engine/volume/service/service.go create mode 100644 engine/volume/service/service_linux_test.go create mode 100644 engine/volume/service/service_test.go create mode 100644 engine/volume/service/store.go create mode 100644 engine/volume/service/store_test.go create mode 100644 engine/volume/service/store_unix.go create mode 100644 engine/volume/service/store_windows.go create mode 100644 engine/volume/testutils/testutils.go create mode 100644 engine/volume/volume.go diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..1537797a --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,216 @@ +# Changelog + +For official release notes for Docker Engine CE and Docker Engine EE, visit the +[release notes page](https://docs.docker.com/engine/release-notes/). + +## 19.03.4 (2019-10-17) + +### Networking + +- Rollback libnetwork changes so `DOCKER-USER` iptables chain is back. [docker/engine#404](https://github.com/docker/engine/pull/404) + +## 19.03.3 (2019-10-07) + +### Known Issues + +- `DOCKER-USER` iptables chain is missing [docker/for-linux#810](https://github.com/docker/for-linux/issues/810). Users cannot perform additional container network traffic filtering on top of this iptables chain. You are not affected by this issue if you are not customizing iptables chains on top of `DOCKER-USER`. + + Workaround is to insert the iptables chain after docker daemon starts. + ``` + iptables -N DOCKER-USER + iptables -I FORWARD -j DOCKER-USER + iptables -A DOCKER-USER -j RETURN + ``` +### Builder + +- Fix builder-next: resolve digest for third party registries. [docker/engine#339](https://github.com/docker/engine/pull/339) +- Fix builder-next: user namespace builds when daemon started with socket activation. 
[docker/engine#373](https://github.com/docker/engine/pull/373) +- Fix builder-next: session: release forwarded ssh socket connection per connection. [docker/engine#373](https://github.com/docker/engine/pull/373) +- Fix builder-next: llbsolver: error on multiple cache importers. [docker/engine#373](https://github.com/docker/engine/pull/373) + +### Networking + +- Fix various libnetwork issues for iptables, DNS queries, and more. [docker/engine#330](https://github.com/docker/engine/pull/330) + +### Runtime + +* Bump Golang to 1.12.10. [docker/engine#387](https://github.com/docker/engine/pull/387) +* Bump containerd to 1.2.10. [docker/engine#385](https://github.com/docker/engine/pull/385) +* Distribution: modify warning logic when pulling v2 schema1 manifests. [docker/engine#368](https://github.com/docker/engine/pull/368) +- Fix `POST /images/create` returning a 500 status code when providing an incorrect platform option. [docker/engine#365](https://github.com/docker/engine/pull/365) +- Fix `POST /build` returning a 500 status code when providing an incorrect platform option. [docker/engine#365](https://github.com/docker/engine/pull/365) +- Fix panic on 32-bit ARMv7 caused by misaligned struct member. [docker/engine#363](https://github.com/docker/engine/pull/363) +- Fix to return "invalid parameter" when linking to non-existing container. [docker/engine#352](https://github.com/docker/engine/pull/352) +- Fix overlay2: busy error on mount when using kernel >= 5.2. [docker/engine#332](https://github.com/docker/engine/pull/332) +- Fix `docker rmi` stuck in certain misconfigured systems, e.g. dead NFS share. [docker/engine#335](https://github.com/docker/engine/pull/335) +- Fix handling of blocked I/O of exec'd processes. [docker/engine#296](https://github.com/docker/engine/pull/296) +- Fix jsonfile logger: follow logs stuck when `max-size` is set and `max-file=1`. 
[docker/engine#378](https://github.com/docker/engine/pull/378) + +### Client + +* Mitigate against YAML files that have excessive aliasing. [docker/cli#2119](https://github.com/docker/cli/pull/2119) + + +## 19.03.2 (2019-08-29) + +### Builder + +- Fix "COPY --from" to non-existing directory on Windows. [moby/moby#39695](https://github.com/moby/moby/pull/39695) +- Fix builder-next: metadata commands not having created time in history. [moby/moby#39456](https://github.com/moby/moby/issues/39456) +- Fix builder-next: close progress on layer export error. [moby/moby#39782](https://github.com/moby/moby/pull/39782) +* Update buildkit to 588c73e1e4. [moby/moby#39781](https://github.com/moby/moby/pull/39781) + +### Client + +- Fix Windows absolute path detection on non-Windows. [docker/cli#1990](https://github.com/docker/cli/pull/1990) +- Fix to zsh completion script for `docker login --username`. +- Fix context: produce consistent output on `context create`. [docker/cli#1985](https://github.com/docker/cli/pull/1874) +- Fix support for HTTP proxy env variable. [docker/cli#2059](https://github.com/docker/cli/pull/2059) + +### Logging + +- Fix for reading journald logs. [moby/moby#37819](https://github.com/moby/moby/pull/37819) [moby/moby#38859](https://github.com/moby/moby/pull/38859) + +### Networking + +- Prevent panic on network attach to a container with disabled networking. [moby/moby#39589](https://github.com/moby/moby/pull/39589) + +### Runtime + +* Bump Golang to 1.12.8. +- Fix a potential engine panic when using XFS disk quota for containers. [moby/moby#39644](https://github.com/moby/moby/pull/39644) + +### Swarm + +- Fix an issue where nodes with lots of tasks could not be removed. [docker/swarmkit#2867](https://github.com/docker/swarmkit/pull/2867) + +## 19.03.1 (2019-07-25) + +### Runtime + +- Fix [CVE-2019-14271](https://nvd.nist.gov/vuln/detail/CVE-2019-14271) loading of nsswitch based config inside chroot under Glibc. 
+ +## 19.03.0 (2019-07-22) + +### Deprecation + +* Deprecate image manifest v2 schema1 in favor of v2 schema2. Future version of Docker will remove support for v2 schema1 altogether. [moby/moby#39365](https://github.com/moby/moby/pull/39365) +* Remove v1.10 migrator. [moby/moby#38265](https://github.com/moby/moby/pull/38265) +* Skip deprecated storage-drivers in auto-selection. [moby/moby#38019](https://github.com/moby/moby/pull/38019) +* Deprecate `aufs` storage driver and add warning. [moby/moby#38090](https://github.com/moby/moby/pull/38090) + +### Client + ++ Add `--pids-limit` flag to `docker update`. [docker/cli#1765](https://github.com/docker/cli/pull/1765) ++ Add sysctl support for services. [docker/cli#1754](https://github.com/docker/cli/pull/1754) ++ Add support for `template_driver` in composefiles. [docker/cli#1746](https://github.com/docker/cli/pull/1746) ++ Add --device support for Windows. [docker/cli#1606](https://github.com/docker/cli/pull/1606) ++ Data Path Port configuration support. [docker/cli#1509](https://github.com/docker/cli/pull/1509) ++ Fast context switch: commands. [docker/cli#1501](https://github.com/docker/cli/pull/1501) ++ Support --mount type=bind,bind-nonrecursive,... [docker/cli#1430](https://github.com/docker/cli/pull/1430) ++ Add maximum replicas per node. [docker/cli#1410](https://github.com/docker/cli/pull/1410) [docker/cli#1612](https://github.com/docker/cli/pull/1612) ++ Add option to pull images quietly. [docker/cli#882](https://github.com/docker/cli/pull/882) ++ Add a separate `--domainname` flag. [docker/cli#1130](https://github.com/docker/cli/pull/1130) ++ Add support for secret drivers in `docker stack deploy`. [docker/cli#1783](https://github.com/docker/cli/pull/1783) ++ Add ability to use swarm `Configs` as `CredentialSpecs` on services. [docker/cli#1781](https://github.com/docker/cli/pull/1781) ++ Add `--security-opt systempaths=unconfined` support. 
[docker/cli#1808](https://github.com/docker/cli/pull/1808) ++ Basic framework for writing and running CLI plugins. [docker/cli#1564](https://github.com/docker/cli/pull/1564) [docker/cli#1898](https://github.com/docker/cli/pull/1898) ++ Docker App v0.8.0. [docker/docker-ce-packaging#341](https://github.com/docker/docker-ce-packaging/pull/341) ++ Docker buildx. [docker/docker-ce-packaging#336](https://github.com/docker/docker-ce-packaging/pull/336) +* Bump google.golang.org/grpc to v1.20.1. [docker/cli#1884](https://github.com/docker/cli/pull/1884) +* Cli change to pass driver specific options to docker run. [docker/cli#1767](https://github.com/docker/cli/pull/1767) +* Bump Golang 1.12.5. [docker/cli#1875](https://github.com/docker/cli/pull/1875) +* The `docker system info` output now segregates information relevant to the client and daemon. [docker/cli#1638](https://github.com/docker/cli/pull/1638) +* (Experimental) When targeting Kubernetes, add support for `x-pull-secret: some-pull-secret` in compose-files service configs. [docker/cli#1617](https://github.com/docker/cli/pull/1617) +* (Experimental) When targeting Kubernetes, add support for `x-pull-policy: ` in compose-files service configs. [docker/cli#1617](https://github.com/docker/cli/pull/1617) +* cp, save, export: Prevent overwriting irregular files. [docker/cli#1515](https://github.com/docker/cli/pull/1515) +* Allow npipe volume type on stack file. [docker/cli#1195](https://github.com/docker/cli/pull/1195) +- Fix tty initial size error. [docker/cli#1529](https://github.com/docker/cli/pull/1529) +- Fix labels copying value from environment variables. [docker/cli#1671](https://github.com/docker/cli/pull/1671) + +### API + ++ Update API version to v1.40. [moby/moby#38089](https://github.com/moby/moby/pull/38089) ++ Add warnings to `/info` endpoint, and move detection to the daemon. [moby/moby#37502](https://github.com/moby/moby/pull/37502) ++ Add HEAD support for `/_ping` endpoint. 
[moby/moby#38570](https://github.com/moby/moby/pull/38570) ++ Add `Cache-Control` headers to disable caching `/_ping` endpoint. [moby/moby#38569](https://github.com/moby/moby/pull/38569) ++ Add containerd, runc, and docker-init versions to /version. [moby/moby#37974](https://github.com/moby/moby/pull/37974) +* Add undocumented `/grpc` endpoint and register BuildKit's controller. [moby/moby#38990](https://github.com/moby/moby/pull/38990) + +### Builder + ++ builder-next: allow setting buildkit outputs. [docker/cli#1766](https://github.com/docker/cli/pull/1766) ++ builder-next: look for a Dockerfile specific dockerignore file (eg. Dockerfile.dockerignore) for ignored paths. [docker/engine#215](https://github.com/docker/engine/pull/215) ++ builder-next: automatically detect if process execution is possible for x86, arm and arm64 binaries. [docker/engine#215](https://github.com/docker/engine/pull/215) ++ builder-next: added inline cache support `--cache-from`. [docker/engine#215](https://github.com/docker/engine/pull/215) ++ builder-next: allow outputs configuration. [moby/moby#38898](https://github.com/moby/moby/pull/38898) +* builder-next: update buildkit to 1f89ec1. [docker/engine#260](https://github.com/docker/engine/pull/260) +* builder-next: buildkit now also uses systemd's resolv.conf. [docker/engine#260](https://github.com/docker/engine/pull/260) +* builder-next: use Dockerfile frontend version `docker/dockerfile:1.1` by default. [docker/engine#215](https://github.com/docker/engine/pull/215) +* builder-next: no longer rely on an external image for COPY/ADD operations. [docker/engine#215](https://github.com/docker/engine/pull/215) +- Builder: fix `COPY --from` should preserve ownership. [moby/moby#38599](https://github.com/moby/moby/pull/38599) +- builder-next: fix gcr workaround token cache. [docker/engine#212](https://github.com/docker/engine/pull/212) +- builder-next: call stopprogress on download error. 
[docker/engine#215](https://github.com/docker/engine/pull/215) + +### Experimental + ++ Enable checkpoint/restore of containers with TTY. [moby/moby#38405](https://github.com/moby/moby/pull/38405) ++ LCOW: Add support for memory and CPU limits. [moby/moby#37296](https://github.com/moby/moby/pull/37296) +* Windows: Experimental: ContainerD runtime. [moby/moby#38541](https://github.com/moby/moby/pull/38541) +* Windows: Experimental: LCOW requires Windows RS5+. [moby/moby#39108](https://github.com/moby/moby/pull/39108) + +### Security + +* mount: add BindOptions.NonRecursive (API v1.40). [moby/moby#38003](https://github.com/moby/moby/pull/38003) +* seccomp: whitelist `io_pgetevents()`. [moby/moby#38895](https://github.com/moby/moby/pull/38895) +* seccomp: allow `ptrace(2)` for 4.8+ kernels. [moby/moby#38137](https://github.com/moby/moby/pull/38137) + +### Runtime + ++ Allow running dockerd as a non-root user (Rootless mode). [moby/moby#38050](https://github.com/moby/moby/pull/38050) ++ Rootless: optional support for `lxc-user-nic` SUID binary. [docker/engine#208](https://github.com/docker/engine/pull/208) ++ Add DeviceRequests to HostConfig to support NVIDIA GPUs. [moby/moby#38828](https://github.com/moby/moby/pull/38828) ++ Add `--device` support for Windows. [moby/moby#37638](https://github.com/moby/moby/pull/37638) ++ Add memory.kernelTCP support for linux. [moby/moby#37043](https://github.com/moby/moby/pull/37043) +* Making it possible to pass Windows credential specs directly to the engine. [moby/moby#38777](https://github.com/moby/moby/pull/38777) +* Add pids-limit support in docker update. [moby/moby#32519](https://github.com/moby/moby/pull/32519) +* Add support for exact list of capabilities. [moby/moby#38380](https://github.com/moby/moby/pull/38380) +* daemon: use 'private' ipc mode by default. [moby/moby#35621](https://github.com/moby/moby/pull/35621) +* daemon: switch to semaphore-gated WaitGroup for startup tasks. 
[moby/moby#38301](https://github.com/moby/moby/pull/38301) +* Use idtools.LookupGroup instead of parsing /etc/group file for docker.sock ownership to fix: api.go doesn't respect nsswitch.conf. [moby/moby#38126](https://github.com/moby/moby/pull/38126) +* cli: fix images filter when use multi reference filter. [moby/moby#38171](https://github.com/moby/moby/pull/38171) +* Bump Golang to 1.12.5. [docker/engine#209](https://github.com/docker/engine/pull/209) +* Bump containerd to 1.2.6. [moby/moby#39016](https://github.com/moby/moby/pull/39016) +* Bump runc to 1.0.0-rc8, opencontainers/selinux v1.2.2. [docker/engine#210](https://github.com/docker/engine/pull/210) +* Bump google.golang.org/grpc to v1.20.1. [docker/engine#215](https://github.com/docker/engine/pull/215) +* Performance optimizations in aufs and layer store for massively parallel container creation/removal. [moby/moby#39135](https://github.com/moby/moby/pull/39135) [moby/moby#39209](https://github.com/moby/moby/pull/39209) +* Pass root to chroot to for chroot Tar/Untar (CVE-2018-15664) [moby/moby#39292](https://github.com/moby/moby/pull/39292) +- Fix docker `--init` with `/dev` bind mount. [moby/moby#37665](https://github.com/moby/moby/pull/37665) +- Fix: fetch the right device number when greater than 255 and using `--device-read-bps` option. [moby/moby#39212](https://github.com/moby/moby/pull/39212) +- Fix: "Path does not exist" error when path definitely exists. [moby/moby#39251](https://github.com/moby/moby/pull/39251) +- Fix: [CVE-2018-15664](https://nvd.nist.gov/vuln/detail/CVE-2018-15664) symlink-exchange attack with directory traversal. [moby/moby#39357](https://github.com/moby/moby/pull/39357) +- Fix [CVE-2019-13509](https://nvd.nist.gov/vuln/detail/CVE-2019-13509) in DebugRequestMiddleware: unconditionally scrub data field. + +### Networking + ++ Move IPVLAN driver out of experimental. 
[moby/moby#38983](https://github.com/moby/moby/pull/38983) / [docker/libnetwork#2230](https://github.com/docker/libnetwork/pull/2230) +* Network: add support for 'dangling' filter. [moby/moby#31551](https://github.com/moby/moby/pull/31551) +* Windows: Forcing a nil IP specified in PortBindings to IPv4zero (0.0.0.0). [docker/libnetwork#2376](https://github.com/docker/libnetwork/pull/2376) +- Fix to make sure load balancer sandbox is deleted when a service is updated with `--network-rm`. [docker/engine#213](https://github.com/docker/engine/pull/213) + +### Swarm + ++ Add support for maximum replicas per node. [moby/moby#37940](https://github.com/moby/moby/pull/37940) ++ Add support for GMSA CredentialSpecs from Swarmkit configs. [moby/moby#38632](https://github.com/moby/moby/pull/38632) ++ Add support for sysctl options in services. [moby/moby#37701](https://github.com/moby/moby/pull/37701) ++ Add support for filtering on node labels. [moby/moby#37650](https://github.com/moby/moby/pull/37650) ++ Windows: Support named pipe mounts in docker service create + stack yml. [moby/moby#37400](https://github.com/moby/moby/pull/37400) ++ VXLAN UDP Port configuration support. [moby/moby#38102](https://github.com/moby/moby/pull/38102) +* Use Service Placement Constraints in Enforcer. [docker/swarmkit#2857](https://github.com/docker/swarmkit/pull/2857) +* Increase max recv gRPC message size for nodes and secrets. [docker/engine#256](https://github.com/docker/engine/pull/256) + +### Logging + +* Enable gcplogs driver on windows. [moby/moby#37717](https://github.com/moby/moby/pull/37717) +* Add zero padding for RFC5424 syslog format. [moby/moby#38335](https://github.com/moby/moby/pull/38335) +* Add IMAGE_NAME attribute to journald log events. 
[moby/moby#38032](https://github.com/moby/moby/pull/38032) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..737db485 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,126 @@ +# Contributing to Docker CE + +Want to contribute on Docker CE? Awesome! + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Reporting Issues](#reporting-other-issues) +* [Submitting Pull Requests](#submitting-pull-requests) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + +## Reporting other issues + +There are separate issue-tracking repos for the end user Docker CE +products specialized for a platform. Find your issue or file a new issue +for the platform you are using: + +* https://github.com/docker/for-linux +* https://github.com/docker/for-mac +* https://github.com/docker/for-win +* https://github.com/docker/for-aws +* https://github.com/docker/for-azure + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +If presented with a template when creating an issue, please follow its directions. 
+ +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). + +## Submitting pull requests + +Please see the corresponding `CONTRIBUTING.md` file of each component for more information: + +* Changes to the `engine` should be directed upstream to https://github.com/moby/moby +* Changes to the `cli` should be directed upstream to https://github.com/docker/cli +* Changes to the `packaging` should be directed upstream to https://github.com/docker/docker-ce-packaging + +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members. + Regional, racial, gender, or other abuse will not be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember when you update an issue or respond + to an email you are potentially sending to a large number of people. Please + consider this before you update. Also remember that nobody likes spam. 
+ +* Don't send email to the maintainers: There's no need to send email to the + maintainers to ask them to investigate an issue or to take a look at a + pull request. Instead of sending an email, GitHub mentions should be + used to ping maintainers to review a pull request, a proposal or an + issue. + +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. 
\ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..8c30ea60 --- /dev/null +++ b/Makefile @@ -0,0 +1,56 @@ +CLI_DIR:=$(CURDIR)/components/cli +ENGINE_DIR:=$(CURDIR)/components/engine +PACKAGING_DIR:=$(CURDIR)/components/packaging +MOBY_COMPONENTS_SHA=ab7c118272b02d8672dc0255561d0c4015979780 +MOBY_COMPONENTS_URL=https://raw.githubusercontent.com/docker/moby-extras/$(MOBY_COMPONENTS_SHA)/cmd/moby-components +MOBY_COMPONENTS=.helpers/moby-components-$(MOBY_COMPONENTS_SHA) +VERSION=$(shell cat VERSION) + +.PHONY: help +help: ## show make targets + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf " \033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +.PHONY: test-integration-cli +test-integration-cli: $(CLI_DIR)/build/docker ## test integration of cli and engine + $(MAKE) -C $(ENGINE_DIR) DOCKER_CLI_PATH=$< test-integration-cli + +$(CLI_DIR)/build/docker: + $(MAKE) -C $(CLI_DIR) -f docker.Makefile build + +.PHONY: deb +deb: ## build deb packages + $(MAKE) VERSION=$(VERSION) CLI_DIR=$(CLI_DIR) ENGINE_DIR=$(ENGINE_DIR) -C $(PACKAGING_DIR) deb + +.PHONY: rpm +rpm: ## build rpm packages + $(MAKE) VERSION=$(VERSION) CLI_DIR=$(CLI_DIR) ENGINE_DIR=$(ENGINE_DIR) -C $(PACKAGING_DIR) rpm + +.PHONY: static +static: ## build static packages + $(MAKE) VERSION=$(VERSION) CLI_DIR=$(CLI_DIR) ENGINE_DIR=$(ENGINE_DIR) -C $(PACKAGING_DIR) static + +.PHONY: clean +clean: ## clean the build artifacts + -$(MAKE) -C $(CLI_DIR) clean + -$(MAKE) -C $(ENGINE_DIR) clean + -$(MAKE) -C $(PACKAGING_DIR) clean + +$(MOBY_COMPONENTS): + mkdir -p .helpers + curl -fsSL $(MOBY_COMPONENTS_URL) > $(MOBY_COMPONENTS) + chmod +x $(MOBY_COMPONENTS) + +.PHONY: update-components +update-components: update-components-cli update-components-engine update-components-packaging ## update components using moby extra tool + +.PHONY: update-components-cli +update-components-cli: $(MOBY_COMPONENTS) + $(MOBY_COMPONENTS) 
update cli + +.PHONY: update-components-engine +update-components-engine: $(MOBY_COMPONENTS) + $(MOBY_COMPONENTS) update engine + +.PHONY: update-components-packaging +update-components-packaging: $(MOBY_COMPONENTS) + $(MOBY_COMPONENTS) update packaging diff --git a/README.md b/README.md new file mode 100644 index 00000000..da845446 --- /dev/null +++ b/README.md @@ -0,0 +1,94 @@ +# Docker CE + +This repository hosts open source components of Docker CE products. The +`master` branch serves to unify the upstream components on a regular +basis. Long-lived release branches host the code that goes into a product +version for the lifetime of the product. + +This repository is solely maintained by Docker, Inc. + +## Issues + +There are separate issue-tracking repos for the end user Docker CE +products specialized for a platform. Find your issue or file a new issue +for the platform you are using: + +* https://github.com/docker/for-linux +* https://github.com/docker/for-mac +* https://github.com/docker/for-win +* https://github.com/docker/for-aws +* https://github.com/docker/for-azure + +## Unifying upstream sources + +The `master` branch is a combination of components adapted from +different upstream git repos into a unified directory structure using the +[moby-components](https://github.com/shykes/moby-extras/blob/master/cmd/moby-components) +tool. + +You can view the upstream git repos in the +[components.conf](components.conf) file. Each component is isolated into +its own directory under the [components](components) directory. + +The tool will import each component git history within the appropriate path. + +For example, this shows a commit +is imported into the component `engine` from +[moby/moby@a27b4b8](https://github.com/moby/moby/commit/a27b4b8cb8e838d03a99b6d2b30f76bdaf2f9e5d) +into the `components/engine` directory. 
+ +``` +commit 5c70746915d4589a692cbe50a43cf619ed0b7152 +Author: Andrea Luzzardi +Date: Sat Jan 19 00:13:39 2013 + + Initial commit + Upstream-commit: a27b4b8cb8e838d03a99b6d2b30f76bdaf2f9e5d + Component: engine + + components/engine/container.go | 203 ++++++++++++++++++++++++++++... + components/engine/container_test.go | 186 ++++++++++++++++++++++++++++... + components/engine/docker.go | 112 ++++++++++++++++++++++++++++... + components/engine/docker_test.go | 175 ++++++++++++++++++++++++++++... + components/engine/filesystem.go | 52 ++++++++++++++++++++++++++++... + components/engine/filesystem_test.go | 35 +++++++++++++++++++++++++++ + components/engine/lxc_template.go | 94 ++++++++++++++++++++++++++++... + components/engine/state.go | 48 ++++++++++++++++++++++++++++... + components/engine/utils.go | 115 ++++++++++++++++++++++++++++... + components/engine/utils_test.go | 126 ++++++++++++++++++++++++++++... + 10 files changed, 1146 insertions(+) +``` + +## Updates to `master` branch + +Main development of new features should be directed towards the upstream +git repos. The `master` branch of this repo will periodically pull in new +changes from upstream to provide a point for integration. + +## Branching for release + +When a release is started for Docker CE, a new branch will be created +from `master`. Branch names will be `YY.MM` to represent the time-based +release version of the product, e.g. `17.06`. + +## Adding fixes to release branch + +Note: every commit of a fix should affect files only within one component +directory. + +### Fix available upstream + +A PR cherry-picking the necessary commits should be created against +the release branch. If the cherry-pick cannot be applied cleanly, +the logic of the fix should be ported manually. + +### No fix yet + +First create the PR with the fix for the release branch. Once the fix has +been merged, be sure to port the fix to the respective upstream git repo. 
+ +## Release tags + +There will be a git tag for each release candidate (RC) and general +availability (GA) release. The tag will only point to commits on release +branches. diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..9c95a886 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +19.03.4 diff --git a/cli/.dockerignore b/cli/.dockerignore new file mode 100644 index 00000000..f9fbc560 --- /dev/null +++ b/cli/.dockerignore @@ -0,0 +1,6 @@ +.dockerignore +.git +.gitignore +appveyor.yml +build +circle.yml diff --git a/cli/.mailmap b/cli/.mailmap new file mode 100644 index 00000000..70075a60 --- /dev/null +++ b/cli/.mailmap @@ -0,0 +1,504 @@ +# Generate AUTHORS: scripts/docs/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Aaron L. Xu +Abhinandan Prativadi +Ace Tang +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +AJ Bowen +AJ Bowen +Akihiro Matsushima +Akihiro Suda +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Aleksandrs Fadins +Alessandro Boch +Alex Chen +Alex Ellis +Alexander Larsson +Alexander Morozov +Alexander Morozov +Alexandre Beslic +Alicia Lauerman +Allen Sun +Allen Sun +Andrew Weiss +Andrew Weiss +André Martins +Andy Rothfusz +Andy Smith +Ankush Agarwal +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Anuj Bahuguna +Anuj Bahuguna +Anusha Ragunathan +Ao Li +Arnaud Porterie +Arnaud Porterie +Arthur Gautier +Avi Miller +Ben Bonnefoy +Ben Golub +Ben Toews +Benoit Chesneau +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Bin Liu +Bin Liu +Bingshen Wang +Boaz Shuster +Brandon Philips +Brandon Philips +Brent Salisbury +Brian Goff +Brian Goff +Brian Goff +Chad Faragher +Chander Govindarajan +Chao Wang +Charles Hooper +Chen Chao +Chen Chuanliang +Chen Mingjie +Chen Qiu +Chen Qiu 
<21321229@zju.edu.cn> +Chris Dias +Chris McKinnel +Christopher Biscardi +Christopher Latham +Christy Norman +Chun Chen +Corbin Coleman +Cristian Staretu +Cristian Staretu +Cristian Staretu +CUI Wei cuiwei13 +Daehyeok Mun +Daehyeok Mun +Daehyeok Mun +Dan Feldman +Daniel Dao +Daniel Dao +Daniel Garcia +Daniel Gasienica +Daniel Goosen +Daniel Grunwell +Daniel J Walsh +Daniel Mizyrycki +Daniel Mizyrycki +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Watkins +Danny Yates +Darren Shepherd +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David M. Karr +David Sheets +David Sissitka +David Williamson +Deshi Xiao +Deshi Xiao +Diego Siqueira +Diogo Monica +Dominik Honnef +Doug Davis +Doug Tangren +Elan Ruusamäe +Elan Ruusamäe +Elango Sivanandam +Eric G. Noriega +Eric Hanchrow +Eric Rosenberg +Erica Windisch +Erica Windisch +Erik Hollensbe +Erwin van der Koogh +Euan Kemp +Eugen Krizo +Evan Hazlett +Evelyn Xu +Evgeny Shmarnev +Faiz Khan +Felix Hupfeld +Felix Ruess +Feng Yan +Fengtu Wang +Francisco Carriedo +Frank Rosquin +Frederick F. Kautz IV +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao <1373319223@qq.com> +George Kontridze +Gerwim Feiken +Giampaolo Mancini +Gopikannan Venugopalsamy +Gou Rao +Greg Stephens +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. Charmes +Guillaume J. 
Charmes +Guillaume Le Floch +Gurjeet Singh +Gustav Sinder +Günther Jungbluth +Hakan Özler +Hao Shu Wei +Hao Shu Wei +Harald Albers +Harold Cooper +Harry Zhang +Harry Zhang +Harry Zhang +Harry Zhang +Harshal Patil +Helen Xie +Hollie Teal +Hollie Teal +Hollie Teal +Hu Keping +Huu Nguyen +Hyzhou Zhy +Hyzhou Zhy <1187766782@qq.com> +Ian Campbell +Ian Campbell +Ilya Khlopotov +Jack Laxson +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Lambert +Jake Lambert <32850427+jake-lambert-volusion@users.noreply.github.com> +Jamie Hannaford +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Nickoloff +Jeroen Franse +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jim Galasyn +Jiuyue Ma +Joey Geiger +Joffrey F +Joffrey F +Joffrey F +Johan Euphrosine +John Harris +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Stephens +Jordan Arentsen +Jordan Jennings +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Soref +Josh Wilson +Joyce Jang +Julien Bordellier +Julien Bordellier +Justin Cormack +Justin Cormack +Justin Cormack +Justin Simonelis +Jérôme Petazzoni +Jérôme Petazzoni +Jérôme Petazzoni +K. 
Heller +Kai Qiang Wu (Kennan) +Kai Qiang Wu (Kennan) +Kamil Domański +Kamjar Gerami +Kat Samperi +Ken Cochrane +Ken Herner +Kenfe-Mickaël Laventure +Kevin Feyrer +Kevin Kern +Kevin Meredith +Kir Kolyshkin +Kir Kolyshkin +Kir Kolyshkin +Konrad Kleine +Konstantin Gribov +Konstantin Pelykh +Kotaro Yoshimatsu +Kunal Kushwaha +Kyle Spiers +Kyle Spiers +Lajos Papp +Lei Jitang +Lei Jitang +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Linus Heckemann +Linus Heckemann +Lokesh Mandvekar +Lorenzo Fontana +Louis Opter +Louis Opter +Luca Favatella +Luke Marsden +Lyn +Lynda O'Leary +Lynda O'Leary +Ma Müller +Madhan Raj Mookkandy +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mansi Nahar +Mansi Nahar +Marc Abramowitz +Marcelo Horacio Fortino +Marcus Linke +Marianna Tessel +Mark Oates +Markan Patel +Markus Kortlang +Marsh Macy +Martin Redmond +Martin Redmond +Mary Anthony +Mary Anthony +Mary Anthony moxiegirl +Mateusz Major +Matt Bentley +Matt Schurenko +Matt Williams +Matt Williams +Matthew Heon +Matthew Mosesohn +Matthew Mueller +Matthias Kühnle +Mauricio Garavaglia +Michael Crosby +Michael Crosby +Michael Crosby +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Spetsiotis +Michal Minář +Miguel Angel Alvarez Cabrerizo <30386061+doncicuto@users.noreply.github.com> +Miguel Angel Fernández +Mihai Borobocea +Mike Casas +Mike Goelzer +Milind Chawre +Misty Stanley-Jones +Mohit Soni +Moorthy RS +Moysés Borges +Moysés Borges +Nace Oroz +Nathan LeClaire +Nathan LeClaire +Neil Horman +Nick Russo +Nicolas Borboën +Nigel Poulton +Nik Nyby +Nolan Darilek +O.S. Tezer +O.S. Tezer +Oh Jinkyun +Ouyang Liduo +Patrick Stapleton +Paul Liljenberg +Pavel Tikhomirov +Pawel Konczalski +Peter Choi +Peter Dave Hello +Peter Hsu +Peter Jaffe +Peter Nagy +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Qiang Huang +Qiang Huang +Ray Tsang +Renaud Gaubert +Robert Terhaar +Roberto G. 
Hashioka +Roberto Muñoz Fernández +Roman Dudin +Ross Boucher +Runshen Zhu +Ryan Stelly +Sakeven Jiang +Sandeep Bansal +Sandeep Bansal +Sargun Dhillon +Sean Lee +Sebastiaan van Stijn +Sebastiaan van Stijn +Shaun Kaasten +Shawn Landden +Shengbo Song +Shengbo Song +Shih-Yuan Lee +Shishir Mahajan +Shukui Yang +Shuwei Hao +Shuwei Hao +Sidhartha Mani +Silvin Lubecki +Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> +Sjoerd Langkemper +Solomon Hykes +Solomon Hykes +Solomon Hykes +Soshi Katsuta +Soshi Katsuta +Sridhar Ratnakumar +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Stefan Berger +Stefan Berger +Stefan J. Wernli +Stefan S. +Stefan Scherer +Stefan Scherer +Stephen Day +Stephen Day +Stephen Day +Steve Desmond +Steve Richards stevejr <> +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sun Jianbo +Sunny Gogoi +Sunny Gogoi +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sylvain Bellemare +Sylvain Bellemare +Tangi Colin +Tejesh Mehta +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens +Thomas Gazagnaire +Thomas Krzero +Thomas Léveil +Thomas Léveil +Thomas Riccardi +Thomas Riccardi +Tibor Vass +Tibor Vass +Tim Bart +Tim Bosse +Tim Ruffles +Tim Terhorst +Tim Zju <21651152@zju.edu.cn> +Timothy Hobbs +Toli Kuznets +Tom Barlow +Tom Milligan +Tom Milligan +Tom Sweeney +Tõnis Tiigi +Trishna Guha +Tristan Carel +Tristan Carel +Umesh Yadav +Umesh Yadav +Victor Lyuboslavsky +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Victor Vieux +Viktor Vojnovski +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Demeester +Vincent Demeester +Vishnu Kannan +Vladimir Rutsky +Walter Stanish +Wang Guoliang +Wang Jie +Wang Lei +Wang Ping +Wang Xing +Wang Yuexiao +Wayne Chang +Wayne Song +Wei Wu cizixs +Wenjun Tang +Wewang Xiaorenfine +Will Weaver +Xianglin Gao +Xianlu Bird +Xiaoyu Zhang +Xuecong Liao +Yamasaki Masahide +Yao Zaiyong 
+Yassine Tijani +Yazhong Liu +Yestin Sun +Yi EungJun +Ying Li +Ying Li +Yong Tang +Yosef Fertel +Yu Changchun +Yu Chengxia +Yu Peng +Yu Peng +Yue Zhang +Zachary Jaffee +Zachary Jaffee +ZhangHang +Zhenkun Bi +Zhou Hao +Zhoulin Xie +Zhu Kunjia +Zou Yu diff --git a/cli/AUTHORS b/cli/AUTHORS new file mode 100644 index 00000000..04edcf79 --- /dev/null +++ b/cli/AUTHORS @@ -0,0 +1,716 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `scripts/docs/generate-authors.sh`. + +Aanand Prasad +Aaron L. Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrien Duermael +Adrien Folie +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akihiro Suda +Akim Demaille +Alan Thompson +Albert Callarisa +Aleksa Sarai +Alessandro Boch +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alfred Landrum +Alicia Lauerman +Allen Sun +Alvin Deng +Amen Belayneh +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andrew France +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrey Petrov +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Arnaud Porterie +Ashwini Oruganti +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Boaz Shuster +Bogdan Anton +Boris Pruessmann +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Ce Gao +Cedric Davies +Cezar Sa 
Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +ChaYoung You +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Corey Farrell +Corey Quon +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +dalanlan +Damien Nadé +Dan Cotora +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek McGowan +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Don Kjer +Dong Chen +Doug Davis +Drew Erny +Ed Costello +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eric Curtin +Eric G. Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik St. Martin +Essam A. Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Hupfeld +Felix Rabe +Filip JareÅ¡ +Flavio Crisciani +Florian Klein +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. 
Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Xie +Gianluca Borello +Gildas Cuisinier +Gou Rao +Grant Reaber +Greg Pflaum +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hu Keping +Huayi Zhang +huqun +Huu Nguyen +Hyzhou Zhy +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Lambert +Jake Sanders +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Heiss +Jason Plum +Jay Kamat +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jie Luo +Jilles Oldenbeuving +Jim Galasyn +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard (VM) +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. 
Martinez +John Willis +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonh Wendell +Jordan Jennings +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julien Barbier +Julien Kassar +Julien Maitrehenry +Justas Brazauskas +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +khaled souf +Kim Eik +Kir Kolyshkin +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Kyle Spiers +Lachlan Cooper +Lai Jiangshan +Lars Kellogg-Stedman +Laura Frank +Laurent Erignoux +Lee Gaines +Lei Jitang +Lennie +Leo Gallucci +Lewis Daly +Li Yi +Li Yi +Liang-Chi Hsieh +Lifubang +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Lucas Chan +Luka Hartwig +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marco Mariani +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathieu Champlon +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Mauro Porras P +Max Shytikov +Maxime Petazzoni +Mei ChunTao +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Misty Stanley-Jones +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Moysés Borges +Mrunal Patel +muicoder +Muthukumar R +Máximo Cuadros +MÃ¥rten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +O.S. Tezer +ohmystack +Olle Jonsson +Olli Janatuinen +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Hemmer +Patrick Lang +Paul +Paul Kehrer +Paul Lietar +Paul Weaver +Pavel Pospisil +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +pidster +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +qudongfang +Raghavendra K T +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Rodolfo Ortiz +Rogelio Canedo +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley 
+Sam Neirinck +Sambuddha Basu +Sami Tabet +Samuel Karp +Santhosh Manohar +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +squeegels <1674195+squeegels@users.noreply.github.com> +Srini Brahmaroutu +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Taylor Jones +Tejaswini Duggaraju +Thatcher Peskens +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Smith +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulysses Souza +Umesh Yadav +Valentin Lorentz +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +William Henry +Xianglin Gao +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yue Zhang +Yunxiang Huang +Zachary Romero +zebrilee +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Álex González +Álvaro Lázaro +Átila Camurça Alves +徐俊杰 diff --git a/cli/CONTRIBUTING.md b/cli/CONTRIBUTING.md new file mode 100644 index 00000000..245d3a3a --- /dev/null +++ b/cli/CONTRIBUTING.md @@ -0,0 +1,365 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. 
+ +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/docker/cli/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified on +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolving it. However, if you +have ways to reproduce the issue or have additional information that may help +resolving the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). 
+Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). + +## Quick contribution tips and guidelines + +This section gives the experienced contributor some tips and guidelines. + +### Pull requests are always welcome + +Not sure if that typo is worth a pull request? Found a bug and know how to fix +it? Do it! We will appreciate it. Any significant improvement should be +documented as [a GitHub issue](https://github.com/docker/cli/issues) before +anybody starts working on it. + +We are always thrilled to receive pull requests. We do our best to process them +quickly. If your pull request is not accepted on the first try, +don't get discouraged! Our contributor's guide explains [the review process we +use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + +
Forums + A public forum for users to discuss questions and explore current design patterns and + best practices about Docker and related projects in the Docker Ecosystem. To participate, + just log in with your Docker Hub account on https://forums.docker.com. +
Community Slack + The Docker Community has a dedicated Slack chat to discuss features and issues. You can sign-up with this link. +
Twitter + You can follow Docker's Twitter feed + to get updates on our products. You can also tweet us questions or just + share blogs or stories. +
Stack Overflow + Stack Overflow has over 17000 Docker questions listed. We regularly + monitor Docker questions + and so do many other knowledgeable Docker users. +
+ + +### Conventions + +Fork the repository and make changes on your fork in a feature branch: + +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of + the issue. +- If it's a feature branch, create an enhancement issue to announce + your intentions, and name it XXXX-something where XXXX is the number of the + issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. [Run the full test +suite](README.md) on your branch before +submitting a pull request. + +Update the documentation when creating or modifying features. Test your +documentation changes for clarity, concision, and correctness, as well as a +clean documentation build. See our contributors guide for [our style +guide](https://docs.docker.com/opensource/doc-style) and instructions on [building +the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. + +Pull request descriptions should be as clear as possible and include a reference +to all the issues that they address. + +Commit messages must start with a capitalized and short summary (max. 50 chars) +written in the imperative, followed by an optional, more detailed explanatory +text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Post +a comment after pushing. New commits show up in the pull request automatically, +but the reviewers are notified only when you comment. + +Pull requests must be cleanly rebased on top of master without multiple branches +mixed into the PR. 
+ +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + +Before you make a pull request, squash your commits into logical units of work +using `git rebase -i` and `git push -f`. A logical unit of work is a consistent +set of patches that should be reviewed together: for example, upgrading the +version of a vendored dependency and taking advantage of its now available new +feature constitute two separate units of work. Implementing a new function and +calling it in another file constitute a single logical unit of work. The very +high majority of submissions should have a single commit, so if in doubt: squash +down to one. + +After every commit, make sure the test suite passes. Include documentation +changes in the same pull request so that a revert would remove all traces of +the feature or fix. + +Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in the pull request +description that close an issue. Including references automatically closes the issue +on a merge. + +Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly +from the Git history. + +Please see the [Coding Style](#coding-style) for further guidelines. + +### Merge approval + +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to +indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. + +For more details, see the [MAINTAINERS](MAINTAINERS) page. + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. 
The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +### How can I become a maintainer? 
+ +The procedures for adding new maintainers are explained in the +global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS) +file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/) +repository. + +Don't forget: being a maintainer is a time investment. Make sure you +will have time to make yourself available. You don't have to be a +maintainer to make a difference on the project! + +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: + no regional, racial, gender, or other abuse will be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember when you update an issue or respond + to an email you are potentially sending to a large number of people. Please + consider this before you update. Also remember that nobody likes spam. + +* Don't send email to the maintainers: There's no need to send email to the + maintainers to ask them to investigate an issue or to take a look at a + pull request. Instead of sending an email, GitHub mentions should be + used to ping maintainers to review a pull request, a proposal or an + issue. 
+ +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. 
Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](https://golang.org/doc/effective_go.html). 
The +[Go Blog](https://blog.golang.org) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. \ No newline at end of file diff --git a/cli/Jenkinsfile b/cli/Jenkinsfile new file mode 100644 index 00000000..3416d957 --- /dev/null +++ b/cli/Jenkinsfile @@ -0,0 +1,13 @@ +wrappedNode(label: 'linux && x86_64', cleanWorkspace: true) { + timeout(time: 60, unit: 'MINUTES') { + stage "Git Checkout" + checkout scm + + stage "Run end-to-end test suite" + sh "docker version" + sh "docker info" + sh "E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \ + IMAGE_TAG=clie2e${BUILD_NUMBER} \ + DOCKER_BUILDKIT=1 make -f docker.Makefile test-e2e" + } +} diff --git a/cli/LICENSE b/cli/LICENSE new file mode 100644 index 00000000..9c8e20ab --- /dev/null +++ b/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/cli/MAINTAINERS b/cli/MAINTAINERS new file mode 100644 index 00000000..c0b64509 --- /dev/null +++ b/cli/MAINTAINERS @@ -0,0 +1,136 @@ +# Docker maintainers file +# +# This file describes who runs the docker/cli project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + people = [ + "aaronlehmann", + "albers", + "cpuguy83", + "dnephin", + "justincormack", + "silvin-lubecki", + "stevvooe", + "thajeztah", + "tibor", + "tonistiigi", + "vdemeester", + "vieux", + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. + + people = [ + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. 
They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "programmerq", + "thajeztah" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.albers] + Name = "Harald Albers" + Email = "github@albersweb.de" + GitHub = "albers" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + GitHub = "cpuguy83" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + + [people.programmerq] + Name = "Jeff Anderson" + Email = "jeff@docker.com" + GitHub = "programmerq" + + [people.silvin-lubecki] + Name = "Silvin Lubecki" + Email = "silvin.lubecki@docker.com" + GitHub = "silvin-lubecki" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stevvooe@gmail.com" + GitHub = "stevvooe" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + diff --git a/cli/Makefile b/cli/Makefile new file mode 100644 index 00000000..6f9abf91 --- /dev/null +++ b/cli/Makefile @@ -0,0 +1,102 @@ +# +# github.com/docker/cli +# +all: binary + + +_:=$(shell ./scripts/warn-outside-container 
$(MAKECMDGOALS)) + +.PHONY: clean +clean: ## remove build artifacts + rm -rf ./build/* cli/winresources/rsrc_* ./man/man[1-9] docs/yaml/gen + +.PHONY: test-unit +test-unit: ## run unit tests, to change the output format use: GOTESTSUM_FORMAT=(dots|short|standard-quiet|short-verbose|standard-verbose) make test-unit + gotestsum $(TESTFLAGS) -- $${TESTDIRS:-$(shell go list ./... | grep -vE '/vendor/|/e2e/')} + +.PHONY: test +test: test-unit ## run tests + +.PHONY: test-coverage +test-coverage: ## run test coverage + gotestsum -- -coverprofile=coverage.txt $(shell go list ./... | grep -vE '/vendor/|/e2e/') + +.PHONY: fmt +fmt: + go list -f {{.Dir}} ./... | xargs gofmt -w -s -d + +.PHONY: lint +lint: ## run all the lint tools + gometalinter --config gometalinter.json ./... + +.PHONY: binary +binary: ## build executable for Linux + @echo "WARNING: binary creates a Linux executable. Use cross for macOS or Windows." + ./scripts/build/binary + +.PHONY: plugins +plugins: ## build example CLI plugins + ./scripts/build/plugins + +.PHONY: cross +cross: ## build executable for macOS and Windows + ./scripts/build/cross + +.PHONY: binary-windows +binary-windows: ## build executable for Windows + ./scripts/build/windows + +.PHONY: plugins-windows +plugins-windows: ## build example CLI plugins for Windows + ./scripts/build/plugins-windows + +.PHONY: binary-osx +binary-osx: ## build executable for macOS + ./scripts/build/osx + +.PHONY: plugins-osx +plugins-osx: ## build example CLI plugins for macOS + ./scripts/build/plugins-osx + +.PHONY: dynbinary +dynbinary: ## build dynamically linked binary + ./scripts/build/dynbinary + +vendor: vendor.conf ## check that vendor matches vendor.conf + rm -rf vendor + bash -c 'vndr |& grep -v -i clone' + scripts/validate/check-git-diff vendor + +.PHONY: authors +authors: ## generate AUTHORS file from git history + scripts/docs/generate-authors.sh + +.PHONY: manpages +manpages: ## generate man pages from go source and markdown + 
scripts/docs/generate-man.sh + +.PHONY: yamldocs +yamldocs: ## generate documentation YAML files consumed by docs repo + scripts/docs/generate-yaml.sh + +.PHONY: shellcheck +shellcheck: ## run shellcheck validation + scripts/validate/shellcheck + +.PHONY: help +help: ## print this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + + +cli/compose/schema/bindata.go: cli/compose/schema/data/*.json + go generate github.com/docker/cli/cli/compose/schema + +compose-jsonschema: cli/compose/schema/bindata.go + scripts/validate/check-git-diff cli/compose/schema/bindata.go + +.PHONY: ci-validate +ci-validate: + time make -B vendor + time make -B compose-jsonschema + time make manpages + time make yamldocs diff --git a/cli/NOTICE b/cli/NOTICE new file mode 100644 index 00000000..0c74e15b --- /dev/null +++ b/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/cli/README.md b/cli/README.md new file mode 100644 index 00000000..2964377e --- /dev/null +++ b/cli/README.md @@ -0,0 +1,69 @@ +[![build status](https://circleci.com/gh/docker/cli.svg?style=shield)](https://circleci.com/gh/docker/cli/tree/master) [![Build Status](https://jenkins.dockerproject.org/job/docker/job/cli/job/master/badge/icon)](https://jenkins.dockerproject.org/job/docker/job/cli/job/master/) + +docker/cli +========== + +This repository is the home of the cli used in the Docker CE and +Docker EE products. + +Development +=========== + +`docker/cli` is developed using Docker. + +Build a linux binary: + +``` +$ make -f docker.Makefile binary +``` + +Build binaries for all supported platforms: + +``` +$ make -f docker.Makefile cross +``` + +Run all linting: + +``` +$ make -f docker.Makefile lint +``` + +List all the available targets: + +``` +$ make help +``` + +### In-container development environment + +Start an interactive development environment: + +``` +$ make -f docker.Makefile shell +``` + +In the development environment you can run many tasks, including build binaries: + +``` +$ make binary +``` + +Legal +===== +*Brought to you courtesy of our legal counsel. For more context, +please see the [NOTICE](https://github.com/docker/cli/blob/master/NOTICE) document in this repo.* + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. + +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +Licensing +========= +docker/cli is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. 
diff --git a/cli/TESTING.md b/cli/TESTING.md new file mode 100644 index 00000000..41c1272a --- /dev/null +++ b/cli/TESTING.md @@ -0,0 +1,85 @@ +# Testing + +The following guidelines summarize the testing policy for docker/cli. + +## Unit Test Suite + +All code changes should have unit test coverage. + +Error cases should be tested with unit tests. + +Bug fixes should be covered by new unit tests or additional assertions in +existing unit tests. + +### Details + +The unit test suite follows the standard Go testing convention. Tests are +located in the package directory in `_test.go` files. + +Unit tests should be named using the convention: + +``` +Test +``` + +[Table tests](https://github.com/golang/go/wiki/TableDrivenTests) should be used +where appropriate, but may not be appropriate in all cases. + +Assertions should be made using +[gotest.tools/assert](https://godoc.org/gotest.tools/assert). + +Fakes, and testing utilities can be found in +[internal/test](https://godoc.org/github.com/docker/cli/internal/test) and +[gotest.tools](https://godoc.org/gotest.tools). + +## End-to-End Test Suite + +The end-to-end test suite tests a cli binary against a real API backend. + +### Guidelines + +Each feature (subcommand) should have a single end-to-end test for +the success case. The test should include all (or most) flags/options supported +by that feature. + +In some rare cases a couple additional end-to-end tests may be written for a +sufficiently complex and critical feature (ex: `container run`, `service +create`, `service update`, and `docker build` may have ~3-5 cases each). + +In some rare cases a sufficiently critical error paths may have a single +end-to-end test case. + +In all other cases the behaviour should be covered by unit tests. + +If a code change adds a new flag, that flag should be added to the existing +"success case" end-to-end test. 
+ +If a code change fixes a bug, that bug fix should be covered either by adding +assertions to the existing end-to-end test, or with one or more unit test. + +### Details + +The end-to-end test suite is located in +[./e2e](https://github.com/docker/cli/tree/master/e2e). Each directory in `e2e` +corresponds to a directory in `cli/command` and contains the tests for that +subcommand. Files in each directory should be named `_test.go` where +command is the basename of the command (ex: the test for `docker stack deploy` +is found in `e2e/stack/deploy_test.go`). + +Tests should be named using the convention: + +``` +Test[] +``` + +where the test case name is only required when there are multiple test cases for +a single command. + +End-to-end test should run the `docker` binary using +[gotestyourself/icmd](https://godoc.org/github.com/gotestyourself/gotestyourself/icmd) +and make assertions about the exit code, stdout, stderr, and local file system. + +Any Docker image or registry operations should use `registry:5000/` +to communicate with the local instance of the Docker registry. To load +additional fixture images to the registry see +[scripts/test/e2e/run](https://github.com/docker/cli/blob/master/scripts/test/e2e/run). 
diff --git a/cli/VERSION b/cli/VERSION new file mode 100644 index 00000000..8ff4639f --- /dev/null +++ b/cli/VERSION @@ -0,0 +1 @@ +19.03.0-dev diff --git a/cli/appveyor.yml b/cli/appveyor.yml new file mode 100644 index 00000000..c117563f --- /dev/null +++ b/cli/appveyor.yml @@ -0,0 +1,23 @@ +version: "{build}" + +clone_folder: c:\gopath\src\github.com\docker\cli + +environment: + GOPATH: c:\gopath + GOVERSION: 1.12.10 + DEPVERSION: v0.4.1 + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - go version + - go env + +deploy: false + +build_script: + - ps: .\scripts\make.ps1 -Binary + +test_script: + - ps: .\scripts\make.ps1 -TestUnit diff --git a/cli/circle.yml b/cli/circle.yml new file mode 100644 index 00000000..d2431e44 --- /dev/null +++ b/cli/circle.yml @@ -0,0 +1,133 @@ +version: 2 + +jobs: + + lint: + working_directory: /work + docker: [{image: 'docker:18.09-git'}] + environment: + DOCKER_BUILDKIT: 1 + steps: + - checkout + - setup_remote_docker: + version: 18.09.3 + reusable: true + exclusive: false + - run: + command: docker version + - run: + name: "Lint" + command: | + docker build --progress=plain -f dockerfiles/Dockerfile.lint --tag cli-linter:$CIRCLE_BUILD_NUM . + docker run --rm cli-linter:$CIRCLE_BUILD_NUM + + cross: + working_directory: /work + docker: [{image: 'docker:18.09-git'}] + environment: + DOCKER_BUILDKIT: 1 + parallelism: 3 + steps: + - checkout + - setup_remote_docker: + version: 18.09.3 + reusable: true + exclusive: false + - run: + name: "Cross" + command: | + docker build --progress=plain -f dockerfiles/Dockerfile.cross --tag cli-builder:$CIRCLE_BUILD_NUM . 
+ name=cross-$CIRCLE_BUILD_NUM-$CIRCLE_NODE_INDEX + docker run \ + -e CROSS_GROUP=$CIRCLE_NODE_INDEX \ + --name $name cli-builder:$CIRCLE_BUILD_NUM \ + make cross + docker cp \ + $name:/go/src/github.com/docker/cli/build \ + /work/build + - store_artifacts: + path: /work/build + + test: + working_directory: /work + docker: [{image: 'docker:18.09-git'}] + environment: + DOCKER_BUILDKIT: 1 + steps: + - checkout + - setup_remote_docker: + version: 18.09.3 + reusable: true + exclusive: false + - run: + name: "Unit Test with Coverage" + command: | + mkdir -p test-results/unit-tests + docker build --progress=plain -f dockerfiles/Dockerfile.dev --tag cli-builder:$CIRCLE_BUILD_NUM . + docker run \ + -e GOTESTSUM_JUNITFILE=/tmp/junit.xml \ + --name \ + test-$CIRCLE_BUILD_NUM cli-builder:$CIRCLE_BUILD_NUM \ + make test-coverage + docker cp \ + test-$CIRCLE_BUILD_NUM:/tmp/junit.xml \ + ./test-results/unit-tests/junit.xml + - run: + name: "Upload to Codecov" + command: | + docker cp \ + test-$CIRCLE_BUILD_NUM:/go/src/github.com/docker/cli/coverage.txt \ + coverage.txt + apk add -U bash curl + curl -s https://codecov.io/bash | bash || \ + echo 'Codecov failed to upload' + - store_test_results: + path: test-results + - store_artifacts: + path: test-results + + validate: + working_directory: /work + docker: [{image: 'docker:18.09-git'}] + environment: + DOCKER_BUILDKIT: 1 + steps: + - checkout + - setup_remote_docker: + version: 18.09.3 + reusable: true + exclusive: false + - run: + name: "Validate Vendor, Docs, and Code Generation" + command: | + rm -f .dockerignore # include .git + docker build --progress=plain -f dockerfiles/Dockerfile.dev --tag cli-builder-with-git:$CIRCLE_BUILD_NUM . 
+ docker run --rm cli-builder-with-git:$CIRCLE_BUILD_NUM \ + make ci-validate + no_output_timeout: 15m + shellcheck: + working_directory: /work + docker: [{image: 'docker:18.09-git'}] + environment: + DOCKER_BUILDKIT: 1 + steps: + - checkout + - setup_remote_docker: + version: 18.09.3 + reusable: true + exclusive: false + - run: + name: "Run shellcheck" + command: | + docker build --progress=plain -f dockerfiles/Dockerfile.shellcheck --tag cli-validator:$CIRCLE_BUILD_NUM . + docker run --rm cli-validator:$CIRCLE_BUILD_NUM \ + make shellcheck +workflows: + version: 2 + ci: + jobs: + - lint + - cross + - test + - validate + - shellcheck diff --git a/cli/cli-plugins/examples/helloworld/main.go b/cli/cli-plugins/examples/helloworld/main.go new file mode 100644 index 00000000..597dc42b --- /dev/null +++ b/cli/cli-plugins/examples/helloworld/main.go @@ -0,0 +1,106 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/docker/cli/cli-plugins/manager" + "github.com/docker/cli/cli-plugins/plugin" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +func main() { + plugin.Run(func(dockerCli command.Cli) *cobra.Command { + goodbye := &cobra.Command{ + Use: "goodbye", + Short: "Say Goodbye instead of Hello", + Run: func(cmd *cobra.Command, _ []string) { + fmt.Fprintln(dockerCli.Out(), "Goodbye World!") + }, + } + apiversion := &cobra.Command{ + Use: "apiversion", + Short: "Print the API version of the server", + RunE: func(_ *cobra.Command, _ []string) error { + cli := dockerCli.Client() + ping, err := cli.Ping(context.Background()) + if err != nil { + return err + } + fmt.Println(ping.APIVersion) + return nil + }, + } + + exitStatus2 := &cobra.Command{ + Use: "exitstatus2", + Short: "Exit with status 2", + RunE: func(_ *cobra.Command, _ []string) error { + fmt.Fprintln(dockerCli.Err(), "Exiting with error status 2") + os.Exit(2) + return nil + }, + } + + var ( + who, context string + preRun, debug bool + ) + cmd := &cobra.Command{ + Use: 
"helloworld", + Short: "A basic Hello World plugin for tests", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := plugin.PersistentPreRunE(cmd, args); err != nil { + return err + } + if preRun { + fmt.Fprintf(dockerCli.Err(), "Plugin PersistentPreRunE called") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + if debug { + fmt.Fprintf(dockerCli.Err(), "Plugin debug mode enabled") + } + + switch context { + case "Christmas": + fmt.Fprintf(dockerCli.Out(), "Merry Christmas!\n") + return nil + case "": + // nothing + } + + if who == "" { + who, _ = dockerCli.ConfigFile().PluginConfig("helloworld", "who") + } + if who == "" { + who = "World" + } + + fmt.Fprintf(dockerCli.Out(), "Hello %s!\n", who) + dockerCli.ConfigFile().SetPluginConfig("helloworld", "lastwho", who) + return dockerCli.ConfigFile().Save() + }, + } + + flags := cmd.Flags() + flags.StringVar(&who, "who", "", "Who are we addressing?") + flags.BoolVar(&preRun, "pre-run", false, "Log from prerun hook") + // These are intended to deliberately clash with the CLIs own top + // level arguments. 
+ flags.BoolVarP(&debug, "debug", "D", false, "Enable debug") + flags.StringVarP(&context, "context", "c", "", "Is it Christmas?") + + cmd.AddCommand(goodbye, apiversion, exitStatus2) + return cmd + }, + manager.Metadata{ + SchemaVersion: "0.1.0", + Vendor: "Docker Inc.", + Version: "testing", + Experimental: os.Getenv("HELLO_EXPERIMENTAL") != "", + }) +} diff --git a/cli/cli-plugins/manager/candidate.go b/cli/cli-plugins/manager/candidate.go new file mode 100644 index 00000000..2000e5b1 --- /dev/null +++ b/cli/cli-plugins/manager/candidate.go @@ -0,0 +1,23 @@ +package manager + +import ( + "os/exec" +) + +// Candidate represents a possible plugin candidate, for mocking purposes +type Candidate interface { + Path() string + Metadata() ([]byte, error) +} + +type candidate struct { + path string +} + +func (c *candidate) Path() string { + return c.path +} + +func (c *candidate) Metadata() ([]byte, error) { + return exec.Command(c.path, MetadataSubcommandName).Output() +} diff --git a/cli/cli-plugins/manager/candidate_test.go b/cli/cli-plugins/manager/candidate_test.go new file mode 100644 index 00000000..a8cf2ab4 --- /dev/null +++ b/cli/cli-plugins/manager/candidate_test.go @@ -0,0 +1,101 @@ +package manager + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/spf13/cobra" + "gotest.tools/assert" + "gotest.tools/assert/cmp" +) + +type fakeCandidate struct { + path string + exec bool + meta string + allowExperimental bool +} + +func (c *fakeCandidate) Path() string { + return c.path +} + +func (c *fakeCandidate) Metadata() ([]byte, error) { + if !c.exec { + return nil, fmt.Errorf("faked a failure to exec %q", c.path) + } + return []byte(c.meta), nil +} + +func TestValidateCandidate(t *testing.T) { + var ( + goodPluginName = NamePrefix + "goodplugin" + + builtinName = NamePrefix + "builtin" + builtinAlias = NamePrefix + "alias" + + badPrefixPath = "/usr/local/libexec/cli-plugins/wobble" + badNamePath = "/usr/local/libexec/cli-plugins/docker-123456" + 
goodPluginPath = "/usr/local/libexec/cli-plugins/" + goodPluginName + metaExperimental = `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing", "Experimental": true}` + ) + + fakeroot := &cobra.Command{Use: "docker"} + fakeroot.AddCommand(&cobra.Command{ + Use: strings.TrimPrefix(builtinName, NamePrefix), + Aliases: []string{ + strings.TrimPrefix(builtinAlias, NamePrefix), + }, + }) + + for _, tc := range []struct { + name string + c *fakeCandidate + + // Either err or invalid may be non-empty, but not both (both can be empty for a good plugin). + err string + invalid string + }{ + /* Each failing one of the tests */ + {name: "empty path", c: &fakeCandidate{path: ""}, err: "plugin candidate path cannot be empty"}, + {name: "bad prefix", c: &fakeCandidate{path: badPrefixPath}, err: fmt.Sprintf("does not have %q prefix", NamePrefix)}, + {name: "bad path", c: &fakeCandidate{path: badNamePath}, invalid: "did not match"}, + {name: "builtin command", c: &fakeCandidate{path: builtinName}, invalid: `plugin "builtin" duplicates builtin command`}, + {name: "builtin alias", c: &fakeCandidate{path: builtinAlias}, invalid: `plugin "alias" duplicates an alias of builtin command "builtin"`}, + {name: "fetch failure", c: &fakeCandidate{path: goodPluginPath, exec: false}, invalid: fmt.Sprintf("failed to fetch metadata: faked a failure to exec %q", goodPluginPath)}, + {name: "metadata not json", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `xyzzy`}, invalid: "invalid character"}, + {name: "empty schemaversion", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{}`}, invalid: `plugin SchemaVersion "" is not valid`}, + {name: "invalid schemaversion", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "xyzzy"}`}, invalid: `plugin SchemaVersion "xyzzy" is not valid`}, + {name: "no vendor", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0"}`}, invalid: "plugin metadata does not define a vendor"}, + {name: 
"empty vendor", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": ""}`}, invalid: "plugin metadata does not define a vendor"}, + {name: "experimental required", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: metaExperimental}, invalid: "requires experimental CLI"}, + // This one should work + {name: "valid", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing"}`}}, + {name: "valid + allowing experimental", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: `{"SchemaVersion": "0.1.0", "Vendor": "e2e-testing"}`, allowExperimental: true}}, + {name: "experimental + allowing experimental", c: &fakeCandidate{path: goodPluginPath, exec: true, meta: metaExperimental, allowExperimental: true}}, + } { + t.Run(tc.name, func(t *testing.T) { + p, err := newPlugin(tc.c, fakeroot, tc.c.allowExperimental) + if tc.err != "" { + assert.ErrorContains(t, err, tc.err) + } else if tc.invalid != "" { + assert.NilError(t, err) + assert.Assert(t, cmp.ErrorType(p.Err, reflect.TypeOf(&pluginError{}))) + assert.ErrorContains(t, p.Err, tc.invalid) + } else { + assert.NilError(t, err) + assert.Equal(t, NamePrefix+p.Name, goodPluginName) + assert.Equal(t, p.SchemaVersion, "0.1.0") + assert.Equal(t, p.Vendor, "e2e-testing") + } + }) + } +} + +func TestCandidatePath(t *testing.T) { + exp := "/some/path" + cand := &candidate{path: exp} + assert.Equal(t, exp, cand.Path()) +} diff --git a/cli/cli-plugins/manager/cobra.go b/cli/cli-plugins/manager/cobra.go new file mode 100644 index 00000000..0fcd73e7 --- /dev/null +++ b/cli/cli-plugins/manager/cobra.go @@ -0,0 +1,60 @@ +package manager + +import ( + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +const ( + // CommandAnnotationPlugin is added to every stub command added by + // AddPluginCommandStubs with the value "true" and so can be + // used to distinguish plugin stubs from regular commands. 
+ CommandAnnotationPlugin = "com.docker.cli.plugin" + + // CommandAnnotationPluginVendor is added to every stub command + // added by AddPluginCommandStubs and contains the vendor of + // that plugin. + CommandAnnotationPluginVendor = "com.docker.cli.plugin.vendor" + + // CommandAnnotationPluginVersion is added to every stub command + // added by AddPluginCommandStubs and contains the version of + // that plugin. + CommandAnnotationPluginVersion = "com.docker.cli.plugin.version" + + // CommandAnnotationPluginInvalid is added to any stub command + // added by AddPluginCommandStubs for an invalid command (that + // is, one which failed it's candidate test) and contains the + // reason for the failure. + CommandAnnotationPluginInvalid = "com.docker.cli.plugin-invalid" +) + +// AddPluginCommandStubs adds a stub cobra.Commands for each valid and invalid +// plugin. The command stubs will have several annotations added, see +// `CommandAnnotationPlugin*`. +func AddPluginCommandStubs(dockerCli command.Cli, cmd *cobra.Command) error { + plugins, err := ListPlugins(dockerCli, cmd) + if err != nil { + return err + } + for _, p := range plugins { + vendor := p.Vendor + if vendor == "" { + vendor = "unknown" + } + annotations := map[string]string{ + CommandAnnotationPlugin: "true", + CommandAnnotationPluginVendor: vendor, + CommandAnnotationPluginVersion: p.Version, + } + if p.Err != nil { + annotations[CommandAnnotationPluginInvalid] = p.Err.Error() + } + cmd.AddCommand(&cobra.Command{ + Use: p.Name, + Short: p.ShortDescription, + Run: func(_ *cobra.Command, _ []string) {}, + Annotations: annotations, + }) + } + return nil +} diff --git a/cli/cli-plugins/manager/error.go b/cli/cli-plugins/manager/error.go new file mode 100644 index 00000000..1ad28678 --- /dev/null +++ b/cli/cli-plugins/manager/error.go @@ -0,0 +1,43 @@ +package manager + +import ( + "github.com/pkg/errors" +) + +// pluginError is set as Plugin.Err by NewPlugin if the plugin +// candidate fails one of the 
candidate tests. This exists primarily +// to implement encoding.TextMarshaller such that rendering a plugin as JSON +// (e.g. for `docker info -f '{{json .CLIPlugins}}'`) renders the Err +// field as a useful string and not just `{}`. See +// https://github.com/golang/go/issues/10748 for some discussion +// around why the builtin error type doesn't implement this. +type pluginError struct { + cause error +} + +// Error satisfies the core error interface for pluginError. +func (e *pluginError) Error() string { + return e.cause.Error() +} + +// Cause satisfies the errors.causer interface for pluginError. +func (e *pluginError) Cause() error { + return e.cause +} + +// MarshalText marshalls the pluginError into a textual form. +func (e *pluginError) MarshalText() (text []byte, err error) { + return []byte(e.cause.Error()), nil +} + +// wrapAsPluginError wraps an error in a pluginError with an +// additional message, analogous to errors.Wrapf. +func wrapAsPluginError(err error, msg string) error { + return &pluginError{cause: errors.Wrap(err, msg)} +} + +// NewPluginError creates a new pluginError, analogous to +// errors.Errorf. 
+func NewPluginError(msg string, args ...interface{}) error { + return &pluginError{cause: errors.Errorf(msg, args...)} +} diff --git a/cli/cli-plugins/manager/error_test.go b/cli/cli-plugins/manager/error_test.go new file mode 100644 index 00000000..04614e24 --- /dev/null +++ b/cli/cli-plugins/manager/error_test.go @@ -0,0 +1,24 @@ +package manager + +import ( + "fmt" + "testing" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" + "gotest.tools/assert" +) + +func TestPluginError(t *testing.T) { + err := NewPluginError("new error") + assert.Error(t, err, "new error") + + inner := fmt.Errorf("testing") + err = wrapAsPluginError(inner, "wrapping") + assert.Error(t, err, "wrapping: testing") + assert.Equal(t, inner, errors.Cause(err)) + + actual, err := yaml.Marshal(err) + assert.NilError(t, err) + assert.Equal(t, "'wrapping: testing'\n", string(actual)) +} diff --git a/cli/cli-plugins/manager/manager.go b/cli/cli-plugins/manager/manager.go new file mode 100644 index 00000000..e06286e9 --- /dev/null +++ b/cli/cli-plugins/manager/manager.go @@ -0,0 +1,209 @@ +package manager + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/config" + "github.com/spf13/cobra" +) + +// ReexecEnvvar is the name of an ennvar which is set to the command +// used to originally invoke the docker CLI when executing a +// plugin. Assuming $PATH and $CWD remain unchanged this should allow +// the plugin to re-execute the original CLI. +const ReexecEnvvar = "DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND" + +// errPluginNotFound is the error returned when a plugin could not be found. 
+type errPluginNotFound string + +func (e errPluginNotFound) NotFound() {} + +func (e errPluginNotFound) Error() string { + return "Error: No such CLI plugin: " + string(e) +} + +type errPluginRequireExperimental string + +// Note: errPluginRequireExperimental implements notFound so that the plugin +// is skipped when listing the plugins. +func (e errPluginRequireExperimental) NotFound() {} + +func (e errPluginRequireExperimental) Error() string { + return fmt.Sprintf("plugin candidate %q: requires experimental CLI", string(e)) +} + +type notFound interface{ NotFound() } + +// IsNotFound is true if the given error is due to a plugin not being found. +func IsNotFound(err error) bool { + if e, ok := err.(*pluginError); ok { + err = e.Cause() + } + _, ok := err.(notFound) + return ok +} + +func getPluginDirs(dockerCli command.Cli) ([]string, error) { + var pluginDirs []string + + if cfg := dockerCli.ConfigFile(); cfg != nil { + pluginDirs = append(pluginDirs, cfg.CLIPluginsExtraDirs...) + } + pluginDir, err := config.Path("cli-plugins") + if err != nil { + return nil, err + } + + pluginDirs = append(pluginDirs, pluginDir) + pluginDirs = append(pluginDirs, defaultSystemPluginDirs...) + return pluginDirs, nil +} + +func addPluginCandidatesFromDir(res map[string][]string, d string) error { + dentries, err := ioutil.ReadDir(d) + if err != nil { + return err + } + for _, dentry := range dentries { + switch dentry.Mode() & os.ModeType { + case 0, os.ModeSymlink: + // Regular file or symlink, keep going + default: + // Something else, ignore. + continue + } + name := dentry.Name() + if !strings.HasPrefix(name, NamePrefix) { + continue + } + name = strings.TrimPrefix(name, NamePrefix) + var err error + if name, err = trimExeSuffix(name); err != nil { + continue + } + res[name] = append(res[name], filepath.Join(d, dentry.Name())) + } + return nil +} + +// listPluginCandidates returns a map from plugin name to the list of (unvalidated) Candidates. 
The list is in descending order of priority. +func listPluginCandidates(dirs []string) (map[string][]string, error) { + result := make(map[string][]string) + for _, d := range dirs { + // Silently ignore any directories which we cannot + // Stat (e.g. due to permissions or anything else) or + // which is not a directory. + if fi, err := os.Stat(d); err != nil || !fi.IsDir() { + continue + } + if err := addPluginCandidatesFromDir(result, d); err != nil { + // Silently ignore paths which don't exist. + if os.IsNotExist(err) { + continue + } + return nil, err // Or return partial result? + } + } + return result, nil +} + +// ListPlugins produces a list of the plugins available on the system +func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error) { + pluginDirs, err := getPluginDirs(dockerCli) + if err != nil { + return nil, err + } + + candidates, err := listPluginCandidates(pluginDirs) + if err != nil { + return nil, err + } + + var plugins []Plugin + for _, paths := range candidates { + if len(paths) == 0 { + continue + } + c := &candidate{paths[0]} + p, err := newPlugin(c, rootcmd, dockerCli.ClientInfo().HasExperimental) + if err != nil { + return nil, err + } + if !IsNotFound(p.Err) { + p.ShadowedPaths = paths[1:] + plugins = append(plugins, p) + } + } + + return plugins, nil +} + +// PluginRunCommand returns an "os/exec".Cmd which when .Run() will execute the named plugin. +// The rootcmd argument is referenced to determine the set of builtin commands in order to detect conficts. +// The error returned satisfies the IsNotFound() predicate if no plugin was found or if the first candidate plugin was invalid somehow. +func PluginRunCommand(dockerCli command.Cli, name string, rootcmd *cobra.Command) (*exec.Cmd, error) { + // This uses the full original args, not the args which may + // have been provided by cobra to our caller. This is because + // they lack e.g. global options which we must propagate here. 
+ args := os.Args[1:] + if !pluginNameRe.MatchString(name) { + // We treat this as "not found" so that callers will + // fallback to their "invalid" command path. + return nil, errPluginNotFound(name) + } + exename := addExeSuffix(NamePrefix + name) + pluginDirs, err := getPluginDirs(dockerCli) + if err != nil { + return nil, err + } + + for _, d := range pluginDirs { + path := filepath.Join(d, exename) + + // We stat here rather than letting the exec tell us + // ENOENT because the latter does not distinguish a + // file not existing from its dynamic loader or one of + // its libraries not existing. + if _, err := os.Stat(path); os.IsNotExist(err) { + continue + } + + c := &candidate{path: path} + plugin, err := newPlugin(c, rootcmd, dockerCli.ClientInfo().HasExperimental) + if err != nil { + return nil, err + } + if plugin.Err != nil { + // TODO: why are we not returning plugin.Err? + + err := plugin.Err.(*pluginError).Cause() + // if an experimental plugin was invoked directly while experimental mode is off + // provide a more useful error message than "not found". + if err, ok := err.(errPluginRequireExperimental); ok { + return nil, err + } + return nil, errPluginNotFound(name) + } + cmd := exec.Command(plugin.Path, args...) + // Using dockerCli.{In,Out,Err}() here results in a hang until something is input. + // See: - https://github.com/golang/go/issues/10338 + // - https://github.com/golang/go/commit/d000e8742a173aa0659584aa01b7ba2834ba28ab + // os.Stdin is a *os.File which avoids this behaviour. We don't need the functionality + // of the wrappers here anyway. 
+ cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, ReexecEnvvar+"="+os.Args[0]) + + return cmd, nil + } + return nil, errPluginNotFound(name) +} diff --git a/cli/cli-plugins/manager/manager_test.go b/cli/cli-plugins/manager/manager_test.go new file mode 100644 index 00000000..d727fdac --- /dev/null +++ b/cli/cli-plugins/manager/manager_test.go @@ -0,0 +1,114 @@ +package manager + +import ( + "strings" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + "gotest.tools/fs" +) + +func TestListPluginCandidates(t *testing.T) { + // Populate a selection of directories with various shadowed and bogus/obscure plugin candidates. + // For the purposes of this test no contents is required and permissions are irrelevant. + dir := fs.NewDir(t, t.Name(), + fs.WithDir( + "plugins1", + fs.WithFile("docker-plugin1", ""), // This appears in each directory + fs.WithFile("not-a-plugin", ""), // Should be ignored + fs.WithFile("docker-symlinked1", ""), // This and ... + fs.WithSymlink("docker-symlinked2", "docker-symlinked1"), // ... this should both appear + fs.WithDir("ignored1"), // A directory should be ignored + ), + fs.WithDir( + "plugins2", + fs.WithFile("docker-plugin1", ""), + fs.WithFile("also-not-a-plugin", ""), + fs.WithFile("docker-hardlink1", ""), // This and ... + fs.WithHardlink("docker-hardlink2", "docker-hardlink1"), // ... this should both appear + fs.WithDir("ignored2"), + ), + fs.WithDir( + "plugins3-target", // Will be referenced as a symlink from below + fs.WithFile("docker-plugin1", ""), + fs.WithDir("ignored3"), + fs.WithSymlink("docker-brokensymlink", "broken"), // A broken symlink is still a candidate (but would fail tests later) + fs.WithFile("non-plugin-symlinked", ""), // This shouldn't appear, but ... + fs.WithSymlink("docker-symlinked", "non-plugin-symlinked"), // ... 
this link to it should. + ), + fs.WithSymlink("plugins3", "plugins3-target"), + fs.WithFile("/plugins4", ""), + fs.WithSymlink("plugins5", "plugins5-nonexistent-target"), + ) + defer dir.Remove() + + var dirs []string + for _, d := range []string{"plugins1", "nonexistent", "plugins2", "plugins3", "plugins4", "plugins5"} { + dirs = append(dirs, dir.Join(d)) + } + + candidates, err := listPluginCandidates(dirs) + assert.NilError(t, err) + exp := map[string][]string{ + "plugin1": { + dir.Join("plugins1", "docker-plugin1"), + dir.Join("plugins2", "docker-plugin1"), + dir.Join("plugins3", "docker-plugin1"), + }, + "symlinked1": { + dir.Join("plugins1", "docker-symlinked1"), + }, + "symlinked2": { + dir.Join("plugins1", "docker-symlinked2"), + }, + "hardlink1": { + dir.Join("plugins2", "docker-hardlink1"), + }, + "hardlink2": { + dir.Join("plugins2", "docker-hardlink2"), + }, + "brokensymlink": { + dir.Join("plugins3", "docker-brokensymlink"), + }, + "symlinked": { + dir.Join("plugins3", "docker-symlinked"), + }, + } + + assert.DeepEqual(t, candidates, exp) +} + +func TestErrPluginNotFound(t *testing.T) { + var err error = errPluginNotFound("test") + err.(errPluginNotFound).NotFound() + assert.Error(t, err, "Error: No such CLI plugin: test") + assert.Assert(t, IsNotFound(err)) + assert.Assert(t, !IsNotFound(nil)) +} + +func TestGetPluginDirs(t *testing.T) { + cli := test.NewFakeCli(nil) + + pluginDir, err := config.Path("cli-plugins") + assert.NilError(t, err) + expected := append([]string{pluginDir}, defaultSystemPluginDirs...) + + var pluginDirs []string + pluginDirs, err = getPluginDirs(cli) + assert.Equal(t, strings.Join(expected, ":"), strings.Join(pluginDirs, ":")) + assert.NilError(t, err) + + extras := []string{ + "foo", "bar", "baz", + } + expected = append(extras, expected...) 
+ cli.SetConfigFile(&configfile.ConfigFile{ + CLIPluginsExtraDirs: extras, + }) + pluginDirs, err = getPluginDirs(cli) + assert.DeepEqual(t, expected, pluginDirs) + assert.NilError(t, err) +} diff --git a/cli/cli-plugins/manager/manager_unix.go b/cli/cli-plugins/manager/manager_unix.go new file mode 100644 index 00000000..f586acbd --- /dev/null +++ b/cli/cli-plugins/manager/manager_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package manager + +var defaultSystemPluginDirs = []string{ + "/usr/local/lib/docker/cli-plugins", "/usr/local/libexec/docker/cli-plugins", + "/usr/lib/docker/cli-plugins", "/usr/libexec/docker/cli-plugins", +} diff --git a/cli/cli-plugins/manager/manager_windows.go b/cli/cli-plugins/manager/manager_windows.go new file mode 100644 index 00000000..2ce5a759 --- /dev/null +++ b/cli/cli-plugins/manager/manager_windows.go @@ -0,0 +1,11 @@ +package manager + +import ( + "os" + "path/filepath" +) + +var defaultSystemPluginDirs = []string{ + filepath.Join(os.Getenv("ProgramData"), "Docker", "cli-plugins"), + filepath.Join(os.Getenv("ProgramFiles"), "Docker", "cli-plugins"), +} diff --git a/cli/cli-plugins/manager/metadata.go b/cli/cli-plugins/manager/metadata.go new file mode 100644 index 00000000..19379034 --- /dev/null +++ b/cli/cli-plugins/manager/metadata.go @@ -0,0 +1,28 @@ +package manager + +const ( + // NamePrefix is the prefix required on all plugin binary names + NamePrefix = "docker-" + + // MetadataSubcommandName is the name of the plugin subcommand + // which must be supported by every plugin and returns the + // plugin metadata. + MetadataSubcommandName = "docker-cli-plugin-metadata" +) + +// Metadata provided by the plugin. See docs/extend/cli_plugins.md for canonical information. +type Metadata struct { + // SchemaVersion describes the version of this struct. Mandatory, must be "0.1.0" + SchemaVersion string `json:",omitempty"` + // Vendor is the name of the plugin vendor. 
Mandatory + Vendor string `json:",omitempty"` + // Version is the optional version of this plugin. + Version string `json:",omitempty"` + // ShortDescription should be suitable for a single line help message. + ShortDescription string `json:",omitempty"` + // URL is a pointer to the plugin's homepage. + URL string `json:",omitempty"` + // Experimental specifies whether the plugin is experimental. + // Experimental plugins are not displayed on non-experimental CLIs. + Experimental bool `json:",omitempty"` +} diff --git a/cli/cli-plugins/manager/plugin.go b/cli/cli-plugins/manager/plugin.go new file mode 100644 index 00000000..fc3ad693 --- /dev/null +++ b/cli/cli-plugins/manager/plugin.go @@ -0,0 +1,112 @@ +package manager + +import ( + "encoding/json" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +var ( + pluginNameRe = regexp.MustCompile("^[a-z][a-z0-9]*$") +) + +// Plugin represents a potential plugin with all it's metadata. +type Plugin struct { + Metadata + + Name string `json:",omitempty"` + Path string `json:",omitempty"` + + // Err is non-nil if the plugin failed one of the candidate tests. + Err error `json:",omitempty"` + + // ShadowedPaths contains the paths of any other plugins which this plugin takes precedence over. + ShadowedPaths []string `json:",omitempty"` +} + +// newPlugin determines if the given candidate is valid and returns a +// Plugin. If the candidate fails one of the tests then `Plugin.Err` +// is set, and is always a `pluginError`, but the `Plugin` is still +// returned with no error. An error is only returned due to a +// non-recoverable error. 
+// +// nolint: gocyclo +func newPlugin(c Candidate, rootcmd *cobra.Command, allowExperimental bool) (Plugin, error) { + path := c.Path() + if path == "" { + return Plugin{}, errors.New("plugin candidate path cannot be empty") + } + + // The candidate listing process should have skipped anything + // which would fail here, so there are all real errors. + fullname := filepath.Base(path) + if fullname == "." { + return Plugin{}, errors.Errorf("unable to determine basename of plugin candidate %q", path) + } + var err error + if fullname, err = trimExeSuffix(fullname); err != nil { + return Plugin{}, errors.Wrapf(err, "plugin candidate %q", path) + } + if !strings.HasPrefix(fullname, NamePrefix) { + return Plugin{}, errors.Errorf("plugin candidate %q: does not have %q prefix", path, NamePrefix) + } + + p := Plugin{ + Name: strings.TrimPrefix(fullname, NamePrefix), + Path: path, + } + + // Now apply the candidate tests, so these update p.Err. + if !pluginNameRe.MatchString(p.Name) { + p.Err = NewPluginError("plugin candidate %q did not match %q", p.Name, pluginNameRe.String()) + return p, nil + } + + if rootcmd != nil { + for _, cmd := range rootcmd.Commands() { + // Ignore conflicts with commands which are + // just plugin stubs (i.e. from a previous + // call to AddPluginCommandStubs). + if p := cmd.Annotations[CommandAnnotationPlugin]; p == "true" { + continue + } + if cmd.Name() == p.Name { + p.Err = NewPluginError("plugin %q duplicates builtin command", p.Name) + return p, nil + } + if cmd.HasAlias(p.Name) { + p.Err = NewPluginError("plugin %q duplicates an alias of builtin command %q", p.Name, cmd.Name()) + return p, nil + } + } + } + + // We are supposed to check for relevant execute permissions here. Instead we rely on an attempt to execute. 
+ meta, err := c.Metadata() + if err != nil { + p.Err = wrapAsPluginError(err, "failed to fetch metadata") + return p, nil + } + + if err := json.Unmarshal(meta, &p.Metadata); err != nil { + p.Err = wrapAsPluginError(err, "invalid metadata") + return p, nil + } + if p.Experimental && !allowExperimental { + p.Err = &pluginError{errPluginRequireExperimental(p.Name)} + return p, nil + } + if p.Metadata.SchemaVersion != "0.1.0" { + p.Err = NewPluginError("plugin SchemaVersion %q is not valid, must be 0.1.0", p.Metadata.SchemaVersion) + return p, nil + } + if p.Metadata.Vendor == "" { + p.Err = NewPluginError("plugin metadata does not define a vendor") + return p, nil + } + return p, nil +} diff --git a/cli/cli-plugins/manager/suffix_unix.go b/cli/cli-plugins/manager/suffix_unix.go new file mode 100644 index 00000000..14f0903f --- /dev/null +++ b/cli/cli-plugins/manager/suffix_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package manager + +func trimExeSuffix(s string) (string, error) { + return s, nil +} +func addExeSuffix(s string) string { + return s +} diff --git a/cli/cli-plugins/manager/suffix_windows.go b/cli/cli-plugins/manager/suffix_windows.go new file mode 100644 index 00000000..53b507c8 --- /dev/null +++ b/cli/cli-plugins/manager/suffix_windows.go @@ -0,0 +1,26 @@ +package manager + +import ( + "path/filepath" + "strings" + + "github.com/pkg/errors" +) + +// This is made slightly more complex due to needing to be case insensitive. 
+func trimExeSuffix(s string) (string, error) { + ext := filepath.Ext(s) + if ext == "" { + return "", errors.Errorf("path %q lacks required file extension", s) + } + + exe := ".exe" + if !strings.EqualFold(ext, exe) { + return "", errors.Errorf("path %q lacks required %q suffix", s, exe) + } + return strings.TrimSuffix(s, ext), nil +} + +func addExeSuffix(s string) string { + return s + ".exe" +} diff --git a/cli/cli-plugins/plugin/plugin.go b/cli/cli-plugins/plugin/plugin.go new file mode 100644 index 00000000..7bd5f50c --- /dev/null +++ b/cli/cli-plugins/plugin/plugin.go @@ -0,0 +1,161 @@ +package plugin + +import ( + "encoding/json" + "fmt" + "os" + "sync" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli-plugins/manager" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/connhelper" + "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +// PersistentPreRunE must be called by any plugin command (or +// subcommand) which uses the cobra `PersistentPreRun*` hook. Plugins +// which do not make use of `PersistentPreRun*` do not need to call +// this (although it remains safe to do so). Plugins are recommended +// to use `PersistenPreRunE` to enable the error to be +// returned. Should not be called outside of a command's +// PersistentPreRunE hook and must not be run unless Run has been +// called. +var PersistentPreRunE func(*cobra.Command, []string) error + +func runPlugin(dockerCli *command.DockerCli, plugin *cobra.Command, meta manager.Metadata) error { + tcmd := newPluginCommand(dockerCli, plugin, meta) + + var persistentPreRunOnce sync.Once + PersistentPreRunE = func(_ *cobra.Command, _ []string) error { + var err error + persistentPreRunOnce.Do(func() { + var opts []command.InitializeOpt + if os.Getenv("DOCKER_CLI_PLUGIN_USE_DIAL_STDIO") != "" { + opts = append(opts, withPluginClientConn(plugin.Name())) + } + err = tcmd.Initialize(opts...) 
+ }) + return err + } + + cmd, args, err := tcmd.HandleGlobalFlags() + if err != nil { + return err + } + // We've parsed global args already, so reset args to those + // which remain. + cmd.SetArgs(args) + return cmd.Execute() +} + +// Run is the top-level entry point to the CLI plugin framework. It should be called from your plugin's `main()` function. +func Run(makeCmd func(command.Cli) *cobra.Command, meta manager.Metadata) { + dockerCli, err := command.NewDockerCli() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + plugin := makeCmd(dockerCli) + + if err := runPlugin(dockerCli, plugin, meta); err != nil { + if sterr, ok := err.(cli.StatusError); ok { + if sterr.Status != "" { + fmt.Fprintln(dockerCli.Err(), sterr.Status) + } + // StatusError should only be used for errors, and all errors should + // have a non-zero exit status, so never exit with 0 + if sterr.StatusCode == 0 { + os.Exit(1) + } + os.Exit(sterr.StatusCode) + } + fmt.Fprintln(dockerCli.Err(), err) + os.Exit(1) + } +} + +func withPluginClientConn(name string) command.InitializeOpt { + return command.WithInitializeClient(func(dockerCli *command.DockerCli) (client.APIClient, error) { + cmd := "docker" + if x := os.Getenv(manager.ReexecEnvvar); x != "" { + cmd = x + } + var flags []string + + // Accumulate all the global arguments, that is those + // up to (but not including) the plugin's name. This + // ensures that `docker system dial-stdio` is + // evaluating the same set of `--config`, `--tls*` etc + // global options as the plugin was called with, which + // in turn is the same as what the original docker + // invocation was passed. + for _, a := range os.Args[1:] { + if a == name { + break + } + flags = append(flags, a) + } + flags = append(flags, "system", "dial-stdio") + + helper, err := connhelper.GetCommandConnectionHelper(cmd, flags...) 
+ if err != nil { + return nil, err + } + + return client.NewClientWithOpts(client.WithDialContext(helper.Dialer)) + }) +} + +func newPluginCommand(dockerCli *command.DockerCli, plugin *cobra.Command, meta manager.Metadata) *cli.TopLevelCommand { + name := plugin.Name() + fullname := manager.NamePrefix + name + + cmd := &cobra.Command{ + Use: fmt.Sprintf("docker [OPTIONS] %s [ARG...]", name), + Short: fullname + " is a Docker CLI plugin", + SilenceUsage: true, + SilenceErrors: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + // We can't use this as the hook directly since it is initialised later (in runPlugin) + return PersistentPreRunE(cmd, args) + }, + TraverseChildren: true, + DisableFlagsInUseLine: true, + } + opts, flags := cli.SetupPluginRootCommand(cmd) + + cmd.SetOutput(dockerCli.Out()) + + cmd.AddCommand( + plugin, + newMetadataSubcommand(plugin, meta), + ) + + cli.DisableFlagsInUseLine(cmd) + + return cli.NewTopLevelCommand(cmd, dockerCli, opts, flags) +} + +func newMetadataSubcommand(plugin *cobra.Command, meta manager.Metadata) *cobra.Command { + if meta.ShortDescription == "" { + meta.ShortDescription = plugin.Short + } + cmd := &cobra.Command{ + Use: manager.MetadataSubcommandName, + Hidden: true, + // Suppress the global/parent PersistentPreRunE, which + // needlessly initializes the client and tries to + // connect to the daemon. 
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(cmd *cobra.Command, args []string) error { + enc := json.NewEncoder(os.Stdout) + enc.SetEscapeHTML(false) + enc.SetIndent("", " ") + return enc.Encode(meta) + }, + } + return cmd +} diff --git a/cli/cli/cobra.go b/cli/cli/cobra.go new file mode 100644 index 00000000..ed9c9b5b --- /dev/null +++ b/cli/cli/cobra.go @@ -0,0 +1,343 @@ +package cli + +import ( + "fmt" + "os" + "strings" + + pluginmanager "github.com/docker/cli/cli-plugins/manager" + "github.com/docker/cli/cli/command" + cliconfig "github.com/docker/cli/cli/config" + cliflags "github.com/docker/cli/cli/flags" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// setupCommonRootCommand contains the setup common to +// SetupRootCommand and SetupPluginRootCommand. +func setupCommonRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet, *cobra.Command) { + opts := cliflags.NewClientOptions() + flags := rootCmd.Flags() + + flags.StringVar(&opts.ConfigDir, "config", cliconfig.Dir(), "Location of client config files") + opts.Common.InstallFlags(flags) + + cobra.AddTemplateFunc("add", func(a, b int) int { return a + b }) + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("hasInvalidPlugins", hasInvalidPlugins) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("invalidPlugins", invalidPlugins) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + cobra.AddTemplateFunc("vendorAndVersion", vendorAndVersion) + cobra.AddTemplateFunc("invalidPluginReason", invalidPluginReason) + cobra.AddTemplateFunc("isPlugin", isPlugin) + cobra.AddTemplateFunc("decoratedName", decoratedName) + + 
rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") + rootCmd.PersistentFlags().Lookup("help").Hidden = true + + return opts, flags, helpCommand +} + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. +func SetupRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet, *cobra.Command) { + opts, flags, helpCmd := setupCommonRootCommand(rootCmd) + + rootCmd.SetVersionTemplate("Docker version {{.Version}}\n") + + return opts, flags, helpCmd +} + +// SetupPluginRootCommand sets default usage, help and error handling for a plugin root command. +func SetupPluginRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet) { + opts, flags, _ := setupCommonRootCommand(rootCmd) + return opts, flags +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/cli/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return nil + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +// TopLevelCommand encapsulates a top-level cobra command (either +// docker CLI or a plugin) and global flag handling logic necessary +// for plugins. 
+type TopLevelCommand struct { + cmd *cobra.Command + dockerCli *command.DockerCli + opts *cliflags.ClientOptions + flags *pflag.FlagSet + args []string +} + +// NewTopLevelCommand returns a new TopLevelCommand object +func NewTopLevelCommand(cmd *cobra.Command, dockerCli *command.DockerCli, opts *cliflags.ClientOptions, flags *pflag.FlagSet) *TopLevelCommand { + return &TopLevelCommand{cmd, dockerCli, opts, flags, os.Args[1:]} +} + +// SetArgs sets the args (default os.Args[1:]) used to invoke the command +func (tcmd *TopLevelCommand) SetArgs(args []string) { + tcmd.args = args + tcmd.cmd.SetArgs(args) +} + +// SetFlag sets a flag in the local flag set of the top-level command +func (tcmd *TopLevelCommand) SetFlag(name, value string) { + tcmd.cmd.Flags().Set(name, value) +} + +// HandleGlobalFlags takes care of parsing global flags defined on the +// command, it returns the underlying cobra command and the args it +// will be called with (or an error). +// +// On success the caller is responsible for calling Initialize() +// before calling `Execute` on the returned command. +func (tcmd *TopLevelCommand) HandleGlobalFlags() (*cobra.Command, []string, error) { + cmd := tcmd.cmd + + // We manually parse the global arguments and find the + // subcommand in order to properly deal with plugins. We rely + // on the root command never having any non-flag arguments. We + // create our own FlagSet so that we can configure it + // (e.g. `SetInterspersed` below) in an idempotent way. + flags := pflag.NewFlagSet(cmd.Name(), pflag.ContinueOnError) + + // We need !interspersed to ensure we stop at the first + // potential command instead of accumulating it into + // flags.Args() and then continuing on and finding other + // arguments which we try and treat as globals (when they are + // actually arguments to the subcommand). + flags.SetInterspersed(false) + + // We need the single parse to see both sets of flags. 
+ flags.AddFlagSet(cmd.Flags()) + flags.AddFlagSet(cmd.PersistentFlags()) + // Now parse the global flags, up to (but not including) the + // first command. The result will be that all the remaining + // arguments are in `flags.Args()`. + if err := flags.Parse(tcmd.args); err != nil { + // Our FlagErrorFunc uses the cli, make sure it is initialized + if err := tcmd.Initialize(); err != nil { + return nil, nil, err + } + return nil, nil, cmd.FlagErrorFunc()(cmd, err) + } + + return cmd, flags.Args(), nil +} + +// Initialize finalises global option parsing and initializes the docker client. +func (tcmd *TopLevelCommand) Initialize(ops ...command.InitializeOpt) error { + tcmd.opts.Common.SetDefaultOptions(tcmd.flags) + return tcmd.dockerCli.Initialize(tcmd.opts, ops...) +} + +// VisitAll will traverse all commands from the root. +// This is different from the VisitAll of cobra.Command where only parents +// are checked. +func VisitAll(root *cobra.Command, fn func(*cobra.Command)) { + for _, cmd := range root.Commands() { + VisitAll(cmd, fn) + } + fn(root) +} + +// DisableFlagsInUseLine sets the DisableFlagsInUseLine flag on all +// commands within the tree rooted at cmd. +func DisableFlagsInUseLine(cmd *cobra.Command) { + VisitAll(cmd, func(ccmd *cobra.Command) { + // do not add a `[flags]` to the end of the usage line. 
+ ccmd.DisableFlagsInUseLine = true + }) +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func isPlugin(cmd *cobra.Command) bool { + return cmd.Annotations[pluginmanager.CommandAnnotationPlugin] == "true" +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func hasInvalidPlugins(cmd *cobra.Command) bool { + return len(invalidPlugins(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if isPlugin(sub) { + continue + } + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func wrappedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + +func decoratedName(cmd *cobra.Command) string { + decoration := " " + if isPlugin(cmd) { + decoration = "*" + } + return cmd.Name() + decoration +} + +func vendorAndVersion(cmd *cobra.Command) string { + if vendor, ok := cmd.Annotations[pluginmanager.CommandAnnotationPluginVendor]; ok && isPlugin(cmd) { + version := "" + if v, ok := cmd.Annotations[pluginmanager.CommandAnnotationPluginVersion]; ok && v != "" { + version = ", " + v + } + return fmt.Sprintf("(%s%s)", vendor, version) + } + return "" +} + +func 
managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if isPlugin(sub) { + if invalidPluginReason(sub) == "" { + cmds = append(cmds, sub) + } + continue + } + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func invalidPlugins(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if !isPlugin(sub) { + continue + } + if invalidPluginReason(sub) != "" { + cmds = append(cmds, sub) + } + } + return cmds +} + +func invalidPluginReason(cmd *cobra.Command) string { + return cmd.Annotations[pluginmanager.CommandAnnotationPluginInvalid] +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}}{{- if .HasAvailableFlags}} [OPTIONS]{{end}} COMMAND{{end}} + +{{if ne .Long ""}}{{ .Long | trim }}{{ else }}{{ .Short | trim }}{{end}} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasAvailableFlags}} + +Options: +{{ wrappedFlagUsages . | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad (decoratedName .) (add .NamePadding 1)}}{{.Short}}{{ if isPlugin .}} {{vendorAndVersion .}}{{ end}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if hasInvalidPlugins . }} + +Invalid Plugins: + +{{- range invalidPlugins . }} + {{rpad .Name .NamePadding }} {{invalidPluginReason .}} +{{- end}} + +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. 
+{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/cli/cli/cobra_test.go b/cli/cli/cobra_test.go new file mode 100644 index 00000000..5fe8f5d9 --- /dev/null +++ b/cli/cli/cobra_test.go @@ -0,0 +1,88 @@ +package cli + +import ( + "testing" + + pluginmanager "github.com/docker/cli/cli-plugins/manager" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/spf13/cobra" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestVisitAll(t *testing.T) { + root := &cobra.Command{Use: "root"} + sub1 := &cobra.Command{Use: "sub1"} + sub1sub1 := &cobra.Command{Use: "sub1sub1"} + sub1sub2 := &cobra.Command{Use: "sub1sub2"} + sub2 := &cobra.Command{Use: "sub2"} + + root.AddCommand(sub1, sub2) + sub1.AddCommand(sub1sub1, sub1sub2) + + // Take the opportunity to test DisableFlagsInUseLine too + DisableFlagsInUseLine(root) + + var visited []string + VisitAll(root, func(ccmd *cobra.Command) { + visited = append(visited, ccmd.Name()) + assert.Assert(t, ccmd.DisableFlagsInUseLine, "DisableFlagsInUseLine not set on %q", ccmd.Name()) + }) + expected := []string{"sub1sub1", "sub1sub2", "sub1", "sub2", "root"} + assert.DeepEqual(t, expected, visited) +} + +func TestVendorAndVersion(t *testing.T) { + // Non plugin. + assert.Equal(t, vendorAndVersion(&cobra.Command{Use: "test"}), "") + + // Plugins with various lengths of vendor. 
+ for _, tc := range []struct { + vendor string + version string + expected string + }{ + {vendor: "vendor", expected: "(vendor)"}, + {vendor: "vendor", version: "testing", expected: "(vendor, testing)"}, + } { + t.Run(tc.vendor, func(t *testing.T) { + cmd := &cobra.Command{ + Use: "test", + Annotations: map[string]string{ + pluginmanager.CommandAnnotationPlugin: "true", + pluginmanager.CommandAnnotationPluginVendor: tc.vendor, + pluginmanager.CommandAnnotationPluginVersion: tc.version, + }, + } + assert.Equal(t, vendorAndVersion(cmd), tc.expected) + }) + } +} + +func TestInvalidPlugin(t *testing.T) { + root := &cobra.Command{Use: "root"} + sub1 := &cobra.Command{Use: "sub1"} + sub1sub1 := &cobra.Command{Use: "sub1sub1"} + sub1sub2 := &cobra.Command{Use: "sub1sub2"} + sub2 := &cobra.Command{Use: "sub2"} + + assert.Assert(t, is.Len(invalidPlugins(root), 0)) + + sub1.Annotations = map[string]string{ + pluginmanager.CommandAnnotationPlugin: "true", + pluginmanager.CommandAnnotationPluginInvalid: "foo", + } + root.AddCommand(sub1, sub2) + sub1.AddCommand(sub1sub1, sub1sub2) + + assert.DeepEqual(t, invalidPlugins(root), []*cobra.Command{sub1}, cmpopts.IgnoreUnexported(cobra.Command{})) +} + +func TestDecoratedName(t *testing.T) { + root := &cobra.Command{Use: "root"} + topLevelCommand := &cobra.Command{Use: "pluginTopLevelCommand"} + root.AddCommand(topLevelCommand) + assert.Equal(t, decoratedName(topLevelCommand), "pluginTopLevelCommand ") + topLevelCommand.Annotations = map[string]string{pluginmanager.CommandAnnotationPlugin: "true"} + assert.Equal(t, decoratedName(topLevelCommand), "pluginTopLevelCommand*") +} diff --git a/cli/cli/command/builder/cmd.go b/cli/cli/command/builder/cmd.go new file mode 100644 index 00000000..724f7ca9 --- /dev/null +++ b/cli/cli/command/builder/cmd.go @@ -0,0 +1,25 @@ +package builder + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" +) 
+ +// NewBuilderCommand returns a cobra command for `builder` subcommands +func NewBuilderCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "builder", + Short: "Manage builds", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{"version": "1.31"}, + } + cmd.AddCommand( + NewPruneCommand(dockerCli), + image.NewBuildCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/builder/prune.go b/cli/cli/command/builder/prune.go new file mode 100644 index 00000000..3d4d4fc1 --- /dev/null +++ b/cli/cli/command/builder/prune.go @@ -0,0 +1,96 @@ +package builder + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool + filter opts.FilterOpt + keepStorage opts.MemBytes +} + +// NewPruneCommand returns a new cobra prune command for images +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune", + Short: "Remove build cache", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Annotations: map[string]string{"version": "1.39"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 
'unused-for=24h')") + flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache") + + return cmd +} + +const ( + normalWarning = `WARNING! This will remove all dangling build cache. Are you sure you want to continue?` + allCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?` +) + +func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := options.filter.Value() + pruneFilters = command.PruneFilters(dockerCli, pruneFilters) + + warning := normalWarning + if options.all { + warning = allCacheWarning + } + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return 0, "", nil + } + + report, err := dockerCli.Client().BuildCachePrune(context.Background(), types.BuildCachePruneOptions{ + All: options.all, + KeepStorage: options.keepStorage.Value(), + Filters: pruneFilters, + }) + if err != nil { + return 0, "", err + } + + if len(report.CachesDeleted) > 0 { + var sb strings.Builder + sb.WriteString("Deleted build cache objects:\n") + for _, id := range report.CachesDeleted { + sb.WriteString(id) + sb.WriteByte('\n') + } + output = sb.String() + } + + return report.SpaceReclaimed, output, nil +} + +// CachePrune executes a prune command for build cache +func CachePrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter}) +} diff --git a/cli/cli/command/bundlefile/bundlefile.go b/cli/cli/command/bundlefile/bundlefile.go new file mode 100644 index 00000000..07e2c8b0 --- /dev/null +++ b/cli/cli/command/bundlefile/bundlefile.go @@ -0,0 +1,70 @@ +package bundlefile + +import ( + "encoding/json" + "io" + + "github.com/pkg/errors" +) + +// Bundlefile stores the contents of a bundlefile +type Bundlefile struct { + Version string + Services map[string]Service +} + +// Service is a 
service from a bundlefile +type Service struct { + Image string + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Env []string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Ports []Port `json:",omitempty"` + WorkingDir *string `json:",omitempty"` + User *string `json:",omitempty"` + Networks []string `json:",omitempty"` +} + +// Port is a port as defined in a bundlefile +type Port struct { + Protocol string + Port uint32 +} + +// LoadFile loads a bundlefile from a path to the file +func LoadFile(reader io.Reader) (*Bundlefile, error) { + bundlefile := &Bundlefile{} + + decoder := json.NewDecoder(reader) + if err := decoder.Decode(bundlefile); err != nil { + switch jsonErr := err.(type) { + case *json.SyntaxError: + return nil, errors.Errorf( + "JSON syntax error at byte %v: %s", + jsonErr.Offset, + jsonErr.Error()) + case *json.UnmarshalTypeError: + return nil, errors.Errorf( + "Unexpected type at byte %v. Expected %s but received %s.", + jsonErr.Offset, + jsonErr.Type, + jsonErr.Value) + } + return nil, err + } + + return bundlefile, nil +} + +// Print writes the contents of the bundlefile to the output writer +// as human readable json +func Print(out io.Writer, bundle *Bundlefile) error { + bytes, err := json.MarshalIndent(*bundle, "", " ") + if err != nil { + return err + } + + _, err = out.Write(bytes) + return err +} diff --git a/cli/cli/command/bundlefile/bundlefile_test.go b/cli/cli/command/bundlefile/bundlefile_test.go new file mode 100644 index 00000000..cbaa341c --- /dev/null +++ b/cli/cli/command/bundlefile/bundlefile_test.go @@ -0,0 +1,78 @@ +package bundlefile + +import ( + "bytes" + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestLoadFileV01Success(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "redis": { + "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", + "Networks": 
["default"] + }, + "web": { + "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", + "Networks": ["default"], + "User": "web" + } + } + }`) + + bundle, err := LoadFile(reader) + assert.NilError(t, err) + assert.Check(t, is.Equal("0.1", bundle.Version)) + assert.Check(t, is.Len(bundle.Services, 2)) +} + +func TestLoadFileSyntaxError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": unquoted string + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "JSON syntax error at byte 37: invalid character 'u' looking for beginning of value") +} + +func TestLoadFileTypeError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "web": { + "Image": "redis", + "Networks": "none" + } + } + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "Unexpected type at byte 94. Expected []string but received string.") +} + +func TestPrint(t *testing.T) { + var buffer bytes.Buffer + bundle := &Bundlefile{ + Version: "0.1", + Services: map[string]Service{ + "web": { + Image: "image", + Command: []string{"echo", "something"}, + }, + }, + } + assert.Check(t, Print(&buffer, bundle)) + output := buffer.String() + assert.Check(t, is.Contains(output, "\"Image\": \"image\"")) + assert.Check(t, is.Contains(output, + `"Command": [ + "echo", + "something" + ]`)) +} diff --git a/cli/cli/command/checkpoint/client_test.go b/cli/cli/command/checkpoint/client_test.go new file mode 100644 index 00000000..c8fe190e --- /dev/null +++ b/cli/cli/command/checkpoint/client_test.go @@ -0,0 +1,36 @@ +package checkpoint + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + checkpointCreateFunc func(container string, options types.CheckpointCreateOptions) error + checkpointDeleteFunc func(container string, options types.CheckpointDeleteOptions) error + checkpointListFunc func(container 
string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} + +func (cli *fakeClient) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + if cli.checkpointCreateFunc != nil { + return cli.checkpointCreateFunc(container, options) + } + return nil +} + +func (cli *fakeClient) CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error { + if cli.checkpointDeleteFunc != nil { + return cli.checkpointDeleteFunc(container, options) + } + return nil +} + +func (cli *fakeClient) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + if cli.checkpointListFunc != nil { + return cli.checkpointListFunc(container, options) + } + return []types.Checkpoint{}, nil +} diff --git a/cli/cli/command/checkpoint/cmd.go b/cli/cli/command/checkpoint/cmd.go new file mode 100644 index 00000000..2a698e74 --- /dev/null +++ b/cli/cli/command/checkpoint/cmd.go @@ -0,0 +1,28 @@ +package checkpoint + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental) +func NewCheckpointCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "checkpoint", + Short: "Manage checkpoints", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "experimental": "", + "ostype": "linux", + "version": "1.25", + }, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/checkpoint/create.go b/cli/cli/command/checkpoint/create.go new file mode 100644 index 00000000..45b4bd63 --- /dev/null +++ b/cli/cli/command/checkpoint/create.go @@ -0,0 +1,57 @@ +package checkpoint + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + 
"github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type createOptions struct { + container string + checkpoint string + checkpointDir string + leaveRunning bool +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + var opts createOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] CONTAINER CHECKPOINT", + Short: "Create a checkpoint from a running container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.checkpoint = args[1] + return runCreate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint") + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runCreate(dockerCli command.Cli, opts createOptions) error { + client := dockerCli.Client() + + checkpointOpts := types.CheckpointCreateOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + Exit: !opts.leaveRunning, + } + + err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint) + return nil +} diff --git a/cli/cli/command/checkpoint/create_test.go b/cli/cli/command/checkpoint/create_test.go new file mode 100644 index 00000000..70c6aad7 --- /dev/null +++ b/cli/cli/command/checkpoint/create_test.go @@ -0,0 +1,72 @@ +package checkpoint + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCheckpointCreateErrors(t *testing.T) { + testCases := []struct { + args []string + checkpointCreateFunc func(container string, options types.CheckpointCreateOptions) error + expectedError 
string + }{ + { + args: []string{"too-few-arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"foo", "bar"}, + checkpointCreateFunc: func(container string, options types.CheckpointCreateOptions) error { + return errors.Errorf("error creating checkpoint for container foo") + }, + expectedError: "error creating checkpoint for container foo", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + checkpointCreateFunc: tc.checkpointCreateFunc, + }) + cmd := newCreateCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestCheckpointCreateWithOptions(t *testing.T) { + var containerID, checkpointID, checkpointDir string + var exit bool + cli := test.NewFakeCli(&fakeClient{ + checkpointCreateFunc: func(container string, options types.CheckpointCreateOptions) error { + containerID = container + checkpointID = options.CheckpointID + checkpointDir = options.CheckpointDir + exit = options.Exit + return nil + }, + }) + cmd := newCreateCommand(cli) + checkpoint := "checkpoint-bar" + cmd.SetArgs([]string{"container-foo", checkpoint}) + cmd.Flags().Set("leave-running", "true") + cmd.Flags().Set("checkpoint-dir", "/dir/foo") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("container-foo", containerID)) + assert.Check(t, is.Equal(checkpoint, checkpointID)) + assert.Check(t, is.Equal("/dir/foo", checkpointDir)) + assert.Check(t, is.Equal(false, exit)) + assert.Check(t, is.Equal(checkpoint, strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/checkpoint/formatter.go b/cli/cli/command/checkpoint/formatter.go new file mode 100644 index 00000000..1ad3aafd --- /dev/null +++ b/cli/cli/command/checkpoint/formatter.go @@ -0,0 +1,55 @@ +package checkpoint + +import ( + 
"github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" +) + +const ( + defaultCheckpointFormat = "table {{.Name}}" + + checkpointNameHeader = "CHECKPOINT NAME" +) + +// NewFormat returns a format for use with a checkpoint Context +func NewFormat(source string) formatter.Format { + switch source { + case formatter.TableFormatKey: + return defaultCheckpointFormat + } + return formatter.Format(source) +} + +// FormatWrite writes formatted checkpoints using the Context +func FormatWrite(ctx formatter.Context, checkpoints []types.Checkpoint) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, checkpoint := range checkpoints { + if err := format(&checkpointContext{c: checkpoint}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newCheckpointContext(), render) +} + +type checkpointContext struct { + formatter.HeaderContext + c types.Checkpoint +} + +func newCheckpointContext() *checkpointContext { + cpCtx := checkpointContext{} + cpCtx.Header = formatter.SubHeaderContext{ + "Name": checkpointNameHeader, + } + return &cpCtx +} + +func (c *checkpointContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *checkpointContext) Name() string { + return c.c.Name +} diff --git a/cli/cli/command/checkpoint/formatter_test.go b/cli/cli/command/checkpoint/formatter_test.go new file mode 100644 index 00000000..7256b2fb --- /dev/null +++ b/cli/cli/command/checkpoint/formatter_test.go @@ -0,0 +1,53 @@ +package checkpoint + +import ( + "bytes" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "gotest.tools/assert" +) + +func TestCheckpointContextFormatWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + { + formatter.Context{Format: NewFormat(defaultCheckpointFormat)}, + `CHECKPOINT NAME +checkpoint-1 +checkpoint-2 +checkpoint-3 +`, + }, + { + 
formatter.Context{Format: NewFormat("{{.Name}}")}, + `checkpoint-1 +checkpoint-2 +checkpoint-3 +`, + }, + { + formatter.Context{Format: NewFormat("{{.Name}}:")}, + `checkpoint-1: +checkpoint-2: +checkpoint-3: +`, + }, + } + + checkpoints := []types.Checkpoint{ + {Name: "checkpoint-1"}, + {Name: "checkpoint-2"}, + {Name: "checkpoint-3"}, + } + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + err := FormatWrite(testcase.context, checkpoints) + assert.NilError(t, err) + assert.Equal(t, out.String(), testcase.expected) + } +} diff --git a/cli/cli/command/checkpoint/list.go b/cli/cli/command/checkpoint/list.go new file mode 100644 index 00000000..fda23e29 --- /dev/null +++ b/cli/cli/command/checkpoint/list.go @@ -0,0 +1,54 @@ +package checkpoint + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type listOptions struct { + checkpointDir string +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + var opts listOptions + + cmd := &cobra.Command{ + Use: "ls [OPTIONS] CONTAINER", + Aliases: []string{"list"}, + Short: "List checkpoints for a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, args[0], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd + +} + +func runList(dockerCli command.Cli, container string, opts listOptions) error { + client := dockerCli.Client() + + listOpts := types.CheckpointListOptions{ + CheckpointDir: opts.checkpointDir, + } + + checkpoints, err := client.CheckpointList(context.Background(), container, listOpts) + if err != nil { + return err + } + + cpCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: 
NewFormat(formatter.TableFormatKey), + } + return FormatWrite(cpCtx, checkpoints) +} diff --git a/cli/cli/command/checkpoint/list_test.go b/cli/cli/command/checkpoint/list_test.go new file mode 100644 index 00000000..986d3ee4 --- /dev/null +++ b/cli/cli/command/checkpoint/list_test.go @@ -0,0 +1,67 @@ +package checkpoint + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestCheckpointListErrors(t *testing.T) { + testCases := []struct { + args []string + checkpointListFunc func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) + expectedError string + }{ + { + args: []string{}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"foo"}, + checkpointListFunc: func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + return []types.Checkpoint{}, errors.Errorf("error getting checkpoints for container foo") + }, + expectedError: "error getting checkpoints for container foo", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + checkpointListFunc: tc.checkpointListFunc, + }) + cmd := newListCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestCheckpointListWithOptions(t *testing.T) { + var containerID, checkpointDir string + cli := test.NewFakeCli(&fakeClient{ + checkpointListFunc: func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + containerID = container + checkpointDir = options.CheckpointDir + return []types.Checkpoint{ + {Name: "checkpoint-foo"}, + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.SetArgs([]string{"container-foo"}) + 
cmd.Flags().Set("checkpoint-dir", "/dir/foo") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("container-foo", containerID)) + assert.Check(t, is.Equal("/dir/foo", checkpointDir)) + golden.Assert(t, cli.OutBuffer().String(), "checkpoint-list-with-options.golden") +} diff --git a/cli/cli/command/checkpoint/remove.go b/cli/cli/command/checkpoint/remove.go new file mode 100644 index 00000000..3f894421 --- /dev/null +++ b/cli/cli/command/checkpoint/remove.go @@ -0,0 +1,44 @@ +package checkpoint + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type removeOptions struct { + checkpointDir string +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER CHECKPOINT", + Aliases: []string{"remove"}, + Short: "Remove a checkpoint", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args[0], args[1], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runRemove(dockerCli command.Cli, container string, checkpoint string, opts removeOptions) error { + client := dockerCli.Client() + + removeOpts := types.CheckpointDeleteOptions{ + CheckpointID: checkpoint, + CheckpointDir: opts.checkpointDir, + } + + return client.CheckpointDelete(context.Background(), container, removeOpts) +} diff --git a/cli/cli/command/checkpoint/remove_test.go b/cli/cli/command/checkpoint/remove_test.go new file mode 100644 index 00000000..d1a9ac4b --- /dev/null +++ b/cli/cli/command/checkpoint/remove_test.go @@ -0,0 +1,65 @@ +package checkpoint + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + 
"gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCheckpointRemoveErrors(t *testing.T) { + testCases := []struct { + args []string + checkpointDeleteFunc func(container string, options types.CheckpointDeleteOptions) error + expectedError string + }{ + { + args: []string{"too-few-arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"foo", "bar"}, + checkpointDeleteFunc: func(container string, options types.CheckpointDeleteOptions) error { + return errors.Errorf("error deleting checkpoint") + }, + expectedError: "error deleting checkpoint", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + checkpointDeleteFunc: tc.checkpointDeleteFunc, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestCheckpointRemoveWithOptions(t *testing.T) { + var containerID, checkpointID, checkpointDir string + cli := test.NewFakeCli(&fakeClient{ + checkpointDeleteFunc: func(container string, options types.CheckpointDeleteOptions) error { + containerID = container + checkpointID = options.CheckpointID + checkpointDir = options.CheckpointDir + return nil + }, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs([]string{"container-foo", "checkpoint-bar"}) + cmd.Flags().Set("checkpoint-dir", "/dir/foo") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("container-foo", containerID)) + assert.Check(t, is.Equal("checkpoint-bar", checkpointID)) + assert.Check(t, is.Equal("/dir/foo", checkpointDir)) +} diff --git a/cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden b/cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden new file mode 100644 index 00000000..f53f016a --- /dev/null +++ 
b/cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden @@ -0,0 +1,2 @@ +CHECKPOINT NAME +checkpoint-foo diff --git a/cli/cli/command/cli.go b/cli/cli/command/cli.go new file mode 100644 index 00000000..186854f8 --- /dev/null +++ b/cli/cli/command/cli.go @@ -0,0 +1,547 @@ +package command + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + + "github.com/docker/cli/cli/config" + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + dcontext "github.com/docker/cli/cli/context" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/store" + "github.com/docker/cli/cli/debug" + cliflags "github.com/docker/cli/cli/flags" + manifeststore "github.com/docker/cli/cli/manifest/store" + registryclient "github.com/docker/cli/cli/registry/client" + "github.com/docker/cli/cli/streams" + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/cli/version" + "github.com/docker/cli/internal/containerizedengine" + dopts "github.com/docker/cli/opts" + clitypes "github.com/docker/cli/types" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/term" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary" + notaryclient "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" +) + +// Streams is an interface which exposes the standard input and output streams +type Streams interface { + In() *streams.In + Out() *streams.Out + Err() io.Writer +} + +// Cli represents the docker command line client. 
+type Cli interface { + Client() client.APIClient + Out() *streams.Out + Err() io.Writer + In() *streams.In + SetIn(in *streams.In) + Apply(ops ...DockerCliOption) error + ConfigFile() *configfile.ConfigFile + ServerInfo() ServerInfo + ClientInfo() ClientInfo + NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) + DefaultVersion() string + ManifestStore() manifeststore.Store + RegistryClient(bool) registryclient.RegistryClient + ContentTrustEnabled() bool + NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) + ContextStore() store.Store + CurrentContext() string + StackOrchestrator(flagValue string) (Orchestrator, error) + DockerEndpoint() docker.Endpoint +} + +// DockerCli is an instance the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + configFile *configfile.ConfigFile + in *streams.In + out *streams.Out + err io.Writer + client client.APIClient + serverInfo ServerInfo + clientInfo ClientInfo + contentTrust bool + newContainerizeClient func(string) (clitypes.ContainerizedClient, error) + contextStore store.Store + currentContext string + dockerEndpoint docker.Endpoint + contextStoreConfig store.Config +} + +// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified. 
+func (cli *DockerCli) DefaultVersion() string { + return cli.clientInfo.DefaultVersion +} + +// Client returns the APIClient +func (cli *DockerCli) Client() client.APIClient { + return cli.client +} + +// Out returns the writer used for stdout +func (cli *DockerCli) Out() *streams.Out { + return cli.out +} + +// Err returns the writer used for stderr +func (cli *DockerCli) Err() io.Writer { + return cli.err +} + +// SetIn sets the reader used for stdin +func (cli *DockerCli) SetIn(in *streams.In) { + cli.in = in +} + +// In returns the reader used for stdin +func (cli *DockerCli) In() *streams.In { + return cli.in +} + +// ShowHelp shows the command help. +func ShowHelp(err io.Writer) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + cmd.SetOutput(err) + cmd.HelpFunc()(cmd, args) + return nil + } +} + +// ConfigFile returns the ConfigFile +func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { + return cli.configFile +} + +// ServerInfo returns the server version details for the host this client is +// connected to +func (cli *DockerCli) ServerInfo() ServerInfo { + return cli.serverInfo +} + +// ClientInfo returns the client details for the cli +func (cli *DockerCli) ClientInfo() ClientInfo { + return cli.clientInfo +} + +// ContentTrustEnabled returns whether content trust has been enabled by an +// environment variable. 
+func (cli *DockerCli) ContentTrustEnabled() bool { + return cli.contentTrust +} + +// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting +// or otherwise the client-side DOCKER_BUILDKIT environment variable +func BuildKitEnabled(si ServerInfo) (bool, error) { + buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit + if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" { + var err error + buildkitEnabled, err = strconv.ParseBool(buildkitEnv) + if err != nil { + return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value") + } + } + return buildkitEnabled, nil +} + +// ManifestStore returns a store for local manifests +func (cli *DockerCli) ManifestStore() manifeststore.Store { + // TODO: support override default location from config file + return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests")) +} + +// RegistryClient returns a client for communicating with a Docker distribution +// registry +func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient { + resolver := func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { + return ResolveAuthConfig(ctx, cli, index) + } + return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure) +} + +// InitializeOpt is the type of the functional options passed to DockerCli.Initialize +type InitializeOpt func(dockerCli *DockerCli) error + +// WithInitializeClient is passed to DockerCli.Initialize by callers who wish to set a particular API Client for use by the CLI. +func WithInitializeClient(makeClient func(dockerCli *DockerCli) (client.APIClient, error)) InitializeOpt { + return func(dockerCli *DockerCli) error { + var err error + dockerCli.client, err = makeClient(dockerCli) + return err + } +} + +// Initialize the dockerCli runs initialization that must happen after command +// line flags are parsed. 
+func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...InitializeOpt) error { + var err error + + for _, o := range ops { + if err := o(cli); err != nil { + return err + } + } + cliflags.SetLogLevel(opts.Common.LogLevel) + + if opts.ConfigDir != "" { + cliconfig.SetDir(opts.ConfigDir) + } + + if opts.Common.Debug { + debug.Enable() + } + + cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err) + + baseContextStore := store.New(cliconfig.ContextStoreDir(), cli.contextStoreConfig) + cli.contextStore = &ContextStoreWithDefault{ + Store: baseContextStore, + Resolver: func() (*DefaultContext, error) { + return ResolveDefaultContext(opts.Common, cli.ConfigFile(), cli.contextStoreConfig, cli.Err()) + }, + } + cli.currentContext, err = resolveContextName(opts.Common, cli.configFile, cli.contextStore) + if err != nil { + return err + } + cli.dockerEndpoint, err = resolveDockerEndpoint(cli.contextStore, cli.currentContext) + if err != nil { + return errors.Wrap(err, "unable to resolve docker endpoint") + } + + if cli.client == nil { + cli.client, err = newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile) + if tlsconfig.IsErrEncryptedKey(err) { + passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil) + newClient := func(password string) (client.APIClient, error) { + cli.dockerEndpoint.TLSPassword = password + return newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile) + } + cli.client, err = getClientWithPassword(passRetriever, newClient) + } + if err != nil { + return err + } + } + var experimentalValue string + // Environment variable always overrides configuration + if experimentalValue = os.Getenv("DOCKER_CLI_EXPERIMENTAL"); experimentalValue == "" { + experimentalValue = cli.configFile.Experimental + } + hasExperimental, err := isEnabled(experimentalValue) + if err != nil { + return errors.Wrap(err, "Experimental field") + } + cli.clientInfo = ClientInfo{ + DefaultVersion: cli.client.ClientVersion(), + 
HasExperimental: hasExperimental, + } + cli.initializeFromClient() + return nil +} + +// NewAPIClientFromFlags creates a new APIClient from command line flags +func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { + storeConfig := DefaultContextStoreConfig() + store := &ContextStoreWithDefault{ + Store: store.New(cliconfig.ContextStoreDir(), storeConfig), + Resolver: func() (*DefaultContext, error) { + return ResolveDefaultContext(opts, configFile, storeConfig, ioutil.Discard) + }, + } + contextName, err := resolveContextName(opts, configFile, store) + if err != nil { + return nil, err + } + endpoint, err := resolveDockerEndpoint(store, contextName) + if err != nil { + return nil, errors.Wrap(err, "unable to resolve docker endpoint") + } + return newAPIClientFromEndpoint(endpoint, configFile) +} + +func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigFile) (client.APIClient, error) { + clientOpts, err := ep.ClientOpts() + if err != nil { + return nil, err + } + customHeaders := configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = UserAgent() + clientOpts = append(clientOpts, client.WithHTTPHeaders(customHeaders)) + return client.NewClientWithOpts(clientOpts...) 
+} + +func resolveDockerEndpoint(s store.Reader, contextName string) (docker.Endpoint, error) { + ctxMeta, err := s.GetMetadata(contextName) + if err != nil { + return docker.Endpoint{}, err + } + epMeta, err := docker.EndpointFromContext(ctxMeta) + if err != nil { + return docker.Endpoint{}, err + } + return docker.WithTLSData(s, contextName, epMeta) +} + +// Resolve the Docker endpoint for the default context (based on config, env vars and CLI flags) +func resolveDefaultDockerEndpoint(opts *cliflags.CommonOptions) (docker.Endpoint, error) { + host, err := getServerHost(opts.Hosts, opts.TLSOptions) + if err != nil { + return docker.Endpoint{}, err + } + + var ( + skipTLSVerify bool + tlsData *dcontext.TLSData + ) + + if opts.TLSOptions != nil { + skipTLSVerify = opts.TLSOptions.InsecureSkipVerify + tlsData, err = dcontext.TLSDataFromFiles(opts.TLSOptions.CAFile, opts.TLSOptions.CertFile, opts.TLSOptions.KeyFile) + if err != nil { + return docker.Endpoint{}, err + } + } + + return docker.Endpoint{ + EndpointMeta: docker.EndpointMeta{ + Host: host, + SkipTLSVerify: skipTLSVerify, + }, + TLSData: tlsData, + }, nil +} + +func isEnabled(value string) (bool, error) { + switch value { + case "enabled": + return true, nil + case "", "disabled": + return false, nil + default: + return false, errors.Errorf("%q is not valid, should be either enabled or disabled", value) + } +} + +func (cli *DockerCli) initializeFromClient() { + ping, err := cli.client.Ping(context.Background()) + if err != nil { + // Default to true if we fail to connect to daemon + cli.serverInfo = ServerInfo{HasExperimental: true} + + if ping.APIVersion != "" { + cli.client.NegotiateAPIVersionPing(ping) + } + return + } + + cli.serverInfo = ServerInfo{ + HasExperimental: ping.Experimental, + OSType: ping.OSType, + BuildkitVersion: ping.BuilderVersion, + } + cli.client.NegotiateAPIVersionPing(ping) +} + +func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) 
(client.APIClient, error)) (client.APIClient, error) { + for attempts := 0; ; attempts++ { + passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts) + if giveup || err != nil { + return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase") + } + + apiclient, err := newClient(passwd) + if !tlsconfig.IsErrEncryptedKey(err) { + return apiclient, err + } + } +} + +// NotaryClient provides a Notary Repository to interact with signed metadata for an image +func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) { + return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...) +} + +// NewContainerizedEngineClient returns a containerized engine client +func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) { + return cli.newContainerizeClient(sockPath) +} + +// ContextStore returns the ContextStore +func (cli *DockerCli) ContextStore() store.Store { + return cli.contextStore +} + +// CurrentContext returns the current context name +func (cli *DockerCli) CurrentContext() string { + return cli.currentContext +} + +// StackOrchestrator resolves which stack orchestrator is in use +func (cli *DockerCli) StackOrchestrator(flagValue string) (Orchestrator, error) { + currentContext := cli.CurrentContext() + ctxRaw, err := cli.ContextStore().GetMetadata(currentContext) + if store.IsErrContextDoesNotExist(err) { + // case where the currentContext has been removed (CLI behavior is to fallback to using DOCKER_HOST based resolution) + return GetStackOrchestrator(flagValue, "", cli.ConfigFile().StackOrchestrator, cli.Err()) + } + if err != nil { + return "", err + } + ctxMeta, err := GetDockerContext(ctxRaw) + if err != nil { + return "", err + } + ctxOrchestrator := string(ctxMeta.StackOrchestrator) + return GetStackOrchestrator(flagValue, 
ctxOrchestrator, cli.ConfigFile().StackOrchestrator, cli.Err()) +} + +// DockerEndpoint returns the current docker endpoint +func (cli *DockerCli) DockerEndpoint() docker.Endpoint { + return cli.dockerEndpoint +} + +// Apply all the operation on the cli +func (cli *DockerCli) Apply(ops ...DockerCliOption) error { + for _, op := range ops { + if err := op(cli); err != nil { + return err + } + } + return nil +} + +// ServerInfo stores details about the supported features and platform of the +// server +type ServerInfo struct { + HasExperimental bool + OSType string + BuildkitVersion types.BuilderVersion +} + +// ClientInfo stores details about the supported features of the client +type ClientInfo struct { + HasExperimental bool + DefaultVersion string +} + +// NewDockerCli returns a DockerCli instance with all operators applied on it. +// It applies by default the standard streams, the content trust from +// environment and the default containerized client constructor operations. +func NewDockerCli(ops ...DockerCliOption) (*DockerCli, error) { + cli := &DockerCli{} + defaultOps := []DockerCliOption{ + WithContentTrustFromEnv(), + WithContainerizedClient(containerizedengine.NewClient), + } + cli.contextStoreConfig = DefaultContextStoreConfig() + ops = append(defaultOps, ops...) 
+ if err := cli.Apply(ops...); err != nil { + return nil, err + } + if cli.out == nil || cli.in == nil || cli.err == nil { + stdin, stdout, stderr := term.StdStreams() + if cli.in == nil { + cli.in = streams.NewIn(stdin) + } + if cli.out == nil { + cli.out = streams.NewOut(stdout) + } + if cli.err == nil { + cli.err = stderr + } + } + return cli, nil +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) { + var host string + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + return dopts.ParseHost(tlsOptions != nil, host) +} + +// UserAgent returns the user agent string used for making API requests +func UserAgent() string { + return "Docker-Client/" + version.Version + " (" + runtime.GOOS + ")" +} + +// resolveContextName resolves the current context name with the following rules: +// - setting both --context and --host flags is ambiguous +// - if --context is set, use this value +// - if --host flag or DOCKER_HOST is set, fallbacks to use the same logic as before context-store was added +// for backward compatibility with existing scripts +// - if DOCKER_CONTEXT is set, use this value +// - if Config file has a globally set "CurrentContext", use this value +// - fallbacks to default HOST, uses TLS config from flags/env vars +func resolveContextName(opts *cliflags.CommonOptions, config *configfile.ConfigFile, contextstore store.Reader) (string, error) { + if opts.Context != "" && len(opts.Hosts) > 0 { + return "", errors.New("Conflicting options: either specify --host or --context, not both") + } + if opts.Context != "" { + return opts.Context, nil + } + if len(opts.Hosts) > 0 { + return DefaultContextName, nil + } + if _, present := os.LookupEnv("DOCKER_HOST"); present { + return DefaultContextName, nil + } + if ctxName, ok := os.LookupEnv("DOCKER_CONTEXT"); ok { + return ctxName, nil + } + if config != nil && 
config.CurrentContext != "" { + _, err := contextstore.GetMetadata(config.CurrentContext) + if store.IsErrContextDoesNotExist(err) { + return "", errors.Errorf("Current context %q is not found on the file system, please check your config file at %s", config.CurrentContext, config.Filename) + } + return config.CurrentContext, err + } + return DefaultContextName, nil +} + +var defaultStoreEndpoints = []store.NamedTypeGetter{ + store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }), +} + +// RegisterDefaultStoreEndpoints registers a new named endpoint +// metadata type with the default context store config, so that +// endpoint will be supported by stores using the config returned by +// DefaultContextStoreConfig. +func RegisterDefaultStoreEndpoints(ep ...store.NamedTypeGetter) { + defaultStoreEndpoints = append(defaultStoreEndpoints, ep...) +} + +// DefaultContextStoreConfig returns a new store.Config with the default set of endpoints configured. +func DefaultContextStoreConfig() store.Config { + return store.NewConfig( + func() interface{} { return &DockerContext{} }, + defaultStoreEndpoints..., + ) +} diff --git a/cli/cli/command/cli_options.go b/cli/cli/command/cli_options.go new file mode 100644 index 00000000..607cd220 --- /dev/null +++ b/cli/cli/command/cli_options.go @@ -0,0 +1,105 @@ +package command + +import ( + "fmt" + "io" + "os" + "strconv" + + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/store" + "github.com/docker/cli/cli/streams" + clitypes "github.com/docker/cli/types" + "github.com/docker/docker/pkg/term" +) + +// DockerCliOption applies a modification on a DockerCli. +type DockerCliOption func(cli *DockerCli) error + +// WithStandardStreams sets a cli in, out and err streams with the standard streams. +func WithStandardStreams() DockerCliOption { + return func(cli *DockerCli) error { + // Set terminal emulation based on platform as required. 
+ stdin, stdout, stderr := term.StdStreams() + cli.in = streams.NewIn(stdin) + cli.out = streams.NewOut(stdout) + cli.err = stderr + return nil + } +} + +// WithCombinedStreams uses the same stream for the output and error streams. +func WithCombinedStreams(combined io.Writer) DockerCliOption { + return func(cli *DockerCli) error { + cli.out = streams.NewOut(combined) + cli.err = combined + return nil + } +} + +// WithInputStream sets a cli input stream. +func WithInputStream(in io.ReadCloser) DockerCliOption { + return func(cli *DockerCli) error { + cli.in = streams.NewIn(in) + return nil + } +} + +// WithOutputStream sets a cli output stream. +func WithOutputStream(out io.Writer) DockerCliOption { + return func(cli *DockerCli) error { + cli.out = streams.NewOut(out) + return nil + } +} + +// WithErrorStream sets a cli error stream. +func WithErrorStream(err io.Writer) DockerCliOption { + return func(cli *DockerCli) error { + cli.err = err + return nil + } +} + +// WithContentTrustFromEnv enables content trust on a cli from environment variable DOCKER_CONTENT_TRUST value. +func WithContentTrustFromEnv() DockerCliOption { + return func(cli *DockerCli) error { + cli.contentTrust = false + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + cli.contentTrust = true + } + } + return nil + } +} + +// WithContentTrust enables content trust on a cli. +func WithContentTrust(enabled bool) DockerCliOption { + return func(cli *DockerCli) error { + cli.contentTrust = enabled + return nil + } +} + +// WithContainerizedClient sets the containerized client constructor on a cli. 
+func WithContainerizedClient(containerizedFn func(string) (clitypes.ContainerizedClient, error)) DockerCliOption { + return func(cli *DockerCli) error { + cli.newContainerizeClient = containerizedFn + return nil + } +} + +// WithContextEndpointType add support for an additional typed endpoint in the context store +// Plugins should use this to store additional endpoints configuration in the context store +func WithContextEndpointType(endpointName string, endpointType store.TypeGetter) DockerCliOption { + return func(cli *DockerCli) error { + switch endpointName { + case docker.DockerEndpoint: + return fmt.Errorf("cannot change %q endpoint type", endpointName) + } + cli.contextStoreConfig.SetEndpoint(endpointName, endpointType) + return nil + } +} diff --git a/cli/cli/command/cli_options_test.go b/cli/cli/command/cli_options_test.go new file mode 100644 index 00000000..10dcad8b --- /dev/null +++ b/cli/cli/command/cli_options_test.go @@ -0,0 +1,37 @@ +package command + +import ( + "os" + "testing" + + "gotest.tools/assert" +) + +func contentTrustEnabled(t *testing.T) bool { + var cli DockerCli + assert.NilError(t, WithContentTrustFromEnv()(&cli)) + return cli.contentTrust +} + +// NB: Do not t.Parallel() this test -- it messes with the process environment. 
+func TestWithContentTrustFromEnv(t *testing.T) { + envvar := "DOCKER_CONTENT_TRUST" + if orig, ok := os.LookupEnv(envvar); ok { + defer func() { + os.Setenv(envvar, orig) + }() + } else { + defer func() { + os.Unsetenv(envvar) + }() + } + + os.Setenv(envvar, "true") + assert.Assert(t, contentTrustEnabled(t)) + os.Setenv(envvar, "false") + assert.Assert(t, !contentTrustEnabled(t)) + os.Setenv(envvar, "invalid") + assert.Assert(t, contentTrustEnabled(t)) + os.Unsetenv(envvar) + assert.Assert(t, !contentTrustEnabled(t)) +} diff --git a/cli/cli/command/cli_test.go b/cli/cli/command/cli_test.go new file mode 100644 index 00000000..34d0c219 --- /dev/null +++ b/cli/cli/command/cli_test.go @@ -0,0 +1,325 @@ +package command + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + "runtime" + "testing" + + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/flags" + clitypes "github.com/docker/cli/types" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/env" + "gotest.tools/fs" +) + +func TestNewAPIClientFromFlags(t *testing.T) { + host := "unix://path" + if runtime.GOOS == "windows" { + host = "npipe://./" + } + opts := &flags.CommonOptions{Hosts: []string{host}} + configFile := &configfile.ConfigFile{ + HTTPHeaders: map[string]string{ + "My-Header": "Custom-Value", + }, + } + apiclient, err := NewAPIClientFromFlags(opts, configFile) + assert.NilError(t, err) + assert.Check(t, is.Equal(host, apiclient.DaemonHost())) + + expectedHeaders := map[string]string{ + "My-Header": "Custom-Value", + "User-Agent": UserAgent(), + } + assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders())) + assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion())) +} + +func 
TestNewAPIClientFromFlagsForDefaultSchema(t *testing.T) { + host := ":2375" + opts := &flags.CommonOptions{Hosts: []string{host}} + configFile := &configfile.ConfigFile{ + HTTPHeaders: map[string]string{ + "My-Header": "Custom-Value", + }, + } + apiclient, err := NewAPIClientFromFlags(opts, configFile) + assert.NilError(t, err) + assert.Check(t, is.Equal("tcp://localhost"+host, apiclient.DaemonHost())) + + expectedHeaders := map[string]string{ + "My-Header": "Custom-Value", + "User-Agent": UserAgent(), + } + assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders())) + assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion())) +} + +func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) { + customVersion := "v3.3.3" + defer env.Patch(t, "DOCKER_API_VERSION", customVersion)() + defer env.Patch(t, "DOCKER_HOST", ":2375")() + + opts := &flags.CommonOptions{} + configFile := &configfile.ConfigFile{} + apiclient, err := NewAPIClientFromFlags(opts, configFile) + assert.NilError(t, err) + assert.Check(t, is.Equal(customVersion, apiclient.ClientVersion())) +} + +func TestNewAPIClientFromFlagsWithHttpProxyEnv(t *testing.T) { + defer env.Patch(t, "HTTP_PROXY", "http://proxy.acme.com:1234")() + defer env.Patch(t, "DOCKER_HOST", "tcp://docker.acme.com:2376")() + + opts := &flags.CommonOptions{} + configFile := &configfile.ConfigFile{} + apiclient, err := NewAPIClientFromFlags(opts, configFile) + assert.NilError(t, err) + transport, ok := apiclient.HTTPClient().Transport.(*http.Transport) + assert.Assert(t, ok) + assert.Assert(t, transport.Proxy != nil) + request, err := http.NewRequest(http.MethodGet, "tcp://docker.acme.com:2376", nil) + assert.NilError(t, err) + url, err := transport.Proxy(request) + assert.NilError(t, err) + assert.Check(t, is.Equal("http://proxy.acme.com:1234", url.String())) +} + +type fakeClient struct { + client.Client + pingFunc func() (types.Ping, error) + version string + negotiated bool +} + 
+func (c *fakeClient) Ping(_ context.Context) (types.Ping, error) { + return c.pingFunc() +} + +func (c *fakeClient) ClientVersion() string { + return c.version +} + +func (c *fakeClient) NegotiateAPIVersionPing(types.Ping) { + c.negotiated = true +} + +func TestInitializeFromClient(t *testing.T) { + defaultVersion := "v1.55" + + var testcases = []struct { + doc string + pingFunc func() (types.Ping, error) + expectedServer ServerInfo + negotiated bool + }{ + { + doc: "successful ping", + pingFunc: func() (types.Ping, error) { + return types.Ping{Experimental: true, OSType: "linux", APIVersion: "v1.30"}, nil + }, + expectedServer: ServerInfo{HasExperimental: true, OSType: "linux"}, + negotiated: true, + }, + { + doc: "failed ping, no API version", + pingFunc: func() (types.Ping, error) { + return types.Ping{}, errors.New("failed") + }, + expectedServer: ServerInfo{HasExperimental: true}, + }, + { + doc: "failed ping, with API version", + pingFunc: func() (types.Ping, error) { + return types.Ping{APIVersion: "v1.33"}, errors.New("failed") + }, + expectedServer: ServerInfo{HasExperimental: true}, + negotiated: true, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + apiclient := &fakeClient{ + pingFunc: testcase.pingFunc, + version: defaultVersion, + } + + cli := &DockerCli{client: apiclient} + cli.initializeFromClient() + assert.Check(t, is.DeepEqual(testcase.expectedServer, cli.serverInfo)) + assert.Check(t, is.Equal(testcase.negotiated, apiclient.negotiated)) + }) + } +} + +func TestExperimentalCLI(t *testing.T) { + defaultVersion := "v1.55" + + var testcases = []struct { + doc string + configfile string + expectedExperimentalCLI bool + }{ + { + doc: "default", + configfile: `{}`, + expectedExperimentalCLI: false, + }, + { + doc: "experimental", + configfile: `{ + "experimental": "enabled" +}`, + expectedExperimentalCLI: true, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + 
dir := fs.NewDir(t, testcase.doc, fs.WithFile("config.json", testcase.configfile)) + defer dir.Remove() + apiclient := &fakeClient{ + version: defaultVersion, + pingFunc: func() (types.Ping, error) { + return types.Ping{Experimental: true, OSType: "linux", APIVersion: defaultVersion}, nil + }, + } + + cli := &DockerCli{client: apiclient, err: os.Stderr} + cliconfig.SetDir(dir.Path()) + err := cli.Initialize(flags.NewClientOptions()) + assert.NilError(t, err) + assert.Check(t, is.Equal(testcase.expectedExperimentalCLI, cli.ClientInfo().HasExperimental)) + }) + } +} + +func TestGetClientWithPassword(t *testing.T) { + expected := "password" + + var testcases = []struct { + doc string + password string + retrieverErr error + retrieverGiveup bool + newClientErr error + expectedErr string + }{ + { + doc: "successful connect", + password: expected, + }, + { + doc: "password retriever exhausted", + retrieverGiveup: true, + retrieverErr: errors.New("failed"), + expectedErr: "private key is encrypted, but could not get passphrase", + }, + { + doc: "password retriever error", + retrieverErr: errors.New("failed"), + expectedErr: "failed", + }, + { + doc: "newClient error", + newClientErr: errors.New("failed to connect"), + expectedErr: "failed to connect", + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + passRetriever := func(_, _ string, _ bool, attempts int) (passphrase string, giveup bool, err error) { + // Always return an invalid pass first to test iteration + switch attempts { + case 0: + return "something else", false, nil + default: + return testcase.password, testcase.retrieverGiveup, testcase.retrieverErr + } + } + + newClient := func(currentPassword string) (client.APIClient, error) { + if testcase.newClientErr != nil { + return nil, testcase.newClientErr + } + if currentPassword == expected { + return &client.Client{}, nil + } + return &client.Client{}, x509.IncorrectPasswordError + } + + _, err := 
getClientWithPassword(passRetriever, newClient) + if testcase.expectedErr != "" { + assert.ErrorContains(t, err, testcase.expectedErr) + return + } + + assert.NilError(t, err) + }) + } +} + +func TestNewDockerCliAndOperators(t *testing.T) { + // Test default operations and also overriding default ones + cli, err := NewDockerCli( + WithContentTrust(true), + WithContainerizedClient(func(string) (clitypes.ContainerizedClient, error) { return nil, nil }), + ) + assert.NilError(t, err) + // Check streams are initialized + assert.Check(t, cli.In() != nil) + assert.Check(t, cli.Out() != nil) + assert.Check(t, cli.Err() != nil) + assert.Equal(t, cli.ContentTrustEnabled(), true) + client, err := cli.NewContainerizedEngineClient("") + assert.NilError(t, err) + assert.Equal(t, client, nil) + + // Apply can modify a dockerCli after construction + inbuf := bytes.NewBuffer([]byte("input")) + outbuf := bytes.NewBuffer(nil) + errbuf := bytes.NewBuffer(nil) + cli.Apply( + WithInputStream(ioutil.NopCloser(inbuf)), + WithOutputStream(outbuf), + WithErrorStream(errbuf), + ) + // Check input stream + inputStream, err := ioutil.ReadAll(cli.In()) + assert.NilError(t, err) + assert.Equal(t, string(inputStream), "input") + // Check output stream + fmt.Fprintf(cli.Out(), "output") + outputStream, err := ioutil.ReadAll(outbuf) + assert.NilError(t, err) + assert.Equal(t, string(outputStream), "output") + // Check error stream + fmt.Fprintf(cli.Err(), "error") + errStream, err := ioutil.ReadAll(errbuf) + assert.NilError(t, err) + assert.Equal(t, string(errStream), "error") +} + +func TestInitializeShouldAlwaysCreateTheContextStore(t *testing.T) { + cli, err := NewDockerCli() + assert.NilError(t, err) + assert.NilError(t, cli.Initialize(flags.NewClientOptions(), WithInitializeClient(func(cli *DockerCli) (client.APIClient, error) { + return client.NewClientWithOpts() + }))) + assert.Check(t, cli.ContextStore() != nil) +} diff --git a/cli/cli/command/commands/commands.go 
b/cli/cli/command/commands/commands.go new file mode 100644 index 00000000..c72b386d --- /dev/null +++ b/cli/cli/command/commands/commands.go @@ -0,0 +1,146 @@ +package commands + +import ( + "os" + "runtime" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/builder" + "github.com/docker/cli/cli/command/checkpoint" + "github.com/docker/cli/cli/command/config" + "github.com/docker/cli/cli/command/container" + "github.com/docker/cli/cli/command/context" + "github.com/docker/cli/cli/command/engine" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/command/manifest" + "github.com/docker/cli/cli/command/network" + "github.com/docker/cli/cli/command/node" + "github.com/docker/cli/cli/command/plugin" + "github.com/docker/cli/cli/command/registry" + "github.com/docker/cli/cli/command/secret" + "github.com/docker/cli/cli/command/service" + "github.com/docker/cli/cli/command/stack" + "github.com/docker/cli/cli/command/swarm" + "github.com/docker/cli/cli/command/system" + "github.com/docker/cli/cli/command/trust" + "github.com/docker/cli/cli/command/volume" + "github.com/spf13/cobra" +) + +// AddCommands adds all the commands from cli/command to the root command +func AddCommands(cmd *cobra.Command, dockerCli command.Cli) { + cmd.AddCommand( + // checkpoint + checkpoint.NewCheckpointCommand(dockerCli), + + // config + config.NewConfigCommand(dockerCli), + + // container + container.NewContainerCommand(dockerCli), + container.NewRunCommand(dockerCli), + + // image + image.NewImageCommand(dockerCli), + image.NewBuildCommand(dockerCli), + + // builder + builder.NewBuilderCommand(dockerCli), + + // manifest + manifest.NewManifestCommand(dockerCli), + + // network + network.NewNetworkCommand(dockerCli), + + // node + node.NewNodeCommand(dockerCli), + + // plugin + plugin.NewPluginCommand(dockerCli), + + // registry + registry.NewLoginCommand(dockerCli), + registry.NewLogoutCommand(dockerCli), + registry.NewSearchCommand(dockerCli), 
+ + // secret + secret.NewSecretCommand(dockerCli), + + // service + service.NewServiceCommand(dockerCli), + + // system + system.NewSystemCommand(dockerCli), + system.NewVersionCommand(dockerCli), + + // stack + stack.NewStackCommand(dockerCli), + + // swarm + swarm.NewSwarmCommand(dockerCli), + + // trust + trust.NewTrustCommand(dockerCli), + + // volume + volume.NewVolumeCommand(dockerCli), + + // context + context.NewContextCommand(dockerCli), + + // legacy commands may be hidden + hide(stack.NewTopLevelDeployCommand(dockerCli)), + hide(system.NewEventsCommand(dockerCli)), + hide(system.NewInfoCommand(dockerCli)), + hide(system.NewInspectCommand(dockerCli)), + hide(container.NewAttachCommand(dockerCli)), + hide(container.NewCommitCommand(dockerCli)), + hide(container.NewCopyCommand(dockerCli)), + hide(container.NewCreateCommand(dockerCli)), + hide(container.NewDiffCommand(dockerCli)), + hide(container.NewExecCommand(dockerCli)), + hide(container.NewExportCommand(dockerCli)), + hide(container.NewKillCommand(dockerCli)), + hide(container.NewLogsCommand(dockerCli)), + hide(container.NewPauseCommand(dockerCli)), + hide(container.NewPortCommand(dockerCli)), + hide(container.NewPsCommand(dockerCli)), + hide(container.NewRenameCommand(dockerCli)), + hide(container.NewRestartCommand(dockerCli)), + hide(container.NewRmCommand(dockerCli)), + hide(container.NewStartCommand(dockerCli)), + hide(container.NewStatsCommand(dockerCli)), + hide(container.NewStopCommand(dockerCli)), + hide(container.NewTopCommand(dockerCli)), + hide(container.NewUnpauseCommand(dockerCli)), + hide(container.NewUpdateCommand(dockerCli)), + hide(container.NewWaitCommand(dockerCli)), + hide(image.NewHistoryCommand(dockerCli)), + hide(image.NewImagesCommand(dockerCli)), + hide(image.NewImportCommand(dockerCli)), + hide(image.NewLoadCommand(dockerCli)), + hide(image.NewPullCommand(dockerCli)), + hide(image.NewPushCommand(dockerCli)), + hide(image.NewRemoveCommand(dockerCli)), + 
hide(image.NewSaveCommand(dockerCli)), + hide(image.NewTagCommand(dockerCli)), + ) + if runtime.GOOS == "linux" { + // engine + cmd.AddCommand(engine.NewEngineCommand(dockerCli)) + } +} + +func hide(cmd *cobra.Command) *cobra.Command { + // If the environment variable with name "DOCKER_HIDE_LEGACY_COMMANDS" is not empty, + // these legacy commands (such as `docker ps`, `docker exec`, etc) + // will not be shown in output console. + if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" { + return cmd + } + cmdCopy := *cmd + cmdCopy.Hidden = true + cmdCopy.Aliases = []string{} + return &cmdCopy +} diff --git a/cli/cli/command/config/client_test.go b/cli/cli/command/config/client_test.go new file mode 100644 index 00000000..2e19b775 --- /dev/null +++ b/cli/cli/command/config/client_test.go @@ -0,0 +1,45 @@ +package config + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + configCreateFunc func(swarm.ConfigSpec) (types.ConfigCreateResponse, error) + configInspectFunc func(string) (swarm.Config, []byte, error) + configListFunc func(types.ConfigListOptions) ([]swarm.Config, error) + configRemoveFunc func(string) error +} + +func (c *fakeClient) ConfigCreate(ctx context.Context, spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + if c.configCreateFunc != nil { + return c.configCreateFunc(spec) + } + return types.ConfigCreateResponse{}, nil +} + +func (c *fakeClient) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if c.configInspectFunc != nil { + return c.configInspectFunc(id) + } + return swarm.Config{}, nil, nil +} + +func (c *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if c.configListFunc != nil { + return c.configListFunc(options) + } + return []swarm.Config{}, nil +} + +func (c *fakeClient) ConfigRemove(ctx 
context.Context, name string) error { + if c.configRemoveFunc != nil { + return c.configRemoveFunc(name) + } + return nil +} diff --git a/cli/cli/command/config/cmd.go b/cli/cli/command/config/cmd.go new file mode 100644 index 00000000..7defe2a6 --- /dev/null +++ b/cli/cli/command/config/cmd.go @@ -0,0 +1,29 @@ +package config + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewConfigCommand returns a cobra command for `config` subcommands +func NewConfigCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "config", + Short: "Manage Docker configs", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.30", + "swarm": "", + }, + } + cmd.AddCommand( + newConfigListCommand(dockerCli), + newConfigCreateCommand(dockerCli), + newConfigInspectCommand(dockerCli), + newConfigRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/config/create.go b/cli/cli/command/config/create.go new file mode 100644 index 00000000..863c41c1 --- /dev/null +++ b/cli/cli/command/config/create.go @@ -0,0 +1,88 @@ +package config + +import ( + "context" + "fmt" + "io" + "io/ioutil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// CreateOptions specifies some options that are used when creating a config. 
+type CreateOptions struct { + Name string + TemplateDriver string + File string + Labels opts.ListOpts +} + +func newConfigCreateCommand(dockerCli command.Cli) *cobra.Command { + createOpts := CreateOptions{ + Labels: opts.NewListOpts(opts.ValidateLabel), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] CONFIG file|-", + Short: "Create a config from a file or STDIN", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + createOpts.Name = args[0] + createOpts.File = args[1] + return RunConfigCreate(dockerCli, createOpts) + }, + } + flags := cmd.Flags() + flags.VarP(&createOpts.Labels, "label", "l", "Config labels") + flags.StringVar(&createOpts.TemplateDriver, "template-driver", "", "Template driver") + flags.SetAnnotation("template-driver", "version", []string{"1.37"}) + + return cmd +} + +// RunConfigCreate creates a config with the given options. +func RunConfigCreate(dockerCli command.Cli, options CreateOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var in io.Reader = dockerCli.In() + if options.File != "-" { + file, err := system.OpenSequential(options.File) + if err != nil { + return err + } + in = file + defer file.Close() + } + + configData, err := ioutil.ReadAll(in) + if err != nil { + return errors.Errorf("Error reading content from %q: %v", options.File, err) + } + + spec := swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: options.Name, + Labels: opts.ConvertKVStringsToMap(options.Labels.GetAll()), + }, + Data: configData, + } + if options.TemplateDriver != "" { + spec.Templating = &swarm.Driver{ + Name: options.TemplateDriver, + } + } + r, err := client.ConfigCreate(ctx, spec) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), r.ID) + return nil +} diff --git a/cli/cli/command/config/create_test.go b/cli/cli/command/config/create_test.go new file mode 100644 index 00000000..bb2ea946 --- /dev/null +++ b/cli/cli/command/config/create_test.go @@ -0,0 +1,143 
@@ +package config + +import ( + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +const configDataFile = "config-create-with-name.golden" + +func TestConfigCreateErrors(t *testing.T) { + testCases := []struct { + args []string + configCreateFunc func(swarm.ConfigSpec) (types.ConfigCreateResponse, error) + expectedError string + }{ + { + args: []string{"too_few"}, + expectedError: "requires exactly 2 arguments", + }, + {args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"name", filepath.Join("testdata", configDataFile)}, + configCreateFunc: func(configSpec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + return types.ConfigCreateResponse{}, errors.Errorf("error creating config") + }, + expectedError: "error creating config", + }, + } + for _, tc := range testCases { + cmd := newConfigCreateCommand( + test.NewFakeCli(&fakeClient{ + configCreateFunc: tc.configCreateFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestConfigCreateWithName(t *testing.T) { + name := "foo" + var actual []byte + cli := test.NewFakeCli(&fakeClient{ + configCreateFunc: func(spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + if spec.Name != name { + return types.ConfigCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + actual = spec.Data + + return types.ConfigCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newConfigCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, string(actual), configDataFile) + 
assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestConfigCreateWithLabels(t *testing.T) { + expectedLabels := map[string]string{ + "lbl1": "Label-foo", + "lbl2": "Label-bar", + } + name := "foo" + + data, err := ioutil.ReadFile(filepath.Join("testdata", configDataFile)) + assert.NilError(t, err) + + expected := swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: expectedLabels, + }, + Data: data, + } + + cli := test.NewFakeCli(&fakeClient{ + configCreateFunc: func(spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + if !reflect.DeepEqual(spec, expected) { + return types.ConfigCreateResponse{}, errors.Errorf("expected %+v, got %+v", expected, spec) + } + + return types.ConfigCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newConfigCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)}) + cmd.Flags().Set("label", "lbl1=Label-foo") + cmd.Flags().Set("label", "lbl2=Label-bar") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestConfigCreateWithTemplatingDriver(t *testing.T) { + expectedDriver := &swarm.Driver{ + Name: "template-driver", + } + name := "foo" + + cli := test.NewFakeCli(&fakeClient{ + configCreateFunc: func(spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + if spec.Name != name { + return types.ConfigCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + if spec.Templating.Name != expectedDriver.Name { + return types.ConfigCreateResponse{}, errors.Errorf("expected driver %v, got %v", expectedDriver, spec.Labels) + } + + return types.ConfigCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newConfigCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)}) + cmd.Flags().Set("template-driver", expectedDriver.Name) + assert.NilError(t, 
cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/config/formatter.go b/cli/cli/command/config/formatter.go new file mode 100644 index 00000000..4aebdb61 --- /dev/null +++ b/cli/cli/command/config/formatter.go @@ -0,0 +1,172 @@ +package config + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultConfigTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}" + configIDHeader = "ID" + configCreatedHeader = "CREATED" + configUpdatedHeader = "UPDATED" + configInspectPrettyTemplate formatter.Format = `ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Created at: {{.CreatedAt}} +Updated at: {{.UpdatedAt}} +Data: +{{.Data}}` +) + +// NewFormat returns a Format for rendering using a config Context +func NewFormat(source string, quiet bool) formatter.Format { + switch source { + case formatter.PrettyFormatKey: + return configInspectPrettyTemplate + case formatter.TableFormatKey: + if quiet { + return formatter.DefaultQuietFormat + } + return defaultConfigTableFormat + } + return formatter.Format(source) +} + +// FormatWrite writes the context +func FormatWrite(ctx formatter.Context, configs []swarm.Config) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, config := range configs { + configCtx := &configContext{c: config} + if err := format(configCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(newConfigContext(), render) +} + +func newConfigContext() *configContext { + cCtx := &configContext{} + + cCtx.Header = formatter.SubHeaderContext{ + "ID": configIDHeader, + "Name": 
formatter.NameHeader, + "CreatedAt": configCreatedHeader, + "UpdatedAt": configUpdatedHeader, + "Labels": formatter.LabelsHeader, + } + return cCtx +} + +type configContext struct { + formatter.HeaderContext + c swarm.Config +} + +func (c *configContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *configContext) ID() string { + return c.c.ID +} + +func (c *configContext) Name() string { + return c.c.Spec.Annotations.Name +} + +func (c *configContext) CreatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.c.Meta.CreatedAt)) + " ago" +} + +func (c *configContext) UpdatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.c.Meta.UpdatedAt)) + " ago" +} + +func (c *configContext) Labels() string { + mapLabels := c.c.Spec.Annotations.Labels + if mapLabels == nil { + return "" + } + var joinLabels []string + for k, v := range mapLabels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *configContext) Label(name string) string { + if c.c.Spec.Annotations.Labels == nil { + return "" + } + return c.c.Spec.Annotations.Labels[name] +} + +// InspectFormatWrite renders the context for a list of configs +func InspectFormatWrite(ctx formatter.Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != configInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext formatter.SubContext) error) error { + for _, ref := range refs { + configI, _, err := getRef(ref) + if err != nil { + return err + } + config, ok := configI.(swarm.Config) + if !ok { + return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&configInspectContext{Config: config}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&configInspectContext{}, render) +} + +type configInspectContext struct { + swarm.Config + formatter.SubContext +} 
+ +func (ctx *configInspectContext) ID() string { + return ctx.Config.ID +} + +func (ctx *configInspectContext) Name() string { + return ctx.Config.Spec.Name +} + +func (ctx *configInspectContext) Labels() map[string]string { + return ctx.Config.Spec.Labels +} + +func (ctx *configInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Config.CreatedAt) +} + +func (ctx *configInspectContext) UpdatedAt() string { + return command.PrettyPrint(ctx.Config.UpdatedAt) +} + +func (ctx *configInspectContext) Data() string { + if ctx.Config.Spec.Data == nil { + return "" + } + return string(ctx.Config.Spec.Data) +} diff --git a/cli/cli/command/config/formatter_test.go b/cli/cli/command/config/formatter_test.go new file mode 100644 index 00000000..aa8de73c --- /dev/null +++ b/cli/cli/command/config/formatter_test.go @@ -0,0 +1,65 @@ +package config + +import ( + "bytes" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestConfigContextFormatWrite(t *testing.T) { + // Check default output format (verbose and non-verbose mode) for table headers + cases := []struct { + context formatter.Context + expected string + }{ + // Errors + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + {formatter.Context{Format: NewFormat("table", false)}, + `ID NAME CREATED UPDATED +1 passwords Less than a second ago Less than a second ago +2 id_rsa Less than a second ago Less than a second ago +`}, + {formatter.Context{Format: NewFormat("table {{.Name}}", true)}, + `NAME +passwords +id_rsa +`}, + {formatter.Context{Format: NewFormat("{{.ID}}-{{.Name}}", false)}, + `1-passwords +2-id_rsa +`}, + } + + configs := 
[]swarm.Config{ + {ID: "1", + Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, + Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "passwords"}}}, + {ID: "2", + Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, + Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "id_rsa"}}}, + } + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + if err := FormatWrite(testcase.context, configs); err != nil { + assert.ErrorContains(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} diff --git a/cli/cli/command/config/inspect.go b/cli/cli/command/config/inspect.go new file mode 100644 index 00000000..cb39ce27 --- /dev/null +++ b/cli/cli/command/config/inspect.go @@ -0,0 +1,68 @@ +package config + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +// InspectOptions contains options for the docker config inspect command. +type InspectOptions struct { + Names []string + Format string + Pretty bool +} + +func newConfigInspectCommand(dockerCli command.Cli) *cobra.Command { + opts := InspectOptions{} + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] CONFIG [CONFIG...]", + Short: "Display detailed information on one or more configs", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Names = args + return RunConfigInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.Format, "format", "f", "", "Format the output using the given Go template") + cmd.Flags().BoolVar(&opts.Pretty, "pretty", false, "Print the information in a human friendly format") + return cmd +} + +// RunConfigInspect inspects the given Swarm config. 
+func RunConfigInspect(dockerCli command.Cli, opts InspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.Pretty { + opts.Format = "pretty" + } + + getRef := func(id string) (interface{}, []byte, error) { + return client.ConfigInspectWithRaw(ctx, id) + } + f := opts.Format + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + configCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(f, false), + } + + if err := InspectFormatWrite(configCtx, opts.Names, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil + +} diff --git a/cli/cli/command/config/inspect_test.go b/cli/cli/command/config/inspect_test.go new file mode 100644 index 00000000..1b4f275c --- /dev/null +++ b/cli/cli/command/config/inspect_test.go @@ -0,0 +1,172 @@ +package config + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestConfigInspectErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + configInspectFunc func(configID string) (swarm.Config, []byte, error) + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"foo"}, + configInspectFunc: func(configID string) (swarm.Config, []byte, error) { + return swarm.Config{}, nil, errors.Errorf("error while inspecting the config") + }, + expectedError: "error while inspecting the config", + }, + { + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template parsing error", + }, + { + args: []string{"foo", "bar"}, + configInspectFunc: func(configID string) (swarm.Config, []byte, error) { + if configID == "foo" { + return *Config(ConfigName("foo")), nil, nil + } + return swarm.Config{}, nil, errors.Errorf("error while inspecting the config") + }, + expectedError: "error while inspecting the config", + }, + } + for _, tc := range testCases { + cmd := newConfigInspectCommand( + test.NewFakeCli(&fakeClient{ + configInspectFunc: tc.configInspectFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestConfigInspectWithoutFormat(t *testing.T) { + testCases := []struct { + name string + args []string + configInspectFunc func(configID string) (swarm.Config, []byte, error) + }{ + { + name: "single-config", + args: []string{"foo"}, + configInspectFunc: func(name string) (swarm.Config, []byte, error) { + if name != "foo" { + return swarm.Config{}, nil, errors.Errorf("Invalid name, expected %s, got %s", "foo", name) + } + return *Config(ConfigID("ID-foo"), ConfigName("foo")), nil, nil + }, + }, + { + name: "multiple-configs-with-labels", + 
args: []string{"foo", "bar"}, + configInspectFunc: func(name string) (swarm.Config, []byte, error) { + return *Config(ConfigID("ID-"+name), ConfigName(name), ConfigLabels(map[string]string{ + "label1": "label-foo", + })), nil, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{configInspectFunc: tc.configInspectFunc}) + cmd := newConfigInspectCommand(cli) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("config-inspect-without-format.%s.golden", tc.name)) + } +} + +func TestConfigInspectWithFormat(t *testing.T) { + configInspectFunc := func(name string) (swarm.Config, []byte, error) { + return *Config(ConfigName("foo"), ConfigLabels(map[string]string{ + "label1": "label-foo", + })), nil, nil + } + testCases := []struct { + name string + format string + args []string + configInspectFunc func(name string) (swarm.Config, []byte, error) + }{ + { + name: "simple-template", + format: "{{.Spec.Name}}", + args: []string{"foo"}, + configInspectFunc: configInspectFunc, + }, + { + name: "json-template", + format: "{{json .Spec.Labels}}", + args: []string{"foo"}, + configInspectFunc: configInspectFunc, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + configInspectFunc: tc.configInspectFunc, + }) + cmd := newConfigInspectCommand(cli) + cmd.SetArgs(tc.args) + cmd.Flags().Set("format", tc.format) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("config-inspect-with-format.%s.golden", tc.name)) + } +} + +func TestConfigInspectPretty(t *testing.T) { + testCases := []struct { + name string + configInspectFunc func(string) (swarm.Config, []byte, error) + }{ + { + name: "simple", + configInspectFunc: func(id string) (swarm.Config, []byte, error) { + return *Config( + ConfigLabels(map[string]string{ + "lbl1": "value1", + }), + ConfigID("configID"), + ConfigName("configName"), + ConfigCreatedAt(time.Time{}), 
+ ConfigUpdatedAt(time.Time{}), + ConfigData([]byte("payload here")), + ), []byte{}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + configInspectFunc: tc.configInspectFunc, + }) + cmd := newConfigInspectCommand(cli) + + cmd.SetArgs([]string{"configID"}) + cmd.Flags().Set("pretty", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("config-inspect-pretty.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/config/ls.go b/cli/cli/command/config/ls.go new file mode 100644 index 00000000..c004a718 --- /dev/null +++ b/cli/cli/command/config/ls.go @@ -0,0 +1,72 @@ +package config + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +// ListOptions contains options for the docker config ls command. +type ListOptions struct { + Quiet bool + Format string + Filter opts.FilterOpt +} + +func newConfigListCommand(dockerCli command.Cli) *cobra.Command { + listOpts := ListOptions{Filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List configs", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return RunConfigList(dockerCli, listOpts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&listOpts.Quiet, "quiet", "q", false, "Only display IDs") + flags.StringVarP(&listOpts.Format, "format", "", "", "Pretty-print configs using a Go template") + flags.VarP(&listOpts.Filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +// RunConfigList lists Swarm configs. 
+func RunConfigList(dockerCli command.Cli, options ListOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + configs, err := client.ConfigList(ctx, types.ConfigListOptions{Filters: options.Filter.Value()}) + if err != nil { + return err + } + + format := options.Format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ConfigFormat) > 0 && !options.Quiet { + format = dockerCli.ConfigFile().ConfigFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Slice(configs, func(i, j int) bool { + return sortorder.NaturalLess(configs[i].Spec.Name, configs[j].Spec.Name) + }) + + configCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(format, options.Quiet), + } + return FormatWrite(configCtx, configs) +} diff --git a/cli/cli/command/config/ls_test.go b/cli/cli/command/config/ls_test.go new file mode 100644 index 00000000..d3055b4a --- /dev/null +++ b/cli/cli/command/config/ls_test.go @@ -0,0 +1,158 @@ +package config + +import ( + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestConfigListErrors(t *testing.T) { + testCases := []struct { + args []string + configListFunc func(types.ConfigListOptions) ([]swarm.Config, error) + expectedError string + }{ + { + args: []string{"foo"}, + expectedError: "accepts no argument", + }, + { + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return []swarm.Config{}, errors.Errorf("error listing configs") + }, + expectedError: "error listing configs", + }, + } + for _, tc := range testCases { + cmd := newConfigListCommand( + test.NewFakeCli(&fakeClient{ + configListFunc: tc.configListFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestConfigList(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return []swarm.Config{ + *Config(ConfigID("ID-1-foo"), + ConfigName("1-foo"), + ConfigVersion(swarm.Version{Index: 10}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Config(ConfigID("ID-10-foo"), + ConfigName("10-foo"), + ConfigVersion(swarm.Version{Index: 11}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Config(ConfigID("ID-2-foo"), + ConfigName("2-foo"), + ConfigVersion(swarm.Version{Index: 11}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + }, nil + }, + }) + cmd := newConfigListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-sort.golden") +} + +func TestConfigListWithQuietOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return 
[]swarm.Config{ + *Config(ConfigID("ID-foo"), ConfigName("foo")), + *Config(ConfigID("ID-bar"), ConfigName("bar"), ConfigLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cmd := newConfigListCommand(cli) + cmd.Flags().Set("quiet", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-with-quiet-option.golden") +} + +func TestConfigListWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return []swarm.Config{ + *Config(ConfigID("ID-foo"), ConfigName("foo")), + *Config(ConfigID("ID-bar"), ConfigName("bar"), ConfigLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + ConfigFormat: "{{ .Name }} {{ .Labels }}", + }) + cmd := newConfigListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-with-config-format.golden") +} + +func TestConfigListWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + return []swarm.Config{ + *Config(ConfigID("ID-foo"), ConfigName("foo")), + *Config(ConfigID("ID-bar"), ConfigName("bar"), ConfigLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cmd := newConfigListCommand(cli) + cmd.Flags().Set("format", "{{ .Name }} {{ .Labels }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-with-format.golden") +} + +func TestConfigListWithFilter(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) { + assert.Check(t, is.Equal("foo", options.Filters.Get("name")[0])) + assert.Check(t, is.Equal("lbl1=Label-bar", options.Filters.Get("label")[0])) + return []swarm.Config{ + *Config(ConfigID("ID-foo"), + 
ConfigName("foo"), + ConfigVersion(swarm.Version{Index: 10}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Config(ConfigID("ID-bar"), + ConfigName("bar"), + ConfigVersion(swarm.Version{Index: 11}), + ConfigCreatedAt(time.Now().Add(-2*time.Hour)), + ConfigUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + }, nil + }, + }) + cmd := newConfigListCommand(cli) + cmd.Flags().Set("filter", "name=foo") + cmd.Flags().Set("filter", "label=lbl1=Label-bar") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "config-list-with-filter.golden") +} diff --git a/cli/cli/command/config/remove.go b/cli/cli/command/config/remove.go new file mode 100644 index 00000000..7c30bd46 --- /dev/null +++ b/cli/cli/command/config/remove.go @@ -0,0 +1,55 @@ +package config + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// RemoveOptions contains options for the docker config rm command. +type RemoveOptions struct { + Names []string +} + +func newConfigRemoveCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "rm CONFIG [CONFIG...]", + Aliases: []string{"remove"}, + Short: "Remove one or more configs", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts := RemoveOptions{ + Names: args, + } + return RunConfigRemove(dockerCli, opts) + }, + } +} + +// RunConfigRemove removes the given Swarm configs. 
+func RunConfigRemove(dockerCli command.Cli, opts RemoveOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, name := range opts.Names { + if err := client.ConfigRemove(ctx, name); err != nil { + errs = append(errs, err.Error()) + continue + } + + fmt.Fprintln(dockerCli.Out(), name) + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/cli/cli/command/config/remove_test.go b/cli/cli/command/config/remove_test.go new file mode 100644 index 00000000..4a1980bc --- /dev/null +++ b/cli/cli/command/config/remove_test.go @@ -0,0 +1,79 @@ +package config + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestConfigRemoveErrors(t *testing.T) { + testCases := []struct { + args []string + configRemoveFunc func(string) error + expectedError string + }{ + { + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + { + args: []string{"foo"}, + configRemoveFunc: func(name string) error { + return errors.Errorf("error removing config") + }, + expectedError: "error removing config", + }, + } + for _, tc := range testCases { + cmd := newConfigRemoveCommand( + test.NewFakeCli(&fakeClient{ + configRemoveFunc: tc.configRemoveFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestConfigRemoveWithName(t *testing.T) { + names := []string{"foo", "bar"} + var removedConfigs []string + cli := test.NewFakeCli(&fakeClient{ + configRemoveFunc: func(name string) error { + removedConfigs = append(removedConfigs, name) + return nil + }, + }) + cmd := newConfigRemoveCommand(cli) + cmd.SetArgs(names) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(names, strings.Split(strings.TrimSpace(cli.OutBuffer().String()), "\n"))) + 
assert.Check(t, is.DeepEqual(names, removedConfigs)) +} + +func TestConfigRemoveContinueAfterError(t *testing.T) { + names := []string{"foo", "bar"} + var removedConfigs []string + + cli := test.NewFakeCli(&fakeClient{ + configRemoveFunc: func(name string) error { + removedConfigs = append(removedConfigs, name) + if name == "foo" { + return errors.Errorf("error removing config: %s", name) + } + return nil + }, + }) + + cmd := newConfigRemoveCommand(cli) + cmd.SetArgs(names) + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), "error removing config: foo") + assert.Check(t, is.DeepEqual(names, removedConfigs)) +} diff --git a/cli/cli/command/config/testdata/config-create-with-name.golden b/cli/cli/command/config/testdata/config-create-with-name.golden new file mode 100644 index 00000000..7b28bb3f --- /dev/null +++ b/cli/cli/command/config/testdata/config-create-with-name.golden @@ -0,0 +1 @@ +config_foo_bar diff --git a/cli/cli/command/config/testdata/config-inspect-pretty.simple.golden b/cli/cli/command/config/testdata/config-inspect-pretty.simple.golden new file mode 100644 index 00000000..60b5c7fa --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-pretty.simple.golden @@ -0,0 +1,8 @@ +ID: configID +Name: configName +Labels: + - lbl1=value1 +Created at: 0001-01-01 00:00:00 +0000 utc +Updated at: 0001-01-01 00:00:00 +0000 utc +Data: +payload here diff --git a/cli/cli/command/config/testdata/config-inspect-with-format.json-template.golden b/cli/cli/command/config/testdata/config-inspect-with-format.json-template.golden new file mode 100644 index 00000000..aab678f8 --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-with-format.json-template.golden @@ -0,0 +1 @@ +{"label1":"label-foo"} diff --git a/cli/cli/command/config/testdata/config-inspect-with-format.simple-template.golden b/cli/cli/command/config/testdata/config-inspect-with-format.simple-template.golden new file mode 100644 index 00000000..257cc564 --- /dev/null +++ 
b/cli/cli/command/config/testdata/config-inspect-with-format.simple-template.golden @@ -0,0 +1 @@ +foo diff --git a/cli/cli/command/config/testdata/config-inspect-without-format.multiple-configs-with-labels.golden b/cli/cli/command/config/testdata/config-inspect-without-format.multiple-configs-with-labels.golden new file mode 100644 index 00000000..b01a400c --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-without-format.multiple-configs-with-labels.golden @@ -0,0 +1,26 @@ +[ + { + "ID": "ID-foo", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "foo", + "Labels": { + "label1": "label-foo" + } + } + }, + { + "ID": "ID-bar", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "bar", + "Labels": { + "label1": "label-foo" + } + } + } +] diff --git a/cli/cli/command/config/testdata/config-inspect-without-format.single-config.golden b/cli/cli/command/config/testdata/config-inspect-without-format.single-config.golden new file mode 100644 index 00000000..c4f41c10 --- /dev/null +++ b/cli/cli/command/config/testdata/config-inspect-without-format.single-config.golden @@ -0,0 +1,12 @@ +[ + { + "ID": "ID-foo", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "foo", + "Labels": null + } + } +] diff --git a/cli/cli/command/config/testdata/config-list-sort.golden b/cli/cli/command/config/testdata/config-list-sort.golden new file mode 100644 index 00000000..141057c3 --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-sort.golden @@ -0,0 +1,4 @@ +ID NAME CREATED UPDATED +ID-1-foo 1-foo 2 hours ago About an hour ago +ID-2-foo 2-foo 2 hours ago About an hour ago +ID-10-foo 10-foo 2 hours ago About an hour ago diff --git a/cli/cli/command/config/testdata/config-list-with-config-format.golden 
b/cli/cli/command/config/testdata/config-list-with-config-format.golden new file mode 100644 index 00000000..a64bb595 --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-with-config-format.golden @@ -0,0 +1,2 @@ +bar label=label-bar +foo diff --git a/cli/cli/command/config/testdata/config-list-with-filter.golden b/cli/cli/command/config/testdata/config-list-with-filter.golden new file mode 100644 index 00000000..6fdc13b8 --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-with-filter.golden @@ -0,0 +1,3 @@ +ID NAME CREATED UPDATED +ID-bar bar 2 hours ago About an hour ago +ID-foo foo 2 hours ago About an hour ago diff --git a/cli/cli/command/config/testdata/config-list-with-format.golden b/cli/cli/command/config/testdata/config-list-with-format.golden new file mode 100644 index 00000000..a64bb595 --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-with-format.golden @@ -0,0 +1,2 @@ +bar label=label-bar +foo diff --git a/cli/cli/command/config/testdata/config-list-with-quiet-option.golden b/cli/cli/command/config/testdata/config-list-with-quiet-option.golden new file mode 100644 index 00000000..145fc38d --- /dev/null +++ b/cli/cli/command/config/testdata/config-list-with-quiet-option.golden @@ -0,0 +1,2 @@ +ID-bar +ID-foo diff --git a/cli/cli/command/container/attach.go b/cli/cli/command/container/attach.go new file mode 100644 index 00000000..de96a3b7 --- /dev/null +++ b/cli/cli/command/container/attach.go @@ -0,0 +1,181 @@ +package container + +import ( + "context" + "fmt" + "io" + "net/http/httputil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/signal" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +type attachOptions struct { + noStdin bool + proxy bool + detachKeys string + + container string +} + +func 
inspectContainerAndCheckState(ctx context.Context, cli client.APIClient, args string) (*types.ContainerJSON, error) { + c, err := cli.ContainerInspect(ctx, args) + if err != nil { + return nil, err + } + if !c.State.Running { + return nil, errors.New("You cannot attach to a stopped container, start it first") + } + if c.State.Paused { + return nil, errors.New("You cannot attach to a paused container, unpause it first") + } + if c.State.Restarting { + return nil, errors.New("You cannot attach to a restarting container, wait until it is running") + } + + return &c, nil +} + +// NewAttachCommand creates a new cobra.Command for `docker attach` +func NewAttachCommand(dockerCli command.Cli) *cobra.Command { + var opts attachOptions + + cmd := &cobra.Command{ + Use: "attach [OPTIONS] CONTAINER", + Short: "Attach local standard input, output, and error streams to a running container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runAttach(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") + flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + return cmd +} + +func runAttach(dockerCli command.Cli, opts *attachOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + // request channel to wait for client + resultC, errC := client.ContainerWait(ctx, opts.container, "") + + c, err := inspectContainerAndCheckState(ctx, client, opts.container) + if err != nil { + return err + } + + if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil { + return err + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: !opts.noStdin && 
c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = dockerCli.In() + } + + if opts.proxy && !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, opts.container) + defer signal.StopCatch(sigc) + } + + resp, errAttach := client.ContainerAttach(ctx, opts.container, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return errAttach + } + defer resp.Close() + + // If use docker attach command to attach to a stop container, it will return + // "You cannot attach to a stopped container" error, it's ok, but when + // attach to a running container, it(docker attach) use inspect to check + // the container's state, if it pass the state check on the client side, + // and then the container is stopped, docker attach command still attach to + // the container and not exit. + // + // Recheck the container's state to avoid attach block. 
+ _, err = inspectContainerAndCheckState(ctx, client, opts.container) + if err != nil { + return err + } + + if c.Config.Tty && dockerCli.Out().IsTerminal() { + resizeTTY(ctx, dockerCli, opts.container) + } + + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: dockerCli.Out(), + errorStream: dockerCli.Err(), + resp: resp, + tty: c.Config.Tty, + detachKeys: options.DetachKeys, + } + + if err := streamer.stream(ctx); err != nil { + return err + } + + if errAttach != nil { + return errAttach + } + + return getExitStatus(errC, resultC) +} + +func getExitStatus(errC <-chan error, resultC <-chan container.ContainerWaitOKBody) error { + select { + case result := <-resultC: + if result.Error != nil { + return fmt.Errorf(result.Error.Message) + } + if result.StatusCode != 0 { + return cli.StatusError{StatusCode: int(result.StatusCode)} + } + case err := <-errC: + return err + } + + return nil +} + +func resizeTTY(ctx context.Context, dockerCli command.Cli, containerID string) { + height, width := dockerCli.Out().GetTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + resizeTtyTo(ctx, dockerCli.Client(), containerID, height+1, width+1, false) + + // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back + // to the actual size. 
+ if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } +} diff --git a/cli/cli/command/container/attach_test.go b/cli/cli/command/container/attach_test.go new file mode 100644 index 00000000..7d8d3f6e --- /dev/null +++ b/cli/cli/command/container/attach_test.go @@ -0,0 +1,129 @@ +package container + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestNewAttachCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + containerInspectFunc func(img string) (types.ContainerJSON, error) + }{ + { + name: "client-error", + args: []string{"5cb5bb5e4a3b"}, + expectedError: "something went wrong", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{}, errors.Errorf("something went wrong") + }, + }, + { + name: "client-stopped", + args: []string{"5cb5bb5e4a3b"}, + expectedError: "You cannot attach to a stopped container", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + c := types.ContainerJSON{} + c.ContainerJSONBase = &types.ContainerJSONBase{} + c.ContainerJSONBase.State = &types.ContainerState{Running: false} + return c, nil + }, + }, + { + name: "client-paused", + args: []string{"5cb5bb5e4a3b"}, + expectedError: "You cannot attach to a paused container", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + c := types.ContainerJSON{} + c.ContainerJSONBase = &types.ContainerJSONBase{} + c.ContainerJSONBase.State = &types.ContainerState{ + Running: true, + Paused: true, + } + return c, nil + }, + }, + { + name: "client-restarting", + args: []string{"5cb5bb5e4a3b"}, + expectedError: "You cannot attach to a 
restarting container", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + c := types.ContainerJSON{} + c.ContainerJSONBase = &types.ContainerJSONBase{} + c.ContainerJSONBase.State = &types.ContainerState{ + Running: true, + Paused: false, + Restarting: true, + } + return c, nil + }, + }, + } + for _, tc := range testCases { + cmd := NewAttachCommand(test.NewFakeCli(&fakeClient{inspectFunc: tc.containerInspectFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestGetExitStatus(t *testing.T) { + var ( + expectedErr = fmt.Errorf("unexpected error") + errC = make(chan error, 1) + resultC = make(chan container.ContainerWaitOKBody, 1) + ) + + testcases := []struct { + result *container.ContainerWaitOKBody + err error + expectedError error + }{ + { + result: &container.ContainerWaitOKBody{ + StatusCode: 0, + }, + }, + { + err: expectedErr, + expectedError: expectedErr, + }, + { + result: &container.ContainerWaitOKBody{ + Error: &container.ContainerWaitOKBodyError{Message: expectedErr.Error()}, + }, + expectedError: expectedErr, + }, + { + result: &container.ContainerWaitOKBody{ + StatusCode: 15, + }, + expectedError: cli.StatusError{StatusCode: 15}, + }, + } + + for _, testcase := range testcases { + if testcase.err != nil { + errC <- testcase.err + } + if testcase.result != nil { + resultC <- *testcase.result + } + err := getExitStatus(errC, resultC) + if testcase.expectedError == nil { + assert.NilError(t, err) + } else { + assert.Error(t, err, testcase.expectedError.Error()) + } + } +} diff --git a/cli/cli/command/container/client_test.go b/cli/cli/command/container/client_test.go new file mode 100644 index 00000000..b883ad1d --- /dev/null +++ b/cli/cli/command/container/client_test.go @@ -0,0 +1,145 @@ +package container + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + 
"github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + inspectFunc func(string) (types.ContainerJSON, error) + execInspectFunc func(execID string) (types.ContainerExecInspect, error) + execCreateFunc func(container string, config types.ExecConfig) (types.IDResponse, error) + createContainerFunc func(config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string) (container.ContainerCreateCreatedBody, error) + containerStartFunc func(container string, options types.ContainerStartOptions) error + imageCreateFunc func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + infoFunc func() (types.Info, error) + containerStatPathFunc func(container, path string) (types.ContainerPathStat, error) + containerCopyFromFunc func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + logFunc func(string, types.ContainerLogsOptions) (io.ReadCloser, error) + waitFunc func(string) (<-chan container.ContainerWaitOKBody, <-chan error) + containerListFunc func(types.ContainerListOptions) ([]types.Container, error) + containerExportFunc func(string) (io.ReadCloser, error) + containerExecResizeFunc func(id string, options types.ResizeOptions) error + Version string +} + +func (f *fakeClient) ContainerList(_ context.Context, options types.ContainerListOptions) ([]types.Container, error) { + if f.containerListFunc != nil { + return f.containerListFunc(options) + } + return []types.Container{}, nil +} + +func (f *fakeClient) ContainerInspect(_ context.Context, containerID string) (types.ContainerJSON, error) { + if f.inspectFunc != nil { + return f.inspectFunc(containerID) + } + return types.ContainerJSON{}, nil +} + +func (f *fakeClient) ContainerExecCreate(_ context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + if f.execCreateFunc != nil { + return 
f.execCreateFunc(container, config) + } + return types.IDResponse{}, nil +} + +func (f *fakeClient) ContainerExecInspect(_ context.Context, execID string) (types.ContainerExecInspect, error) { + if f.execInspectFunc != nil { + return f.execInspectFunc(execID) + } + return types.ContainerExecInspect{}, nil +} + +func (f *fakeClient) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + return nil +} + +func (f *fakeClient) ContainerCreate( + _ context.Context, + config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, +) (container.ContainerCreateCreatedBody, error) { + if f.createContainerFunc != nil { + return f.createContainerFunc(config, hostConfig, networkingConfig, containerName) + } + return container.ContainerCreateCreatedBody{}, nil +} + +func (f *fakeClient) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + if f.imageCreateFunc != nil { + return f.imageCreateFunc(parentReference, options) + } + return nil, nil +} + +func (f *fakeClient) Info(_ context.Context) (types.Info, error) { + if f.infoFunc != nil { + return f.infoFunc() + } + return types.Info{}, nil +} + +func (f *fakeClient) ContainerStatPath(_ context.Context, container, path string) (types.ContainerPathStat, error) { + if f.containerStatPathFunc != nil { + return f.containerStatPathFunc(container, path) + } + return types.ContainerPathStat{}, nil +} + +func (f *fakeClient) CopyFromContainer(_ context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + if f.containerCopyFromFunc != nil { + return f.containerCopyFromFunc(container, srcPath) + } + return nil, types.ContainerPathStat{}, nil +} + +func (f *fakeClient) ContainerLogs(_ context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + if f.logFunc != nil { + return f.logFunc(container, 
options) + } + return nil, nil +} + +func (f *fakeClient) ClientVersion() string { + return f.Version +} + +func (f *fakeClient) ContainerWait(_ context.Context, container string, _ container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if f.waitFunc != nil { + return f.waitFunc(container) + } + return nil, nil +} + +func (f *fakeClient) ContainerStart(_ context.Context, container string, options types.ContainerStartOptions) error { + if f.containerStartFunc != nil { + return f.containerStartFunc(container, options) + } + return nil +} + +func (f *fakeClient) ContainerExport(_ context.Context, container string) (io.ReadCloser, error) { + if f.containerExportFunc != nil { + return f.containerExportFunc(container) + } + return nil, nil +} + +func (f *fakeClient) ContainerExecResize(_ context.Context, id string, options types.ResizeOptions) error { + if f.containerExecResizeFunc != nil { + return f.containerExecResizeFunc(id, options) + } + return nil +} diff --git a/cli/cli/command/container/cmd.go b/cli/cli/command/container/cmd.go new file mode 100644 index 00000000..dcf8116e --- /dev/null +++ b/cli/cli/command/container/cmd.go @@ -0,0 +1,45 @@ +package container + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewContainerCommand returns a cobra command for `container` subcommands +func NewContainerCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "container", + Short: "Manage containers", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewAttachCommand(dockerCli), + NewCommitCommand(dockerCli), + NewCopyCommand(dockerCli), + NewCreateCommand(dockerCli), + NewDiffCommand(dockerCli), + NewExecCommand(dockerCli), + NewExportCommand(dockerCli), + NewKillCommand(dockerCli), + NewLogsCommand(dockerCli), + NewPauseCommand(dockerCli), + NewPortCommand(dockerCli), + NewRenameCommand(dockerCli), + 
NewRestartCommand(dockerCli), + NewRmCommand(dockerCli), + NewRunCommand(dockerCli), + NewStartCommand(dockerCli), + NewStatsCommand(dockerCli), + NewStopCommand(dockerCli), + NewTopCommand(dockerCli), + NewUnpauseCommand(dockerCli), + NewUpdateCommand(dockerCli), + NewWaitCommand(dockerCli), + newListCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/container/commit.go b/cli/cli/command/container/commit.go new file mode 100644 index 00000000..0a30f55d --- /dev/null +++ b/cli/cli/command/container/commit.go @@ -0,0 +1,75 @@ +package container + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type commitOptions struct { + container string + reference string + + pause bool + comment string + author string + changes opts.ListOpts +} + +// NewCommitCommand creates a new cobra.Command for `docker commit` +func NewCommitCommand(dockerCli command.Cli) *cobra.Command { + var options commitOptions + + cmd := &cobra.Command{ + Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", + Short: "Create a new image from a container's changes", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.container = args[0] + if len(args) > 1 { + options.reference = args[1] + } + return runCommit(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.BoolVarP(&options.pause, "pause", "p", true, "Pause container during commit") + flags.StringVarP(&options.comment, "message", "m", "", "Commit message") + flags.StringVarP(&options.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") + + options.changes = opts.NewListOpts(nil) + flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image") + + return cmd +} + +func 
runCommit(dockerCli command.Cli, options *commitOptions) error { + ctx := context.Background() + + name := options.container + reference := options.reference + + commitOptions := types.ContainerCommitOptions{ + Reference: reference, + Comment: options.comment, + Author: options.author, + Changes: options.changes.GetAll(), + Pause: options.pause, + } + + response, err := dockerCli.Client().ContainerCommit(ctx, name, commitOptions) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} diff --git a/cli/cli/command/container/cp.go b/cli/cli/command/container/cp.go new file mode 100644 index 00000000..20809bc3 --- /dev/null +++ b/cli/cli/command/container/cp.go @@ -0,0 +1,313 @@ +package container + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type copyOptions struct { + source string + destination string + followLink bool + copyUIDGID bool +} + +type copyDirection int + +const ( + fromContainer copyDirection = 1 << iota + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool + copyUIDGID bool + sourcePath string + destPath string + container string +} + +// NewCopyCommand creates a new `docker cp` command +func NewCopyCommand(dockerCli command.Cli) *cobra.Command { + var opts copyOptions + + cmd := &cobra.Command{ + Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, + Short: "Copy files/folders between a container and the local filesystem", + Long: strings.Join([]string{ + "Copy files/folders between a container and the local filesystem\n", + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a 
container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + if args[0] == "" { + return errors.New("source can not be empty") + } + if args[1] == "" { + return errors.New("destination can not be empty") + } + opts.source = args[0] + opts.destination = args[1] + return runCopy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH") + flags.BoolVarP(&opts.copyUIDGID, "archive", "a", false, "Archive mode (copy all uid/gid information)") + return cmd +} + +func runCopy(dockerCli command.Cli, opts copyOptions) error { + srcContainer, srcPath := splitCpArg(opts.source) + destContainer, destPath := splitCpArg(opts.destination) + + copyConfig := cpConfig{ + followLink: opts.followLink, + copyUIDGID: opts.copyUIDGID, + sourcePath: srcPath, + destPath: destPath, + } + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + copyConfig.container = srcContainer + } + if destContainer != "" { + direction |= toContainer + copyConfig.container = destContainer + } + + ctx := context.Background() + + switch direction { + case fromContainer: + return copyFromContainer(ctx, dockerCli, copyConfig) + case toContainer: + return copyToContainer(ctx, dockerCli, copyConfig) + case acrossContainers: + return errors.New("copying between containers is not supported") + default: + return errors.New("must specify at least one container source") + } +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + return archive.PreserveTrailingDotOrSeparator(absPath, localPath, filepath.Separator), nil +} + +func copyFromContainer(ctx context.Context, dockerCli command.Cli, copyConfig cpConfig) (err error) { + dstPath := copyConfig.destPath + 
srcPath := copyConfig.sourcePath + + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + if err := command.ValidateOutputPath(dstPath); err != nil { + return err + } + + client := dockerCli.Client() + // if client requests to follow symbol link, then must decide target file to be copied + var rebaseName string + if copyConfig.followLink { + srcStat, err := client.ContainerStatPath(ctx, copyConfig.container, srcPath) + + // If the destination is a symbolic link, we should follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := client.CopyFromContainer(ctx, copyConfig.container, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + _, err = io.Copy(dockerCli.Out(), content) + return err + } + + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +// In order to get the copy behavior right, we need to know information +// about both the source and destination. The API is a simple tar +// archive/extract API but we can use the stat info header about the +// destination to be more informed about exactly what the destination is. 
+func copyToContainer(ctx context.Context, dockerCli command.Cli, copyConfig cpConfig) (err error) { + srcPath := copyConfig.sourcePath + dstPath := copyConfig.destPath + + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + client := dockerCli.Client() + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := client.ContainerStatPath(ctx, copyConfig.container, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = client.ContainerStatPath(ctx, copyConfig.container, linkTarget) + } + + // Validate the destination path + if err := command.ValidateOutputPathFileMode(dstStat.Mode); err != nil { + return errors.Wrapf(err, `destination "%s:%s" must be a directory or a regular file`, copyConfig.container, dstPath) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed. If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. 
+ if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return errors.Errorf("destination \"%s:%s\" must be a directory", copyConfig.container, dstPath) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, copyConfig.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + CopyUIDGID: copyConfig.copyUIDGID, + } + return client.CopyToContainer(ctx, copyConfig.container, resolvedDstPath, content, options) +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. 
We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately proceeded by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. 
+ return "", arg + } + + return parts[0], parts[1] +} diff --git a/cli/cli/command/container/cp_test.go b/cli/cli/command/container/cp_test.go new file mode 100644 index 00000000..f08b7ee5 --- /dev/null +++ b/cli/cli/command/container/cp_test.go @@ -0,0 +1,201 @@ +package container + +import ( + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" + "gotest.tools/skip" +) + +func TestRunCopyWithInvalidArguments(t *testing.T) { + var testcases = []struct { + doc string + options copyOptions + expectedErr string + }{ + { + doc: "copy between container", + options: copyOptions{ + source: "first:/path", + destination: "second:/path", + }, + expectedErr: "copying between containers is not supported", + }, + { + doc: "copy without a container", + options: copyOptions{ + source: "./source", + destination: "./dest", + }, + expectedErr: "must specify at least one container source", + }, + } + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + err := runCopy(test.NewFakeCli(nil), testcase.options) + assert.Error(t, err, testcase.expectedErr) + }) + } +} + +func TestRunCopyFromContainerToStdout(t *testing.T) { + tarContent := "the tar content" + + fakeClient := &fakeClient{ + containerCopyFromFunc: func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + assert.Check(t, is.Equal("container", container)) + return ioutil.NopCloser(strings.NewReader(tarContent)), types.ContainerPathStat{}, nil + }, + } + options := copyOptions{source: "container:/path", destination: "-"} + cli := test.NewFakeCli(fakeClient) + err := runCopy(cli, options) + assert.NilError(t, err) + assert.Check(t, is.Equal(tarContent, cli.OutBuffer().String())) + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) +} + +func 
TestRunCopyFromContainerToFilesystem(t *testing.T) { + destDir := fs.NewDir(t, "cp-test", + fs.WithFile("file1", "content\n")) + defer destDir.Remove() + + fakeClient := &fakeClient{ + containerCopyFromFunc: func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + assert.Check(t, is.Equal("container", container)) + readCloser, err := archive.TarWithOptions(destDir.Path(), &archive.TarOptions{}) + return readCloser, types.ContainerPathStat{}, err + }, + } + options := copyOptions{source: "container:/path", destination: destDir.Path()} + cli := test.NewFakeCli(fakeClient) + err := runCopy(cli, options) + assert.NilError(t, err) + assert.Check(t, is.Equal("", cli.OutBuffer().String())) + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) + + content, err := ioutil.ReadFile(destDir.Join("file1")) + assert.NilError(t, err) + assert.Check(t, is.Equal("content\n", string(content))) +} + +func TestRunCopyFromContainerToFilesystemMissingDestinationDirectory(t *testing.T) { + destDir := fs.NewDir(t, "cp-test", + fs.WithFile("file1", "content\n")) + defer destDir.Remove() + + fakeClient := &fakeClient{ + containerCopyFromFunc: func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + assert.Check(t, is.Equal("container", container)) + readCloser, err := archive.TarWithOptions(destDir.Path(), &archive.TarOptions{}) + return readCloser, types.ContainerPathStat{}, err + }, + } + + options := copyOptions{ + source: "container:/path", + destination: destDir.Join("missing", "foo"), + } + cli := test.NewFakeCli(fakeClient) + err := runCopy(cli, options) + assert.ErrorContains(t, err, destDir.Join("missing")) +} + +func TestRunCopyToContainerFromFileWithTrailingSlash(t *testing.T) { + srcFile := fs.NewFile(t, t.Name()) + defer srcFile.Remove() + + options := copyOptions{ + source: srcFile.Path() + string(os.PathSeparator), + destination: "container:/path", + } + cli := test.NewFakeCli(&fakeClient{}) + err := runCopy(cli, 
options) + + expectedError := "not a directory" + if runtime.GOOS == "windows" { + expectedError = "The filename, directory name, or volume label syntax is incorrect" + } + assert.ErrorContains(t, err, expectedError) +} + +func TestRunCopyToContainerSourceDoesNotExist(t *testing.T) { + options := copyOptions{ + source: "/does/not/exist", + destination: "container:/path", + } + cli := test.NewFakeCli(&fakeClient{}) + err := runCopy(cli, options) + expected := "no such file or directory" + if runtime.GOOS == "windows" { + expected = "cannot find the file specified" + } + assert.ErrorContains(t, err, expected) +} + +func TestSplitCpArg(t *testing.T) { + var testcases = []struct { + doc string + path string + os string + expectedContainer string + expectedPath string + }{ + { + doc: "absolute path with colon", + os: "linux", + path: "/abs/path:withcolon", + expectedPath: "/abs/path:withcolon", + }, + { + doc: "relative path with colon", + path: "./relative:path", + expectedPath: "./relative:path", + }, + { + doc: "absolute path with drive", + os: "windows", + path: `d:\abs\path`, + expectedPath: `d:\abs\path`, + }, + { + doc: "no separator", + path: "relative/path", + expectedPath: "relative/path", + }, + { + doc: "with separator", + path: "container:/opt/foo", + expectedPath: "/opt/foo", + expectedContainer: "container", + }, + } + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + skip.If(t, testcase.os != "" && testcase.os != runtime.GOOS) + + container, path := splitCpArg(testcase.path) + assert.Check(t, is.Equal(testcase.expectedContainer, container)) + assert.Check(t, is.Equal(testcase.expectedPath, path)) + }) + } +} + +func TestRunCopyFromContainerToFilesystemIrregularDestination(t *testing.T) { + options := copyOptions{source: "container:/dev/null", destination: "/dev/random"} + cli := test.NewFakeCli(nil) + err := runCopy(cli, options) + assert.Assert(t, err != nil) + expected := `"/dev/random" must be a directory or a regular 
file` + assert.ErrorContains(t, err, expected) +} diff --git a/cli/cli/command/container/create.go b/cli/cli/command/container/create.go new file mode 100644 index 00000000..868e4fb4 --- /dev/null +++ b/cli/cli/command/container/create.go @@ -0,0 +1,277 @@ +package container + +import ( + "context" + "fmt" + "io" + "os" + "regexp" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/opts" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + apiclient "github.com/docker/docker/client" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type createOptions struct { + name string + platform string + untrusted bool +} + +// NewCreateCommand creates a new cobra.Command for `docker create` +func NewCreateCommand(dockerCli command.Cli) *cobra.Command { + var opts createOptions + var copts *containerOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runCreate(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddPlatformFlag(flags, &opts.platform) + command.AddTrustVerificationFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + copts = addFlags(flags) + return cmd +} + +func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, options *createOptions, copts 
*containerOptions) error { + proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(copts.env.GetAll())) + newEnv := []string{} + for k, v := range proxyConfig { + if v == nil { + newEnv = append(newEnv, k) + } else { + newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v)) + } + } + copts.env = *opts.NewListOptsRef(&newEnv, nil) + containerConfig, err := parse(flags, copts, dockerCli.ServerInfo().OSType) + if err != nil { + reportError(dockerCli.Err(), "create", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + if err = validateAPIVersion(containerConfig, dockerCli.Client().ClientVersion()); err != nil { + reportError(dockerCli.Err(), "create", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + response, err := createContainer(context.Background(), dockerCli, containerConfig, options) + if err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} + +func pullImage(ctx context.Context, dockerCli command.Cli, image string, platform string, out io.Writer) error { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageCreateOptions{ + RegistryAuth: encodedAuth, + Platform: platform, + } + + responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream( + responseBody, + out, + dockerCli.Out().FD(), + dockerCli.Out().IsTerminal(), + nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func (cid *cidFile) Close() 
error { + if cid.file == nil { + return nil + } + cid.file.Close() + + if cid.written { + return nil + } + if err := os.Remove(cid.path); err != nil { + return errors.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if cid.file == nil { + return nil + } + if _, err := cid.file.Write([]byte(id)); err != nil { + return errors.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +func newCIDFile(path string) (*cidFile, error) { + if path == "" { + return &cidFile{}, nil + } + if _, err := os.Stat(path); err == nil { + return nil, errors.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, errors.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig *containerConfig, opts *createOptions) (*container.ContainerCreateCreatedBody, error) { + config := containerConfig.Config + hostConfig := containerConfig.HostConfig + networkingConfig := containerConfig.NetworkingConfig + stderr := dockerCli.Err() + + warnOnOomKillDisable(*hostConfig, stderr) + warnOnLocalhostDNS(*hostConfig, stderr) + + var ( + trustedRef reference.Canonical + namedRef reference.Named + ) + + containerIDFile, err := newCIDFile(hostConfig.ContainerIDFile) + if err != nil { + return nil, err + } + defer containerIDFile.Close() + + ref, err := reference.ParseAnyReference(config.Image) + if err != nil { + return nil, err + } + if named, ok := ref.(reference.Named); ok { + namedRef = reference.TagNameOnly(named) + + if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !opts.untrusted { + var err error + trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef, nil) + if err != nil { + return nil, err + } 
+ config.Image = reference.FamiliarString(trustedRef) + } + } + + //create the container + response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, opts.name) + + //if image not found try to pull it + if err != nil { + if apiclient.IsErrNotFound(err) && namedRef != nil { + fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef)) + + // we don't want to write to stdout anything apart from container.ID + if err := pullImage(ctx, dockerCli, config.Image, opts.platform, stderr); err != nil { + return nil, err + } + if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil { + if err := image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef); err != nil { + return nil, err + } + } + // Retry + var retryErr error + response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, opts.name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(stderr, "WARNING: %s\n", warning) + } + err = containerIDFile.Write(response.ID) + return &response, err +} + +func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) { + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintln(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.") + } +} + +// check the DNS settings passed via --dns against localhost regexp to warn if +// they are trying to set a DNS to a localhost address +func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) { + for _, dnsIP := range hostConfig.DNS { + if isLocalhost(dnsIP) { + fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + return + } + } +} + +// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range. 
+const ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` + +var localhostIPRegexp = regexp.MustCompile(ipLocalhost) + +// IsLocalhost returns true if ip matches the localhost IP regular expression. +// Used for determining if nameserver settings are being passed which are +// localhost addresses +func isLocalhost(ip string) bool { + return localhostIPRegexp.MatchString(ip) +} diff --git a/cli/cli/command/container/create_test.go b/cli/cli/command/container/create_test.go new file mode 100644 index 00000000..29912d44 --- /dev/null +++ b/cli/cli/command/container/create_test.go @@ -0,0 +1,280 @@ +package container + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "sort" + "strings" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" + "gotest.tools/golden" +) + +func TestCIDFileNoOPWithNoFilename(t *testing.T) { + file, err := newCIDFile("") + assert.NilError(t, err) + assert.DeepEqual(t, &cidFile{}, file, cmp.AllowUnexported(cidFile{})) + + assert.NilError(t, file.Write("id")) + assert.NilError(t, file.Close()) +} + +func TestNewCIDFileWhenFileAlreadyExists(t *testing.T) { + tempfile := fs.NewFile(t, "test-cid-file") + defer tempfile.Remove() + + _, err := newCIDFile(tempfile.Path()) + assert.ErrorContains(t, err, "Container ID file found") +} + +func TestCIDFileCloseWithNoWrite(t *testing.T) { + tempdir := fs.NewDir(t, "test-cid-file") + defer tempdir.Remove() + + path := tempdir.Join("cidfile") + file, err := newCIDFile(path) + assert.NilError(t, err) + assert.Check(t, is.Equal(file.path, path)) + + assert.NilError(t, file.Close()) + _, err = os.Stat(path) + 
assert.Check(t, os.IsNotExist(err)) +} + +func TestCIDFileCloseWithWrite(t *testing.T) { + tempdir := fs.NewDir(t, "test-cid-file") + defer tempdir.Remove() + + path := tempdir.Join("cidfile") + file, err := newCIDFile(path) + assert.NilError(t, err) + + content := "id" + assert.NilError(t, file.Write(content)) + + actual, err := ioutil.ReadFile(path) + assert.NilError(t, err) + assert.Check(t, is.Equal(content, string(actual))) + + assert.NilError(t, file.Close()) + _, err = os.Stat(path) + assert.NilError(t, err) +} + +func TestCreateContainerPullsImageIfMissing(t *testing.T) { + imageName := "does-not-exist-locally" + responseCounter := 0 + containerID := "abcdef" + + client := &fakeClient{ + createContainerFunc: func( + config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, + ) (container.ContainerCreateCreatedBody, error) { + defer func() { responseCounter++ }() + switch responseCounter { + case 0: + return container.ContainerCreateCreatedBody{}, fakeNotFound{} + case 1: + return container.ContainerCreateCreatedBody{ID: containerID}, nil + default: + return container.ContainerCreateCreatedBody{}, errors.New("unexpected") + } + }, + imageCreateFunc: func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{IndexServerAddress: "http://indexserver"}, nil + }, + } + cli := test.NewFakeCli(client) + config := &containerConfig{ + Config: &container.Config{ + Image: imageName, + }, + HostConfig: &container.HostConfig{}, + } + body, err := createContainer(context.Background(), cli, config, &createOptions{ + name: "name", + platform: runtime.GOOS, + untrusted: true, + }) + assert.NilError(t, err) + expected := container.ContainerCreateCreatedBody{ID: containerID} + assert.Check(t, is.DeepEqual(expected, *body)) + stderr := 
cli.ErrBuffer().String() + assert.Check(t, is.Contains(stderr, "Unable to find image 'does-not-exist-locally:latest' locally")) +} + +func TestNewCreateCommandWithContentTrustErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + notaryFunc test.NotaryClientFuncType + }{ + { + name: "offline-notary-server", + notaryFunc: notary.GetOfflineNotaryRepository, + expectedError: "client is offline", + args: []string{"image:tag"}, + }, + { + name: "uninitialized-notary-server", + notaryFunc: notary.GetUninitializedNotaryRepository, + expectedError: "remote trust data does not exist", + args: []string{"image:tag"}, + }, + { + name: "empty-notary-server", + notaryFunc: notary.GetEmptyTargetsNotaryRepository, + expectedError: "No valid trust data for tag", + args: []string{"image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + createContainerFunc: func(config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, + ) (container.ContainerCreateCreatedBody, error) { + return container.ContainerCreateCreatedBody{}, fmt.Errorf("shouldn't try to pull image") + }, + }, test.EnableContentTrust) + cli.SetNotaryClient(tc.notaryFunc) + cmd := NewCreateCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.ErrorContains(t, err, tc.expectedError) + } +} + +func TestNewCreateCommandWithWarnings(t *testing.T) { + testCases := []struct { + name string + args []string + warning bool + }{ + { + name: "container-create-without-oom-kill-disable", + args: []string{"image:tag"}, + }, + { + name: "container-create-oom-kill-disable-false", + args: []string{"--oom-kill-disable=false", "image:tag"}, + }, + { + name: "container-create-oom-kill-without-memory-limit", + args: []string{"--oom-kill-disable", "image:tag"}, + warning: true, + }, + { + name: 
"container-create-oom-kill-true-without-memory-limit", + args: []string{"--oom-kill-disable=true", "image:tag"}, + warning: true, + }, + { + name: "container-create-oom-kill-true-with-memory-limit", + args: []string{"--oom-kill-disable=true", "--memory=100M", "image:tag"}, + }, + { + name: "container-create-localhost-dns", + args: []string{"--dns=127.0.0.11", "image:tag"}, + warning: true, + }, + { + name: "container-create-localhost-dns-ipv6", + args: []string{"--dns=::1", "image:tag"}, + warning: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + createContainerFunc: func(config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, + ) (container.ContainerCreateCreatedBody, error) { + return container.ContainerCreateCreatedBody{}, nil + }, + }) + cmd := NewCreateCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + if tc.warning { + golden.Assert(t, cli.ErrBuffer().String(), tc.name+".golden") + } else { + assert.Equal(t, cli.ErrBuffer().String(), "") + } + }) + } +} + +func TestCreateContainerWithProxyConfig(t *testing.T) { + expected := []string{ + "HTTP_PROXY=httpProxy", + "http_proxy=httpProxy", + "HTTPS_PROXY=httpsProxy", + "https_proxy=httpsProxy", + "NO_PROXY=noProxy", + "no_proxy=noProxy", + "FTP_PROXY=ftpProxy", + "ftp_proxy=ftpProxy", + } + sort.Strings(expected) + + cli := test.NewFakeCli(&fakeClient{ + createContainerFunc: func(config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, + ) (container.ContainerCreateCreatedBody, error) { + sort.Strings(config.Env) + assert.DeepEqual(t, config.Env, expected) + return container.ContainerCreateCreatedBody{}, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + Proxies: map[string]configfile.ProxyConfig{ + "default": { + 
HTTPProxy: "httpProxy", + HTTPSProxy: "httpsProxy", + NoProxy: "noProxy", + FTPProxy: "ftpProxy", + }, + }, + }) + cmd := NewCreateCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"image:tag"}) + err := cmd.Execute() + assert.NilError(t, err) +} + +type fakeNotFound struct{} + +func (f fakeNotFound) NotFound() bool { return true } +func (f fakeNotFound) Error() string { return "error fake not found" } diff --git a/cli/cli/command/container/diff.go b/cli/cli/command/container/diff.go new file mode 100644 index 00000000..1cfb63c6 --- /dev/null +++ b/cli/cli/command/container/diff.go @@ -0,0 +1,47 @@ +package container + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type diffOptions struct { + container string +} + +// NewDiffCommand creates a new cobra.Command for `docker diff` +func NewDiffCommand(dockerCli command.Cli) *cobra.Command { + var opts diffOptions + + return &cobra.Command{ + Use: "diff CONTAINER", + Short: "Inspect changes to files or directories on a container's filesystem", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runDiff(dockerCli, &opts) + }, + } +} + +func runDiff(dockerCli command.Cli, opts *diffOptions) error { + if opts.container == "" { + return errors.New("Container name cannot be empty") + } + ctx := context.Background() + + changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) + if err != nil { + return err + } + diffCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewDiffFormat("{{.Type}} {{.Path}}"), + } + return DiffFormatWrite(diffCtx, changes) +} diff --git a/cli/cli/command/container/exec.go b/cli/cli/command/container/exec.go new file mode 100644 index 00000000..c96f4055 --- /dev/null +++ b/cli/cli/command/container/exec.go @@ -0,0 +1,214 @@ +package container 
+ +import ( + "context" + "fmt" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +type execOptions struct { + detachKeys string + interactive bool + tty bool + detach bool + user string + privileged bool + env opts.ListOpts + workdir string + container string + command []string +} + +func newExecOptions() execOptions { + return execOptions{env: opts.NewListOpts(opts.ValidateEnv)} +} + +// NewExecCommand creates a new cobra.Command for `docker exec` +func NewExecCommand(dockerCli command.Cli) *cobra.Command { + options := newExecOptions() + + cmd := &cobra.Command{ + Use: "exec [OPTIONS] CONTAINER COMMAND [ARG...]", + Short: "Run a command in a running container", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.container = args[0] + options.command = args[1:] + return runExec(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVarP(&options.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container") + flags.BoolVarP(&options.interactive, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.BoolVarP(&options.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.BoolVarP(&options.detach, "detach", "d", false, "Detached mode: run command in the background") + flags.StringVarP(&options.user, "user", "u", "", "Username or UID (format: [:])") + flags.BoolVarP(&options.privileged, "privileged", "", false, "Give extended privileges to the command") + flags.VarP(&options.env, "env", "e", "Set environment variables") + flags.SetAnnotation("env", "version", []string{"1.25"}) + flags.StringVarP(&options.workdir, "workdir", "w", "", "Working directory 
inside the container") + flags.SetAnnotation("workdir", "version", []string{"1.35"}) + + return cmd +} + +func runExec(dockerCli command.Cli, options execOptions) error { + execConfig := parseExec(options, dockerCli.ConfigFile()) + ctx := context.Background() + client := dockerCli.Client() + + // We need to check the tty _before_ we do the ContainerExecCreate, because + // otherwise if we error out we will leak execIDs on the server (and + // there's no easy way to clean those up). But also in order to make "not + // exist" errors take precedence we do a dummy inspect first. + if _, err := client.ContainerInspect(ctx, options.container); err != nil { + return err + } + if !execConfig.Detach { + if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } + + response, err := client.ContainerExecCreate(ctx, options.container, *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + return errors.New("exec ID empty") + } + + if execConfig.Detach { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + return client.ContainerExecStart(ctx, execID, execStartCheck) + } + return interactiveExec(ctx, dockerCli, execConfig, execID) +} + +func interactiveExec(ctx context.Context, dockerCli command.Cli, execConfig *types.ExecConfig, execID string) error { + // Interactive exec requested. 
+ var ( + out, stderr io.Writer + in io.ReadCloser + ) + + if execConfig.AttachStdin { + in = dockerCli.In() + } + if execConfig.AttachStdout { + out = dockerCli.Out() + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = dockerCli.Out() + } else { + stderr = dockerCli.Err() + } + } + + client := dockerCli.Client() + execStartCheck := types.ExecStartCheck{ + Tty: execConfig.Tty, + } + resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck) + if err != nil { + return err + } + defer resp.Close() + + errCh := make(chan error, 1) + + go func() { + defer close(errCh) + errCh <- func() error { + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: out, + errorStream: stderr, + resp: resp, + tty: execConfig.Tty, + detachKeys: execConfig.DetachKeys, + } + + return streamer.stream(ctx) + }() + }() + + if execConfig.Tty && dockerCli.In().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil { + fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + return getExecExitStatus(ctx, client, execID) +} + +func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error { + resp, err := client.ContainerExecInspect(ctx, execID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !apiclient.IsErrConnectionFailed(err) { + return err + } + return cli.StatusError{StatusCode: -1} + } + status := resp.ExitCode + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +// parseExec parses the specified args for the specified command and generates +// an ExecConfig from it. 
+func parseExec(opts execOptions, configFile *configfile.ConfigFile) *types.ExecConfig { + execConfig := &types.ExecConfig{ + User: opts.user, + Privileged: opts.privileged, + Tty: opts.tty, + Cmd: opts.command, + Detach: opts.detach, + Env: opts.env.GetAll(), + WorkingDir: opts.workdir, + } + + // If -d is not set, attach to everything by default + if !opts.detach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if opts.interactive { + execConfig.AttachStdin = true + } + } + + if opts.detachKeys != "" { + execConfig.DetachKeys = opts.detachKeys + } else { + execConfig.DetachKeys = configFile.DetachKeys + } + return execConfig +} diff --git a/cli/cli/command/container/exec_test.go b/cli/cli/command/container/exec_test.go new file mode 100644 index 00000000..0c6e2614 --- /dev/null +++ b/cli/cli/command/container/exec_test.go @@ -0,0 +1,227 @@ +package container + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func withDefaultOpts(options execOptions) execOptions { + options.env = opts.NewListOpts(opts.ValidateEnv) + if len(options.command) == 0 { + options.command = []string{"command"} + } + return options +} + +func TestParseExec(t *testing.T) { + testcases := []struct { + options execOptions + configFile configfile.ConfigFile + expected types.ExecConfig + }{ + { + expected: types.ExecConfig{ + Cmd: []string{"command"}, + AttachStdout: true, + AttachStderr: true, + }, + options: withDefaultOpts(execOptions{}), + }, + { + expected: types.ExecConfig{ + Cmd: []string{"command1", "command2"}, + AttachStdout: true, + AttachStderr: true, + }, + options: withDefaultOpts(execOptions{ + command: []string{"command1", "command2"}, + }), + }, + { + options: 
withDefaultOpts(execOptions{ + interactive: true, + tty: true, + user: "uid", + }), + expected: types.ExecConfig{ + User: "uid", + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + Tty: true, + Cmd: []string{"command"}, + }, + }, + { + options: withDefaultOpts(execOptions{detach: true}), + expected: types.ExecConfig{ + Detach: true, + Cmd: []string{"command"}, + }, + }, + { + options: withDefaultOpts(execOptions{ + tty: true, + interactive: true, + detach: true, + }), + expected: types.ExecConfig{ + Detach: true, + Tty: true, + Cmd: []string{"command"}, + }, + }, + { + options: withDefaultOpts(execOptions{detach: true}), + configFile: configfile.ConfigFile{DetachKeys: "de"}, + expected: types.ExecConfig{ + Cmd: []string{"command"}, + DetachKeys: "de", + Detach: true, + }, + }, + { + options: withDefaultOpts(execOptions{ + detach: true, + detachKeys: "ab", + }), + configFile: configfile.ConfigFile{DetachKeys: "de"}, + expected: types.ExecConfig{ + Cmd: []string{"command"}, + DetachKeys: "ab", + Detach: true, + }, + }, + } + + for _, testcase := range testcases { + execConfig := parseExec(testcase.options, &testcase.configFile) + assert.Check(t, is.DeepEqual(testcase.expected, *execConfig)) + } +} + +func TestRunExec(t *testing.T) { + var testcases = []struct { + doc string + options execOptions + client fakeClient + expectedError string + expectedOut string + expectedErr string + }{ + { + doc: "successful detach", + options: withDefaultOpts(execOptions{ + container: "thecontainer", + detach: true, + }), + client: fakeClient{execCreateFunc: execCreateWithID}, + }, + { + doc: "inspect error", + options: newExecOptions(), + client: fakeClient{ + inspectFunc: func(string) (types.ContainerJSON, error) { + return types.ContainerJSON{}, errors.New("failed inspect") + }, + }, + expectedError: "failed inspect", + }, + { + doc: "missing exec ID", + options: newExecOptions(), + expectedError: "exec ID empty", + }, + } + + for _, testcase := range testcases { + 
t.Run(testcase.doc, func(t *testing.T) { + cli := test.NewFakeCli(&testcase.client) + + err := runExec(cli, testcase.options) + if testcase.expectedError != "" { + assert.ErrorContains(t, err, testcase.expectedError) + } else { + if !assert.Check(t, err) { + return + } + } + assert.Check(t, is.Equal(testcase.expectedOut, cli.OutBuffer().String())) + assert.Check(t, is.Equal(testcase.expectedErr, cli.ErrBuffer().String())) + }) + } +} + +func execCreateWithID(_ string, _ types.ExecConfig) (types.IDResponse, error) { + return types.IDResponse{ID: "execid"}, nil +} + +func TestGetExecExitStatus(t *testing.T) { + execID := "the exec id" + expecatedErr := errors.New("unexpected error") + + testcases := []struct { + inspectError error + exitCode int + expectedError error + }{ + { + inspectError: nil, + exitCode: 0, + }, + { + inspectError: expecatedErr, + expectedError: expecatedErr, + }, + { + exitCode: 15, + expectedError: cli.StatusError{StatusCode: 15}, + }, + } + + for _, testcase := range testcases { + client := &fakeClient{ + execInspectFunc: func(id string) (types.ContainerExecInspect, error) { + assert.Check(t, is.Equal(execID, id)) + return types.ContainerExecInspect{ExitCode: testcase.exitCode}, testcase.inspectError + }, + } + err := getExecExitStatus(context.Background(), client, execID) + assert.Check(t, is.Equal(testcase.expectedError, err)) + } +} + +func TestNewExecCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + containerInspectFunc func(img string) (types.ContainerJSON, error) + }{ + { + name: "client-error", + args: []string{"5cb5bb5e4a3b", "-t", "-i", "bash"}, + expectedError: "something went wrong", + containerInspectFunc: func(containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{inspectFunc: tc.containerInspectFunc}) + cmd := 
NewExecCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} diff --git a/cli/cli/command/container/export.go b/cli/cli/command/container/export.go new file mode 100644 index 00000000..ee77cdb5 --- /dev/null +++ b/cli/cli/command/container/export.go @@ -0,0 +1,62 @@ +package container + +import ( + "context" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type exportOptions struct { + container string + output string +} + +// NewExportCommand creates a new `docker export` command +func NewExportCommand(dockerCli command.Cli) *cobra.Command { + var opts exportOptions + + cmd := &cobra.Command{ + Use: "export [OPTIONS] CONTAINER", + Short: "Export a container's filesystem as a tar archive", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runExport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runExport(dockerCli command.Cli, opts exportOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("cowardly refusing to save to a terminal. 
Use the -o flag or redirect") + } + + if err := command.ValidateOutputPath(opts.output); err != nil { + return errors.Wrap(err, "failed to export container") + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ContainerExport(context.Background(), opts.container) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/cli/cli/command/container/export_test.go b/cli/cli/command/container/export_test.go new file mode 100644 index 00000000..340d55e1 --- /dev/null +++ b/cli/cli/command/container/export_test.go @@ -0,0 +1,49 @@ +package container + +import ( + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + "gotest.tools/fs" +) + +func TestContainerExportOutputToFile(t *testing.T) { + dir := fs.NewDir(t, "export-test") + defer dir.Remove() + + cli := test.NewFakeCli(&fakeClient{ + containerExportFunc: func(container string) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("bar")), nil + }, + }) + cmd := NewExportCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"-o", dir.Join("foo"), "container"}) + assert.NilError(t, cmd.Execute()) + + expected := fs.Expected(t, + fs.WithFile("foo", "bar", fs.MatchAnyFileMode), + ) + + assert.Assert(t, fs.Equal(dir.Path(), expected)) +} + +func TestContainerExportOutputToIrregularFile(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerExportFunc: func(container string) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("foo")), nil + }, + }) + cmd := NewExportCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"-o", "/dev/random", "container"}) + + err := cmd.Execute() + assert.Assert(t, err != nil) + expected := `"/dev/random" must be a directory or a regular file` + assert.ErrorContains(t, err, 
expected) +} diff --git a/cli/cli/command/container/formatter_diff.go b/cli/cli/command/container/formatter_diff.go new file mode 100644 index 00000000..cf6a0b0b --- /dev/null +++ b/cli/cli/command/container/formatter_diff.go @@ -0,0 +1,73 @@ +package container + +import ( + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/archive" +) + +const ( + defaultDiffTableFormat = "table {{.Type}}\t{{.Path}}" + + changeTypeHeader = "CHANGE TYPE" + pathHeader = "PATH" +) + +// NewDiffFormat returns a format for use with a diff Context +func NewDiffFormat(source string) formatter.Format { + switch source { + case formatter.TableFormatKey: + return defaultDiffTableFormat + } + return formatter.Format(source) +} + +// DiffFormatWrite writes formatted diff using the Context +func DiffFormatWrite(ctx formatter.Context, changes []container.ContainerChangeResponseItem) error { + + render := func(format func(subContext formatter.SubContext) error) error { + for _, change := range changes { + if err := format(&diffContext{c: change}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newDiffContext(), render) +} + +type diffContext struct { + formatter.HeaderContext + c container.ContainerChangeResponseItem +} + +func newDiffContext() *diffContext { + diffCtx := diffContext{} + diffCtx.Header = formatter.SubHeaderContext{ + "Type": changeTypeHeader, + "Path": pathHeader, + } + return &diffCtx +} + +func (d *diffContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(d) +} + +func (d *diffContext) Type() string { + var kind string + switch d.c.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + return kind + +} + +func (d *diffContext) Path() string { + return d.c.Path +} diff --git a/cli/cli/command/container/formatter_diff_test.go b/cli/cli/command/container/formatter_diff_test.go new 
file mode 100644 index 00000000..e29e8ead --- /dev/null +++ b/cli/cli/command/container/formatter_diff_test.go @@ -0,0 +1,61 @@ +package container + +import ( + "bytes" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/archive" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDiffContextFormatWrite(t *testing.T) { + // Check default output format (verbose and non-verbose mode) for table headers + cases := []struct { + context formatter.Context + expected string + }{ + { + formatter.Context{Format: NewDiffFormat("table")}, + `CHANGE TYPE PATH +C /var/log/app.log +A /usr/app/app.js +D /usr/app/old_app.js +`, + }, + { + formatter.Context{Format: NewDiffFormat("table {{.Path}}")}, + `PATH +/var/log/app.log +/usr/app/app.js +/usr/app/old_app.js +`, + }, + { + formatter.Context{Format: NewDiffFormat("{{.Type}}: {{.Path}}")}, + `C: /var/log/app.log +A: /usr/app/app.js +D: /usr/app/old_app.js +`, + }, + } + + diffs := []container.ContainerChangeResponseItem{ + {Kind: archive.ChangeModify, Path: "/var/log/app.log"}, + {Kind: archive.ChangeAdd, Path: "/usr/app/app.js"}, + {Kind: archive.ChangeDelete, Path: "/usr/app/old_app.js"}, + } + + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + err := DiffFormatWrite(testcase.context, diffs) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} diff --git a/cli/cli/command/container/formatter_stats.go b/cli/cli/command/container/formatter_stats.go new file mode 100644 index 00000000..565de447 --- /dev/null +++ b/cli/cli/command/container/formatter_stats.go @@ -0,0 +1,225 @@ +package container + +import ( + "fmt" + "sync" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" +) + +const ( + winOSType = 
"windows" + defaultStatsTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" + winDefaultStatsTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" + + containerHeader = "CONTAINER" + cpuPercHeader = "CPU %" + netIOHeader = "NET I/O" + blockIOHeader = "BLOCK I/O" + memPercHeader = "MEM %" // Used only on Linux + winMemUseHeader = "PRIV WORKING SET" // Used only on Windows + memUseHeader = "MEM USAGE / LIMIT" // Used only on Linux + pidsHeader = "PIDS" // Used only on Linux +) + +// StatsEntry represents represents the statistics data collected from a container +type StatsEntry struct { + Container string + Name string + ID string + CPUPercentage float64 + Memory float64 // On Windows this is the private working set + MemoryLimit float64 // Not used on Windows + MemoryPercentage float64 // Not used on Windows + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + PidsCurrent uint64 // Not used on Windows + IsInvalid bool +} + +// Stats represents an entity to store containers statistics synchronously +type Stats struct { + mutex sync.Mutex + StatsEntry + err error +} + +// GetError returns the container statistics error. +// This is used to determine whether the statistics are valid or not +func (cs *Stats) GetError() error { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.err +} + +// SetErrorAndReset zeroes all the container statistics and store the error. 
+// It is used when receiving time out error during statistics collecting to reduce lock overhead +func (cs *Stats) SetErrorAndReset(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.CPUPercentage = 0 + cs.Memory = 0 + cs.MemoryPercentage = 0 + cs.MemoryLimit = 0 + cs.NetworkRx = 0 + cs.NetworkTx = 0 + cs.BlockRead = 0 + cs.BlockWrite = 0 + cs.PidsCurrent = 0 + cs.err = err + cs.IsInvalid = true +} + +// SetError sets container statistics error +func (cs *Stats) SetError(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.err = err + if err != nil { + cs.IsInvalid = true + } +} + +// SetStatistics set the container statistics +func (cs *Stats) SetStatistics(s StatsEntry) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + s.Container = cs.Container + cs.StatsEntry = s +} + +// GetStatistics returns container statistics with other meta data such as the container name +func (cs *Stats) GetStatistics() StatsEntry { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.StatsEntry +} + +// NewStatsFormat returns a format for rendering an CStatsContext +func NewStatsFormat(source, osType string) formatter.Format { + if source == formatter.TableFormatKey { + if osType == winOSType { + return formatter.Format(winDefaultStatsTableFormat) + } + return formatter.Format(defaultStatsTableFormat) + } + return formatter.Format(source) +} + +// NewStats returns a new Stats entity and sets in it the given name +func NewStats(container string) *Stats { + return &Stats{StatsEntry: StatsEntry{Container: container}} +} + +// statsFormatWrite renders the context for a list of containers statistics +func statsFormatWrite(ctx formatter.Context, Stats []StatsEntry, osType string, trunc bool) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, cstats := range Stats { + statsCtx := &statsContext{ + s: cstats, + os: osType, + trunc: trunc, + } + if err := format(statsCtx); err != nil { + return err + } + } + return nil + } + 
memUsage := memUseHeader + if osType == winOSType { + memUsage = winMemUseHeader + } + statsCtx := statsContext{} + statsCtx.Header = formatter.SubHeaderContext{ + "Container": containerHeader, + "Name": formatter.NameHeader, + "ID": formatter.ContainerIDHeader, + "CPUPerc": cpuPercHeader, + "MemUsage": memUsage, + "MemPerc": memPercHeader, + "NetIO": netIOHeader, + "BlockIO": blockIOHeader, + "PIDs": pidsHeader, + } + statsCtx.os = osType + return ctx.Write(&statsCtx, render) +} + +type statsContext struct { + formatter.HeaderContext + s StatsEntry + os string + trunc bool +} + +func (c *statsContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *statsContext) Container() string { + return c.s.Container +} + +func (c *statsContext) Name() string { + if len(c.s.Name) > 1 { + return c.s.Name[1:] + } + return "--" +} + +func (c *statsContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.s.ID) + } + return c.s.ID +} + +func (c *statsContext) CPUPerc() string { + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.CPUPercentage) +} + +func (c *statsContext) MemUsage() string { + if c.s.IsInvalid { + return fmt.Sprintf("-- / --") + } + if c.os == winOSType { + return units.BytesSize(c.s.Memory) + } + return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit)) +} + +func (c *statsContext) MemPerc() string { + if c.s.IsInvalid || c.os == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage) +} + +func (c *statsContext) NetIO() string { + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3)) +} + +func (c *statsContext) BlockIO() string { + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), 
units.HumanSizeWithPrecision(c.s.BlockWrite, 3)) +} + +func (c *statsContext) PIDs() string { + if c.s.IsInvalid || c.os == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%d", c.s.PidsCurrent) +} diff --git a/cli/cli/command/container/formatter_stats_test.go b/cli/cli/command/container/formatter_stats_test.go new file mode 100644 index 00000000..68e76625 --- /dev/null +++ b/cli/cli/command/container/formatter_stats_test.go @@ -0,0 +1,302 @@ +package container + +import ( + "bytes" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestContainerStatsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + + var ctx statsContext + tt := []struct { + stats StatsEntry + osType string + expValue string + expHeader string + call func() string + }{ + {StatsEntry{Container: containerID}, "", containerID, containerHeader, ctx.Container}, + {StatsEntry{CPUPercentage: 5.5}, "", "5.50%", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{CPUPercentage: 5.5, IsInvalid: true}, "", "--", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3}, "", "0.31B / 12.3B", netIOHeader, ctx.NetIO}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3, IsInvalid: true}, "", "--", netIOHeader, ctx.NetIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3}, "", "0.1B / 2.3B", blockIOHeader, ctx.BlockIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3, IsInvalid: true}, "", "--", blockIOHeader, ctx.BlockIO}, + {StatsEntry{MemoryPercentage: 10.2}, "", "10.20%", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2, IsInvalid: true}, "", "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2}, "windows", "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{Memory: 24, MemoryLimit: 30}, "", "24B / 30B", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30, IsInvalid: true}, "", "-- / --", memUseHeader, 
ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30}, "windows", "24B", winMemUseHeader, ctx.MemUsage}, + {StatsEntry{PidsCurrent: 10}, "", "10", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10, IsInvalid: true}, "", "--", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10}, "windows", "--", pidsHeader, ctx.PIDs}, + } + + for _, te := range tt { + ctx = statsContext{s: te.stats, os: te.osType} + if v := te.call(); v != te.expValue { + t.Fatalf("Expected %q, got %q", te.expValue, v) + } + } +} + +func TestContainerStatsContextWrite(t *testing.T) { + tt := []struct { + context formatter.Context + expected string + }{ + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + { + formatter.Context{Format: "table {{.MemUsage}}"}, + `MEM USAGE / LIMIT +20B / 20B +-- / -- +`, + }, + { + formatter.Context{Format: "{{.Container}} {{.ID}} {{.Name}}"}, + `container1 abcdef foo +container2 -- +`, + }, + { + formatter.Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + ID: "abcdef", + Name: "/foo", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + }, + } + var out bytes.Buffer + te.context.Output = &out + err := statsFormatWrite(te.context, stats, "linux", false) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Check(t, is.Equal(te.expected, 
out.String())) + } + } +} + +func TestContainerStatsContextWriteWindows(t *testing.T) { + tt := []struct { + context formatter.Context + expected string + }{ + { + formatter.Context{Format: "table {{.MemUsage}}"}, + `PRIV WORKING SET +20B +-- / -- +`, + }, + { + formatter.Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + { + formatter.Context{Format: "{{.Container}} {{.MemPerc}} {{.PIDs}}"}, + `container1 -- -- +container2 -- -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + }, + } + var out bytes.Buffer + te.context.Output = &out + err := statsFormatWrite(te.context, stats, "windows", false) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Check(t, is.Equal(te.expected, out.String())) + } + } +} + +func TestContainerStatsContextWriteWithNoStats(t *testing.T) { + var out bytes.Buffer + + contexts := []struct { + context formatter.Context + expected string + }{ + { + formatter.Context{ + Format: "{{.Container}}", + Output: &out, + }, + "", + }, + { + formatter.Context{ + Format: "table {{.Container}}", + Output: &out, + }, + "CONTAINER\n", + }, + { + formatter.Context{ + Format: "table {{.Container}}\t{{.CPUPerc}}", + Output: &out, + }, + "CONTAINER CPU %\n", + }, + } + + for _, context := range contexts { + statsFormatWrite(context.context, []StatsEntry{}, "linux", false) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} + +func TestContainerStatsContextWriteWithNoStatsWindows(t *testing.T) { + var 
out bytes.Buffer + + contexts := []struct { + context formatter.Context + expected string + }{ + { + formatter.Context{ + Format: "{{.Container}}", + Output: &out, + }, + "", + }, + { + formatter.Context{ + Format: "table {{.Container}}\t{{.MemUsage}}", + Output: &out, + }, + "CONTAINER PRIV WORKING SET\n", + }, + { + formatter.Context{ + Format: "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", + Output: &out, + }, + "CONTAINER CPU % PRIV WORKING SET\n", + }, + } + + for _, context := range contexts { + statsFormatWrite(context.context, []StatsEntry{}, "windows", false) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} + +func TestContainerStatsContextWriteTrunc(t *testing.T) { + var out bytes.Buffer + + contexts := []struct { + context formatter.Context + trunc bool + expected string + }{ + { + formatter.Context{ + Format: "{{.ID}}", + Output: &out, + }, + false, + "b95a83497c9161c9b444e3d70e1a9dfba0c1840d41720e146a95a08ebf938afc\n", + }, + { + formatter.Context{ + Format: "{{.ID}}", + Output: &out, + }, + true, + "b95a83497c91\n", + }, + } + + for _, context := range contexts { + statsFormatWrite(context.context, []StatsEntry{{ID: "b95a83497c9161c9b444e3d70e1a9dfba0c1840d41720e146a95a08ebf938afc"}}, "linux", context.trunc) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} diff --git a/cli/cli/command/container/hijack.go b/cli/cli/command/container/hijack.go new file mode 100644 index 00000000..78fbebe0 --- /dev/null +++ b/cli/cli/command/container/hijack.go @@ -0,0 +1,208 @@ +package container + +import ( + "context" + "fmt" + "io" + "runtime" + "sync" + + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" + "github.com/sirupsen/logrus" +) + +// The default escape key sequence: ctrl-p, ctrl-q +// TODO: This could be moved 
to `pkg/term`. +var defaultEscapeKeys = []byte{16, 17} + +// A hijackedIOStreamer handles copying input to and output from streams to the +// connection. +type hijackedIOStreamer struct { + streams command.Streams + inputStream io.ReadCloser + outputStream io.Writer + errorStream io.Writer + + resp types.HijackedResponse + + tty bool + detachKeys string +} + +// stream handles setting up the IO and then begins streaming stdin/stdout +// to/from the hijacked connection, blocking until it is either done reading +// output, the user inputs the detach key sequence when in TTY mode, or when +// the given context is cancelled. +func (h *hijackedIOStreamer) stream(ctx context.Context) error { + restoreInput, err := h.setupInput() + if err != nil { + return fmt.Errorf("unable to setup input stream: %s", err) + } + + defer restoreInput() + + outputDone := h.beginOutputStream(restoreInput) + inputDone, detached := h.beginInputStream(restoreInput) + + select { + case err := <-outputDone: + return err + case <-inputDone: + // Input stream has closed. + if h.outputStream != nil || h.errorStream != nil { + // Wait for output to complete streaming. + select { + case err := <-outputDone: + return err + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + case err := <-detached: + // Got a detach key sequence. + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +func (h *hijackedIOStreamer) setupInput() (restore func(), err error) { + if h.inputStream == nil || !h.tty { + // No need to setup input TTY. + // The restore func is a nop. + return func() {}, nil + } + + if err := setRawTerminal(h.streams); err != nil { + return nil, fmt.Errorf("unable to set IO streams as raw terminal: %s", err) + } + + // Use sync.Once so we may call restore multiple times but ensure we + // only restore the terminal once. 
+ var restoreOnce sync.Once + restore = func() { + restoreOnce.Do(func() { + restoreTerminal(h.streams, h.inputStream) + }) + } + + // Wrap the input to detect detach escape sequence. + // Use default escape keys if an invalid sequence is given. + escapeKeys := defaultEscapeKeys + if h.detachKeys != "" { + customEscapeKeys, err := term.ToBytes(h.detachKeys) + if err != nil { + logrus.Warnf("invalid detach escape keys, using default: %s", err) + } else { + escapeKeys = customEscapeKeys + } + } + + h.inputStream = ioutils.NewReadCloserWrapper(term.NewEscapeProxy(h.inputStream, escapeKeys), h.inputStream.Close) + + return restore, nil +} + +func (h *hijackedIOStreamer) beginOutputStream(restoreInput func()) <-chan error { + if h.outputStream == nil && h.errorStream == nil { + // There is no need to copy output. + return nil + } + + outputDone := make(chan error) + go func() { + var err error + + // When TTY is ON, use regular copy + if h.outputStream != nil && h.tty { + _, err = io.Copy(h.outputStream, h.resp.Reader) + // We should restore the terminal as soon as possible + // once the connection ends so any following print + // messages will be in normal type. + restoreInput() + } else { + _, err = stdcopy.StdCopy(h.outputStream, h.errorStream, h.resp.Reader) + } + + logrus.Debug("[hijack] End of stdout") + + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + } + + outputDone <- err + }() + + return outputDone +} + +func (h *hijackedIOStreamer) beginInputStream(restoreInput func()) (doneC <-chan struct{}, detachedC <-chan error) { + inputDone := make(chan struct{}) + detached := make(chan error) + + go func() { + if h.inputStream != nil { + _, err := io.Copy(h.resp.Conn, h.inputStream) + // We should restore the terminal as soon as possible + // once the connection ends so any following print + // messages will be in normal type. 
+ restoreInput() + + logrus.Debug("[hijack] End of stdin") + + if _, ok := err.(term.EscapeError); ok { + detached <- err + return + } + + if err != nil { + // This error will also occur on the receive + // side (from stdout) where it will be + // propagated back to the caller. + logrus.Debugf("Error sendStdin: %s", err) + } + } + + if err := h.resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + + close(inputDone) + }() + + return inputDone, detached +} + +func setRawTerminal(streams command.Streams) error { + if err := streams.In().SetRawTerminal(); err != nil { + return err + } + return streams.Out().SetRawTerminal() +} + +// nolint: unparam +func restoreTerminal(streams command.Streams, in io.Closer) error { + streams.In().RestoreTerminal() + streams.Out().RestoreTerminal() + // WARNING: DO NOT REMOVE THE OS CHECKS !!! + // For some reason this Close call blocks on darwin.. + // As the client exits right after, simply discard the close + // until we find a better solution. + // + // This can also cause the client on Windows to get stuck in Win32 CloseHandle() + // in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442 + // Tracked internally at Microsoft by VSO #11352156. In the + // Windows case, you hit this if you are using the native/v2 console, + // not the "legacy" console, and you start the client in a new window. eg + // `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar` + // will hang. Remove start, and it won't repro. 
+ if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" { + return in.Close() + } + return nil +} diff --git a/cli/cli/command/container/inspect.go b/cli/cli/command/container/inspect.go new file mode 100644 index 00000000..4f50e2a0 --- /dev/null +++ b/cli/cli/command/container/inspect.go @@ -0,0 +1,47 @@ +package container + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + size bool + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker container inspect` +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Display detailed information on one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ContainerInspectWithRaw(ctx, ref, opts.size) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/cli/cli/command/container/kill.go b/cli/cli/command/container/kill.go new file mode 100644 index 00000000..feedbc01 --- /dev/null +++ b/cli/cli/command/container/kill.go @@ -0,0 +1,56 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + 
"github.com/spf13/cobra" +) + +type killOptions struct { + signal string + + containers []string +} + +// NewKillCommand creates a new cobra.Command for `docker kill` +func NewKillCommand(dockerCli command.Cli) *cobra.Command { + var opts killOptions + + cmd := &cobra.Command{ + Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Kill one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runKill(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") + return cmd +} + +func runKill(dockerCli command.Cli, opts *killOptions) error { + var errs []string + ctx := context.Background() + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + return dockerCli.Client().ContainerKill(ctx, container, opts.signal) + }) + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintln(dockerCli.Out(), name) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/list.go b/cli/cli/command/container/list.go new file mode 100644 index 00000000..a79507e7 --- /dev/null +++ b/cli/cli/command/container/list.go @@ -0,0 +1,140 @@ +package container + +import ( + "context" + "io/ioutil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/cli/templates" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type psOptions struct { + quiet bool + size bool + all bool + noTrunc bool + nLatest bool + last int + format string + filter opts.FilterOpt +} + +// NewPsCommand creates a new cobra.Command for `docker ps` +func NewPsCommand(dockerCli command.Cli) *cobra.Command { 
+ options := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS]", + Short: "List containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPs(dockerCli, &options) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display numeric IDs") + flags.BoolVarP(&options.size, "size", "s", false, "Display total file sizes") + flags.BoolVarP(&options.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVarP(&options.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") + flags.IntVarP(&options.last, "last", "n", -1, "Show n last created containers (includes all states)") + flags.StringVarP(&options.format, "format", "", "", "Pretty-print containers using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + cmd := *NewPsCommand(dockerCli) + cmd.Aliases = []string{"ps", "list"} + cmd.Use = "ls [OPTIONS]" + return &cmd +} + +// listOptionsProcessor is used to set any container list options which may only +// be embedded in the format template. +// This is passed directly into tmpl.Execute in order to allow the preprocessor +// to set any list options that were not provided by flags (e.g. `.Size`). +// It is using a `map[string]bool` so that unknown fields passed into the +// template format do not cause errors. These errors will get picked up when +// running through the actual template processor. +type listOptionsProcessor map[string]bool + +// Size sets the size of the map when called by a template execution. 
+func (o listOptionsProcessor) Size() bool { + o["size"] = true + return true +} + +// Label is needed here as it allows the correct pre-processing +// because Label() is a method with arguments +func (o listOptionsProcessor) Label(name string) string { + return "" +} + +func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) { + options := &types.ContainerListOptions{ + All: opts.all, + Limit: opts.last, + Size: opts.size, + Filters: opts.filter.Value(), + } + + if opts.nLatest && opts.last == -1 { + options.Limit = 1 + } + + tmpl, err := templates.Parse(opts.format) + + if err != nil { + return nil, err + } + + optionsProcessor := listOptionsProcessor{} + // This shouldn't error out but swallowing the error makes it harder + // to track down if preProcessor issues come up. Ref #24696 + if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil { + return nil, err + } + // At the moment all we need is to capture .Size for preprocessor + options.Size = opts.size || optionsProcessor["size"] + + return options, nil +} + +func runPs(dockerCli command.Cli, options *psOptions) error { + ctx := context.Background() + + listOptions, err := buildContainerListOptions(options) + if err != nil { + return err + } + + containers, err := dockerCli.Client().ContainerList(ctx, *listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().PsFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().PsFormat + } else { + format = formatter.TableFormatKey + } + } + + containerCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewContainerFormat(format, options.quiet, listOptions.Size), + Trunc: !options.noTrunc, + } + return formatter.ContainerWrite(containerCtx, containers) +} diff --git a/cli/cli/command/container/list_test.go b/cli/cli/command/container/list_test.go new file mode 100644 index 00000000..2bc1949a --- /dev/null +++ 
b/cli/cli/command/container/list_test.go @@ -0,0 +1,164 @@ +package container + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestContainerListErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + containerListFunc func(types.ContainerListOptions) ([]types.Container, error) + expectedError string + }{ + { + flags: map[string]string{ + "format": "{{invalid}}", + }, + expectedError: `function "invalid" not defined`, + }, + { + flags: map[string]string{ + "format": "{{join}}", + }, + expectedError: `wrong number of args for join`, + }, + { + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return nil, fmt.Errorf("error listing containers") + }, + expectedError: "error listing containers", + }, + } + for _, tc := range testCases { + cmd := newListCommand( + test.NewFakeCli(&fakeClient{ + containerListFunc: tc.containerListFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestContainerListWithoutFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1"), + *Container("c2", WithName("foo")), + *Container("c3", WithPort(80, 80, TCP), WithPort(81, 81, TCP), WithPort(82, 82, TCP)), + *Container("c4", WithPort(81, 81, UDP)), + *Container("c5", WithPort(82, 82, IP("8.8.8.8"), TCP)), + }, nil + }, + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, 
cli.OutBuffer().String(), "container-list-without-format.golden") +} + +func TestContainerListNoTrunc(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1"), + *Container("c2", WithName("foo/bar")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("no-trunc", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-without-format-no-trunc.golden") +} + +// Test for GitHub issue docker/docker#21772 +func TestContainerListNamesMultipleTime(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1"), + *Container("c2", WithName("foo/bar")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{.Names}} {{.Names}}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-format-name-name.golden") +} + +// Test for GitHub issue docker/docker#30291 +func TestContainerListFormatTemplateWithArg(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1", WithLabel("some.label", "value")), + *Container("c2", WithName("foo/bar"), WithLabel("foo", "bar")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", `{{.Names}} {{.Label "some.label"}}`) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-format-with-arg.golden") +} + +func TestContainerListFormatSizeSetsOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(options types.ContainerListOptions) ([]types.Container, error) { + assert.Check(t, options.Size) + return []types.Container{}, nil + }, + }) + cmd := 
newListCommand(cli) + cmd.Flags().Set("format", `{{.Size}}`) + assert.NilError(t, cmd.Execute()) +} + +func TestContainerListWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1", WithLabel("some.label", "value")), + *Container("c2", WithName("foo/bar"), WithLabel("foo", "bar")), + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + PsFormat: "{{ .Names }} {{ .Image }} {{ .Labels }}", + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-with-config-format.golden") +} + +func TestContainerListWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + containerListFunc: func(_ types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + *Container("c1", WithLabel("some.label", "value")), + *Container("c2", WithName("foo/bar"), WithLabel("foo", "bar")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{ .Names }} {{ .Image }} {{ .Labels }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "container-list-with-format.golden") +} diff --git a/cli/cli/command/container/logs.go b/cli/cli/command/container/logs.go new file mode 100644 index 00000000..d9bc73ba --- /dev/null +++ b/cli/cli/command/container/logs.go @@ -0,0 +1,80 @@ +package container + +import ( + "context" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" +) + +type logsOptions struct { + follow bool + since string + until string + timestamps bool + details bool + tail string + + container string +} + +// NewLogsCommand creates a new cobra.Command for `docker logs` +func NewLogsCommand(dockerCli command.Cli) *cobra.Command { + var opts logsOptions + 
+ cmd := &cobra.Command{ + Use: "logs [OPTIONS] CONTAINER", + Short: "Fetch the logs of a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runLogs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)") + flags.StringVar(&opts.until, "until", "", "Show logs before a timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)") + flags.SetAnnotation("until", "version", []string{"1.35"}) + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli command.Cli, opts *logsOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Until: opts.until, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options) + if err != nil { + return err + } + defer responseBody.Close() + + if c.Config.Tty { + _, err = io.Copy(dockerCli.Out(), responseBody) + } else { + _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) + } + return err +} diff --git a/cli/cli/command/container/logs_test.go b/cli/cli/command/container/logs_test.go new file mode 100644 index 00000000..a618ad5e --- /dev/null +++ b/cli/cli/command/container/logs_test.go @@ -0,0 +1,62 @@ +package container + +import ( + "io" + "io/ioutil" + "strings" 
+ "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +var logFn = func(expectedOut string) func(string, types.ContainerLogsOptions) (io.ReadCloser, error) { + return func(container string, opts types.ContainerLogsOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader(expectedOut)), nil + } +} + +func TestRunLogs(t *testing.T) { + inspectFn := func(containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{ + Config: &container.Config{Tty: true}, + ContainerJSONBase: &types.ContainerJSONBase{State: &types.ContainerState{Running: false}}, + }, nil + } + + var testcases = []struct { + doc string + options *logsOptions + client fakeClient + expectedError string + expectedOut string + expectedErr string + }{ + { + doc: "successful logs", + expectedOut: "foo", + options: &logsOptions{}, + client: fakeClient{logFunc: logFn("foo"), inspectFunc: inspectFn}, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + cli := test.NewFakeCli(&testcase.client) + + err := runLogs(cli, testcase.options) + if testcase.expectedError != "" { + assert.ErrorContains(t, err, testcase.expectedError) + } else { + if !assert.Check(t, err) { + return + } + } + assert.Check(t, is.Equal(testcase.expectedOut, cli.OutBuffer().String())) + assert.Check(t, is.Equal(testcase.expectedErr, cli.ErrBuffer().String())) + }) + } +} diff --git a/cli/cli/command/container/opts.go b/cli/cli/command/container/opts.go new file mode 100644 index 00000000..19a2d158 --- /dev/null +++ b/cli/cli/command/container/opts.go @@ -0,0 +1,1034 @@ +package container + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "path" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/opts" + 
"github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" +) + +var ( + deviceCgroupRuleRegexp = regexp.MustCompile(`^[acb] ([0-9]+|\*):([0-9]+|\*) [rwm]{1,3}$`) +) + +// containerOptions is a data object with all the options for creating a container +type containerOptions struct { + attach opts.ListOpts + volumes opts.ListOpts + tmpfs opts.ListOpts + mounts opts.MountOpt + blkioWeightDevice opts.WeightdeviceOpt + deviceReadBps opts.ThrottledeviceOpt + deviceWriteBps opts.ThrottledeviceOpt + links opts.ListOpts + aliases opts.ListOpts + linkLocalIPs opts.ListOpts + deviceReadIOps opts.ThrottledeviceOpt + deviceWriteIOps opts.ThrottledeviceOpt + env opts.ListOpts + labels opts.ListOpts + deviceCgroupRules opts.ListOpts + devices opts.ListOpts + gpus opts.GpuOpts + ulimits *opts.UlimitOpt + sysctls *opts.MapOpts + publish opts.ListOpts + expose opts.ListOpts + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOptions opts.ListOpts + extraHosts opts.ListOpts + volumesFrom opts.ListOpts + envFile opts.ListOpts + capAdd opts.ListOpts + capDrop opts.ListOpts + groupAdd opts.ListOpts + securityOpt opts.ListOpts + storageOpt opts.ListOpts + labelsFile opts.ListOpts + loggingOpts opts.ListOpts + privileged bool + pidMode string + utsMode string + usernsMode string + publishAll bool + stdin bool + tty bool + oomKillDisable bool + oomScoreAdj int + containerIDFile string + entrypoint string + hostname string + domainname string + memory opts.MemBytes + memoryReservation opts.MemBytes + memorySwap opts.MemSwapBytes + kernelMemory opts.MemBytes + user string + workingDir string + cpuCount int64 + cpuShares int64 + cpuPercent int64 + 
cpuPeriod int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpuQuota int64 + cpus opts.NanoCPUs + cpusetCpus string + cpusetMems string + blkioWeight uint16 + ioMaxBandwidth opts.MemBytes + ioMaxIOps uint64 + swappiness int64 + netMode opts.NetworkOpt + macAddress string + ipv4Address string + ipv6Address string + ipcMode string + pidsLimit int64 + restartPolicy string + readonlyRootfs bool + loggingDriver string + cgroupParent string + volumeDriver string + stopSignal string + stopTimeout int + isolation string + shmSize opts.MemBytes + noHealthcheck bool + healthCmd string + healthInterval time.Duration + healthTimeout time.Duration + healthStartPeriod time.Duration + healthRetries int + runtime string + autoRemove bool + init bool + + Image string + Args []string +} + +// addFlags adds all command line flags that will be used by parse to the FlagSet +func addFlags(flags *pflag.FlagSet) *containerOptions { + copts := &containerOptions{ + aliases: opts.NewListOpts(nil), + attach: opts.NewListOpts(validateAttach), + blkioWeightDevice: opts.NewWeightdeviceOpt(opts.ValidateWeightDevice), + capAdd: opts.NewListOpts(nil), + capDrop: opts.NewListOpts(nil), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOptions: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + deviceCgroupRules: opts.NewListOpts(validateDeviceCgroupRule), + deviceReadBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), + deviceReadIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), + deviceWriteBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), + deviceWriteIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), + devices: opts.NewListOpts(nil), // devices can only be validated after we know the server OS + env: opts.NewListOpts(opts.ValidateEnv), + envFile: opts.NewListOpts(nil), + expose: opts.NewListOpts(nil), + extraHosts: opts.NewListOpts(opts.ValidateExtraHost), + groupAdd: opts.NewListOpts(nil), + 
labels: opts.NewListOpts(opts.ValidateLabel), + labelsFile: opts.NewListOpts(nil), + linkLocalIPs: opts.NewListOpts(nil), + links: opts.NewListOpts(opts.ValidateLink), + loggingOpts: opts.NewListOpts(nil), + publish: opts.NewListOpts(nil), + securityOpt: opts.NewListOpts(nil), + storageOpt: opts.NewListOpts(nil), + sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), + tmpfs: opts.NewListOpts(nil), + ulimits: opts.NewUlimitOpt(nil), + volumes: opts.NewListOpts(nil), + volumesFrom: opts.NewListOpts(nil), + } + + // General purpose flags + flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") + flags.Var(&copts.deviceCgroupRules, "device-cgroup-rule", "Add a rule to the cgroup allowed devices list") + flags.Var(&copts.devices, "device", "Add a host device to the container") + flags.Var(&copts.gpus, "gpus", "GPU devices to add to the container ('all' to pass all GPUs)") + flags.SetAnnotation("gpus", "version", []string{"1.40"}) + flags.VarP(&copts.env, "env", "e", "Set environment variables") + flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") + flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") + flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") + flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") + flags.StringVar(&copts.domainname, "domainname", "", "Container NIS domain name") + flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") + flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") + flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") + flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") + flags.StringVar(&copts.stopSignal, "stop-signal", 
signal.DefaultStopSignal, "Signal to stop a container") + flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") + flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) + flags.Var(copts.sysctls, "sysctl", "Sysctl options") + flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.Var(copts.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: [:])") + flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container") + flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") + + // Security + flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") + flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") + flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") + flags.Var(&copts.securityOpt, "security-opt", "Security Options") + flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") + + // Network and port publishing flag + flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") + flags.Var(&copts.dns, "dns", "Set custom DNS servers") + // We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way. 
+ // This is to be consistent with service create/update + flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") + flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") + flags.MarkHidden("dns-opt") + flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") + flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") + flags.StringVar(&copts.ipv4Address, "ip", "", "IPv4 address (e.g., 172.30.100.104)") + flags.StringVar(&copts.ipv6Address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)") + flags.Var(&copts.links, "link", "Add link to another container") + flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") + flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g., 92:d0:c6:0a:29:33)") + flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") + flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") + // We allow for both "--net" and "--network", although the latter is the recommended way. + flags.Var(&copts.netMode, "net", "Connect a container to a network") + flags.Var(&copts.netMode, "network", "Connect a container to a network") + flags.MarkHidden("net") + // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. 
+ flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") + flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") + flags.MarkHidden("net-alias") + + // Logging and storage + flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") + flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") + flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") + flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") + flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") + flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") + flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") + flags.Var(&copts.mounts, "mount", "Attach a filesystem mount to the container") + + // Health-checking + flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") + flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ms|s|m|h) (default 0s)") + flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") + flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ms|s|m|h) (default 0s)") + flags.DurationVar(&copts.healthStartPeriod, "health-start-period", 0, "Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)") + flags.SetAnnotation("health-start-period", "version", []string{"1.29"}) + flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") + + // Resource management + flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device 
weight)") + flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") + flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") + flags.SetAnnotation("cpu-count", "ostype", []string{"windows"}) + flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") + flags.SetAnnotation("cpu-percent", "ostype", []string{"windows"}) + flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") + flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) + flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") + flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) + flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Var(&copts.cpus, "cpus", "Number of CPUs") + flags.SetAnnotation("cpus", "version", []string{"1.25"}) + flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") + flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") + flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") + flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") + flags.Var(&copts.ioMaxBandwidth, "io-maxbandwidth", "Maximum IO bandwidth limit for the system drive (Windows only)") + flags.SetAnnotation("io-maxbandwidth", "ostype", []string{"windows"}) + 
flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") + flags.SetAnnotation("io-maxiops", "ostype", []string{"windows"}) + flags.Var(&copts.kernelMemory, "kernel-memory", "Kernel memory limit") + flags.VarP(&copts.memory, "memory", "m", "Memory limit") + flags.Var(&copts.memoryReservation, "memory-reservation", "Memory soft limit") + flags.Var(&copts.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") + flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") + flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") + flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") + + // Low-level execution (cgroups, namespaces, ...) + flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&copts.ipcMode, "ipc", "", "IPC mode to use") + flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") + flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") + flags.Var(&copts.shmSize, "shm-size", "Size of /dev/shm") + flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") + flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") + + flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") + flags.SetAnnotation("init", "version", []string{"1.25"}) + return copts +} + +type containerConfig struct { + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *networktypes.NetworkingConfig +} + +// parse parses the args for the specified command and generates a Config, +// a HostConfig and returns them with the specified command. 
// If the specified args are not valid, it will return an error.
// nolint: gocyclo
func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*containerConfig, error) {
	var (
		attachStdin  = copts.attach.Get("stdin")
		attachStdout = copts.attach.Get("stdout")
		attachStderr = copts.attach.Get("stderr")
	)

	// Validate the input mac address
	if copts.macAddress != "" {
		if _, err := opts.ValidateMACAddress(copts.macAddress); err != nil {
			return nil, errors.Errorf("%s is not a valid mac address", copts.macAddress)
		}
	}
	// -i/--interactive implies attaching stdin.
	if copts.stdin {
		attachStdin = true
	}
	// If -a is not set, attach to stdout and stderr
	if copts.attach.Len() == 0 {
		attachStdout = true
		attachStderr = true
	}

	var err error

	// --memory-swappiness: -1 means "unset"; any other value must be in [0, 100].
	swappiness := copts.swappiness
	if swappiness != -1 && (swappiness < 0 || swappiness > 100) {
		return nil, errors.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness)
	}

	mounts := copts.mounts.Value()
	if len(mounts) > 0 && copts.volumeDriver != "" {
		logrus.Warn("`--volume-driver` is ignored for volumes specified via `--mount`. Use `--mount type=volume,volume-driver=...` instead.")
	}
	var binds []string
	volumes := copts.volumes.GetMap()
	// add any bind targets to the list of container volumes
	for bind := range copts.volumes.GetMap() {
		parsed, _ := loader.ParseVolume(bind)
		if parsed.Source != "" {
			// after creating the bind mount we want to delete it from the copts.volumes values because
			// we do not want bind mounts being committed to image configs
			binds = append(binds, bind)
			// We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if
			// there are duplicates entries.
			delete(volumes, bind)
		}
	}

	// Can't evaluate options passed into --tmpfs until we actually mount
	tmpfs := make(map[string]string)
	for _, t := range copts.tmpfs.GetAll() {
		if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
			tmpfs[arr[0]] = arr[1]
		} else {
			tmpfs[arr[0]] = ""
		}
	}

	var (
		runCmd     strslice.StrSlice
		entrypoint strslice.StrSlice
	)

	if len(copts.Args) > 0 {
		runCmd = strslice.StrSlice(copts.Args)
	}

	if copts.entrypoint != "" {
		entrypoint = strslice.StrSlice{copts.entrypoint}
	} else if flags.Changed("entrypoint") {
		// if `--entrypoint=` is parsed then Entrypoint is reset
		entrypoint = []string{""}
	}

	publishOpts := copts.publish.GetAll()
	var ports map[nat.Port]struct{}
	var portBindings map[nat.Port][]nat.PortBinding

	ports, portBindings, err = nat.ParsePortSpecs(publishOpts)

	// If simple port parsing fails try to parse as long format
	if err != nil {
		publishOpts, err = parsePortOpts(publishOpts)
		if err != nil {
			return nil, err
		}

		ports, portBindings, err = nat.ParsePortSpecs(publishOpts)

		if err != nil {
			return nil, err
		}
	}

	// Merge in exposed ports to the map of published ports
	for _, e := range copts.expose.GetAll() {
		if strings.Contains(e, ":") {
			return nil, errors.Errorf("invalid port format for --expose: %s", e)
		}
		//support two formats for expose, original format /[] or /[]
		proto, port := nat.SplitProtoPort(e)
		//parse the start and end port and create a sequence of ports to expose
		//if expose a port, the start and end port are the same
		start, end, err := nat.ParsePortRange(port)
		if err != nil {
			return nil, errors.Errorf("invalid range format for --expose: %s, error: %s", e, err)
		}
		for i := start; i <= end; i++ {
			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
			if err != nil {
				return nil, err
			}
			if _, exists := ports[p]; !exists {
				ports[p] = struct{}{}
			}
		}
	}

	// validate and parse device mappings. Note we do late validation of the
	// device path (as opposed to during flag parsing), as at the time we are
	// parsing flags, we haven't yet sent a _ping to the daemon to determine
	// what operating system it is.
	deviceMappings := []container.DeviceMapping{}
	for _, device := range copts.devices.GetAll() {
		var (
			validated     string
			deviceMapping container.DeviceMapping
			err           error
		)
		validated, err = validateDevice(device, serverOS)
		if err != nil {
			return nil, err
		}
		deviceMapping, err = parseDevice(validated, serverOS)
		if err != nil {
			return nil, err
		}
		deviceMappings = append(deviceMappings, deviceMapping)
	}

	// collect all the environment variables for the container
	envVariables, err := opts.ReadKVEnvStrings(copts.envFile.GetAll(), copts.env.GetAll())
	if err != nil {
		return nil, err
	}

	// collect all the labels for the container
	labels, err := opts.ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll())
	if err != nil {
		return nil, err
	}

	pidMode := container.PidMode(copts.pidMode)
	if !pidMode.Valid() {
		return nil, errors.Errorf("--pid: invalid PID mode")
	}

	utsMode := container.UTSMode(copts.utsMode)
	if !utsMode.Valid() {
		return nil, errors.Errorf("--uts: invalid UTS mode")
	}

	usernsMode := container.UsernsMode(copts.usernsMode)
	if !usernsMode.Valid() {
		return nil, errors.Errorf("--userns: invalid USER mode")
	}

	restartPolicy, err := opts.ParseRestartPolicy(copts.restartPolicy)
	if err != nil {
		return nil, err
	}

	loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll())
	if err != nil {
		return nil, err
	}

	securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll())
	if err != nil {
		return nil, err
	}

	// "systempaths=unconfined" is handled client-side and stripped from the
	// security options before they are sent to the daemon.
	securityOpts, maskedPaths, readonlyPaths := parseSystemPaths(securityOpts)

	storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll())
	if err != nil {
		return nil, err
	}

	// Healthcheck: --no-healthcheck and any --health-* flag are mutually exclusive.
	var healthConfig *container.HealthConfig
	haveHealthSettings := copts.healthCmd != "" ||
		copts.healthInterval != 0 ||
		copts.healthTimeout != 0 ||
		copts.healthStartPeriod != 0 ||
		copts.healthRetries != 0
	if copts.noHealthcheck {
		if haveHealthSettings {
			return nil, errors.Errorf("--no-healthcheck conflicts with --health-* options")
		}
		// Test = ["NONE"] tells the daemon to disable any image-specified healthcheck.
		test := strslice.StrSlice{"NONE"}
		healthConfig = &container.HealthConfig{Test: test}
	} else if haveHealthSettings {
		var probe strslice.StrSlice
		if copts.healthCmd != "" {
			args := []string{"CMD-SHELL", copts.healthCmd}
			probe = strslice.StrSlice(args)
		}
		if copts.healthInterval < 0 {
			return nil, errors.Errorf("--health-interval cannot be negative")
		}
		if copts.healthTimeout < 0 {
			return nil, errors.Errorf("--health-timeout cannot be negative")
		}
		if copts.healthRetries < 0 {
			return nil, errors.Errorf("--health-retries cannot be negative")
		}
		if copts.healthStartPeriod < 0 {
			return nil, fmt.Errorf("--health-start-period cannot be negative")
		}

		healthConfig = &container.HealthConfig{
			Test:        probe,
			Interval:    copts.healthInterval,
			Timeout:     copts.healthTimeout,
			StartPeriod: copts.healthStartPeriod,
			Retries:     copts.healthRetries,
		}
	}

	resources := container.Resources{
		CgroupParent:         copts.cgroupParent,
		Memory:               copts.memory.Value(),
		MemoryReservation:    copts.memoryReservation.Value(),
		MemorySwap:           copts.memorySwap.Value(),
		MemorySwappiness:     &copts.swappiness,
		KernelMemory:         copts.kernelMemory.Value(),
		OomKillDisable:       &copts.oomKillDisable,
		NanoCPUs:             copts.cpus.Value(),
		CPUCount:             copts.cpuCount,
		CPUPercent:           copts.cpuPercent,
		CPUShares:            copts.cpuShares,
		CPUPeriod:            copts.cpuPeriod,
		CpusetCpus:           copts.cpusetCpus,
		CpusetMems:           copts.cpusetMems,
		CPUQuota:             copts.cpuQuota,
		CPURealtimePeriod:    copts.cpuRealtimePeriod,
		CPURealtimeRuntime:   copts.cpuRealtimeRuntime,
		PidsLimit:            &copts.pidsLimit,
		BlkioWeight:          copts.blkioWeight,
		BlkioWeightDevice:    copts.blkioWeightDevice.GetList(),
		BlkioDeviceReadBps:   copts.deviceReadBps.GetList(),
		BlkioDeviceWriteBps:  copts.deviceWriteBps.GetList(),
		BlkioDeviceReadIOps:  copts.deviceReadIOps.GetList(),
		BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(),
		IOMaximumIOps:        copts.ioMaxIOps,
		IOMaximumBandwidth:   uint64(copts.ioMaxBandwidth),
		Ulimits:              copts.ulimits.GetList(),
		DeviceCgroupRules:    copts.deviceCgroupRules.GetAll(),
		Devices:              deviceMappings,
		DeviceRequests:       copts.gpus.Value(),
	}

	config := &container.Config{
		Hostname:     copts.hostname,
		Domainname:   copts.domainname,
		ExposedPorts: ports,
		User:         copts.user,
		Tty:          copts.tty,
		// TODO: deprecated, it comes from -n, --networking
		// it's still needed internally to set the network to disabled
		// if e.g. bridge is none in daemon opts, and in inspect
		NetworkDisabled: false,
		OpenStdin:       copts.stdin,
		AttachStdin:     attachStdin,
		AttachStdout:    attachStdout,
		AttachStderr:    attachStderr,
		Env:             envVariables,
		Cmd:             runCmd,
		Image:           copts.Image,
		Volumes:         volumes,
		MacAddress:      copts.macAddress,
		Entrypoint:      entrypoint,
		WorkingDir:      copts.workingDir,
		Labels:          opts.ConvertKVStringsToMap(labels),
		Healthcheck:     healthConfig,
	}
	// Only set stop-signal/stop-timeout when the user provided the flag, so that
	// the daemon/image defaults apply otherwise.
	if flags.Changed("stop-signal") {
		config.StopSignal = copts.stopSignal
	}
	if flags.Changed("stop-timeout") {
		config.StopTimeout = &copts.stopTimeout
	}

	hostConfig := &container.HostConfig{
		Binds:           binds,
		ContainerIDFile: copts.containerIDFile,
		OomScoreAdj:     copts.oomScoreAdj,
		AutoRemove:      copts.autoRemove,
		Privileged:      copts.privileged,
		PortBindings:    portBindings,
		Links:           copts.links.GetAll(),
		PublishAllPorts: copts.publishAll,
		// Make sure the dns fields are never nil.
		// New containers don't ever have those fields nil,
		// but pre created containers can still have those nil values.
		// See https://github.com/docker/docker/pull/17779
		// for a more detailed explanation on why we don't want that.
		DNS:            copts.dns.GetAllOrEmpty(),
		DNSSearch:      copts.dnsSearch.GetAllOrEmpty(),
		DNSOptions:     copts.dnsOptions.GetAllOrEmpty(),
		ExtraHosts:     copts.extraHosts.GetAll(),
		VolumesFrom:    copts.volumesFrom.GetAll(),
		IpcMode:        container.IpcMode(copts.ipcMode),
		NetworkMode:    container.NetworkMode(copts.netMode.NetworkMode()),
		PidMode:        pidMode,
		UTSMode:        utsMode,
		UsernsMode:     usernsMode,
		CapAdd:         strslice.StrSlice(copts.capAdd.GetAll()),
		CapDrop:        strslice.StrSlice(copts.capDrop.GetAll()),
		GroupAdd:       copts.groupAdd.GetAll(),
		RestartPolicy:  restartPolicy,
		SecurityOpt:    securityOpts,
		StorageOpt:     storageOpts,
		ReadonlyRootfs: copts.readonlyRootfs,
		LogConfig:      container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts},
		VolumeDriver:   copts.volumeDriver,
		Isolation:      container.Isolation(copts.isolation),
		ShmSize:        copts.shmSize.Value(),
		Resources:      resources,
		Tmpfs:          tmpfs,
		Sysctls:        copts.sysctls.GetAll(),
		Runtime:        copts.runtime,
		Mounts:         mounts,
		MaskedPaths:    maskedPaths,
		ReadonlyPaths:  readonlyPaths,
	}

	// --rm removes the container on exit; a restart policy would immediately
	// contradict that, so the combination is rejected.
	if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() {
		return nil, errors.Errorf("Conflicting options: --restart and --rm")
	}

	// only set this value if the user provided the flag, else it should default to nil
	if flags.Changed("init") {
		hostConfig.Init = &copts.init
	}

	// When allocating stdin in attached mode, close stdin at client disconnect
	if config.OpenStdin && config.AttachStdin {
		config.StdinOnce = true
	}

	networkingConfig := &networktypes.NetworkingConfig{
		EndpointsConfig: make(map[string]*networktypes.EndpointSettings),
	}

	networkingConfig.EndpointsConfig, err = parseNetworkOpts(copts)
	if err != nil {
		return nil, err
	}

	return &containerConfig{
		Config:           config,
		HostConfig:       hostConfig,
		NetworkingConfig: networkingConfig,
	}, nil
}
// parseNetworkOpts converts --network advanced options to endpoint-specs, and
// combines them with the old --network-alias and --links. It returns an error
// if conflicting options are found.
//
// this function may return _multiple_ endpoints, which is not currently supported
// by the daemon, but may be in future; it's up to the daemon to produce an error
// in case that is not supported.
func parseNetworkOpts(copts *containerOptions) (map[string]*networktypes.EndpointSettings, error) {
	var (
		endpoints                         = make(map[string]*networktypes.EndpointSettings, len(copts.netMode.Value()))
		hasUserDefined, hasNonUserDefined bool
	)

	for i, n := range copts.netMode.Value() {
		if container.NetworkMode(n.Target).IsUserDefined() {
			hasUserDefined = true
		} else {
			hasNonUserDefined = true
		}
		if i == 0 {
			// The first network corresponds with what was previously the "only"
			// network, and what would be used when using the non-advanced syntax
			// `--network-alias`, `--link`, `--ip`, `--ip6`, and `--link-local-ip`
			// are set on this network, to preserve backward compatibility with
			// the non-advanced notation
			if err := applyContainerOptions(&n, copts); err != nil {
				return nil, err
			}
		}
		ep, err := parseNetworkAttachmentOpt(n)
		if err != nil {
			return nil, err
		}
		if _, ok := endpoints[n.Target]; ok {
			return nil, errdefs.InvalidParameter(errors.Errorf("network %q is specified multiple times", n.Target))
		}

		// For backward compatibility: if no custom options are provided for the network,
		// and only a single network is specified, omit the endpoint-configuration
		// on the client (the daemon will still create it when creating the container)
		if i == 0 && len(copts.netMode.Value()) == 1 {
			if ep == nil || reflect.DeepEqual(*ep, networktypes.EndpointSettings{}) {
				continue
			}
		}
		endpoints[n.Target] = ep
	}
	if hasUserDefined && hasNonUserDefined {
		return nil, errdefs.InvalidParameter(errors.New("conflicting options: cannot attach both user-defined and non-user-defined network-modes"))
	}
	return endpoints, nil
}

// applyContainerOptions copies the legacy top-level network flags
// (--network-alias, --link, --ip, --ip6, --link-local-ip) onto the given
// (first) network attachment, erroring when the same setting was also given
// via the advanced --network csv notation.
func applyContainerOptions(n *opts.NetworkAttachmentOpts, copts *containerOptions) error {
	// TODO should copts.MacAddress actually be set on the first network? (currently it's not)
	// TODO should we error if _any_ advanced option is used? (i.e. forbid to combine advanced notation with the "old" flags (`--network-alias`, `--link`, `--ip`, `--ip6`)?
	if len(n.Aliases) > 0 && copts.aliases.Len() > 0 {
		return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --network-alias and per-network alias"))
	}
	if len(n.Links) > 0 && copts.links.Len() > 0 {
		return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --link and per-network links"))
	}
	if copts.aliases.Len() > 0 {
		n.Aliases = make([]string, copts.aliases.Len())
		copy(n.Aliases, copts.aliases.GetAll())
	}
	if copts.links.Len() > 0 {
		n.Links = make([]string, copts.links.Len())
		copy(n.Links, copts.links.GetAll())
	}

	// TODO add IPv4/IPv6 options to the csv notation for --network, and error-out in case of conflicting options
	n.IPv4Address = copts.ipv4Address
	n.IPv6Address = copts.ipv6Address

	// TODO should linkLocalIPs be added to the _first_ network only, or to _all_ networks? (should this be a per-network option as well?)
	if copts.linkLocalIPs.Len() > 0 {
		n.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len())
		copy(n.LinkLocalIPs, copts.linkLocalIPs.GetAll())
	}
	return nil
}

// parseNetworkAttachmentOpt validates a single network attachment and converts
// it to an EndpointSettings. Aliases and links are only accepted for
// user-defined networks.
func parseNetworkAttachmentOpt(ep opts.NetworkAttachmentOpts) (*networktypes.EndpointSettings, error) {
	if strings.TrimSpace(ep.Target) == "" {
		return nil, errors.New("no name set for network")
	}
	if !container.NetworkMode(ep.Target).IsUserDefined() {
		if len(ep.Aliases) > 0 {
			return nil, errors.New("network-scoped aliases are only supported for user-defined networks")
		}
		if len(ep.Links) > 0 {
			return nil, errors.New("links are only supported for user-defined networks")
		}
	}

	epConfig := &networktypes.EndpointSettings{}
	epConfig.Aliases = append(epConfig.Aliases, ep.Aliases...)
	if len(ep.DriverOpts) > 0 {
		// NOTE(review): the make() below is dead — it is immediately overwritten
		// by the assignment on the next line; harmless but could be removed.
		epConfig.DriverOpts = make(map[string]string)
		epConfig.DriverOpts = ep.DriverOpts
	}
	if len(ep.Links) > 0 {
		epConfig.Links = ep.Links
	}
	if ep.IPv4Address != "" || ep.IPv6Address != "" || len(ep.LinkLocalIPs) > 0 {
		epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{
			IPv4Address:  ep.IPv4Address,
			IPv6Address:  ep.IPv6Address,
			LinkLocalIPs: ep.LinkLocalIPs,
		}
	}
	return epConfig, nil
}

// parsePortOpts converts --publish entries given in the long csv form
// (target=80,published=8080,protocol=tcp) into the short
// "published:target/protocol" form understood by nat.ParsePortSpecs.
func parsePortOpts(publishOpts []string) ([]string, error) {
	optsList := []string{}
	for _, publish := range publishOpts {
		// protocol defaults to tcp when not given.
		params := map[string]string{"protocol": "tcp"}
		for _, param := range strings.Split(publish, ",") {
			// NOTE(review): strings.Split drops everything after a second '='
			// inside a value; strings.SplitN(param, "=", 2) would be safer —
			// confirm no supported value can contain '='.
			opt := strings.Split(param, "=")
			if len(opt) < 2 {
				return optsList, errors.Errorf("invalid publish opts format (should be name=value but got '%s')", param)
			}

			params[opt[0]] = opt[1]
		}
		optsList = append(optsList, fmt.Sprintf("%s:%s/%s", params["target"], params["published"], params["protocol"]))
	}
	return optsList, nil
}

// parseLoggingOpts converts the --log-opt key=value pairs to a map, rejecting
// options when the "none" log driver is selected (it accepts no options).
func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) {
	loggingOptsMap := opts.ConvertKVStringsToMap(loggingOpts)
	if loggingDriver == "none" && len(loggingOpts) > 0 {
		return map[string]string{}, errors.Errorf("invalid logging opts for driver %s", loggingDriver)
	}
	return loggingOptsMap, nil
}

// takes a local seccomp daemon, reads the file contents for sending to the daemon
func parseSecurityOpts(securityOpts []string) ([]string, error) {
	for key, opt := range securityOpts {
		con := strings.SplitN(opt, "=", 2)
		if len(con) == 1 && con[0] != "no-new-privileges" {
			// legacy "name:value" separator is still accepted
			if strings.Contains(opt, ":") {
				con = strings.SplitN(opt, ":", 2)
			} else {
				return securityOpts, errors.Errorf("Invalid --security-opt: %q", opt)
			}
		}
		// seccomp=<file> is resolved client-side: the profile is read and
		// compacted, and the option is rewritten in place to carry the JSON.
		if con[0] == "seccomp" && con[1] != "unconfined" {
			f, err := ioutil.ReadFile(con[1])
			if err != nil {
				return securityOpts, errors.Errorf("opening seccomp profile (%s) failed: %v", con[1], err)
			}
			b := bytes.NewBuffer(nil)
			if err := json.Compact(b, f); err != nil {
				return securityOpts, errors.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err)
			}
			securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes())
		}
	}

	return securityOpts, nil
}
// parseSystemPaths checks if `systempaths=unconfined` security option is set,
// and returns the `MaskedPaths` and `ReadonlyPaths` accordingly. An updated
// list of security options is returned with this option removed, because the
// `unconfined` option is handled client-side, and should not be sent to the
// daemon.
func parseSystemPaths(securityOpts []string) (filtered, maskedPaths, readonlyPaths []string) {
	// filter in place, reusing the backing array of securityOpts
	filtered = securityOpts[:0]
	for _, opt := range securityOpts {
		if opt == "systempaths=unconfined" {
			// empty (non-nil) slices tell the daemon "mask/read-only nothing"
			maskedPaths = []string{}
			readonlyPaths = []string{}
		} else {
			filtered = append(filtered, opt)
		}
	}

	return filtered, maskedPaths, readonlyPaths
}

// parses storage options per container into a map
func parseStorageOpts(storageOpts []string) (map[string]string, error) {
	m := make(map[string]string)
	for _, option := range storageOpts {
		if strings.Contains(option, "=") {
			opt := strings.SplitN(option, "=", 2)
			m[opt[0]] = opt[1]
		} else {
			return nil, errors.Errorf("invalid storage option")
		}
	}
	return m, nil
}

// parseDevice parses a device mapping string to a container.DeviceMapping struct
func parseDevice(device, serverOS string) (container.DeviceMapping, error) {
	switch serverOS {
	case "linux":
		return parseLinuxDevice(device)
	case "windows":
		return parseWindowsDevice(device)
	}
	return container.DeviceMapping{}, errors.Errorf("unknown server OS: %s", serverOS)
}

// parseLinuxDevice parses a device mapping string to a container.DeviceMapping struct
// knowing that the target is a Linux daemon.
// Accepted forms: "src", "src:dst", "src:dst:permissions" and "src:permissions"
// (the second field is treated as permissions when it is a valid device mode).
func parseLinuxDevice(device string) (container.DeviceMapping, error) {
	src := ""
	dst := ""
	permissions := "rwm"
	arr := strings.Split(device, ":")
	// fallthroughs intentionally cascade from the most specific form down to "src only"
	switch len(arr) {
	case 3:
		permissions = arr[2]
		fallthrough
	case 2:
		if validDeviceMode(arr[1]) {
			permissions = arr[1]
		} else {
			dst = arr[1]
		}
		fallthrough
	case 1:
		src = arr[0]
	default:
		return container.DeviceMapping{}, errors.Errorf("invalid device specification: %s", device)
	}

	// default the in-container path to the host path
	if dst == "" {
		dst = src
	}

	deviceMapping := container.DeviceMapping{
		PathOnHost:        src,
		PathInContainer:   dst,
		CgroupPermissions: permissions,
	}
	return deviceMapping, nil
}

// parseWindowsDevice parses a device mapping string to a container.DeviceMapping struct
// knowing that the target is a Windows daemon
func parseWindowsDevice(device string) (container.DeviceMapping, error) {
	return container.DeviceMapping{PathOnHost: device}, nil
}

// validateDeviceCgroupRule validates a device cgroup rule string format
// It will make sure 'val' is in the form:
//    'type major:minor mode'
// (deviceCgroupRuleRegexp is declared elsewhere in this file)
func validateDeviceCgroupRule(val string) (string, error) {
	if deviceCgroupRuleRegexp.MatchString(val) {
		return val, nil
	}

	return val, errors.Errorf("invalid device cgroup format '%s'", val)
}

// validDeviceMode checks if the mode for device is valid or not.
// Valid mode is a composition of r (read), w (write), and m (mknod).
func validDeviceMode(mode string) bool {
	var legalDeviceMode = map[rune]bool{
		'r': true,
		'w': true,
		'm': true,
	}
	if mode == "" {
		return false
	}
	for _, c := range mode {
		if !legalDeviceMode[c] {
			return false
		}
		// clear the entry so that a repeated character (e.g. "rr") is rejected
		legalDeviceMode[c] = false
	}
	return true
}

// validateDevice validates a path for devices
func validateDevice(val string, serverOS string) (string, error) {
	switch serverOS {
	case "linux":
		return validateLinuxPath(val, validDeviceMode)
	case "windows":
		// Windows does validation entirely server-side
		return val, nil
	}
	return "", errors.Errorf("unknown server OS: %s", serverOS)
}

// validateLinuxPath is the implementation of validateDevice knowing that the
// target server operating system is a Linux daemon.
// It will make sure 'val' is in the form:
//    [host-dir:]container-path[:mode]
// It also validates the device mode.
+func validateLinuxPath(val string, validator func(string) bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, errors.Errorf("bad format for path: %s", val) + } + + split := strings.SplitN(val, ":", 3) + if split[0] == "" { + return val, errors.Errorf("bad format for path: %s", val) + } + switch len(split) { + case 1: + containerPath = split[0] + val = path.Clean(containerPath) + case 2: + if isValid := validator(split[1]); isValid { + containerPath = split[0] + mode = split[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = split[1] + val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) + } + case 3: + containerPath = split[1] + mode = split[2] + if isValid := validator(split[2]); !isValid { + return val, errors.Errorf("bad mode specified: %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) + } + + if !path.IsAbs(containerPath) { + return val, errors.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// validateAttach validates that the specified string is a valid attach option. 
// validateAttach validates that the specified string is a valid attach option.
// Valid values are "stdin", "stdout" and "stderr" (case-insensitive); the
// normalized lowercase value is returned on success.
func validateAttach(val string) (string, error) {
	s := strings.ToLower(val)
	for _, str := range []string{"stdin", "stdout", "stderr"} {
		if s == str {
			return s, nil
		}
	}
	return val, errors.Errorf("valid streams are STDIN, STDOUT and STDERR")
}

// validateAPIVersion rejects options that require a newer daemon API than the
// connected server provides: bind-nonrecursive mounts need API v1.40 or later.
func validateAPIVersion(c *containerConfig, serverAPIVersion string) error {
	for _, m := range c.HostConfig.Mounts {
		if m.BindOptions != nil && m.BindOptions.NonRecursive && versions.LessThan(serverAPIVersion, "1.40") {
			return errors.Errorf("bind-nonrecursive requires API v1.40 or later")
		}
	}
	return nil
}
diff --git a/cli/cli/command/container/opts_test.go b/cli/cli/command/container/opts_test.go
new file mode 100644
index 00000000..a64b7c6a
--- /dev/null
+++ b/cli/cli/command/container/opts_test.go
@@ -0,0 +1,856 @@
package container

import (
	"fmt"
	"io/ioutil"
	"os"
	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/docker/docker/api/types/container"
	networktypes "github.com/docker/docker/api/types/network"
	"github.com/docker/go-connections/nat"
	"github.com/pkg/errors"
	"github.com/spf13/pflag"
	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"
	"gotest.tools/skip"
)

// TestValidateAttach checks that the three stream names are accepted in any
// case (and normalized to lowercase), and that anything else is rejected.
func TestValidateAttach(t *testing.T) {
	valid := []string{
		"stdin",
		"stdout",
		"stderr",
		"STDIN",
		"STDOUT",
		"STDERR",
	}
	if _, err := validateAttach("invalid"); err == nil {
		t.Fatal("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing")
	}

	for _, attach := range valid {
		value, err := validateAttach(attach)
		if err != nil {
			t.Fatal(err)
		}
		if value != strings.ToLower(attach) {
			t.Fatalf("Expected [%v], got [%v]", attach, value)
		}
	}
}

// parseRun is a test helper that runs the full flag-parse + parse() pipeline
// for the given command-line arguments.
func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
	flags, copts := setupRunFlags()
	if err := flags.Parse(args); err != nil {
		return nil, nil, nil, err
	}
	// TODO: fix tests to accept ContainerConfig
	containerConfig, err :=
parse(flags, copts, runtime.GOOS) + if err != nil { + return nil, nil, nil, err + } + return containerConfig.Config, containerConfig.HostConfig, containerConfig.NetworkingConfig, err +} + +func setupRunFlags() (*pflag.FlagSet, *containerOptions) { + flags := pflag.NewFlagSet("run", pflag.ContinueOnError) + flags.SetOutput(ioutil.Discard) + flags.Usage = nil + copts := addFlags(flags) + return flags, copts +} + +func parseMustError(t *testing.T, args string) { + _, _, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) + assert.ErrorContains(t, err, "", args) +} + +func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { + config, hostConfig, _, err := parseRun(append(strings.Split(args, " "), "ubuntu", "bash")) + assert.NilError(t, err) + return config, hostConfig +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. 
Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } +} + +func TestParseRunWithInvalidArgs(t *testing.T) { + parseMustError(t, "-a") + parseMustError(t, "-a invalid") + parseMustError(t, "-a invalid -a stdout") + parseMustError(t, "-a stdout -a stderr -d") + parseMustError(t, "-a stdin -d") + parseMustError(t, "-a stdout -d") + parseMustError(t, "-a stderr -d") + parseMustError(t, "-d --rm") +} + +// nolint: gocyclo +func TestParseWithVolumes(t *testing.T) { + + // A single volume + arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %q is missing from volumes. 
Received %v", tryit, config.Volumes) + } + + // Two volumes + arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) + } + + // A single bind mount + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) + } + + // Two bind mounts. + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Two bind mounts, first read-only, second read-write. + // TODO Windows: The Windows version uses read-write as that's the only mode it supports. 
Can change this post TP4 + arr, tryit = setupPlatformVolume( + []string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, + []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Similar to previous test but with alternate modes which are only supported by Linux + if runtime.GOOS != "windows" { + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + } + + // One bind mount and one volume + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. 
Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) + } + + // Root to non-c: drive letter (Windows specific) + if runtime.GOOS == "windows" { + arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { + t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0]) + } + } + +} + +// setupPlatformVolume takes two arrays of volume specs - a Unix style +// spec and a Windows style spec. Depending on the platform being unit tested, +// it returns one of them, along with a volume string that would be passed +// on the docker CLI (e.g. -v /bar -v /foo). +func setupPlatformVolume(u []string, w []string) ([]string, string) { + var a []string + if runtime.GOOS == "windows" { + a = w + } else { + a = u + } + s := "" + for _, v := range a { + s = s + "-v " + v + " " + } + return a, s +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return errors.Errorf("strings don't match") +} + +// Simple parse with MacAddress validation +func TestParseWithMacAddress(t *testing.T) { + invalidMacAddress := "--mac-address=invalidMacAddress" + validMacAddress := "--mac-address=92:d0:c6:0a:29:33" + if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { + t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) + } + if config, _ := mustParse(t, validMacAddress); config.MacAddress != 
"92:d0:c6:0a:29:33" { + t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) + } +} + +func TestRunFlagsParseWithMemory(t *testing.T) { + flags, _ := setupRunFlags() + args := []string{"--memory=invalid", "img", "cmd"} + err := flags.Parse(args) + assert.ErrorContains(t, err, `invalid argument "invalid" for "-m, --memory" flag`) + + _, hostconfig := mustParse(t, "--memory=1G") + assert.Check(t, is.Equal(int64(1073741824), hostconfig.Memory)) +} + +func TestParseWithMemorySwap(t *testing.T) { + flags, _ := setupRunFlags() + args := []string{"--memory-swap=invalid", "img", "cmd"} + err := flags.Parse(args) + assert.ErrorContains(t, err, `invalid argument "invalid" for "--memory-swap" flag`) + + _, hostconfig := mustParse(t, "--memory-swap=1G") + assert.Check(t, is.Equal(int64(1073741824), hostconfig.MemorySwap)) + + _, hostconfig = mustParse(t, "--memory-swap=-1") + assert.Check(t, is.Equal(int64(-1), hostconfig.MemorySwap)) +} + +func TestParseHostname(t *testing.T) { + validHostnames := map[string]string{ + "hostname": "hostname", + "host-name": "host-name", + "hostname123": "hostname123", + "123hostname": "123hostname", + "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", + } + hostnameWithDomain := "--hostname=hostname.domainname" + hostnameWithDomainTld := "--hostname=hostname.domainname.tld" + for hostname, expectedHostname := range validHostnames { + if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { + t.Fatalf("Expected the config to have 'hostname' as %q, got %q", expectedHostname, config.Hostname) + } + } + if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" || config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got %q", config.Hostname) + } + if config, _ := mustParse(t, 
hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" || config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got %q", config.Hostname) + } +} + +func TestParseHostnameDomainname(t *testing.T) { + validDomainnames := map[string]string{ + "domainname": "domainname", + "domain-name": "domain-name", + "domainname123": "domainname123", + "123domainname": "123domainname", + "domainname-63-bytes-long-should-be-valid-and-without-any-errors": "domainname-63-bytes-long-should-be-valid-and-without-any-errors", + } + for domainname, expectedDomainname := range validDomainnames { + if config, _ := mustParse(t, "--domainname="+domainname); config.Domainname != expectedDomainname { + t.Fatalf("Expected the config to have 'domainname' as %q, got %q", expectedDomainname, config.Domainname) + } + } + if config, _ := mustParse(t, "--hostname=some.prefix --domainname=domainname"); config.Hostname != "some.prefix" || config.Domainname != "domainname" { + t.Fatalf("Expected the config to have 'hostname' as 'some.prefix' and 'domainname' as 'domainname', got %q and %q", config.Hostname, config.Domainname) + } + if config, _ := mustParse(t, "--hostname=another-prefix --domainname=domainname.tld"); config.Hostname != "another-prefix" || config.Domainname != "domainname.tld" { + t.Fatalf("Expected the config to have 'hostname' as 'another-prefix' and 'domainname' as 'domainname.tld', got %q and %q", config.Hostname, config.Domainname) + } +} + +func TestParseWithExpose(t *testing.T) { + invalids := map[string]string{ + ":": "invalid port format for --expose: :", + "8080:9090": "invalid port format for --expose: 8080:9090", + "/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.", + "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", + "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, 
+ "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, + } + valids := map[string][]nat.Port{ + "8080/tcp": {"8080/tcp"}, + "8080/udp": {"8080/udp"}, + "8080/ncp": {"8080/ncp"}, + "8080-8080/udp": {"8080/udp"}, + "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, + } + for expose, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) + } + } + for expose, exposedPorts := range valids { + config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != len(exposedPorts) { + t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) + } + for _, port := range exposedPorts { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) + } + } + } + // Merge with actual published port + config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != 2 { + t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) + } + ports := []nat.Port{"80/tcp", "81/tcp"} + for _, port := range ports { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) + } + } +} + +func TestParseDevice(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") // Windows validates server-side + valids := 
map[string]container.DeviceMapping{ + "/dev/snd": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rwm", + }, + "/dev/snd:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rw", + }, + "/dev/snd:/something": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rw", + }, + } + for device, deviceMapping := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(hostconfig.Devices) != 1 { + t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) + } + if hostconfig.Devices[0] != deviceMapping { + t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) + } + } + +} + +func TestParseNetworkConfig(t *testing.T) { + tests := []struct { + name string + flags []string + expected map[string]*networktypes.EndpointSettings + expectedCfg container.HostConfig + expectedErr string + }{ + { + name: "single-network-legacy", + flags: []string{"--network", "net1"}, + expected: map[string]*networktypes.EndpointSettings{}, + expectedCfg: container.HostConfig{NetworkMode: "net1"}, + }, + { + name: "single-network-advanced", + flags: []string{"--network", "name=net1"}, + expected: map[string]*networktypes.EndpointSettings{}, + expectedCfg: container.HostConfig{NetworkMode: "net1"}, + }, + { + name: "single-network-legacy-with-options", + flags: []string{ + "--ip", "172.20.88.22", + "--ip6", "2001:db8::8822", + "--link", "foo:bar", + "--link", "bar:baz", + "--link-local-ip", "169.254.2.2", + "--link-local-ip", "fe80::169:254:2:2", + "--network", "name=net1", + "--network-alias", "web1", + "--network-alias", "web2", + }, + expected: map[string]*networktypes.EndpointSettings{ + "net1": { + IPAMConfig: &networktypes.EndpointIPAMConfig{ + IPv4Address: 
"172.20.88.22", + IPv6Address: "2001:db8::8822", + LinkLocalIPs: []string{"169.254.2.2", "fe80::169:254:2:2"}, + }, + Links: []string{"foo:bar", "bar:baz"}, + Aliases: []string{"web1", "web2"}, + }, + }, + expectedCfg: container.HostConfig{NetworkMode: "net1"}, + }, + { + name: "multiple-network-advanced-mixed", + flags: []string{ + "--ip", "172.20.88.22", + "--ip6", "2001:db8::8822", + "--link", "foo:bar", + "--link", "bar:baz", + "--link-local-ip", "169.254.2.2", + "--link-local-ip", "fe80::169:254:2:2", + "--network", "name=net1,driver-opt=field1=value1", + "--network-alias", "web1", + "--network-alias", "web2", + "--network", "net2", + "--network", "name=net3,alias=web3,driver-opt=field3=value3", + }, + expected: map[string]*networktypes.EndpointSettings{ + "net1": { + DriverOpts: map[string]string{"field1": "value1"}, + IPAMConfig: &networktypes.EndpointIPAMConfig{ + IPv4Address: "172.20.88.22", + IPv6Address: "2001:db8::8822", + LinkLocalIPs: []string{"169.254.2.2", "fe80::169:254:2:2"}, + }, + Links: []string{"foo:bar", "bar:baz"}, + Aliases: []string{"web1", "web2"}, + }, + "net2": {}, + "net3": { + DriverOpts: map[string]string{"field3": "value3"}, + Aliases: []string{"web3"}, + }, + }, + expectedCfg: container.HostConfig{NetworkMode: "net1"}, + }, + { + name: "single-network-advanced-with-options", + flags: []string{"--network", "name=net1,alias=web1,alias=web2,driver-opt=field1=value1,driver-opt=field2=value2"}, + expected: map[string]*networktypes.EndpointSettings{ + "net1": { + DriverOpts: map[string]string{ + "field1": "value1", + "field2": "value2", + }, + Aliases: []string{"web1", "web2"}, + }, + }, + expectedCfg: container.HostConfig{NetworkMode: "net1"}, + }, + { + name: "multiple-networks", + flags: []string{"--network", "net1", "--network", "name=net2"}, + expected: map[string]*networktypes.EndpointSettings{"net1": {}, "net2": {}}, + expectedCfg: container.HostConfig{NetworkMode: "net1"}, + }, + { + name: "conflict-network", + flags: 
[]string{"--network", "duplicate", "--network", "name=duplicate"}, + expectedErr: `network "duplicate" is specified multiple times`, + }, + { + name: "conflict-options", + flags: []string{"--network", "name=net1,alias=web1", "--network-alias", "web1"}, + expectedErr: `conflicting options: cannot specify both --network-alias and per-network alias`, + }, + { + name: "invalid-mixed-network-types", + flags: []string{"--network", "name=host", "--network", "net1"}, + expectedErr: `conflicting options: cannot attach both user-defined and non-user-defined network-modes`, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + _, hConfig, nwConfig, err := parseRun(tc.flags) + + if tc.expectedErr != "" { + assert.Error(t, err, tc.expectedErr) + return + } + + assert.NilError(t, err) + assert.DeepEqual(t, hConfig.NetworkMode, tc.expectedCfg.NetworkMode) + assert.DeepEqual(t, nwConfig.EndpointsConfig, tc.expected) + }) + } +} + +func TestParseModes(t *testing.T) { + // pid ko + flags, copts := setupRunFlags() + args := []string{"--pid=container:", "img", "cmd"} + assert.NilError(t, flags.Parse(args)) + _, err := parse(flags, copts, runtime.GOOS) + assert.ErrorContains(t, err, "--pid: invalid PID mode") + + // pid ok + _, hostconfig, _, err := parseRun([]string{"--pid=host", "img", "cmd"}) + assert.NilError(t, err) + if !hostconfig.PidMode.Valid() { + t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) + } + + // uts ko + _, _, _, err = parseRun([]string{"--uts=container:", "img", "cmd"}) + assert.ErrorContains(t, err, "--uts: invalid UTS mode") + + // uts ok + _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) + assert.NilError(t, err) + if !hostconfig.UTSMode.Valid() { + t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) + } +} + +func TestRunFlagsParseShmSize(t *testing.T) { + // shm-size ko + flags, _ := setupRunFlags() + args := []string{"--shm-size=a128m", "img", "cmd"} + expectedErr := `invalid 
argument "a128m" for "--shm-size" flag: invalid size: 'a128m'` + err := flags.Parse(args) + assert.ErrorContains(t, err, expectedErr) + + // shm-size ok + _, hostconfig, _, err := parseRun([]string{"--shm-size=128m", "img", "cmd"}) + assert.NilError(t, err) + if hostconfig.ShmSize != 134217728 { + t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) + } +} + +func TestParseRestartPolicy(t *testing.T) { + invalids := map[string]string{ + "always:2:3": "invalid restart policy format", + "on-failure:invalid": "maximum retry count must be an integer", + } + valids := map[string]container.RestartPolicy{ + "": {}, + "always": { + Name: "always", + MaximumRetryCount: 0, + }, + "on-failure:1": { + Name: "on-failure", + MaximumRetryCount: 1, + }, + } + for restart, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) + } + } + for restart, expected := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.RestartPolicy != expected { + t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) + } + } +} + +func TestParseRestartPolicyAutoRemove(t *testing.T) { + expected := "Conflicting options: --restart and --rm" + _, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"}) + if err == nil || err.Error() != expected { + t.Fatalf("Expected error %v, but got none", expected) + } +} + +func TestParseHealth(t *testing.T) { + checkOk := func(args ...string) *container.HealthConfig { + config, _, _, err := parseRun(args) + if err != nil { + t.Fatalf("%#v: %v", args, err) + } + return config.Healthcheck + } + checkError := func(expected string, args ...string) { + config, _, _, err := parseRun(args) + if err == nil { + 
t.Fatalf("Expected error, but got %#v", config) + } + if err.Error() != expected { + t.Fatalf("Expected %#v, got %#v", expected, err) + } + } + health := checkOk("--no-healthcheck", "img", "cmd") + if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { + t.Fatalf("--no-healthcheck failed: %#v", health) + } + + health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") + if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { + t.Fatalf("--health-cmd: got %#v", health.Test) + } + if health.Timeout != 0 { + t.Fatalf("--health-cmd: timeout = %s", health.Timeout) + } + + checkError("--no-healthcheck conflicts with --health-* options", + "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") + + health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "--health-start-period=5s", "img", "cmd") + if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond || health.StartPeriod != 5*time.Second { + t.Fatalf("--health-*: got %#v", health) + } +} + +func TestParseLoggingOpts(t *testing.T) { + // logging opts ko + if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { + t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) + } + // logging opts ok + _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { + t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) + } +} + +func TestParseEnvfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." 
+ } + // env ko + if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // env ok + config, _, _, err := parseRun([]string{"--env-file=testdata/valid.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { + t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) + } + config, _, _, err = parseRun([]string{"--env-file=testdata/valid.env", "--env=ENV2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { + t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) + } +} + +func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) { + // UTF8 with BOM + config, _, _, err := parseRun([]string{"--env-file=testdata/utf8.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"} + if len(config.Env) != len(env) { + t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env) + } + for i, v := range env { + if config.Env[i] != v { + t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i])) + } + } + + // UTF16 with BOM + e := "contains invalid utf8 bytes at line" + if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // UTF16BE with BOM + if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } +} + +func TestParseLabelfileVariables(t *testing.T) { + e := "open nonexistent: no such file 
or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } + // label ko + if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // label ok + config, _, _, err := parseRun([]string{"--label-file=testdata/valid.label", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { + t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) + } + config, _, _, err = parseRun([]string{"--label-file=testdata/valid.label", "--label=LABEL2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { + t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) + } +} + +func TestParseEntryPoint(t *testing.T) { + config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) + if err != nil { + t.Fatal(err) + } + if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { + t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) + } +} + +func TestValidateDevice(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") // Windows validates server-side + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:r", + "/hostPath:/containerPath:rw", + "/hostPath:/containerPath:mrw", + } + invalid := map[string]string{ + "": "bad format for path: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for path: :", + "/tmp:": " is not an absolute path", + ":test": "bad format for path: :test", + 
":/test": "bad format for path: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for path: :test:", + "::": "bad format for path: ::", + ":::": "bad format for path: :::", + "/tmp:::": "bad format for path: /tmp:::", + ":/tmp::": "bad format for path: :/tmp::", + "path:ro": "ro is not an absolute path", + "path:rr": "rr is not an absolute path", + "a:/b:ro": "bad mode specified: ro", + "a:/b:rr": "bad mode specified: rr", + } + + for _, path := range valid { + if _, err := validateDevice(path, runtime.GOOS); err != nil { + t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := validateDevice(path, runtime.GOOS); err == nil { + t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} + +func TestParseSystemPaths(t *testing.T) { + tests := []struct { + doc string + in, out, masked, readonly []string + }{ + { + doc: "not set", + in: []string{}, + out: []string{}, + }, + { + doc: "not set, preserve other options", + in: []string{ + "seccomp=unconfined", + "apparmor=unconfined", + "label=user:USER", + "foo=bar", + }, + out: []string{ + "seccomp=unconfined", + "apparmor=unconfined", + "label=user:USER", + "foo=bar", + }, + }, + { + doc: "unconfined", + in: []string{"systempaths=unconfined"}, + out: []string{}, + masked: []string{}, + readonly: []string{}, + }, + { + doc: "unconfined and other options", + in: []string{"foo=bar", "bar=baz", "systempaths=unconfined"}, + out: []string{"foo=bar", "bar=baz"}, + masked: []string{}, + readonly: []string{}, + }, + { + doc: "unknown option", + in: []string{"foo=bar", "systempaths=unknown", "bar=baz"}, + out: []string{"foo=bar", "systempaths=unknown", "bar=baz"}, + }, + } + + for _, tc := range tests { + securityOpts, maskedPaths, readonlyPaths := 
parseSystemPaths(tc.in) + assert.DeepEqual(t, securityOpts, tc.out) + assert.DeepEqual(t, maskedPaths, tc.masked) + assert.DeepEqual(t, readonlyPaths, tc.readonly) + } +} diff --git a/cli/cli/command/container/pause.go b/cli/cli/command/container/pause.go new file mode 100644 index 00000000..1118b7f0 --- /dev/null +++ b/cli/cli/command/container/pause.go @@ -0,0 +1,49 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type pauseOptions struct { + containers []string +} + +// NewPauseCommand creates a new cobra.Command for `docker pause` +func NewPauseCommand(dockerCli command.Cli) *cobra.Command { + var opts pauseOptions + + return &cobra.Command{ + Use: "pause CONTAINER [CONTAINER...]", + Short: "Pause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runPause(dockerCli, &opts) + }, + } +} + +func runPause(dockerCli command.Cli, opts *pauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/port.go b/cli/cli/command/container/port.go new file mode 100644 index 00000000..83e16a98 --- /dev/null +++ b/cli/cli/command/container/port.go @@ -0,0 +1,78 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type portOptions struct { + 
container string + + port string +} + +// NewPortCommand creates a new cobra.Command for `docker port` +func NewPortCommand(dockerCli command.Cli) *cobra.Command { + var opts portOptions + + cmd := &cobra.Command{ + Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", + Short: "List port mappings or a specific mapping for the container", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.port = args[1] + } + return runPort(dockerCli, &opts) + }, + } + return cmd +} + +func runPort(dockerCli command.Cli, opts *portOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if opts.port != "" { + port := opts.port + proto := "tcp" + parts := strings.SplitN(port, "/", 2) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return errors.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/cli/cli/command/container/prune.go b/cli/cli/command/container/prune.go new file mode 100644 index 00000000..1c820fee --- /dev/null +++ b/cli/cli/command/container/prune.go @@ -0,0 +1,78 @@ +package container + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + 
"github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for containers +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all stopped containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const warning = `WARNING! This will remove all stopped containers. 
+Are you sure you want to continue?`
+
+// runPrune asks for confirmation (unless options.force is set), invokes the
+// ContainersPrune API with the configured filters, and returns the number of
+// bytes reclaimed together with a human-readable report listing the deleted
+// container IDs. If the user declines the prompt it returns zero values and
+// no error.
+func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
+ pruneFilters := command.PruneFilters(dockerCli, options.filter.Value())
+
+ // Without --force, abort quietly unless the user confirms the prompt.
+ if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
+ return 0, "", nil
+ }
+
+ report, err := dockerCli.Client().ContainersPrune(context.Background(), pruneFilters)
+ if err != nil {
+ return 0, "", err
+ }
+
+ if len(report.ContainersDeleted) > 0 {
+ output = "Deleted Containers:\n"
+ for _, id := range report.ContainersDeleted {
+ output += id + "\n"
+ }
+ spaceReclaimed = report.SpaceReclaimed
+ }
+
+ return spaceReclaimed, output, nil
+}
+
+// RunPrune calls the Container Prune API
+// This returns the amount of space reclaimed and a detailed output string.
+// Note: it never prompts (force is always set here), and the `all` parameter
+// is accepted for signature compatibility but is not used by this function.
+func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
+ return runPrune(dockerCli, pruneOptions{force: true, filter: filter})
+}
diff --git a/cli/cli/command/container/ps_test.go b/cli/cli/command/container/ps_test.go
new file mode 100644
index 00000000..28853576
--- /dev/null
+++ b/cli/cli/command/container/ps_test.go
@@ -0,0 +1,119 @@
+package container
+
+import (
+ "testing"
+
+ "github.com/docker/cli/opts"
+ "gotest.tools/assert"
+ is "gotest.tools/assert/cmp"
+)
+
+// TestBuildContainerListOptions checks that buildContainerListOptions maps
+// psOptions (the parsed `docker ps` flags) onto the API's container-list
+// options: all/size/limit flags and the filter set.
+func TestBuildContainerListOptions(t *testing.T) {
+ filters := opts.NewFilterOpt()
+ assert.NilError(t, filters.Set("foo=bar"))
+ assert.NilError(t, filters.Set("baz=foo"))
+
+ contexts := []struct {
+ psOpts *psOptions
+ expectedAll bool
+ expectedSize bool
+ expectedLimit int
+ expectedFilters map[string]string
+ }{
+ {
+ psOpts: &psOptions{
+ all: true,
+ size: true,
+ last: 5,
+ filter: filters,
+ },
+ expectedAll: true,
+ expectedSize: true,
+ expectedLimit: 5,
+ expectedFilters: map[string]string{
+ "foo": "bar",
+ "baz": "foo",
+ },
+ },
+ {
+ psOpts: &psOptions{
+ all: true,
+ size: true,
+ last: -1,
+ nLatest: true,
+ },
+ expectedAll: true, + expectedSize: true, + expectedLimit: 1, + expectedFilters: make(map[string]string), + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}} {{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // Without .Size, size should be false + format: "{{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: false, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + } + + for _, c := range contexts { + options, err := buildContainerListOptions(c.psOpts) + assert.NilError(t, err) + + assert.Check(t, is.Equal(c.expectedAll, options.All)) + assert.Check(t, is.Equal(c.expectedSize, options.Size)) + assert.Check(t, is.Equal(c.expectedLimit, options.Limit)) + assert.Check(t, is.Equal(len(c.expectedFilters), options.Filters.Len())) + + for k, v := range c.expectedFilters { + f := options.Filters + if !f.ExactMatch(k, v) { + t.Fatalf("Expected filter with key %s to be %s but got %s", k, v, f.Get(k)) + } + } + } +} diff --git a/cli/cli/command/container/rename.go b/cli/cli/command/container/rename.go new file mode 100644 index 00000000..bc58ea20 --- /dev/null +++ b/cli/cli/command/container/rename.go @@ -0,0 +1,51 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type 
renameOptions struct { + oldName string + newName string +} + +// NewRenameCommand creates a new cobra.Command for `docker rename` +func NewRenameCommand(dockerCli command.Cli) *cobra.Command { + var opts renameOptions + + cmd := &cobra.Command{ + Use: "rename CONTAINER NEW_NAME", + Short: "Rename a container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.oldName = args[0] + opts.newName = args[1] + return runRename(dockerCli, &opts) + }, + } + return cmd +} + +func runRename(dockerCli command.Cli, opts *renameOptions) error { + ctx := context.Background() + + oldName := strings.TrimSpace(opts.oldName) + newName := strings.TrimSpace(opts.newName) + + if oldName == "" || newName == "" { + return errors.New("Error: Neither old nor new names may be empty") + } + + if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return errors.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/cli/cli/command/container/restart.go b/cli/cli/command/container/restart.go new file mode 100644 index 00000000..6e02ee46 --- /dev/null +++ b/cli/cli/command/container/restart.go @@ -0,0 +1,62 @@ +package container + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type restartOptions struct { + nSeconds int + nSecondsChanged bool + + containers []string +} + +// NewRestartCommand creates a new cobra.Command for `docker restart` +func NewRestartCommand(dockerCli command.Cli) *cobra.Command { + var opts restartOptions + + cmd := &cobra.Command{ + Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Restart one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.nSecondsChanged = cmd.Flags().Changed("time") + return 
runRestart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") + return cmd +} + +func runRestart(dockerCli command.Cli, opts *restartOptions) error { + ctx := context.Background() + var errs []string + var timeout *time.Duration + if opts.nSecondsChanged { + timeoutValue := time.Duration(opts.nSeconds) * time.Second + timeout = &timeoutValue + } + + for _, name := range opts.containers { + if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/rm.go b/cli/cli/command/container/rm.go new file mode 100644 index 00000000..2dcd4b6a --- /dev/null +++ b/cli/cli/command/container/rm.go @@ -0,0 +1,73 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type rmOptions struct { + rmVolumes bool + rmLink bool + force bool + + containers []string +} + +// NewRmCommand creates a new cobra.Command for `docker rm` +func NewRmCommand(dockerCli command.Cli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Remove one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runRm(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") + flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running 
container (uses SIGKILL)") + return cmd +} + +func runRm(dockerCli command.Cli, opts *rmOptions) error { + ctx := context.Background() + + var errs []string + options := types.ContainerRemoveOptions{ + RemoveVolumes: opts.rmVolumes, + RemoveLinks: opts.rmLink, + Force: opts.force, + } + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + container = strings.Trim(container, "/") + if container == "" { + return errors.New("Container name cannot be empty") + } + return dockerCli.Client().ContainerRemove(ctx, container, options) + }) + + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/run.go b/cli/cli/command/container/run.go new file mode 100644 index 00000000..2ce3f367 --- /dev/null +++ b/cli/cli/command/container/run.go @@ -0,0 +1,312 @@ +package container + +import ( + "context" + "fmt" + "io" + "net/http/httputil" + "os" + "runtime" + "strings" + "syscall" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type runOptions struct { + createOptions + detach bool + sigProxy bool + detachKeys string +} + +// NewRunCommand create a new `docker run` command +func NewRunCommand(dockerCli command.Cli) *cobra.Command { + var opts runOptions + var copts *containerOptions + + cmd := &cobra.Command{ + Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Run a command in a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, 
args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runRun(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + // These are flags not stored in Config/HostConfig + flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") + flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddPlatformFlag(flags, &opts.platform) + command.AddTrustVerificationFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + copts = addFlags(flags) + return cmd +} + +func runRun(dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copts *containerOptions) error { + proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(copts.env.GetAll())) + newEnv := []string{} + for k, v := range proxyConfig { + if v == nil { + newEnv = append(newEnv, k) + } else { + newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v)) + } + } + copts.env = *opts.NewListOptsRef(&newEnv, nil) + containerConfig, err := parse(flags, copts, dockerCli.ServerInfo().OSType) + // just in case the parse does not exit + if err != nil { + reportError(dockerCli.Err(), "run", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + if err = validateAPIVersion(containerConfig, dockerCli.Client().ClientVersion()); err != nil { + reportError(dockerCli.Err(), "run", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + return runContainer(dockerCli, ropts, copts, containerConfig) +} + +// nolint: gocyclo +func 
runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptions, containerConfig *containerConfig) error { + config := containerConfig.Config + hostConfig := containerConfig.HostConfig + stdout, stderr := dockerCli.Out(), dockerCli.Err() + client := dockerCli.Client() + + config.ArgsEscaped = false + + if !opts.detach { + if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if copts.attach.Len() != 0 { + return errors.New("Conflicting options: -a and -d") + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable sigProxy when in TTY mode + if config.Tty { + opts.sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. + if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize() + } + + ctx, cancelFun := context.WithCancel(context.Background()) + defer cancelFun() + + createResponse, err := createContainer(ctx, dockerCli, containerConfig, &opts.createOptions) + if err != nil { + reportError(stderr, "run", err.Error(), true) + return runStartContainerErr(err) + } + if opts.sigProxy { + sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID) + defer signal.StopCatch(sigc) + } + + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintln(stdout, createResponse.ID) + }() + } + attach := config.AttachStdin || config.AttachStdout || config.AttachStderr + if attach { + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + close, err 
:= attachContainer(ctx, dockerCli, &errCh, config, createResponse.ID) + + if err != nil { + return err + } + defer close() + } + + statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, copts.autoRemove) + + //start the container + if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { + // If we have hijackedIOStreamer, we should notify + // hijackedIOStreamer we are going to exit and wait + // to avoid the terminal are not restored. + if attach { + cancelFun() + <-errCh + } + + reportError(stderr, "run", err.Error(), false) + if copts.autoRemove { + // wait container to be removed + <-statusChan + } + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil { + fmt.Fprintln(stderr, "Error monitoring TTY size:", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + if _, ok := err.(term.EscapeError); ok { + // The user entered the detach escape sequence. + return nil + } + + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. 
+ if !config.AttachStdout && !config.AttachStderr {
+ // Detached mode: block until the container ID has been printed.
+ <-waitDisplayID
+ return nil
+ }
+
+ // Attached mode: propagate the container's exit status to our own.
+ status := <-statusChan
+ if status != 0 {
+ return cli.StatusError{StatusCode: status}
+ }
+ return nil
+}
+
+// attachContainer attaches the CLI's streams (stdin/stdout/stderr, as
+// selected by config) to the container identified by containerID. It starts
+// a goroutine that pumps the hijacked connection and reports its result on
+// a buffered channel stored into *errCh. The returned function closes the
+// attach response and must be called by the caller when done.
+func attachContainer(
+ ctx context.Context,
+ dockerCli command.Cli,
+ errCh *chan error,
+ config *container.Config,
+ containerID string,
+) (func(), error) {
+ stdout, stderr := dockerCli.Out(), dockerCli.Err()
+ var (
+ out, cerr io.Writer
+ in io.ReadCloser
+ )
+ if config.AttachStdin {
+ in = dockerCli.In()
+ }
+ if config.AttachStdout {
+ out = stdout
+ }
+ if config.AttachStderr {
+ if config.Tty {
+ // With a TTY the container's stderr is written to the same
+ // stream as stdout.
+ cerr = stdout
+ } else {
+ cerr = stderr
+ }
+ }
+
+ options := types.ContainerAttachOptions{
+ Stream: true,
+ Stdin: config.AttachStdin,
+ Stdout: config.AttachStdout,
+ Stderr: config.AttachStderr,
+ DetachKeys: dockerCli.ConfigFile().DetachKeys,
+ }
+
+ resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
+ if errAttach != nil && errAttach != httputil.ErrPersistEOF {
+ // ContainerAttach returning ErrPersistEOF (connection closed) means
+ // the server hit an error and put it in the hijacked connection;
+ // keep that error and read the detailed message from the hijacked
+ // connection later. Any other error is fatal here.
+ return nil, errAttach
+ }
+
+ // Buffered so the pump goroutine never blocks on send.
+ ch := make(chan error, 1)
+ *errCh = ch
+
+ go func() {
+ ch <- func() error {
+ streamer := hijackedIOStreamer{
+ streams: dockerCli,
+ inputStream: in,
+ outputStream: out,
+ errorStream: cerr,
+ resp: resp,
+ tty: config.Tty,
+ detachKeys: options.DetachKeys,
+ }
+
+ if errHijack := streamer.stream(ctx); errHijack != nil {
+ return errHijack
+ }
+ // Surface the deferred attach error (e.g. ErrPersistEOF), if any.
+ return errAttach
+ }()
+ }()
+ return resp.Close, nil
+}
+
+// reportError is a utility method that prints a user-friendly message
+// containing the error that occurred during parsing and a suggestion to get help
+func reportError(stderr io.Writer, name string, str string, withHelp bool) {
+ // Normalize the message to end with exactly one period.
+ str = strings.TrimSuffix(str, ".") + "."
+ if withHelp { + str += "\nSee '" + os.Args[0] + " " + name + " --help'." + } + fmt.Fprintf(stderr, "%s: %s\n", os.Args[0], str) +} + +// if container start fails with 'not found'/'no such' error, return 127 +// if container start fails with 'permission denied' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") + statusError := cli.StatusError{StatusCode: 125} + if strings.Contains(trimmedErr, "executable file not found") || + strings.Contains(trimmedErr, "no such file or directory") || + strings.Contains(trimmedErr, "system cannot find the file specified") { + statusError = cli.StatusError{StatusCode: 127} + } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { + statusError = cli.StatusError{StatusCode: 126} + } + + return statusError +} diff --git a/cli/cli/command/container/run_test.go b/cli/cli/command/container/run_test.go new file mode 100644 index 00000000..eecbe46c --- /dev/null +++ b/cli/cli/command/container/run_test.go @@ -0,0 +1,74 @@ +package container + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestRunLabel(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + createContainerFunc: func(_ *container.Config, _ *container.HostConfig, _ *network.NetworkingConfig, _ string) (container.ContainerCreateCreatedBody, error) { + return container.ContainerCreateCreatedBody{ + ID: "id", + }, nil + }, + Version: "1.36", + }) + cmd := NewRunCommand(cli) + cmd.SetArgs([]string{"--detach=true", "--label", "foo", "busybox"}) + assert.NilError(t, cmd.Execute()) +} + +func TestRunCommandWithContentTrustErrors(t *testing.T) { + testCases := []struct { + name string + 
args []string + expectedError string + notaryFunc test.NotaryClientFuncType + }{ + { + name: "offline-notary-server", + notaryFunc: notary.GetOfflineNotaryRepository, + expectedError: "client is offline", + args: []string{"image:tag"}, + }, + { + name: "uninitialized-notary-server", + notaryFunc: notary.GetUninitializedNotaryRepository, + expectedError: "remote trust data does not exist", + args: []string{"image:tag"}, + }, + { + name: "empty-notary-server", + notaryFunc: notary.GetEmptyTargetsNotaryRepository, + expectedError: "No valid trust data for tag", + args: []string{"image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + createContainerFunc: func(config *container.Config, + hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, + containerName string, + ) (container.ContainerCreateCreatedBody, error) { + return container.ContainerCreateCreatedBody{}, fmt.Errorf("shouldn't try to pull image") + }, + }, test.EnableContentTrust) + cli.SetNotaryClient(tc.notaryFunc) + cmd := NewRunCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + err := cmd.Execute() + assert.Assert(t, err != nil) + assert.Assert(t, is.Contains(cli.ErrBuffer().String(), tc.expectedError)) + } +} diff --git a/cli/cli/command/container/start.go b/cli/cli/command/container/start.go new file mode 100644 index 00000000..e3883028 --- /dev/null +++ b/cli/cli/command/container/start.go @@ -0,0 +1,202 @@ +package container + +import ( + "context" + "fmt" + "io" + "net/http/httputil" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type startOptions struct { + attach bool + openStdin bool + detachKeys string + checkpoint string + checkpointDir string + + containers []string +} + +// NewStartCommand creates a new 
cobra.Command for `docker start` +func NewStartCommand(dockerCli command.Cli) *cobra.Command { + var opts startOptions + + cmd := &cobra.Command{ + Use: "start [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Start one or more stopped containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") + flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint") + flags.SetAnnotation("checkpoint", "experimental", nil) + flags.SetAnnotation("checkpoint", "ostype", []string{"linux"}) + flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory") + flags.SetAnnotation("checkpoint-dir", "experimental", nil) + flags.SetAnnotation("checkpoint-dir", "ostype", []string{"linux"}) + return cmd +} + +// nolint: gocyclo +func runStart(dockerCli command.Cli, opts *startOptions) error { + ctx, cancelFun := context.WithCancel(context.Background()) + defer cancelFun() + + if opts.attach || opts.openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if len(opts.containers) > 1 { + return errors.New("you cannot start and attach multiple containers at once") + } + + // 2. Attach to the container. 
+ container := opts.containers[0] + c, err := dockerCli.Client().ContainerInspect(ctx, container) + if err != nil { + return err + } + + // We always use c.ID instead of container to maintain consistency during `docker start` + if !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, c.ID) + defer signal.StopCatch(sigc) + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: opts.openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + + if options.Stdin { + in = dockerCli.In() + } + + resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach return an ErrPersistEOF (connection closed) + // means server met an error and already put it in Hijacked connection, + // we would keep the error and read the detailed error message from hijacked connection + return errAttach + } + defer resp.Close() + + cErr := make(chan error, 1) + + go func() { + cErr <- func() error { + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: dockerCli.Out(), + errorStream: dockerCli.Err(), + resp: resp, + tty: c.Config.Tty, + detachKeys: options.DetachKeys, + } + + errHijack := streamer.stream(ctx) + if errHijack == nil { + return errAttach + } + return errHijack + }() + }() + + // 3. We should open a channel for receiving status code of the container + // no matter it's detached, removed on daemon side(--rm) or exit normally. + statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove) + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + + // 4. Start the container. 
+ if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil {
+ // Unblock the attach goroutine before bailing out, and, with --rm,
+ // wait for the daemon-side removal to complete.
+ cancelFun()
+ <-cErr
+ if c.HostConfig.AutoRemove {
+ // wait container to be removed
+ <-statusChan
+ }
+ return err
+ }
+
+ // 5. Wait for attachment to break.
+ if c.Config.Tty && dockerCli.Out().IsTerminal() {
+ if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil {
+ fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
+ }
+ }
+ if attachErr := <-cErr; attachErr != nil {
+ // Inspect attachErr, not the stale outer `err` (which is nil by this
+ // point): a detach escape sequence surfaces as a term.EscapeError on
+ // the attach goroutine and is a normal exit, not a failure.
+ if _, ok := attachErr.(term.EscapeError); ok {
+ // The user entered the detach escape sequence.
+ return nil
+ }
+ return attachErr
+ }
+
+ if status := <-statusChan; status != 0 {
+ return cli.StatusError{StatusCode: status}
+ }
+ } else if opts.checkpoint != "" {
+ // Checkpoint restore path: only a single container may be restored.
+ if len(opts.containers) > 1 {
+ return errors.New("you cannot restore multiple containers at once")
+ }
+ container := opts.containers[0]
+ startOptions := types.ContainerStartOptions{
+ CheckpointID: opts.checkpoint,
+ CheckpointDir: opts.checkpointDir,
+ }
+ return dockerCli.Client().ContainerStart(ctx, container, startOptions)
+
+ } else {
+ // We're not going to attach to anything.
+ // Start as many containers as we want.
+ return startContainersWithoutAttachments(ctx, dockerCli, opts.containers) + } + + return nil +} + +func startContainersWithoutAttachments(ctx context.Context, dockerCli command.Cli, containers []string) error { + var failedContainers []string + for _, container := range containers { + if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { + fmt.Fprintln(dockerCli.Err(), err) + failedContainers = append(failedContainers, container) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + + if len(failedContainers) > 0 { + return errors.Errorf("Error: failed to start containers: %s", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/cli/cli/command/container/stats.go b/cli/cli/command/container/stats.go new file mode 100644 index 00000000..e8309e33 --- /dev/null +++ b/cli/cli/command/container/stats.go @@ -0,0 +1,245 @@ +package container + +import ( + "context" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type statsOptions struct { + all bool + noStream bool + noTrunc bool + format string + containers []string +} + +// NewStatsCommand creates a new cobra.Command for `docker stats` +func NewStatsCommand(dockerCli command.Cli) *cobra.Command { + var opts statsOptions + + cmd := &cobra.Command{ + Use: "stats [OPTIONS] [CONTAINER...]", + Short: "Display a live stream of container(s) resource usage statistics", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStats(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") + 
flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + return cmd +} + +// runStats displays a live stream of resource usage statistics for one or more containers. +// This shows real-time information on CPU usage, memory usage, and network I/O. +// nolint: gocyclo +func runStats(dockerCli command.Cli, opts *statsOptions) error { + showAll := len(opts.containers) == 0 + closeChan := make(chan error) + + ctx := context.Background() + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + + eventq, errq := dockerCli.Client().Events(ctx, options) + + // Whether we successfully subscribed to eventq or not, we can now + // unblock the main goroutine. + close(started) + + for { + select { + case event := <-eventq: + c <- event + case err := <-errq: + closeChan <- err + return + } + } + } + + // Get the daemonOSType if not set already + if daemonOSType == "" { + svctx := context.Background() + sv, err := dockerCli.Client().ServerVersion(svctx) + if err != nil { + return err + } + daemonOSType = sv.Os + } + + // waitFirst is a WaitGroup to wait first stat data's reach for each container + waitFirst := &sync.WaitGroup{} + + cStats := stats{} + // getContainerList simulates creation event for all previously existing + // containers (only used when calling `docker stats` without arguments). 
+ getContainerList := func() { + options := types.ContainerListOptions{ + All: opts.all, + } + cs, err := dockerCli.Client().ContainerList(ctx, options) + if err != nil { + closeChan <- err + } + for _, container := range cs { + s := NewStats(container.ID[:12]) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + } + + if showAll { + // If no names were specified, start a long running goroutine which + // monitors container events. We make sure we're subscribed before + // retrieving the list of running containers to avoid a race where we + // would "miss" a creation. + started := make(chan struct{}) + eh := command.InitEventHandler() + eh.Handle("create", func(e events.Message) { + if opts.all { + s := NewStats(e.ID[:12]) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + }) + + eh.Handle("start", func(e events.Message) { + s := NewStats(e.ID[:12]) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + }) + + eh.Handle("die", func(e events.Message) { + if !opts.all { + cStats.remove(e.ID[:12]) + } + }) + + eventChan := make(chan events.Message) + go eh.Watch(eventChan) + go monitorContainerEvents(started, eventChan) + defer close(eventChan) + <-started + + // Start a short-lived goroutine to retrieve the initial list of + // containers. + getContainerList() + } else { + // Artificially send creation events for the containers we were asked to + // monitor (same code path than we use when monitoring all containers). + for _, name := range opts.containers { + s := NewStats(name) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + + // We don't expect any asynchronous errors: closeChan can be closed. + close(closeChan) + + // Do a quick pause to detect any error with the provided list of + // container names. 
+ time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + if err := c.GetError(); err != nil { + errs = append(errs, err.Error()) + } + } + cStats.mu.Unlock() + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + } + + // before print to screen, make sure each container get at least one valid stat data + waitFirst.Wait() + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().StatsFormat) > 0 { + format = dockerCli.ConfigFile().StatsFormat + } else { + format = formatter.TableFormatKey + } + } + statsCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewStatsFormat(format, daemonOSType), + } + cleanScreen := func() { + if !opts.noStream { + fmt.Fprint(dockerCli.Out(), "\033[2J") + fmt.Fprint(dockerCli.Out(), "\033[H") + } + } + + var err error + for range time.Tick(500 * time.Millisecond) { + cleanScreen() + ccstats := []StatsEntry{} + cStats.mu.Lock() + for _, c := range cStats.cs { + ccstats = append(ccstats, c.GetStatistics()) + } + cStats.mu.Unlock() + if err = statsFormatWrite(statsCtx, ccstats, daemonOSType, !opts.noTrunc); err != nil { + break + } + if len(cStats.cs) == 0 && !showAll { + break + } + if opts.noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shutdowns cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return err +} diff --git a/cli/cli/command/container/stats_helpers.go b/cli/cli/command/container/stats_helpers.go new file mode 100644 index 00000000..b2e59d5b --- /dev/null +++ b/cli/cli/command/container/stats_helpers.go @@ -0,0 +1,240 @@ +package container + +import ( + "context" + "encoding/json" + "io" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/pkg/errors" + 
"github.com/sirupsen/logrus" +) + +type stats struct { + mu sync.Mutex + cs []*Stats +} + +// daemonOSType is set once we have at least one stat for a container +// from the daemon. It is used to ensure we print the right header based +// on the daemon platform. +var daemonOSType string + +func (s *stats) add(cs *Stats) bool { + s.mu.Lock() + defer s.mu.Unlock() + if _, exists := s.isKnownContainer(cs.Container); !exists { + s.cs = append(s.cs, cs) + return true + } + return false +} + +func (s *stats) remove(id string) { + s.mu.Lock() + if i, exists := s.isKnownContainer(id); exists { + s.cs = append(s.cs[:i], s.cs[i+1:]...) + } + s.mu.Unlock() +} + +func (s *stats) isKnownContainer(cid string) (int, bool) { + for i, c := range s.cs { + if c.Container == cid { + return i, true + } + } + return -1, false +} + +func collect(ctx context.Context, s *Stats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { + logrus.Debugf("collecting stats for %s", s.Container) + var ( + getFirst bool + previousCPU uint64 + previousSystem uint64 + u = make(chan error, 1) + ) + + defer func() { + // if error happens and we get nothing of stats, release wait group whatever + if !getFirst { + getFirst = true + waitFirst.Done() + } + }() + + response, err := cli.ContainerStats(ctx, s.Container, streamStats) + if err != nil { + s.SetError(err) + return + } + defer response.Body.Close() + + dec := json.NewDecoder(response.Body) + go func() { + for { + var ( + v *types.StatsJSON + memPercent, cpuPercent float64 + blkRead, blkWrite uint64 // Only used on Linux + mem, memLimit float64 + pidsStatsCurrent uint64 + ) + + if err := dec.Decode(&v); err != nil { + dec = json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body)) + u <- err + if err == io.EOF { + break + } + time.Sleep(100 * time.Millisecond) + continue + } + + daemonOSType = response.OSType + + if daemonOSType != "windows" { + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = 
v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v) + blkRead, blkWrite = calculateBlockIO(v.BlkioStats) + mem = calculateMemUsageUnixNoCache(v.MemoryStats) + memLimit = float64(v.MemoryStats.Limit) + memPercent = calculateMemPercentUnixNoCache(memLimit, mem) + pidsStatsCurrent = v.PidsStats.Current + } else { + cpuPercent = calculateCPUPercentWindows(v) + blkRead = v.StorageStats.ReadSizeBytes + blkWrite = v.StorageStats.WriteSizeBytes + mem = float64(v.MemoryStats.PrivateWorkingSet) + } + netRx, netTx := calculateNetwork(v.Networks) + s.SetStatistics(StatsEntry{ + Name: v.Name, + ID: v.ID, + CPUPercentage: cpuPercent, + Memory: mem, + MemoryPercentage: memPercent, + MemoryLimit: memLimit, + NetworkRx: netRx, + NetworkTx: netTx, + BlockRead: float64(blkRead), + BlockWrite: float64(blkWrite), + PidsCurrent: pidsStatsCurrent, + }) + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the specified duration. 
+ s.SetErrorAndReset(errors.New("timeout waiting for stats")) + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + case err := <-u: + s.SetError(err) + if err == io.EOF { + break + } + if err != nil { + continue + } + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + } + if !streamStats { + return + } + } +} + +func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + onlineCPUs = float64(v.CPUStats.OnlineCPUs) + ) + + if onlineCPUs == 0.0 { + onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage)) + } + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0 + } + return cpuPercent +} + +func calculateCPUPercentWindows(v *types.StatsJSON) float64 { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + return float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + return 0.00 +} + +func calculateBlockIO(blkio types.BlkioStats) (uint64, uint64) { + var blkRead, blkWrite uint64 + for _, bioEntry := range blkio.IoServiceBytesRecursive { + if len(bioEntry.Op) == 0 { + continue + } + switch 
bioEntry.Op[0] { + case 'r', 'R': + blkRead = blkRead + bioEntry.Value + case 'w', 'W': + blkWrite = blkWrite + bioEntry.Value + } + } + return blkRead, blkWrite +} + +func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} + +// calculateMemUsageUnixNoCache calculate memory usage of the container. +// Page cache is intentionally excluded to avoid misinterpretation of the output. +func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { + return float64(mem.Usage - mem.Stats["cache"]) +} + +func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if limit != 0 { + return usedNoCache / limit * 100.0 + } + return 0 +} diff --git a/cli/cli/command/container/stats_helpers_test.go b/cli/cli/command/container/stats_helpers_test.go new file mode 100644 index 00000000..a9657e2e --- /dev/null +++ b/cli/cli/command/container/stats_helpers_test.go @@ -0,0 +1,47 @@ +package container + +import ( + "fmt" + "testing" + + "github.com/docker/docker/api/types" + "gotest.tools/assert" +) + +func TestCalculateMemUsageUnixNoCache(t *testing.T) { + // Given + stats := types.MemoryStats{Usage: 500, Stats: map[string]uint64{"cache": 400}} + + // When + result := calculateMemUsageUnixNoCache(stats) + + // Then + assert.Assert(t, inDelta(100.0, result, 1e-6)) +} + +func TestCalculateMemPercentUnixNoCache(t *testing.T) { + // Given + someLimit := float64(100.0) + noLimit := float64(0.0) + used := float64(70.0) + + // When and Then + t.Run("Limit is set", func(t *testing.T) { + result := calculateMemPercentUnixNoCache(someLimit, used) + assert.Assert(t, inDelta(70.0, result, 1e-6)) + }) + t.Run("No limit, no cgroup data", func(t *testing.T) { + result := 
calculateMemPercentUnixNoCache(noLimit, used) + assert.Assert(t, inDelta(0.0, result, 1e-6)) + }) +} + +func inDelta(x, y, delta float64) func() (bool, string) { + return func() (bool, string) { + diff := x - y + if diff < -delta || diff > delta { + return false, fmt.Sprintf("%f != %f within %f", x, y, delta) + } + return true, "" + } +} diff --git a/cli/cli/command/container/stats_unit_test.go b/cli/cli/command/container/stats_unit_test.go new file mode 100644 index 00000000..9aac0ab9 --- /dev/null +++ b/cli/cli/command/container/stats_unit_test.go @@ -0,0 +1,30 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestCalculateBlockIO(t *testing.T) { + blkio := types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{ + {Major: 8, Minor: 0, Op: "read", Value: 1234}, + {Major: 8, Minor: 1, Op: "read", Value: 4567}, + {Major: 8, Minor: 0, Op: "Read", Value: 6}, + {Major: 8, Minor: 1, Op: "Read", Value: 8}, + {Major: 8, Minor: 0, Op: "write", Value: 123}, + {Major: 8, Minor: 1, Op: "write", Value: 456}, + {Major: 8, Minor: 0, Op: "Write", Value: 6}, + {Major: 8, Minor: 1, Op: "Write", Value: 8}, + {Major: 8, Minor: 1, Op: "", Value: 456}, + }, + } + blkRead, blkWrite := calculateBlockIO(blkio) + if blkRead != 5815 { + t.Fatalf("blkRead = %d, want 5815", blkRead) + } + if blkWrite != 593 { + t.Fatalf("blkWrite = %d, want 593", blkWrite) + } +} diff --git a/cli/cli/command/container/stop.go b/cli/cli/command/container/stop.go new file mode 100644 index 00000000..e2991754 --- /dev/null +++ b/cli/cli/command/container/stop.go @@ -0,0 +1,67 @@ +package container + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type stopOptions struct { + time int + timeChanged bool + + containers []string +} + +// NewStopCommand creates a new cobra.Command for `docker stop` +func 
NewStopCommand(dockerCli command.Cli) *cobra.Command { + var opts stopOptions + + cmd := &cobra.Command{ + Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Stop one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.timeChanged = cmd.Flags().Changed("time") + return runStop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") + return cmd +} + +func runStop(dockerCli command.Cli, opts *stopOptions) error { + ctx := context.Background() + + var timeout *time.Duration + if opts.timeChanged { + timeoutValue := time.Duration(opts.time) * time.Second + timeout = &timeoutValue + } + + var errs []string + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error { + return dockerCli.Client().ContainerStop(ctx, id, timeout) + }) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/testdata/container-create-localhost-dns-ipv6.golden b/cli/cli/command/container/testdata/container-create-localhost-dns-ipv6.golden new file mode 100644 index 00000000..5c98b977 --- /dev/null +++ b/cli/cli/command/container/testdata/container-create-localhost-dns-ipv6.golden @@ -0,0 +1 @@ +WARNING: Localhost DNS setting (--dns=::1) may fail in containers. 
diff --git a/cli/cli/command/container/testdata/container-create-localhost-dns.golden b/cli/cli/command/container/testdata/container-create-localhost-dns.golden new file mode 100644 index 00000000..1c8b0e1f --- /dev/null +++ b/cli/cli/command/container/testdata/container-create-localhost-dns.golden @@ -0,0 +1 @@ +WARNING: Localhost DNS setting (--dns=127.0.0.11) may fail in containers. diff --git a/cli/cli/command/container/testdata/container-create-oom-kill-true-without-memory-limit.golden b/cli/cli/command/container/testdata/container-create-oom-kill-true-without-memory-limit.golden new file mode 100644 index 00000000..5fb6aeb4 --- /dev/null +++ b/cli/cli/command/container/testdata/container-create-oom-kill-true-without-memory-limit.golden @@ -0,0 +1 @@ +WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous. diff --git a/cli/cli/command/container/testdata/container-create-oom-kill-without-memory-limit.golden b/cli/cli/command/container/testdata/container-create-oom-kill-without-memory-limit.golden new file mode 100644 index 00000000..5fb6aeb4 --- /dev/null +++ b/cli/cli/command/container/testdata/container-create-oom-kill-without-memory-limit.golden @@ -0,0 +1 @@ +WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous. 
diff --git a/cli/cli/command/container/testdata/container-list-format-name-name.golden b/cli/cli/command/container/testdata/container-list-format-name-name.golden new file mode 100644 index 00000000..858ec961 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-format-name-name.golden @@ -0,0 +1,2 @@ +c1 c1 +c2 c2 diff --git a/cli/cli/command/container/testdata/container-list-format-with-arg.golden b/cli/cli/command/container/testdata/container-list-format-with-arg.golden new file mode 100644 index 00000000..782ace94 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-format-with-arg.golden @@ -0,0 +1,2 @@ +c1 value +c2 diff --git a/cli/cli/command/container/testdata/container-list-with-config-format.golden b/cli/cli/command/container/testdata/container-list-with-config-format.golden new file mode 100644 index 00000000..6333bf57 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-with-config-format.golden @@ -0,0 +1,2 @@ +c1 busybox:latest some.label=value +c2 busybox:latest foo=bar diff --git a/cli/cli/command/container/testdata/container-list-with-format.golden b/cli/cli/command/container/testdata/container-list-with-format.golden new file mode 100644 index 00000000..6333bf57 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-with-format.golden @@ -0,0 +1,2 @@ +c1 busybox:latest some.label=value +c2 busybox:latest foo=bar diff --git a/cli/cli/command/container/testdata/container-list-without-format-no-trunc.golden b/cli/cli/command/container/testdata/container-list-without-format-no-trunc.golden new file mode 100644 index 00000000..5b0d652e --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-without-format-no-trunc.golden @@ -0,0 +1,3 @@ +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +container_id busybox:latest "top" Less than a second ago Up 1 second c1 +container_id busybox:latest "top" Less than a second ago Up 1 second c2,foo/bar diff --git 
a/cli/cli/command/container/testdata/container-list-without-format.golden b/cli/cli/command/container/testdata/container-list-without-format.golden new file mode 100644 index 00000000..7acd4045 --- /dev/null +++ b/cli/cli/command/container/testdata/container-list-without-format.golden @@ -0,0 +1,6 @@ +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +container_id busybox:latest "top" Less than a second ago Up 1 second c1 +container_id busybox:latest "top" Less than a second ago Up 1 second c2 +container_id busybox:latest "top" Less than a second ago Up 1 second 80-82/tcp c3 +container_id busybox:latest "top" Less than a second ago Up 1 second 81/udp c4 +container_id busybox:latest "top" Less than a second ago Up 1 second 8.8.8.8:82->82/tcp c5 diff --git a/cli/cli/command/container/testdata/utf16.env b/cli/cli/command/container/testdata/utf16.env new file mode 100755 index 0000000000000000000000000000000000000000..3a73358fffbc0d5d3d4df985ccf2f4a1a29cdb2a GIT binary patch literal 54 ucmezW&yB$!2yGdh7#tab7 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/update.go b/cli/cli/command/container/update.go new file mode 100644 index 00000000..8669936b --- /dev/null +++ b/cli/cli/command/container/update.go @@ -0,0 +1,140 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + containertypes "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type updateOptions struct { + blkioWeight uint16 + cpuPeriod int64 + cpuQuota int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpusetCpus string + cpusetMems string + cpuShares int64 + memory opts.MemBytes + memoryReservation opts.MemBytes + memorySwap opts.MemSwapBytes + kernelMemory opts.MemBytes + restartPolicy string + pidsLimit int64 + cpus opts.NanoCPUs + + nFlag int + + containers []string 
+} + +// NewUpdateCommand creates a new cobra.Command for `docker update` +func NewUpdateCommand(dockerCli command.Cli) *cobra.Command { + var options updateOptions + + cmd := &cobra.Command{ + Use: "update [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Update configuration of one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.containers = args + options.nFlag = cmd.Flags().NFlag() + return runUpdate(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.Uint16Var(&options.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&options.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) + flags.Int64Var(&options.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) + flags.StringVar(&options.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.VarP(&options.memory, "memory", "m", "Memory limit") + flags.Var(&options.memoryReservation, "memory-reservation", "Memory soft limit") + flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Var(&options.kernelMemory, "kernel-memory", "Kernel memory limit") + flags.StringVar(&options.restartPolicy, "restart", "", "Restart policy to apply when a container exits") + 
flags.Int64Var(&options.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") + flags.SetAnnotation("pids-limit", "version", []string{"1.40"}) + + flags.Var(&options.cpus, "cpus", "Number of CPUs") + flags.SetAnnotation("cpus", "version", []string{"1.29"}) + + return cmd +} + +func runUpdate(dockerCli command.Cli, options *updateOptions) error { + var err error + + if options.nFlag == 0 { + return errors.New("you must provide one or more flags when using this command") + } + + var restartPolicy containertypes.RestartPolicy + if options.restartPolicy != "" { + restartPolicy, err = opts.ParseRestartPolicy(options.restartPolicy) + if err != nil { + return err + } + } + + resources := containertypes.Resources{ + BlkioWeight: options.blkioWeight, + CpusetCpus: options.cpusetCpus, + CpusetMems: options.cpusetMems, + CPUShares: options.cpuShares, + Memory: options.memory.Value(), + MemoryReservation: options.memoryReservation.Value(), + MemorySwap: options.memorySwap.Value(), + KernelMemory: options.kernelMemory.Value(), + CPUPeriod: options.cpuPeriod, + CPUQuota: options.cpuQuota, + CPURealtimePeriod: options.cpuRealtimePeriod, + CPURealtimeRuntime: options.cpuRealtimeRuntime, + NanoCPUs: options.cpus.Value(), + } + + if options.pidsLimit != 0 { + resources.PidsLimit = &options.pidsLimit + } + + updateConfig := containertypes.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + ctx := context.Background() + + var ( + warns []string + errs []string + ) + for _, container := range options.containers { + r, err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintln(dockerCli.Out(), container) + } + warns = append(warns, r.Warnings...) 
+ } + if len(warns) > 0 { + fmt.Fprintln(dockerCli.Out(), strings.Join(warns, "\n")) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/container/utils.go b/cli/cli/command/container/utils.go new file mode 100644 index 00000000..f3292614 --- /dev/null +++ b/cli/cli/command/container/utils.go @@ -0,0 +1,162 @@ +package container + +import ( + "context" + "strconv" + + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/sirupsen/logrus" +) + +func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int { + if len(containerID) == 0 { + // containerID can never be empty + panic("Internal Error: waitExitOrRemoved needs a containerID as parameter") + } + + // Older versions used the Events API, and even older versions did not + // support server-side removal. This legacyWaitExitOrRemoved method + // preserves that old behavior and any issues it may have. 
+ if versions.LessThan(dockerCli.Client().ClientVersion(), "1.30") { + return legacyWaitExitOrRemoved(ctx, dockerCli, containerID, waitRemove) + } + + condition := container.WaitConditionNextExit + if waitRemove { + condition = container.WaitConditionRemoved + } + + resultC, errC := dockerCli.Client().ContainerWait(ctx, containerID, condition) + + statusC := make(chan int) + go func() { + select { + case result := <-resultC: + if result.Error != nil { + logrus.Errorf("Error waiting for container: %v", result.Error.Message) + statusC <- 125 + } else { + statusC <- int(result.StatusCode) + } + case err := <-errC: + logrus.Errorf("error waiting for container: %v", err) + statusC <- 125 + } + }() + + return statusC +} + +func legacyWaitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int { + var removeErr error + statusChan := make(chan int) + exitCode := 125 + + // Get events via Events API + f := filters.NewArgs() + f.Add("type", "container") + f.Add("container", containerID) + options := types.EventsOptions{ + Filters: f, + } + eventCtx, cancel := context.WithCancel(ctx) + eventq, errq := dockerCli.Client().Events(eventCtx, options) + + eventProcessor := func(e events.Message) bool { + stopProcessing := false + switch e.Status { + case "die": + if v, ok := e.Actor.Attributes["exitCode"]; ok { + code, cerr := strconv.Atoi(v) + if cerr != nil { + logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr) + } else { + exitCode = code + } + } + if !waitRemove { + stopProcessing = true + } else { + // If we are talking to an older daemon, `AutoRemove` is not supported. 
+ // We need to fall back to the old behavior, which is client-side removal + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") { + go func() { + removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true}) + if removeErr != nil { + logrus.Errorf("error removing container: %v", removeErr) + cancel() // cancel the event Q + } + }() + } + } + case "detach": + exitCode = 0 + stopProcessing = true + case "destroy": + stopProcessing = true + } + return stopProcessing + } + + go func() { + defer func() { + statusChan <- exitCode // must always send an exit code or the caller will block + cancel() + }() + + for { + select { + case <-eventCtx.Done(): + if removeErr != nil { + return + } + case evt := <-eventq: + if eventProcessor(evt) { + return + } + case err := <-errq: + logrus.Errorf("error getting events from daemon: %v", err) + return + } + } + }() + + return statusChan +} + +func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error { + if len(containers) == 0 { + return nil + } + const defaultParallel int = 50 + sem := make(chan struct{}, defaultParallel) + errChan := make(chan error) + + // make sure result is printed in correct order + output := map[string]chan error{} + for _, c := range containers { + output[c] = make(chan error, 1) + } + go func() { + for _, c := range containers { + err := <-output[c] + errChan <- err + } + }() + + go func() { + for _, c := range containers { + sem <- struct{}{} // Wait for active queue sem to drain. 
+ go func(container string) { + output[container] <- op(ctx, container) + <-sem + }(c) + } + }() + return errChan +} diff --git a/cli/cli/command/container/utils_test.go b/cli/cli/command/container/utils_test.go new file mode 100644 index 00000000..970549c0 --- /dev/null +++ b/cli/cli/command/container/utils_test.go @@ -0,0 +1,70 @@ +package container + +import ( + "context" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func waitFn(cid string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + var res container.ContainerWaitOKBody + + go func() { + switch { + case strings.Contains(cid, "exit-code-42"): + res.StatusCode = 42 + resC <- res + case strings.Contains(cid, "non-existent"): + err := errors.Errorf("No such container: %v", cid) + errC <- err + case strings.Contains(cid, "wait-error"): + res.Error = &container.ContainerWaitOKBodyError{Message: "removal failed"} + resC <- res + default: + // normal exit + resC <- res + } + }() + + return resC, errC +} + +func TestWaitExitOrRemoved(t *testing.T) { + testcases := []struct { + cid string + exitCode int + }{ + { + cid: "normal-container", + exitCode: 0, + }, + { + cid: "give-me-exit-code-42", + exitCode: 42, + }, + { + cid: "i-want-a-wait-error", + exitCode: 125, + }, + { + cid: "non-existent-container-id", + exitCode: 125, + }, + } + + client := test.NewFakeCli(&fakeClient{waitFunc: waitFn, Version: api.DefaultVersion}) + for _, testcase := range testcases { + statusC := waitExitOrRemoved(context.Background(), client, testcase.cid, true) + exitCode := <-statusC + assert.Check(t, is.Equal(testcase.exitCode, exitCode)) + } +} diff --git a/cli/cli/command/container/wait.go b/cli/cli/command/container/wait.go new file mode 100644 index 
00000000..8602e253 --- /dev/null +++ b/cli/cli/command/container/wait.go @@ -0,0 +1,53 @@ +package container + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type waitOptions struct { + containers []string +} + +// NewWaitCommand creates a new cobra.Command for `docker wait` +func NewWaitCommand(dockerCli command.Cli) *cobra.Command { + var opts waitOptions + + cmd := &cobra.Command{ + Use: "wait CONTAINER [CONTAINER...]", + Short: "Block until one or more containers stop, then print their exit codes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runWait(dockerCli, &opts) + }, + } + + return cmd +} + +func runWait(dockerCli command.Cli, opts *waitOptions) error { + ctx := context.Background() + + var errs []string + for _, container := range opts.containers { + resultC, errC := dockerCli.Client().ContainerWait(ctx, container, "") + + select { + case result := <-resultC: + fmt.Fprintf(dockerCli.Out(), "%d\n", result.StatusCode) + case err := <-errC: + errs = append(errs, err.Error()) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/context.go b/cli/cli/command/context.go new file mode 100644 index 00000000..0de75dc9 --- /dev/null +++ b/cli/cli/command/context.go @@ -0,0 +1,27 @@ +package command + +import ( + "errors" + + "github.com/docker/cli/cli/context/store" +) + +// DockerContext is a typed representation of what we put in Context metadata +type DockerContext struct { + Description string `json:",omitempty"` + StackOrchestrator Orchestrator `json:",omitempty"` +} + +// GetDockerContext extracts metadata from stored context metadata +func GetDockerContext(storeMetadata store.Metadata) (DockerContext, error) { + if storeMetadata.Metadata == nil { + // can happen if we save endpoints before 
assigning a context metadata + // it is totally valid, and we should return a default initialized value + return DockerContext{}, nil + } + res, ok := storeMetadata.Metadata.(DockerContext) + if !ok { + return DockerContext{}, errors.New("context metadata is not a valid DockerContext") + } + return res, nil +} diff --git a/cli/cli/command/context/cmd.go b/cli/cli/command/context/cmd.go new file mode 100644 index 00000000..1b689845 --- /dev/null +++ b/cli/cli/command/context/cmd.go @@ -0,0 +1,49 @@ +package context + +import ( + "errors" + "fmt" + "regexp" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewContextCommand returns the context cli subcommand +func NewContextCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "context", + Short: "Manage contexts", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newListCommand(dockerCli), + newUseCommand(dockerCli), + newExportCommand(dockerCli), + newImportCommand(dockerCli), + newRemoveCommand(dockerCli), + newUpdateCommand(dockerCli), + newInspectCommand(dockerCli), + ) + return cmd +} + +const restrictedNamePattern = "^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$" + +var restrictedNameRegEx = regexp.MustCompile(restrictedNamePattern) + +func validateContextName(name string) error { + if name == "" { + return errors.New("context name cannot be empty") + } + if name == "default" { + return errors.New(`"default" is a reserved context name`) + } + if !restrictedNameRegEx.MatchString(name) { + return fmt.Errorf("context name %q is invalid, names are validated against regexp %q", name, restrictedNamePattern) + } + return nil +} diff --git a/cli/cli/command/context/create.go b/cli/cli/command/context/create.go new file mode 100644 index 00000000..9d5865a7 --- /dev/null +++ b/cli/cli/command/context/create.go @@ -0,0 +1,199 @@ +package context + +import ( + "bytes" + "fmt" + 
"text/tabwriter" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/kubernetes" + "github.com/docker/cli/cli/context/store" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// CreateOptions are the options used for creating a context +type CreateOptions struct { + Name string + Description string + DefaultStackOrchestrator string + Docker map[string]string + Kubernetes map[string]string + From string +} + +func longCreateDescription() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("Create a context\n\nDocker endpoint config:\n\n") + tw := tabwriter.NewWriter(buf, 20, 1, 3, ' ', 0) + fmt.Fprintln(tw, "NAME\tDESCRIPTION") + for _, d := range dockerConfigKeysDescriptions { + fmt.Fprintf(tw, "%s\t%s\n", d.name, d.description) + } + tw.Flush() + buf.WriteString("\nKubernetes endpoint config:\n\n") + tw = tabwriter.NewWriter(buf, 20, 1, 3, ' ', 0) + fmt.Fprintln(tw, "NAME\tDESCRIPTION") + for _, d := range kubernetesConfigKeysDescriptions { + fmt.Fprintf(tw, "%s\t%s\n", d.name, d.description) + } + tw.Flush() + buf.WriteString("\nExample:\n\n$ docker context create my-context --description \"some description\" --docker \"host=tcp://myserver:2376,ca=~/ca-file,cert=~/cert-file,key=~/key-file\"\n") + return buf.String() +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + opts := &CreateOptions{} + cmd := &cobra.Command{ + Use: "create [OPTIONS] CONTEXT", + Short: "Create a context", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Name = args[0] + return RunCreate(dockerCli, opts) + }, + Long: longCreateDescription(), + } + flags := cmd.Flags() + flags.StringVar(&opts.Description, "description", "", "Description of the context") + flags.StringVar( + &opts.DefaultStackOrchestrator, + "default-stack-orchestrator", "", + "Default orchestrator for stack operations to use with this context 
(swarm|kubernetes|all)") + flags.StringToStringVar(&opts.Docker, "docker", nil, "set the docker endpoint") + flags.StringToStringVar(&opts.Kubernetes, "kubernetes", nil, "set the kubernetes endpoint") + flags.StringVar(&opts.From, "from", "", "create context from a named context") + return cmd +} + +// RunCreate creates a Docker context +func RunCreate(cli command.Cli, o *CreateOptions) error { + s := cli.ContextStore() + if err := checkContextNameForCreation(s, o.Name); err != nil { + return err + } + stackOrchestrator, err := command.NormalizeOrchestrator(o.DefaultStackOrchestrator) + if err != nil { + return errors.Wrap(err, "unable to parse default-stack-orchestrator") + } + switch { + case o.From == "" && o.Docker == nil && o.Kubernetes == nil: + err = createFromExistingContext(s, cli.CurrentContext(), stackOrchestrator, o) + case o.From != "": + err = createFromExistingContext(s, o.From, stackOrchestrator, o) + default: + err = createNewContext(o, stackOrchestrator, cli, s) + } + if err == nil { + fmt.Fprintln(cli.Out(), o.Name) + fmt.Fprintf(cli.Err(), "Successfully created context %q\n", o.Name) + } + return err +} + +func createNewContext(o *CreateOptions, stackOrchestrator command.Orchestrator, cli command.Cli, s store.Writer) error { + if o.Docker == nil { + return errors.New("docker endpoint configuration is required") + } + contextMetadata := newContextMetadata(stackOrchestrator, o) + contextTLSData := store.ContextTLSData{ + Endpoints: make(map[string]store.EndpointTLSData), + } + dockerEP, dockerTLS, err := getDockerEndpointMetadataAndTLS(cli, o.Docker) + if err != nil { + return errors.Wrap(err, "unable to create docker endpoint config") + } + contextMetadata.Endpoints[docker.DockerEndpoint] = dockerEP + if dockerTLS != nil { + contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerTLS + } + if o.Kubernetes != nil { + kubernetesEP, kubernetesTLS, err := getKubernetesEndpointMetadataAndTLS(cli, o.Kubernetes) + if err != nil { + return 
errors.Wrap(err, "unable to create kubernetes endpoint config") + } + if kubernetesEP == nil && stackOrchestrator.HasKubernetes() { + return errors.Errorf("cannot specify orchestrator %q without configuring a Kubernetes endpoint", stackOrchestrator) + } + if kubernetesEP != nil { + contextMetadata.Endpoints[kubernetes.KubernetesEndpoint] = kubernetesEP + } + if kubernetesTLS != nil { + contextTLSData.Endpoints[kubernetes.KubernetesEndpoint] = *kubernetesTLS + } + } + if err := validateEndpointsAndOrchestrator(contextMetadata); err != nil { + return err + } + if err := s.CreateOrUpdate(contextMetadata); err != nil { + return err + } + if err := s.ResetTLSMaterial(o.Name, &contextTLSData); err != nil { + return err + } + return nil +} + +func checkContextNameForCreation(s store.Reader, name string) error { + if err := validateContextName(name); err != nil { + return err + } + if _, err := s.GetMetadata(name); !store.IsErrContextDoesNotExist(err) { + if err != nil { + return errors.Wrap(err, "error while getting existing contexts") + } + return errors.Errorf("context %q already exists", name) + } + return nil +} + +func createFromExistingContext(s store.ReaderWriter, fromContextName string, stackOrchestrator command.Orchestrator, o *CreateOptions) error { + if len(o.Docker) != 0 || len(o.Kubernetes) != 0 { + return errors.New("cannot use --docker or --kubernetes flags when --from is set") + } + reader := store.Export(fromContextName, &descriptionAndOrchestratorStoreDecorator{ + Reader: s, + description: o.Description, + orchestrator: stackOrchestrator, + }) + defer reader.Close() + return store.Import(o.Name, s, reader) +} + +type descriptionAndOrchestratorStoreDecorator struct { + store.Reader + description string + orchestrator command.Orchestrator +} + +func (d *descriptionAndOrchestratorStoreDecorator) GetMetadata(name string) (store.Metadata, error) { + c, err := d.Reader.GetMetadata(name) + if err != nil { + return c, err + } + typedContext, err := 
command.GetDockerContext(c) + if err != nil { + return c, err + } + if d.description != "" { + typedContext.Description = d.description + } + if d.orchestrator != command.Orchestrator("") { + typedContext.StackOrchestrator = d.orchestrator + } + c.Metadata = typedContext + return c, nil +} + +func newContextMetadata(stackOrchestrator command.Orchestrator, o *CreateOptions) store.Metadata { + return store.Metadata{ + Endpoints: make(map[string]interface{}), + Metadata: command.DockerContext{ + Description: o.Description, + StackOrchestrator: stackOrchestrator, + }, + Name: o.Name, + } +} diff --git a/cli/cli/command/context/create_test.go b/cli/cli/command/context/create_test.go new file mode 100644 index 00000000..013e82d0 --- /dev/null +++ b/cli/cli/command/context/create_test.go @@ -0,0 +1,365 @@ +package context + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/kubernetes" + "github.com/docker/cli/cli/context/store" + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + "gotest.tools/env" +) + +func makeFakeCli(t *testing.T, opts ...func(*test.FakeCli)) (*test.FakeCli, func()) { + dir, err := ioutil.TempDir("", t.Name()) + assert.NilError(t, err) + storeConfig := store.NewConfig( + func() interface{} { return &command.DockerContext{} }, + store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }), + store.EndpointTypeGetter(kubernetes.KubernetesEndpoint, func() interface{} { return &kubernetes.EndpointMeta{} }), + ) + store := &command.ContextStoreWithDefault{ + Store: store.New(dir, storeConfig), + Resolver: func() (*command.DefaultContext, error) { + return &command.DefaultContext{ + Meta: store.Metadata{ + Endpoints: map[string]interface{}{ + docker.DockerEndpoint: docker.EndpointMeta{ + Host: "unix:///var/run/docker.sock", + }, + }, + 
Metadata: command.DockerContext{ + Description: "", + StackOrchestrator: command.OrchestratorSwarm, + }, + Name: command.DefaultContextName, + }, + TLS: store.ContextTLSData{}, + }, nil + }, + } + cleanup := func() { + os.RemoveAll(dir) + } + result := test.NewFakeCli(nil, opts...) + for _, o := range opts { + o(result) + } + result.SetContextStore(store) + return result, cleanup +} + +func withCliConfig(configFile *configfile.ConfigFile) func(*test.FakeCli) { + return func(m *test.FakeCli) { + m.SetConfigFile(configFile) + } +} + +func TestCreateInvalids(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + assert.NilError(t, cli.ContextStore().CreateOrUpdate(store.Metadata{Name: "existing-context"})) + tests := []struct { + options CreateOptions + expecterErr string + }{ + { + expecterErr: `context name cannot be empty`, + }, + { + options: CreateOptions{ + Name: "default", + }, + expecterErr: `"default" is a reserved context name`, + }, + { + options: CreateOptions{ + Name: " ", + }, + expecterErr: `context name " " is invalid`, + }, + { + options: CreateOptions{ + Name: "existing-context", + }, + expecterErr: `context "existing-context" already exists`, + }, + { + options: CreateOptions{ + Name: "invalid-docker-host", + Docker: map[string]string{ + keyHost: "some///invalid/host", + }, + }, + expecterErr: `unable to parse docker host`, + }, + { + options: CreateOptions{ + Name: "invalid-orchestrator", + DefaultStackOrchestrator: "invalid", + }, + expecterErr: `specified orchestrator "invalid" is invalid, please use either kubernetes, swarm or all`, + }, + { + options: CreateOptions{ + Name: "orchestrator-kubernetes-no-endpoint", + DefaultStackOrchestrator: "kubernetes", + Docker: map[string]string{}, + }, + expecterErr: `cannot specify orchestrator "kubernetes" without configuring a Kubernetes endpoint`, + }, + { + options: CreateOptions{ + Name: "orchestrator-all-no-endpoint", + DefaultStackOrchestrator: "all", + Docker: map[string]string{}, + }, 
+ expecterErr: `cannot specify orchestrator "all" without configuring a Kubernetes endpoint`, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.options.Name, func(t *testing.T) { + err := RunCreate(cli, &tc.options) + assert.ErrorContains(t, err, tc.expecterErr) + }) + } +} + +func assertContextCreateLogging(t *testing.T, cli *test.FakeCli, n string) { + assert.Equal(t, n+"\n", cli.OutBuffer().String()) + assert.Equal(t, fmt.Sprintf("Successfully created context %q\n", n), cli.ErrBuffer().String()) +} + +func TestCreateOrchestratorSwarm(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + + err := RunCreate(cli, &CreateOptions{ + Name: "test", + DefaultStackOrchestrator: "swarm", + Docker: map[string]string{}, + }) + assert.NilError(t, err) + assertContextCreateLogging(t, cli, "test") +} + +func TestCreateOrchestratorEmpty(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + + err := RunCreate(cli, &CreateOptions{ + Name: "test", + Docker: map[string]string{}, + }) + assert.NilError(t, err) + assertContextCreateLogging(t, cli, "test") +} + +func validateTestKubeEndpoint(t *testing.T, s store.Reader, name string) { + t.Helper() + ctxMetadata, err := s.GetMetadata(name) + assert.NilError(t, err) + kubeMeta := ctxMetadata.Endpoints[kubernetes.KubernetesEndpoint].(kubernetes.EndpointMeta) + kubeEP, err := kubeMeta.WithTLSData(s, name) + assert.NilError(t, err) + assert.Equal(t, "https://someserver", kubeEP.Host) + assert.Equal(t, "the-ca", string(kubeEP.TLSData.CA)) + assert.Equal(t, "the-cert", string(kubeEP.TLSData.Cert)) + assert.Equal(t, "the-key", string(kubeEP.TLSData.Key)) +} + +func createTestContextWithKube(t *testing.T, cli command.Cli) { + t.Helper() + revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig") + defer revert() + + err := RunCreate(cli, &CreateOptions{ + Name: "test", + DefaultStackOrchestrator: "all", + Kubernetes: map[string]string{ + keyFrom: "default", + }, + Docker: map[string]string{}, + 
}) + assert.NilError(t, err) +} + +func TestCreateOrchestratorAllKubernetesEndpointFromCurrent(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKube(t, cli) + assertContextCreateLogging(t, cli, "test") + validateTestKubeEndpoint(t, cli.ContextStore(), "test") +} + +func TestCreateFromContext(t *testing.T) { + cases := []struct { + name string + description string + orchestrator string + expectedDescription string + docker map[string]string + kubernetes map[string]string + expectedOrchestrator command.Orchestrator + }{ + { + name: "no-override", + expectedDescription: "original description", + expectedOrchestrator: command.OrchestratorSwarm, + }, + { + name: "override-description", + description: "new description", + expectedDescription: "new description", + expectedOrchestrator: command.OrchestratorSwarm, + }, + { + name: "override-orchestrator", + orchestrator: "kubernetes", + expectedDescription: "original description", + expectedOrchestrator: command.OrchestratorKubernetes, + }, + } + + cli, cleanup := makeFakeCli(t) + defer cleanup() + revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig") + defer revert() + cli.ResetOutputBuffers() + assert.NilError(t, RunCreate(cli, &CreateOptions{ + Name: "original", + Description: "original description", + Docker: map[string]string{ + keyHost: "tcp://42.42.42.42:2375", + }, + Kubernetes: map[string]string{ + keyFrom: "default", + }, + DefaultStackOrchestrator: "swarm", + })) + assertContextCreateLogging(t, cli, "original") + + cli.ResetOutputBuffers() + assert.NilError(t, RunCreate(cli, &CreateOptions{ + Name: "dummy", + Description: "dummy description", + Docker: map[string]string{ + keyHost: "tcp://24.24.24.24:2375", + }, + Kubernetes: map[string]string{ + keyFrom: "default", + }, + DefaultStackOrchestrator: "swarm", + })) + assertContextCreateLogging(t, cli, "dummy") + + cli.SetCurrentContext("dummy") + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + 
cli.ResetOutputBuffers() + err := RunCreate(cli, &CreateOptions{ + From: "original", + Name: c.name, + Description: c.description, + DefaultStackOrchestrator: c.orchestrator, + Docker: c.docker, + Kubernetes: c.kubernetes, + }) + assert.NilError(t, err) + assertContextCreateLogging(t, cli, c.name) + newContext, err := cli.ContextStore().GetMetadata(c.name) + assert.NilError(t, err) + newContextTyped, err := command.GetDockerContext(newContext) + assert.NilError(t, err) + dockerEndpoint, err := docker.EndpointFromContext(newContext) + assert.NilError(t, err) + kubeEndpoint := kubernetes.EndpointFromContext(newContext) + assert.Check(t, kubeEndpoint != nil) + assert.Equal(t, newContextTyped.Description, c.expectedDescription) + assert.Equal(t, newContextTyped.StackOrchestrator, c.expectedOrchestrator) + assert.Equal(t, dockerEndpoint.Host, "tcp://42.42.42.42:2375") + assert.Equal(t, kubeEndpoint.Host, "https://someserver") + }) + } +} + +func TestCreateFromCurrent(t *testing.T) { + cases := []struct { + name string + description string + orchestrator string + expectedDescription string + expectedOrchestrator command.Orchestrator + }{ + { + name: "no-override", + expectedDescription: "original description", + expectedOrchestrator: command.OrchestratorSwarm, + }, + { + name: "override-description", + description: "new description", + expectedDescription: "new description", + expectedOrchestrator: command.OrchestratorSwarm, + }, + { + name: "override-orchestrator", + orchestrator: "kubernetes", + expectedDescription: "original description", + expectedOrchestrator: command.OrchestratorKubernetes, + }, + } + + cli, cleanup := makeFakeCli(t) + defer cleanup() + revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig") + defer revert() + cli.ResetOutputBuffers() + assert.NilError(t, RunCreate(cli, &CreateOptions{ + Name: "original", + Description: "original description", + Docker: map[string]string{ + keyHost: "tcp://42.42.42.42:2375", + }, + Kubernetes: 
map[string]string{ + keyFrom: "default", + }, + DefaultStackOrchestrator: "swarm", + })) + assertContextCreateLogging(t, cli, "original") + + cli.SetCurrentContext("original") + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + cli.ResetOutputBuffers() + err := RunCreate(cli, &CreateOptions{ + Name: c.name, + Description: c.description, + DefaultStackOrchestrator: c.orchestrator, + }) + assert.NilError(t, err) + assertContextCreateLogging(t, cli, c.name) + newContext, err := cli.ContextStore().GetMetadata(c.name) + assert.NilError(t, err) + newContextTyped, err := command.GetDockerContext(newContext) + assert.NilError(t, err) + dockerEndpoint, err := docker.EndpointFromContext(newContext) + assert.NilError(t, err) + kubeEndpoint := kubernetes.EndpointFromContext(newContext) + assert.Check(t, kubeEndpoint != nil) + assert.Equal(t, newContextTyped.Description, c.expectedDescription) + assert.Equal(t, newContextTyped.StackOrchestrator, c.expectedOrchestrator) + assert.Equal(t, dockerEndpoint.Host, "tcp://42.42.42.42:2375") + assert.Equal(t, kubeEndpoint.Host, "https://someserver") + }) + } +} diff --git a/cli/cli/command/context/export-import_test.go b/cli/cli/command/context/export-import_test.go new file mode 100644 index 00000000..75fc468b --- /dev/null +++ b/cli/cli/command/context/export-import_test.go @@ -0,0 +1,110 @@ +package context + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/cli/cli/streams" + "gotest.tools/assert" +) + +func TestExportImportWithFile(t *testing.T) { + contextDir, err := ioutil.TempDir("", t.Name()+"context") + assert.NilError(t, err) + defer os.RemoveAll(contextDir) + contextFile := filepath.Join(contextDir, "exported") + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKube(t, cli) + cli.ErrBuffer().Reset() + assert.NilError(t, RunExport(cli, &ExportOptions{ + ContextName: "test", + Dest: contextFile, + })) + assert.Equal(t, 
cli.ErrBuffer().String(), fmt.Sprintf("Written file %q\n", contextFile)) + cli.OutBuffer().Reset() + cli.ErrBuffer().Reset() + assert.NilError(t, RunImport(cli, "test2", contextFile)) + context1, err := cli.ContextStore().GetMetadata("test") + assert.NilError(t, err) + context2, err := cli.ContextStore().GetMetadata("test2") + assert.NilError(t, err) + assert.DeepEqual(t, context1.Endpoints, context2.Endpoints) + assert.DeepEqual(t, context1.Metadata, context2.Metadata) + assert.Equal(t, "test", context1.Name) + assert.Equal(t, "test2", context2.Name) + + assert.Equal(t, "test2\n", cli.OutBuffer().String()) + assert.Equal(t, "Successfully imported context \"test2\"\n", cli.ErrBuffer().String()) +} + +func TestExportImportPipe(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKube(t, cli) + cli.ErrBuffer().Reset() + cli.OutBuffer().Reset() + assert.NilError(t, RunExport(cli, &ExportOptions{ + ContextName: "test", + Dest: "-", + })) + assert.Equal(t, cli.ErrBuffer().String(), "") + cli.SetIn(streams.NewIn(ioutil.NopCloser(bytes.NewBuffer(cli.OutBuffer().Bytes())))) + cli.OutBuffer().Reset() + cli.ErrBuffer().Reset() + assert.NilError(t, RunImport(cli, "test2", "-")) + context1, err := cli.ContextStore().GetMetadata("test") + assert.NilError(t, err) + context2, err := cli.ContextStore().GetMetadata("test2") + assert.NilError(t, err) + assert.DeepEqual(t, context1.Endpoints, context2.Endpoints) + assert.DeepEqual(t, context1.Metadata, context2.Metadata) + assert.Equal(t, "test", context1.Name) + assert.Equal(t, "test2", context2.Name) + + assert.Equal(t, "test2\n", cli.OutBuffer().String()) + assert.Equal(t, "Successfully imported context \"test2\"\n", cli.ErrBuffer().String()) +} + +func TestExportKubeconfig(t *testing.T) { + contextDir, err := ioutil.TempDir("", t.Name()+"context") + assert.NilError(t, err) + defer os.RemoveAll(contextDir) + contextFile := filepath.Join(contextDir, "exported") + cli, cleanup := makeFakeCli(t) + 
defer cleanup() + createTestContextWithKube(t, cli) + cli.ErrBuffer().Reset() + assert.NilError(t, RunExport(cli, &ExportOptions{ + ContextName: "test", + Dest: contextFile, + Kubeconfig: true, + })) + assert.Equal(t, cli.ErrBuffer().String(), fmt.Sprintf("Written file %q\n", contextFile)) + assert.NilError(t, RunCreate(cli, &CreateOptions{ + Name: "test2", + Kubernetes: map[string]string{ + keyKubeconfig: contextFile, + }, + Docker: map[string]string{}, + })) + validateTestKubeEndpoint(t, cli.ContextStore(), "test2") +} + +func TestExportExistingFile(t *testing.T) { + contextDir, err := ioutil.TempDir("", t.Name()+"context") + assert.NilError(t, err) + defer os.RemoveAll(contextDir) + contextFile := filepath.Join(contextDir, "exported") + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKube(t, cli) + cli.ErrBuffer().Reset() + assert.NilError(t, ioutil.WriteFile(contextFile, []byte{}, 0644)) + err = RunExport(cli, &ExportOptions{ContextName: "test", Dest: contextFile}) + assert.Assert(t, os.IsExist(err)) +} diff --git a/cli/cli/command/context/export.go b/cli/cli/command/context/export.go new file mode 100644 index 00000000..fd66a5d4 --- /dev/null +++ b/cli/cli/command/context/export.go @@ -0,0 +1,110 @@ +package context + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/context/kubernetes" + "github.com/docker/cli/cli/context/store" + "github.com/spf13/cobra" + "k8s.io/client-go/tools/clientcmd" +) + +// ExportOptions are the options used for exporting a context +type ExportOptions struct { + Kubeconfig bool + ContextName string + Dest string +} + +func newExportCommand(dockerCli command.Cli) *cobra.Command { + opts := &ExportOptions{} + cmd := &cobra.Command{ + Use: "export [OPTIONS] CONTEXT [FILE|-]", + Short: "Export a context to a tar or kubeconfig file", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args 
[]string) error { + opts.ContextName = args[0] + if len(args) == 2 { + opts.Dest = args[1] + } else { + opts.Dest = opts.ContextName + if opts.Kubeconfig { + opts.Dest += ".kubeconfig" + } else { + opts.Dest += ".dockercontext" + } + } + return RunExport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.Kubeconfig, "kubeconfig", false, "Export as a kubeconfig file") + return cmd +} + +func writeTo(dockerCli command.Cli, reader io.Reader, dest string) error { + var writer io.Writer + var printDest bool + if dest == "-" { + if dockerCli.Out().IsTerminal() { + return errors.New("cowardly refusing to export to a terminal, please specify a file path") + } + writer = dockerCli.Out() + } else { + f, err := os.OpenFile(dest, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer f.Close() + writer = f + printDest = true + } + if _, err := io.Copy(writer, reader); err != nil { + return err + } + if printDest { + fmt.Fprintf(dockerCli.Err(), "Written file %q\n", dest) + } + return nil +} + +// RunExport exports a Docker context +func RunExport(dockerCli command.Cli, opts *ExportOptions) error { + if err := validateContextName(opts.ContextName); err != nil && opts.ContextName != command.DefaultContextName { + return err + } + ctxMeta, err := dockerCli.ContextStore().GetMetadata(opts.ContextName) + if err != nil { + return err + } + if !opts.Kubeconfig { + reader := store.Export(opts.ContextName, dockerCli.ContextStore()) + defer reader.Close() + return writeTo(dockerCli, reader, opts.Dest) + } + kubernetesEndpointMeta := kubernetes.EndpointFromContext(ctxMeta) + if kubernetesEndpointMeta == nil { + return fmt.Errorf("context %q has no kubernetes endpoint", opts.ContextName) + } + kubernetesEndpoint, err := kubernetesEndpointMeta.WithTLSData(dockerCli.ContextStore(), opts.ContextName) + if err != nil { + return err + } + kubeConfig := kubernetesEndpoint.KubernetesConfig() + rawCfg, err := kubeConfig.RawConfig() + if err != nil 
{ + return err + } + data, err := clientcmd.Write(rawCfg) + if err != nil { + return err + } + return writeTo(dockerCli, bytes.NewBuffer(data), opts.Dest) +} diff --git a/cli/cli/command/context/import.go b/cli/cli/command/context/import.go new file mode 100644 index 00000000..c09f8f89 --- /dev/null +++ b/cli/cli/command/context/import.go @@ -0,0 +1,51 @@ +package context + +import ( + "fmt" + "io" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/context/store" + "github.com/spf13/cobra" +) + +func newImportCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "import CONTEXT FILE|-", + Short: "Import a context from a tar or zip file", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return RunImport(dockerCli, args[0], args[1]) + }, + } + return cmd +} + +// RunImport imports a Docker context +func RunImport(dockerCli command.Cli, name string, source string) error { + if err := checkContextNameForCreation(dockerCli.ContextStore(), name); err != nil { + return err + } + + var reader io.Reader + if source == "-" { + reader = dockerCli.In() + } else { + f, err := os.Open(source) + if err != nil { + return err + } + defer f.Close() + reader = f + } + + if err := store.Import(name, dockerCli.ContextStore(), reader); err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), name) + fmt.Fprintf(dockerCli.Err(), "Successfully imported context %q\n", name) + return nil +} diff --git a/cli/cli/command/context/inspect.go b/cli/cli/command/context/inspect.go new file mode 100644 index 00000000..2145845a --- /dev/null +++ b/cli/cli/command/context/inspect.go @@ -0,0 +1,64 @@ +package context + +import ( + "errors" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/cli/cli/context/store" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + refs []string +} + +// 
newInspectCommand creates a new cobra.Command for `docker image inspect` +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] [CONTEXT] [CONTEXT...]", + Short: "Display detailed information on one or more contexts", + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + if len(opts.refs) == 0 { + if dockerCli.CurrentContext() == "" { + return errors.New("no context specified") + } + opts.refs = []string{dockerCli.CurrentContext()} + } + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + getRefFunc := func(ref string) (interface{}, []byte, error) { + c, err := dockerCli.ContextStore().GetMetadata(ref) + if err != nil { + return nil, nil, err + } + tlsListing, err := dockerCli.ContextStore().ListTLSFiles(ref) + if err != nil { + return nil, nil, err + } + return contextWithTLSListing{ + Metadata: c, + TLSMaterial: tlsListing, + Storage: dockerCli.ContextStore().GetStorageInfo(ref), + }, nil, nil + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} + +type contextWithTLSListing struct { + store.Metadata + TLSMaterial map[string]store.EndpointFiles + Storage store.StorageInfo +} diff --git a/cli/cli/command/context/inspect_test.go b/cli/cli/command/context/inspect_test.go new file mode 100644 index 00000000..592206a5 --- /dev/null +++ b/cli/cli/command/context/inspect_test.go @@ -0,0 +1,24 @@ +package context + +import ( + "strings" + "testing" + + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestInspect(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "current", "all") + cli.OutBuffer().Reset() + assert.NilError(t, runInspect(cli, inspectOptions{ + 
refs: []string{"current"}, + })) + expected := string(golden.Get(t, "inspect.golden")) + si := cli.ContextStore().GetStorageInfo("current") + expected = strings.Replace(expected, "", strings.Replace(si.MetadataPath, `\`, `\\`, -1), 1) + expected = strings.Replace(expected, "", strings.Replace(si.TLSPath, `\`, `\\`, -1), 1) + assert.Equal(t, cli.OutBuffer().String(), expected) +} diff --git a/cli/cli/command/context/list.go b/cli/cli/command/context/list.go new file mode 100644 index 00000000..4c540bb2 --- /dev/null +++ b/cli/cli/command/context/list.go @@ -0,0 +1,96 @@ +package context + +import ( + "fmt" + "os" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/context/docker" + kubecontext "github.com/docker/cli/cli/context/kubernetes" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type listOptions struct { + format string + quiet bool +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + opts := &listOptions{} + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List contexts", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.format, "format", "", "Pretty-print contexts using a Go template") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show context names") + return cmd +} + +func runList(dockerCli command.Cli, opts *listOptions) error { + if opts.format == "" { + opts.format = formatter.TableFormatKey + } + curContext := dockerCli.CurrentContext() + contextMap, err := dockerCli.ContextStore().List() + if err != nil { + return err + } + var contexts []*formatter.ClientContext + for _, rawMeta := range contextMap { + meta, err := command.GetDockerContext(rawMeta) + if err != nil { + return err + } + dockerEndpoint, err := docker.EndpointFromContext(rawMeta) + if err != nil 
{ + return err + } + kubernetesEndpoint := kubecontext.EndpointFromContext(rawMeta) + kubEndpointText := "" + if kubernetesEndpoint != nil { + kubEndpointText = fmt.Sprintf("%s (%s)", kubernetesEndpoint.Host, kubernetesEndpoint.DefaultNamespace) + } + if rawMeta.Name == command.DefaultContextName { + meta.Description = "Current DOCKER_HOST based configuration" + } + desc := formatter.ClientContext{ + Name: rawMeta.Name, + Current: rawMeta.Name == curContext, + Description: meta.Description, + StackOrchestrator: string(meta.StackOrchestrator), + DockerEndpoint: dockerEndpoint.Host, + KubernetesEndpoint: kubEndpointText, + } + contexts = append(contexts, &desc) + } + sort.Slice(contexts, func(i, j int) bool { + return sortorder.NaturalLess(contexts[i].Name, contexts[j].Name) + }) + if err := format(dockerCli, opts, contexts); err != nil { + return err + } + if os.Getenv("DOCKER_HOST") != "" { + fmt.Fprint(dockerCli.Err(), "Warning: DOCKER_HOST environment variable overrides the active context. 
"+ + "To use a context, either set the global --context flag, or unset DOCKER_HOST environment variable.\n") + } + return nil +} + +func format(dockerCli command.Cli, opts *listOptions, contexts []*formatter.ClientContext) error { + contextCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewClientContextFormat(opts.format, opts.quiet), + } + return formatter.ClientContextWrite(contextCtx, contexts) +} diff --git a/cli/cli/command/context/list_test.go b/cli/cli/command/context/list_test.go new file mode 100644 index 00000000..df4534dc --- /dev/null +++ b/cli/cli/command/context/list_test.go @@ -0,0 +1,47 @@ +package context + +import ( + "testing" + + "github.com/docker/cli/cli/command" + "gotest.tools/assert" + "gotest.tools/env" + "gotest.tools/golden" +) + +func createTestContextWithKubeAndSwarm(t *testing.T, cli command.Cli, name string, orchestrator string) { + revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig") + defer revert() + + err := RunCreate(cli, &CreateOptions{ + Name: name, + DefaultStackOrchestrator: orchestrator, + Description: "description of " + name, + Kubernetes: map[string]string{keyFrom: "default"}, + Docker: map[string]string{keyHost: "https://someswarmserver"}, + }) + assert.NilError(t, err) +} + +func TestList(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "current", "all") + createTestContextWithKubeAndSwarm(t, cli, "other", "all") + createTestContextWithKubeAndSwarm(t, cli, "unset", "unset") + cli.SetCurrentContext("current") + cli.OutBuffer().Reset() + assert.NilError(t, runList(cli, &listOptions{})) + golden.Assert(t, cli.OutBuffer().String(), "list.golden") +} + +func TestListQuiet(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "current", "all") + createTestContextWithKubeAndSwarm(t, cli, "other", "all") + cli.SetCurrentContext("current") + cli.OutBuffer().Reset() + 
assert.NilError(t, runList(cli, &listOptions{quiet: true})) + golden.Assert(t, cli.OutBuffer().String(), "quiet-list.golden") +} diff --git a/cli/cli/command/context/options.go b/cli/cli/command/context/options.go new file mode 100644 index 00000000..97792639 --- /dev/null +++ b/cli/cli/command/context/options.go @@ -0,0 +1,219 @@ +package context + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/context" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/kubernetes" + "github.com/docker/cli/cli/context/store" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/homedir" + "github.com/pkg/errors" +) + +const ( + keyFrom = "from" + keyHost = "host" + keyCA = "ca" + keyCert = "cert" + keyKey = "key" + keySkipTLSVerify = "skip-tls-verify" + keyKubeconfig = "config-file" + keyKubecontext = "context-override" + keyKubenamespace = "namespace-override" +) + +type configKeyDescription struct { + name string + description string +} + +var ( + allowedDockerConfigKeys = map[string]struct{}{ + keyFrom: {}, + keyHost: {}, + keyCA: {}, + keyCert: {}, + keyKey: {}, + keySkipTLSVerify: {}, + } + allowedKubernetesConfigKeys = map[string]struct{}{ + keyFrom: {}, + keyKubeconfig: {}, + keyKubecontext: {}, + keyKubenamespace: {}, + } + dockerConfigKeysDescriptions = []configKeyDescription{ + { + name: keyFrom, + description: "Copy named context's Docker endpoint configuration", + }, + { + name: keyHost, + description: "Docker endpoint on which to connect", + }, + { + name: keyCA, + description: "Trust certs signed only by this CA", + }, + { + name: keyCert, + description: "Path to TLS certificate file", + }, + { + name: keyKey, + description: "Path to TLS key file", + }, + { + name: keySkipTLSVerify, + description: "Skip TLS certificate validation", + }, + } + kubernetesConfigKeysDescriptions = []configKeyDescription{ + { + name: keyFrom, + description: 
"Copy named context's Kubernetes endpoint configuration", + }, + { + name: keyKubeconfig, + description: "Path to a Kubernetes config file", + }, + { + name: keyKubecontext, + description: "Overrides the context set in the kubernetes config file", + }, + { + name: keyKubenamespace, + description: "Overrides the namespace set in the kubernetes config file", + }, + } +) + +func parseBool(config map[string]string, name string) (bool, error) { + strVal, ok := config[name] + if !ok { + return false, nil + } + res, err := strconv.ParseBool(strVal) + return res, errors.Wrap(err, name) +} + +func validateConfig(config map[string]string, allowedKeys map[string]struct{}) error { + var errs []string + for k := range config { + if _, ok := allowedKeys[k]; !ok { + errs = append(errs, fmt.Sprintf("%s: unrecognized config key", k)) + } + } + if len(errs) == 0 { + return nil + } + return errors.New(strings.Join(errs, "\n")) +} + +func getDockerEndpoint(dockerCli command.Cli, config map[string]string) (docker.Endpoint, error) { + if err := validateConfig(config, allowedDockerConfigKeys); err != nil { + return docker.Endpoint{}, err + } + if contextName, ok := config[keyFrom]; ok { + metadata, err := dockerCli.ContextStore().GetMetadata(contextName) + if err != nil { + return docker.Endpoint{}, err + } + if ep, ok := metadata.Endpoints[docker.DockerEndpoint].(docker.EndpointMeta); ok { + return docker.Endpoint{EndpointMeta: ep}, nil + } + return docker.Endpoint{}, errors.Errorf("unable to get endpoint from context %q", contextName) + } + tlsData, err := context.TLSDataFromFiles(config[keyCA], config[keyCert], config[keyKey]) + if err != nil { + return docker.Endpoint{}, err + } + skipTLSVerify, err := parseBool(config, keySkipTLSVerify) + if err != nil { + return docker.Endpoint{}, err + } + ep := docker.Endpoint{ + EndpointMeta: docker.EndpointMeta{ + Host: config[keyHost], + SkipTLSVerify: skipTLSVerify, + }, + TLSData: tlsData, + } + // try to resolve a docker client, validating 
the configuration + opts, err := ep.ClientOpts() + if err != nil { + return docker.Endpoint{}, errors.Wrap(err, "invalid docker endpoint options") + } + if _, err := client.NewClientWithOpts(opts...); err != nil { + return docker.Endpoint{}, errors.Wrap(err, "unable to apply docker endpoint options") + } + return ep, nil +} + +func getDockerEndpointMetadataAndTLS(dockerCli command.Cli, config map[string]string) (docker.EndpointMeta, *store.EndpointTLSData, error) { + ep, err := getDockerEndpoint(dockerCli, config) + if err != nil { + return docker.EndpointMeta{}, nil, err + } + return ep.EndpointMeta, ep.TLSData.ToStoreTLSData(), nil +} + +func getKubernetesEndpoint(dockerCli command.Cli, config map[string]string) (*kubernetes.Endpoint, error) { + if err := validateConfig(config, allowedKubernetesConfigKeys); err != nil { + return nil, err + } + if len(config) == 0 { + return nil, nil + } + if contextName, ok := config[keyFrom]; ok { + ctxMeta, err := dockerCli.ContextStore().GetMetadata(contextName) + if err != nil { + return nil, err + } + endpointMeta := kubernetes.EndpointFromContext(ctxMeta) + if endpointMeta != nil { + res, err := endpointMeta.WithTLSData(dockerCli.ContextStore(), dockerCli.CurrentContext()) + if err != nil { + return nil, err + } + return &res, nil + } + + // fallback to env-based kubeconfig + kubeconfig := os.Getenv("KUBECONFIG") + if kubeconfig == "" { + kubeconfig = filepath.Join(homedir.Get(), ".kube/config") + } + ep, err := kubernetes.FromKubeConfig(kubeconfig, "", "") + if err != nil { + return nil, err + } + return &ep, nil + } + if config[keyKubeconfig] != "" { + ep, err := kubernetes.FromKubeConfig(config[keyKubeconfig], config[keyKubecontext], config[keyKubenamespace]) + if err != nil { + return nil, err + } + return &ep, nil + } + return nil, nil +} + +func getKubernetesEndpointMetadataAndTLS(dockerCli command.Cli, config map[string]string) (*kubernetes.EndpointMeta, *store.EndpointTLSData, error) { + ep, err := 
getKubernetesEndpoint(dockerCli, config) + if err != nil { + return nil, nil, err + } + if ep == nil { + return nil, nil, err + } + return &ep.EndpointMeta, ep.TLSData.ToStoreTLSData(), nil +} diff --git a/cli/cli/command/context/remove.go b/cli/cli/command/context/remove.go new file mode 100644 index 00000000..59126e50 --- /dev/null +++ b/cli/cli/command/context/remove.go @@ -0,0 +1,68 @@ +package context + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// RemoveOptions are the options used to remove contexts +type RemoveOptions struct { + Force bool +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts RemoveOptions + cmd := &cobra.Command{ + Use: "rm CONTEXT [CONTEXT...]", + Aliases: []string{"remove"}, + Short: "Remove one or more contexts", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return RunRemove(dockerCli, opts, args) + }, + } + cmd.Flags().BoolVarP(&opts.Force, "force", "f", false, "Force the removal of a context in use") + return cmd +} + +// RunRemove removes one or more contexts +func RunRemove(dockerCli command.Cli, opts RemoveOptions, names []string) error { + var errs []string + currentCtx := dockerCli.CurrentContext() + for _, name := range names { + if name == "default" { + errs = append(errs, `default: context "default" cannot be removed`) + } else if err := doRemove(dockerCli, name, name == currentCtx, opts.Force); err != nil { + errs = append(errs, fmt.Sprintf("%s: %s", name, err)) + } else { + fmt.Fprintln(dockerCli.Out(), name) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} + +func doRemove(dockerCli command.Cli, name string, isCurrent, force bool) error { + if _, err := dockerCli.ContextStore().GetMetadata(name); err != nil { + return err + } + if isCurrent { + if !force { + return errors.New("context is in use, set -f flag to 
force remove") + } + // fallback to DOCKER_HOST + cfg := dockerCli.ConfigFile() + cfg.CurrentContext = "" + if err := cfg.Save(); err != nil { + return err + } + } + return dockerCli.ContextStore().Remove(name) +} diff --git a/cli/cli/command/context/remove_test.go b/cli/cli/command/context/remove_test.go new file mode 100644 index 00000000..3297cc79 --- /dev/null +++ b/cli/cli/command/context/remove_test.go @@ -0,0 +1,73 @@ +package context + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/context/store" + "gotest.tools/assert" +) + +func TestRemove(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "current", "all") + createTestContextWithKubeAndSwarm(t, cli, "other", "all") + assert.NilError(t, RunRemove(cli, RemoveOptions{}, []string{"other"})) + _, err := cli.ContextStore().GetMetadata("current") + assert.NilError(t, err) + _, err = cli.ContextStore().GetMetadata("other") + assert.Check(t, store.IsErrContextDoesNotExist(err)) +} + +func TestRemoveNotAContext(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "current", "all") + createTestContextWithKubeAndSwarm(t, cli, "other", "all") + err := RunRemove(cli, RemoveOptions{}, []string{"not-a-context"}) + assert.ErrorContains(t, err, `context "not-a-context" does not exist`) +} + +func TestRemoveCurrent(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "current", "all") + createTestContextWithKubeAndSwarm(t, cli, "other", "all") + cli.SetCurrentContext("current") + err := RunRemove(cli, RemoveOptions{}, []string{"current"}) + assert.ErrorContains(t, err, "current: context is in use, set -f flag to force remove") +} + +func TestRemoveCurrentForce(t *testing.T) { + configDir, err := ioutil.TempDir("", 
t.Name()+"config") + assert.NilError(t, err) + defer os.RemoveAll(configDir) + configFilePath := filepath.Join(configDir, "config.json") + testCfg := configfile.New(configFilePath) + testCfg.CurrentContext = "current" + assert.NilError(t, testCfg.Save()) + + cli, cleanup := makeFakeCli(t, withCliConfig(testCfg)) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "current", "all") + createTestContextWithKubeAndSwarm(t, cli, "other", "all") + cli.SetCurrentContext("current") + assert.NilError(t, RunRemove(cli, RemoveOptions{Force: true}, []string{"current"})) + reloadedConfig, err := config.Load(configDir) + assert.NilError(t, err) + assert.Equal(t, "", reloadedConfig.CurrentContext) +} + +func TestRemoveDefault(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "other", "all") + cli.SetCurrentContext("current") + err := RunRemove(cli, RemoveOptions{}, []string{"default"}) + assert.ErrorContains(t, err, `default: context "default" cannot be removed`) +} diff --git a/cli/cli/command/context/testdata/inspect.golden b/cli/cli/command/context/testdata/inspect.golden new file mode 100644 index 00000000..d520b4f9 --- /dev/null +++ b/cli/cli/command/context/testdata/inspect.golden @@ -0,0 +1,31 @@ +[ + { + "Name": "current", + "Metadata": { + "Description": "description of current", + "StackOrchestrator": "all" + }, + "Endpoints": { + "docker": { + "Host": "https://someswarmserver", + "SkipTLSVerify": false + }, + "kubernetes": { + "Host": "https://someserver", + "SkipTLSVerify": false, + "DefaultNamespace": "default" + } + }, + "TLSMaterial": { + "kubernetes": [ + "ca.pem", + "cert.pem", + "key.pem" + ] + }, + "Storage": { + "MetadataPath": "", + "TLSPath": "" + } + } +] diff --git a/cli/cli/command/context/testdata/list.golden b/cli/cli/command/context/testdata/list.golden new file mode 100644 index 00000000..a07c22f3 --- /dev/null +++ b/cli/cli/command/context/testdata/list.golden @@ -0,0 +1,5 @@ 
+NAME DESCRIPTION DOCKER ENDPOINT KUBERNETES ENDPOINT ORCHESTRATOR +current * description of current https://someswarmserver https://someserver (default) all +default Current DOCKER_HOST based configuration unix:///var/run/docker.sock swarm +other description of other https://someswarmserver https://someserver (default) all +unset description of unset https://someswarmserver https://someserver (default) diff --git a/cli/cli/command/context/testdata/quiet-list.golden b/cli/cli/command/context/testdata/quiet-list.golden new file mode 100644 index 00000000..dd00383f --- /dev/null +++ b/cli/cli/command/context/testdata/quiet-list.golden @@ -0,0 +1,3 @@ +current +default +other diff --git a/cli/cli/command/context/testdata/test-kubeconfig b/cli/cli/command/context/testdata/test-kubeconfig new file mode 100644 index 00000000..f6baf8e8 --- /dev/null +++ b/cli/cli/command/context/testdata/test-kubeconfig @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: dGhlLWNh + server: https://someserver + name: test-cluster +contexts: +- context: + cluster: test-cluster + user: test-user + name: test +current-context: test +kind: Config +preferences: {} +users: +- name: test-user + user: + client-certificate-data: dGhlLWNlcnQ= + client-key-data: dGhlLWtleQ== diff --git a/cli/cli/command/context/update.go b/cli/cli/command/context/update.go new file mode 100644 index 00000000..9165bb30 --- /dev/null +++ b/cli/cli/command/context/update.go @@ -0,0 +1,144 @@ +package context + +import ( + "bytes" + "fmt" + "text/tabwriter" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/kubernetes" + "github.com/docker/cli/cli/context/store" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// UpdateOptions are the options used to update a context +type UpdateOptions struct { + Name string + Description string + DefaultStackOrchestrator string + Docker 
map[string]string + Kubernetes map[string]string +} + +func longUpdateDescription() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("Update a context\n\nDocker endpoint config:\n\n") + tw := tabwriter.NewWriter(buf, 20, 1, 3, ' ', 0) + fmt.Fprintln(tw, "NAME\tDESCRIPTION") + for _, d := range dockerConfigKeysDescriptions { + fmt.Fprintf(tw, "%s\t%s\n", d.name, d.description) + } + tw.Flush() + buf.WriteString("\nKubernetes endpoint config:\n\n") + tw = tabwriter.NewWriter(buf, 20, 1, 3, ' ', 0) + fmt.Fprintln(tw, "NAME\tDESCRIPTION") + for _, d := range kubernetesConfigKeysDescriptions { + fmt.Fprintf(tw, "%s\t%s\n", d.name, d.description) + } + tw.Flush() + buf.WriteString("\nExample:\n\n$ docker context update my-context --description \"some description\" --docker \"host=tcp://myserver:2376,ca=~/ca-file,cert=~/cert-file,key=~/key-file\"\n") + return buf.String() +} + +func newUpdateCommand(dockerCli command.Cli) *cobra.Command { + opts := &UpdateOptions{} + cmd := &cobra.Command{ + Use: "update [OPTIONS] CONTEXT", + Short: "Update a context", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Name = args[0] + return RunUpdate(dockerCli, opts) + }, + Long: longUpdateDescription(), + } + flags := cmd.Flags() + flags.StringVar(&opts.Description, "description", "", "Description of the context") + flags.StringVar( + &opts.DefaultStackOrchestrator, + "default-stack-orchestrator", "", + "Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)") + flags.StringToStringVar(&opts.Docker, "docker", nil, "set the docker endpoint") + flags.StringToStringVar(&opts.Kubernetes, "kubernetes", nil, "set the kubernetes endpoint") + return cmd +} + +// RunUpdate updates a Docker context +func RunUpdate(cli command.Cli, o *UpdateOptions) error { + if err := validateContextName(o.Name); err != nil { + return err + } + s := cli.ContextStore() + c, err := s.GetMetadata(o.Name) + if err != nil { + return err 
+ } + dockerContext, err := command.GetDockerContext(c) + if err != nil { + return err + } + if o.DefaultStackOrchestrator != "" { + stackOrchestrator, err := command.NormalizeOrchestrator(o.DefaultStackOrchestrator) + if err != nil { + return errors.Wrap(err, "unable to parse default-stack-orchestrator") + } + dockerContext.StackOrchestrator = stackOrchestrator + } + if o.Description != "" { + dockerContext.Description = o.Description + } + + c.Metadata = dockerContext + + tlsDataToReset := make(map[string]*store.EndpointTLSData) + + if o.Docker != nil { + dockerEP, dockerTLS, err := getDockerEndpointMetadataAndTLS(cli, o.Docker) + if err != nil { + return errors.Wrap(err, "unable to create docker endpoint config") + } + c.Endpoints[docker.DockerEndpoint] = dockerEP + tlsDataToReset[docker.DockerEndpoint] = dockerTLS + } + if o.Kubernetes != nil { + kubernetesEP, kubernetesTLS, err := getKubernetesEndpointMetadataAndTLS(cli, o.Kubernetes) + if err != nil { + return errors.Wrap(err, "unable to create kubernetes endpoint config") + } + if kubernetesEP == nil { + delete(c.Endpoints, kubernetes.KubernetesEndpoint) + } else { + c.Endpoints[kubernetes.KubernetesEndpoint] = kubernetesEP + tlsDataToReset[kubernetes.KubernetesEndpoint] = kubernetesTLS + } + } + if err := validateEndpointsAndOrchestrator(c); err != nil { + return err + } + if err := s.CreateOrUpdate(c); err != nil { + return err + } + for ep, tlsData := range tlsDataToReset { + if err := s.ResetEndpointTLSMaterial(o.Name, ep, tlsData); err != nil { + return err + } + } + + fmt.Fprintln(cli.Out(), o.Name) + fmt.Fprintf(cli.Err(), "Successfully updated context %q\n", o.Name) + return nil +} + +func validateEndpointsAndOrchestrator(c store.Metadata) error { + dockerContext, err := command.GetDockerContext(c) + if err != nil { + return err + } + if _, ok := c.Endpoints[kubernetes.KubernetesEndpoint]; !ok && dockerContext.StackOrchestrator.HasKubernetes() { + return errors.Errorf("cannot specify orchestrator %q 
without configuring a Kubernetes endpoint", dockerContext.StackOrchestrator) + } + return nil +} diff --git a/cli/cli/command/context/update_test.go b/cli/cli/command/context/update_test.go new file mode 100644 index 00000000..68cf75d8 --- /dev/null +++ b/cli/cli/command/context/update_test.go @@ -0,0 +1,102 @@ +package context + +import ( + "testing" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/kubernetes" + "gotest.tools/assert" + "gotest.tools/assert/cmp" +) + +func TestUpdateDescriptionOnly(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + err := RunCreate(cli, &CreateOptions{ + Name: "test", + DefaultStackOrchestrator: "swarm", + Docker: map[string]string{}, + }) + assert.NilError(t, err) + cli.OutBuffer().Reset() + cli.ErrBuffer().Reset() + assert.NilError(t, RunUpdate(cli, &UpdateOptions{ + Name: "test", + Description: "description", + })) + c, err := cli.ContextStore().GetMetadata("test") + assert.NilError(t, err) + dc, err := command.GetDockerContext(c) + assert.NilError(t, err) + assert.Equal(t, dc.StackOrchestrator, command.OrchestratorSwarm) + assert.Equal(t, dc.Description, "description") + + assert.Equal(t, "test\n", cli.OutBuffer().String()) + assert.Equal(t, "Successfully updated context \"test\"\n", cli.ErrBuffer().String()) +} + +func TestUpdateDockerOnly(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "test", "swarm") + assert.NilError(t, RunUpdate(cli, &UpdateOptions{ + Name: "test", + Docker: map[string]string{ + keyHost: "tcp://some-host", + }, + })) + c, err := cli.ContextStore().GetMetadata("test") + assert.NilError(t, err) + dc, err := command.GetDockerContext(c) + assert.NilError(t, err) + assert.Equal(t, dc.StackOrchestrator, command.OrchestratorSwarm) + assert.Equal(t, dc.Description, "description of test") + assert.Check(t, cmp.Contains(c.Endpoints, kubernetes.KubernetesEndpoint)) 
+ assert.Check(t, cmp.Contains(c.Endpoints, docker.DockerEndpoint)) + assert.Equal(t, c.Endpoints[docker.DockerEndpoint].(docker.EndpointMeta).Host, "tcp://some-host") +} + +func TestUpdateStackOrchestratorStrategy(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + err := RunCreate(cli, &CreateOptions{ + Name: "test", + DefaultStackOrchestrator: "swarm", + Docker: map[string]string{}, + }) + assert.NilError(t, err) + err = RunUpdate(cli, &UpdateOptions{ + Name: "test", + DefaultStackOrchestrator: "kubernetes", + }) + assert.ErrorContains(t, err, `cannot specify orchestrator "kubernetes" without configuring a Kubernetes endpoint`) +} + +func TestUpdateStackOrchestratorStrategyRemoveKubeEndpoint(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + createTestContextWithKubeAndSwarm(t, cli, "test", "kubernetes") + err := RunUpdate(cli, &UpdateOptions{ + Name: "test", + Kubernetes: map[string]string{}, + }) + assert.ErrorContains(t, err, `cannot specify orchestrator "kubernetes" without configuring a Kubernetes endpoint`) +} + +func TestUpdateInvalidDockerHost(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + err := RunCreate(cli, &CreateOptions{ + Name: "test", + Docker: map[string]string{}, + }) + assert.NilError(t, err) + err = RunUpdate(cli, &UpdateOptions{ + Name: "test", + Docker: map[string]string{ + keyHost: "some///invalid/host", + }, + }) + assert.ErrorContains(t, err, "unable to parse docker host") +} diff --git a/cli/cli/command/context/use.go b/cli/cli/command/context/use.go new file mode 100644 index 00000000..97e3a970 --- /dev/null +++ b/cli/cli/command/context/use.go @@ -0,0 +1,48 @@ +package context + +import ( + "fmt" + "os" + + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +func newUseCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "use CONTEXT", + Short: "Set the current docker context", + Args: cobra.ExactArgs(1), + RunE: func(cmd 
*cobra.Command, args []string) error { + name := args[0] + return RunUse(dockerCli, name) + }, + } + return cmd +} + +// RunUse set the current Docker context +func RunUse(dockerCli command.Cli, name string) error { + if err := validateContextName(name); err != nil && name != "default" { + return err + } + if _, err := dockerCli.ContextStore().GetMetadata(name); err != nil && name != "default" { + return err + } + configValue := name + if configValue == "default" { + configValue = "" + } + dockerConfig := dockerCli.ConfigFile() + dockerConfig.CurrentContext = configValue + if err := dockerConfig.Save(); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + fmt.Fprintf(dockerCli.Err(), "Current context is now %q\n", name) + if os.Getenv("DOCKER_HOST") != "" { + fmt.Fprintf(dockerCli.Err(), "Warning: DOCKER_HOST environment variable overrides the active context. "+ + "To use %q, either set the global --context flag, or unset DOCKER_HOST environment variable.\n", name) + } + return nil +} diff --git a/cli/cli/command/context/use_test.go b/cli/cli/command/context/use_test.go new file mode 100644 index 00000000..139189af --- /dev/null +++ b/cli/cli/command/context/use_test.go @@ -0,0 +1,49 @@ +package context + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/context/store" + "gotest.tools/assert" +) + +func TestUse(t *testing.T) { + configDir, err := ioutil.TempDir("", t.Name()+"config") + assert.NilError(t, err) + defer os.RemoveAll(configDir) + configFilePath := filepath.Join(configDir, "config.json") + testCfg := configfile.New(configFilePath) + cli, cleanup := makeFakeCli(t, withCliConfig(testCfg)) + defer cleanup() + err = RunCreate(cli, &CreateOptions{ + Name: "test", + Docker: map[string]string{}, + }) + assert.NilError(t, err) + assert.NilError(t, newUseCommand(cli).RunE(nil, []string{"test"})) + reloadedConfig, err := 
config.Load(configDir) + assert.NilError(t, err) + assert.Equal(t, "test", reloadedConfig.CurrentContext) + + // switch back to default + cli.OutBuffer().Reset() + cli.ErrBuffer().Reset() + assert.NilError(t, newUseCommand(cli).RunE(nil, []string{"default"})) + reloadedConfig, err = config.Load(configDir) + assert.NilError(t, err) + assert.Equal(t, "", reloadedConfig.CurrentContext) + assert.Equal(t, "default\n", cli.OutBuffer().String()) + assert.Equal(t, "Current context is now \"default\"\n", cli.ErrBuffer().String()) +} + +func TestUseNoExist(t *testing.T) { + cli, cleanup := makeFakeCli(t) + defer cleanup() + err := newUseCommand(cli).RunE(nil, []string{"test"}) + assert.Check(t, store.IsErrContextDoesNotExist(err)) +} diff --git a/cli/cli/command/defaultcontextstore.go b/cli/cli/command/defaultcontextstore.go new file mode 100644 index 00000000..3140dc50 --- /dev/null +++ b/cli/cli/command/defaultcontextstore.go @@ -0,0 +1,215 @@ +package command + +import ( + "fmt" + "io" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/store" + cliflags "github.com/docker/cli/cli/flags" + "github.com/pkg/errors" +) + +const ( + // DefaultContextName is the name reserved for the default context (config & env based) + DefaultContextName = "default" +) + +// DefaultContext contains the default context data for all endpoints +type DefaultContext struct { + Meta store.Metadata + TLS store.ContextTLSData +} + +// DefaultContextResolver is a function which resolves the default context base on the configuration and the env variables +type DefaultContextResolver func() (*DefaultContext, error) + +// ContextStoreWithDefault implements the store.Store interface with a support for the default context +type ContextStoreWithDefault struct { + store.Store + Resolver DefaultContextResolver +} + +// EndpointDefaultResolver is implemented by any EndpointMeta object +// which wants to be able to populate the 
store with whatever their default is. +type EndpointDefaultResolver interface { + // ResolveDefault returns values suitable for storing in store.Metadata.Endpoints + // and store.ContextTLSData.Endpoints. + // + // An error is only returned for something fatal, not simply + // the lack of a default (e.g. because the config file which + // would contain it is missing). If there is no default then + // returns nil, nil, nil. + ResolveDefault(Orchestrator) (interface{}, *store.EndpointTLSData, error) +} + +// ResolveDefaultContext creates a Metadata for the current CLI invocation parameters +func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, storeconfig store.Config, stderr io.Writer) (*DefaultContext, error) { + stackOrchestrator, err := GetStackOrchestrator("", "", config.StackOrchestrator, stderr) + if err != nil { + return nil, err + } + contextTLSData := store.ContextTLSData{ + Endpoints: make(map[string]store.EndpointTLSData), + } + contextMetadata := store.Metadata{ + Endpoints: make(map[string]interface{}), + Metadata: DockerContext{ + Description: "", + StackOrchestrator: stackOrchestrator, + }, + Name: DefaultContextName, + } + + dockerEP, err := resolveDefaultDockerEndpoint(opts) + if err != nil { + return nil, err + } + contextMetadata.Endpoints[docker.DockerEndpoint] = dockerEP.EndpointMeta + if dockerEP.TLSData != nil { + contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerEP.TLSData.ToStoreTLSData() + } + + if err := storeconfig.ForeachEndpointType(func(n string, get store.TypeGetter) error { + if n == docker.DockerEndpoint { // handled above + return nil + } + ep := get() + if i, ok := ep.(EndpointDefaultResolver); ok { + meta, tls, err := i.ResolveDefault(stackOrchestrator) + if err != nil { + return err + } + if meta == nil { + return nil + } + contextMetadata.Endpoints[n] = meta + if tls != nil { + contextTLSData.Endpoints[n] = *tls + } + } + // Nothing to be done + return nil + }); err != nil { + return 
nil, err + } + + return &DefaultContext{Meta: contextMetadata, TLS: contextTLSData}, nil +} + +// List implements store.Store's List +func (s *ContextStoreWithDefault) List() ([]store.Metadata, error) { + contextList, err := s.Store.List() + if err != nil { + return nil, err + } + defaultContext, err := s.Resolver() + if err != nil { + return nil, err + } + return append(contextList, defaultContext.Meta), nil +} + +// CreateOrUpdate is not allowed for the default context and fails +func (s *ContextStoreWithDefault) CreateOrUpdate(meta store.Metadata) error { + if meta.Name == DefaultContextName { + return errors.New("default context cannot be created nor updated") + } + return s.Store.CreateOrUpdate(meta) +} + +// Remove is not allowed for the default context and fails +func (s *ContextStoreWithDefault) Remove(name string) error { + if name == DefaultContextName { + return errors.New("default context cannot be removed") + } + return s.Store.Remove(name) +} + +// GetMetadata implements store.Store's GetMetadata +func (s *ContextStoreWithDefault) GetMetadata(name string) (store.Metadata, error) { + if name == DefaultContextName { + defaultContext, err := s.Resolver() + if err != nil { + return store.Metadata{}, err + } + return defaultContext.Meta, nil + } + return s.Store.GetMetadata(name) +} + +// ResetTLSMaterial is not implemented for default context and fails +func (s *ContextStoreWithDefault) ResetTLSMaterial(name string, data *store.ContextTLSData) error { + if name == DefaultContextName { + return errors.New("The default context store does not support ResetTLSMaterial") + } + return s.Store.ResetTLSMaterial(name, data) +} + +// ResetEndpointTLSMaterial is not implemented for default context and fails +func (s *ContextStoreWithDefault) ResetEndpointTLSMaterial(contextName string, endpointName string, data *store.EndpointTLSData) error { + if contextName == DefaultContextName { + return errors.New("The default context store does not support 
ResetEndpointTLSMaterial") + } + return s.Store.ResetEndpointTLSMaterial(contextName, endpointName, data) +} + +// ListTLSFiles implements store.Store's ListTLSFiles +func (s *ContextStoreWithDefault) ListTLSFiles(name string) (map[string]store.EndpointFiles, error) { + if name == DefaultContextName { + defaultContext, err := s.Resolver() + if err != nil { + return nil, err + } + tlsfiles := make(map[string]store.EndpointFiles) + for epName, epTLSData := range defaultContext.TLS.Endpoints { + var files store.EndpointFiles + for filename := range epTLSData.Files { + files = append(files, filename) + } + tlsfiles[epName] = files + } + return tlsfiles, nil + } + return s.Store.ListTLSFiles(name) +} + +// GetTLSData implements store.Store's GetTLSData +func (s *ContextStoreWithDefault) GetTLSData(contextName, endpointName, fileName string) ([]byte, error) { + if contextName == DefaultContextName { + defaultContext, err := s.Resolver() + if err != nil { + return nil, err + } + if defaultContext.TLS.Endpoints[endpointName].Files[fileName] == nil { + return nil, &noDefaultTLSDataError{endpointName: endpointName, fileName: fileName} + } + return defaultContext.TLS.Endpoints[endpointName].Files[fileName], nil + + } + return s.Store.GetTLSData(contextName, endpointName, fileName) +} + +type noDefaultTLSDataError struct { + endpointName string + fileName string +} + +func (e *noDefaultTLSDataError) Error() string { + return fmt.Sprintf("tls data for %s/%s/%s does not exist", DefaultContextName, e.endpointName, e.fileName) +} + +// NotFound satisfies interface github.com/docker/docker/errdefs.ErrNotFound +func (e *noDefaultTLSDataError) NotFound() {} + +// IsTLSDataDoesNotExist satisfies github.com/docker/cli/cli/context/store.tlsDataDoesNotExist +func (e *noDefaultTLSDataError) IsTLSDataDoesNotExist() {} + +// GetStorageInfo implements store.Store's GetStorageInfo +func (s *ContextStoreWithDefault) GetStorageInfo(contextName string) store.StorageInfo { + if contextName == 
DefaultContextName { + return store.StorageInfo{MetadataPath: "", TLSPath: ""} + } + return s.Store.GetStorageInfo(contextName) +} diff --git a/cli/cli/command/defaultcontextstore_test.go b/cli/cli/command/defaultcontextstore_test.go new file mode 100644 index 00000000..e1c7c1df --- /dev/null +++ b/cli/cli/command/defaultcontextstore_test.go @@ -0,0 +1,190 @@ +package command + +import ( + "crypto/rand" + "io/ioutil" + "os" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/store" + cliflags "github.com/docker/cli/cli/flags" + "github.com/docker/go-connections/tlsconfig" + "gotest.tools/assert" + "gotest.tools/env" + "gotest.tools/golden" +) + +type endpoint struct { + Foo string `json:"a_very_recognizable_field_name"` +} + +type testContext struct { + Bar string `json:"another_very_recognizable_field_name"` +} + +var testCfg = store.NewConfig(func() interface{} { return &testContext{} }, + store.EndpointTypeGetter("ep1", func() interface{} { return &endpoint{} }), + store.EndpointTypeGetter("ep2", func() interface{} { return &endpoint{} }), +) + +func testDefaultMetadata() store.Metadata { + return store.Metadata{ + Endpoints: map[string]interface{}{ + "ep1": endpoint{Foo: "bar"}, + }, + Metadata: testContext{Bar: "baz"}, + Name: DefaultContextName, + } +} + +func testStore(t *testing.T, meta store.Metadata, tls store.ContextTLSData) (store.Store, func()) { + //meta := testDefaultMetadata() + testDir, err := ioutil.TempDir("", t.Name()) + assert.NilError(t, err) + //defer os.RemoveAll(testDir) + store := &ContextStoreWithDefault{ + Store: store.New(testDir, testCfg), + Resolver: func() (*DefaultContext, error) { + return &DefaultContext{ + Meta: meta, + TLS: tls, + }, nil + }, + } + return store, func() { + os.RemoveAll(testDir) + } +} + +func TestDefaultContextInitializer(t *testing.T) { + cli, err := NewDockerCli() + assert.NilError(t, err) + defer env.Patch(t, 
"DOCKER_HOST", "ssh://someswarmserver")() + cli.configFile = &configfile.ConfigFile{ + StackOrchestrator: "swarm", + } + ctx, err := ResolveDefaultContext(&cliflags.CommonOptions{ + TLS: true, + TLSOptions: &tlsconfig.Options{ + CAFile: "./testdata/ca.pem", + }, + }, cli.ConfigFile(), DefaultContextStoreConfig(), cli.Err()) + assert.NilError(t, err) + assert.Equal(t, "default", ctx.Meta.Name) + assert.Equal(t, OrchestratorSwarm, ctx.Meta.Metadata.(DockerContext).StackOrchestrator) + assert.DeepEqual(t, "ssh://someswarmserver", ctx.Meta.Endpoints[docker.DockerEndpoint].(docker.EndpointMeta).Host) + golden.Assert(t, string(ctx.TLS.Endpoints[docker.DockerEndpoint].Files["ca.pem"]), "ca.pem") +} + +func TestExportDefaultImport(t *testing.T) { + file1 := make([]byte, 1500) + rand.Read(file1) + file2 := make([]byte, 3700) + rand.Read(file2) + s, cleanup := testStore(t, testDefaultMetadata(), store.ContextTLSData{ + Endpoints: map[string]store.EndpointTLSData{ + "ep2": { + Files: map[string][]byte{ + "file1": file1, + "file2": file2, + }, + }, + }, + }) + defer cleanup() + r := store.Export("default", s) + defer r.Close() + err := store.Import("dest", s, r) + assert.NilError(t, err) + + srcMeta, err := s.GetMetadata("default") + assert.NilError(t, err) + destMeta, err := s.GetMetadata("dest") + assert.NilError(t, err) + assert.DeepEqual(t, destMeta.Metadata, srcMeta.Metadata) + assert.DeepEqual(t, destMeta.Endpoints, srcMeta.Endpoints) + + srcFileList, err := s.ListTLSFiles("default") + assert.NilError(t, err) + destFileList, err := s.ListTLSFiles("dest") + assert.NilError(t, err) + assert.Equal(t, 1, len(destFileList)) + assert.Equal(t, 1, len(srcFileList)) + assert.Equal(t, 2, len(destFileList["ep2"])) + assert.Equal(t, 2, len(srcFileList["ep2"])) + + srcData1, err := s.GetTLSData("default", "ep2", "file1") + assert.NilError(t, err) + assert.DeepEqual(t, file1, srcData1) + srcData2, err := s.GetTLSData("default", "ep2", "file2") + assert.NilError(t, err) + 
assert.DeepEqual(t, file2, srcData2) + + destData1, err := s.GetTLSData("dest", "ep2", "file1") + assert.NilError(t, err) + assert.DeepEqual(t, file1, destData1) + destData2, err := s.GetTLSData("dest", "ep2", "file2") + assert.NilError(t, err) + assert.DeepEqual(t, file2, destData2) +} + +func TestListDefaultContext(t *testing.T) { + meta := testDefaultMetadata() + s, cleanup := testStore(t, meta, store.ContextTLSData{}) + defer cleanup() + result, err := s.List() + assert.NilError(t, err) + assert.Equal(t, 1, len(result)) + assert.DeepEqual(t, meta, result[0]) +} + +func TestGetDefaultContextStorageInfo(t *testing.T) { + s, cleanup := testStore(t, testDefaultMetadata(), store.ContextTLSData{}) + defer cleanup() + result := s.GetStorageInfo(DefaultContextName) + assert.Equal(t, "", result.MetadataPath) + assert.Equal(t, "", result.TLSPath) +} + +func TestGetDefaultContextMetadata(t *testing.T) { + meta := testDefaultMetadata() + s, cleanup := testStore(t, meta, store.ContextTLSData{}) + defer cleanup() + result, err := s.GetMetadata(DefaultContextName) + assert.NilError(t, err) + assert.Equal(t, DefaultContextName, result.Name) + assert.DeepEqual(t, meta.Metadata, result.Metadata) + assert.DeepEqual(t, meta.Endpoints, result.Endpoints) +} + +func TestErrCreateDefault(t *testing.T) { + meta := testDefaultMetadata() + s, cleanup := testStore(t, meta, store.ContextTLSData{}) + defer cleanup() + err := s.CreateOrUpdate(store.Metadata{ + Endpoints: map[string]interface{}{ + "ep1": endpoint{Foo: "bar"}, + }, + Metadata: testContext{Bar: "baz"}, + Name: "default", + }) + assert.Error(t, err, "default context cannot be created nor updated") +} + +func TestErrRemoveDefault(t *testing.T) { + meta := testDefaultMetadata() + s, cleanup := testStore(t, meta, store.ContextTLSData{}) + defer cleanup() + err := s.Remove("default") + assert.Error(t, err, "default context cannot be removed") +} + +func TestErrTLSDataError(t *testing.T) { + meta := testDefaultMetadata() + s, cleanup 
// activateOptions holds the flag values and injectable dependencies for
// the `docker engine activate` command.
type activateOptions struct {
	licenseFile    string // path to a license file; empty triggers the interactive Hub flow
	version        string // engine version to activate; empty keeps the currently running version
	registryPrefix string // location engine images are pulled from
	format         string // Go template used to pretty-print licenses
	image          string // engine image override
	quiet          bool   // only display available licenses by ID
	displayOnly    bool   // display license information without applying it
	sockPath       string // containerd endpoint override
	// licenseLoginFunc performs the Docker Hub login; a field (rather
	// than a direct call) so tests can substitute a fake implementation.
	licenseLoginFunc func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error)
}
+ +For more information about different Docker Enterprise license types visit +https://www.docker.com/licenses + +For non-interactive scriptable deployments, download your license from +https://hub.docker.com/ then specify the file with the '--license' flag. +`, + RunE: func(cmd *cobra.Command, args []string) error { + return runActivate(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.StringVar(&options.licenseFile, "license", "", "License File") + flags.StringVar(&options.version, "version", "", "Specify engine version (default is to use currently running version)") + flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the default location where engine images are pulled") + flags.StringVar(&options.image, "engine-image", "", "Specify engine image") + flags.StringVar(&options.format, "format", "", "Pretty-print licenses using a Go template") + flags.BoolVar(&options.displayOnly, "display-only", false, "only display license information and exit") + flags.BoolVar(&options.quiet, "quiet", false, "Only display available licenses by ID") + flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint") + + return cmd +} + +func runActivate(cli command.Cli, options activateOptions) error { + if !isRoot() { + return errors.New("this command must be run as a privileged user") + } + ctx := context.Background() + client, err := cli.NewContainerizedEngineClient(options.sockPath) + if err != nil { + return errors.Wrap(err, "unable to access local containerd") + } + defer client.Close() + + authConfig, err := getRegistryAuth(cli, options.registryPrefix) + if err != nil { + return err + } + + var license *model.IssuedLicense + + // Lookup on hub if no license provided via params + if options.licenseFile == "" { + if license, err = getLicenses(ctx, authConfig, cli, options); err != nil { + return err + } + if options.displayOnly { + return nil + } + } else { + if license, err = 
licenseutils.LoadLocalIssuedLicense(ctx, options.licenseFile); err != nil { + return err + } + } + summary, err := licenseutils.GetLicenseSummary(ctx, *license) + if err != nil { + return err + } + fmt.Fprintf(cli.Out(), "License: %s\n", summary) + if options.displayOnly { + return nil + } + dclient := cli.Client() + if err = licenseutils.ApplyLicense(ctx, dclient, license); err != nil { + return err + } + + // Short circuit if the user didn't specify a version and we're already running enterprise + if options.version == "" { + serverVersion, err := dclient.ServerVersion(ctx) + if err != nil { + return err + } + if strings.Contains(strings.ToLower(serverVersion.Platform.Name), "enterprise") { + fmt.Fprintln(cli.Out(), "Successfully activated engine license on existing enterprise engine.") + return nil + } + options.version = serverVersion.Version + } + + opts := clitypes.EngineInitOptions{ + RegistryPrefix: options.registryPrefix, + EngineImage: options.image, + EngineVersion: options.version, + } + + if err := client.ActivateEngine(ctx, opts, cli.Out(), authConfig); err != nil { + return err + } + fmt.Fprintln(cli.Out(), `Successfully activated engine. 
+Restart docker with 'systemctl restart docker' to complete the activation.`) + return nil +} + +func getLicenses(ctx context.Context, authConfig *types.AuthConfig, cli command.Cli, options activateOptions) (*model.IssuedLicense, error) { + user, err := options.licenseLoginFunc(ctx, authConfig) + if err != nil { + return nil, err + } + fmt.Fprintf(cli.Out(), "Looking for existing licenses for %s...\n", user.User.Username) + subs, err := user.GetAvailableLicenses(ctx) + if err != nil { + return nil, err + } + if len(subs) == 0 { + return doTrialFlow(ctx, cli, user) + } + + format := options.format + if len(format) == 0 { + format = formatter.TableFormatKey + } + + updatesCtx := formatter.Context{ + Output: cli.Out(), + Format: NewSubscriptionsFormat(format, options.quiet), + Trunc: false, + } + if err := SubscriptionsWrite(updatesCtx, subs); err != nil { + return nil, err + } + if options.displayOnly { + return nil, nil + } + fmt.Fprintf(cli.Out(), "Please pick a license by number: ") + var num int + if _, err := fmt.Fscan(cli.In(), &num); err != nil { + return nil, errors.Wrap(err, "failed to read user input") + } + if num < 0 || num >= len(subs) { + return nil, fmt.Errorf("invalid choice") + } + return user.GetIssuedLicense(ctx, subs[num].ID) +} + +func doTrialFlow(ctx context.Context, cli command.Cli, user licenseutils.HubUser) (*model.IssuedLicense, error) { + if !command.PromptForConfirmation(cli.In(), cli.Out(), + "No existing licenses found, would you like to set up a new Enterprise Basic Trial license?") { + return nil, fmt.Errorf("you must have an existing enterprise license or generate a new trial to use the Enterprise Docker Engine") + } + targetID := user.User.ID + // If the user is a member of any organizations, allow trials generated against them + if len(user.Orgs) > 0 { + fmt.Fprintf(cli.Out(), "%d\t%s\n", 0, user.User.Username) + for i, org := range user.Orgs { + fmt.Fprintf(cli.Out(), "%d\t%s\n", i+1, org.Orgname) + } + fmt.Fprintf(cli.Out(), 
"Please choose an account to generate the trial in:") + var num int + if _, err := fmt.Fscan(cli.In(), &num); err != nil { + return nil, errors.Wrap(err, "failed to read user input") + } + if num < 0 || num > len(user.Orgs) { + return nil, fmt.Errorf("invalid choice") + } + if num > 0 { + targetID = user.Orgs[num-1].ID + } + } + return user.GenerateTrialLicense(ctx, targetID) +} diff --git a/cli/cli/command/engine/activate_test.go b/cli/cli/command/engine/activate_test.go new file mode 100644 index 00000000..9eff3b8c --- /dev/null +++ b/cli/cli/command/engine/activate_test.go @@ -0,0 +1,148 @@ +package engine + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/docker/cli/internal/licenseutils" + "github.com/docker/cli/internal/test" + clitypes "github.com/docker/cli/types" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/licensing" + "github.com/docker/licensing/model" + "gotest.tools/assert" + "gotest.tools/fs" + "gotest.tools/golden" +) + +const ( + // nolint: lll + expiredLicense = 
`{"key_id":"irlYm3b9fdD8hMUXjazF39im7VQSSbAm9tfHK8cKUxJt","private_key":"aH5tTRDAVJpCRS2CRetTQVXIKgWUPfoCHODhDvNPvAbz","authorization":"ewogICAicGF5bG9hZCI6ICJleUpsZUhCcGNtRjBhVzl1SWpvaU1qQXhPQzB3TXkweE9GUXdOem93TURvd01Gb2lMQ0owYjJ0bGJpSTZJbkZtTVMxMlVtRmtialp5YjFaMldXdHJlVXN4VFdKMGNGUmpXR1ozVjA4MVRWZFFTM2cwUnpJd2NIYzlJaXdpYldGNFJXNW5hVzVsY3lJNk1Td2ljMk5oYm01cGJtZEZibUZpYkdWa0lqcDBjblZsTENKc2FXTmxibk5sVkhsd1pTSTZJazltWm14cGJtVWlMQ0owYVdWeUlqb2lVSEp2WkhWamRHbHZiaUo5IiwKICAgInNpZ25hdHVyZXMiOiBbCiAgICAgIHsKICAgICAgICAgImhlYWRlciI6IHsKICAgICAgICAgICAgImp3ayI6IHsKICAgICAgICAgICAgICAgImUiOiAiQVFBQiIsCiAgICAgICAgICAgICAgICJrZXlJRCI6ICJKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVCIsCiAgICAgICAgICAgICAgICJraWQiOiAiSjdMRDo2N1ZSOkw1SFo6VTdCQToyTzRHOjRBTDM6T0YyTjpKSEdCOkVGVEg6NUNWUTpNRkVPOkFFSVQiLAogICAgICAgICAgICAgICAia3R5IjogIlJTQSIsCiAgICAgICAgICAgICAgICJuIjogInlkSXktbFU3bzdQY2VZLTQtcy1DUTVPRWdDeUY4Q3hJY1FJV3VLODRwSWlaY2lZNjczMHlDWW53TFNLVGx3LVU2VUNfUVJlV1Jpb01OTkU1RHM1VFlFWGJHRzZvbG0ycWRXYkJ3Y0NnLTJVVUhfT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTFJdi1tcTdscDVuUV9SeHJUMEtaUkFRVFlMRU1FZkd3bTNoTU9fZ2VMUFMtaGdLUHRJSGxrZzZfV2NveFRHb0tQNzlkX3dhSFl4R05sN1doU25laUJTeGJwYlFBS2syMWxnNzk4WGI3dlp5RUFURE1yUlI5TWVFNkFkajVISnBZM0NveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTy13YTJLTjhVd2dJSW0waTVJMXVXOUdrcTR6akJ5NXpoZ3F1VVhiRzliV1BBT1lycTVRYTgxRHhHY0JsSnlIWUFwLUREUEU5VEdnNHpZbVhqSm54WnFIRWR1R3FkZXZaOFhNSTB1a2ZrR0lJMTR3VU9pTUlJSXJYbEVjQmZfNDZJOGdRV0R6eHljWmVfSkdYLUxBdWF5WHJ5clVGZWhWTlVkWlVsOXdYTmFKQi1rYUNxejVRd2FSOTNzR3ctUVNmdEQwTnZMZTdDeU9ILUU2dmc2U3RfTmVUdmd2OFluaENpWElsWjhIT2ZJd05lN3RFRl9VY3o1T2JQeWttM3R5bHJOVWp0MFZ5QW10dGFjVkkyaUdpaGNVUHJtazRsVklaN1ZEX0xTVy1pN3lvU3VydHBzUFhjZTJwS0RJbzMwbEpHaE9fM0tVbWwyU1VaQ3F6SjF5RW1LcHlzSDVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVIgogICAgICAgICAgICB9LAogICAgICAgICAgICAiYWxnIjogIlJTMjU2IgogICAgICAgICB9LAogICAgICAgICAic2lnbmF0dXJlIjogIm5saTZIdzRrbW5KcTBSUmRXaGVfbkhZS2VJLVpKenM1U0d5SUpDakh1dWtnVzhBYklpVzFZYWJJR2NqWUt0QTY4
dWN6T1hyUXZreGxWQXJLSlgzMDJzN0RpbzcxTlNPRzJVcnhsSjlibDFpd0F3a3ZyTEQ2T0p5MGxGLVg4WnRabXhPVmNQZmwzcmJwZFQ0dnlnWTdNcU1QRXdmb0IxTmlWZDYyZ1cxU2NSREZZcWw3R0FVaFVKNkp4QU15VzVaOXl5YVE0NV8wd0RMUk5mRjA5YWNXeVowTjRxVS1hZjhrUTZUUWZUX05ERzNCR3pRb2V3cHlEajRiMFBHb0diOFhLdDlwekpFdEdxM3lQM25VMFFBbk90a2gwTnZac1l1UFcyUnhDT3lRNEYzVlR3UkF2eF9HSTZrMVRpYmlKNnByUWluUy16Sjh6RE8zUjBuakE3OFBwNXcxcVpaUE9BdmtzZFNSYzJDcVMtcWhpTmF5YUhOVHpVNnpyOXlOZHR2S0o1QjNST0FmNUtjYXNiWURjTnVpeXBUNk90LUtqQ2I1dmYtWVpnc2FRNzJBdFBhSU4yeUpNREZHbmEwM0hpSjMxcTJRUlp5eTZrd3RYaGtwcDhTdEdIcHYxSWRaV09SVWttb0g5SFBzSGk4SExRLTZlM0tEY2x1RUQyMTNpZnljaVhtN0YzdHdaTTNHeDd1UXR1SldHaUlTZ2Z0QW9lVjZfUmI2VThkMmZxNzZuWHYxak5nckRRcE5waEZFd2tCdGRtZHZ2THByZVVYX3BWangza1AxN3pWbXFKNmNOOWkwWUc4WHg2VmRzcUxsRXUxQ2Rhd3Q0eko1M3VHMFlKTjRnUDZwc25yUS1uM0U1aFdlMDJ3d3dBZ3F3bGlPdmd4V1RTeXJyLXY2eDI0IiwKICAgICAgICAgInByb3RlY3RlZCI6ICJleUptYjNKdFlYUk1aVzVuZEdnaU9qRTNNeXdpWm05eWJXRjBWR0ZwYkNJNkltWlJJaXdpZEdsdFpTSTZJakl3TVRjdE1EVXRNRFZVTWpFNk5UYzZNek5hSW4wIgogICAgICB9CiAgIF0KfQ=="}` +) + +func TestActivateNoContainerd(t *testing.T) { + testCli.SetContainerizedEngineClient( + func(string) (clitypes.ContainerizedClient, error) { + return nil, fmt.Errorf("some error") + }, + ) + isRoot = func() bool { return true } + cmd := newActivateCommand(testCli) + cmd.Flags().Set("license", "invalidpath") + cmd.SilenceUsage = true + cmd.SilenceErrors = true + err := cmd.Execute() + assert.ErrorContains(t, err, "unable to access local containerd") +} + +func TestActivateBadLicense(t *testing.T) { + isRoot = func() bool { return true } + c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil}) + c.SetContainerizedEngineClient( + func(string) (clitypes.ContainerizedClient, error) { + return &fakeContainerizedEngineClient{}, nil + }, + ) + cmd := newActivateCommand(c) + cmd.SilenceUsage = true + cmd.SilenceErrors = true + cmd.Flags().Set("license", "invalidpath") + err := cmd.Execute() + assert.Assert(t, os.IsNotExist(err)) +} + +func 
TestActivateExpiredLicenseDryRun(t *testing.T) { + dir := fs.NewDir(t, "license", fs.WithFile("docker.lic", expiredLicense, fs.WithMode(0644))) + defer dir.Remove() + filename := dir.Join("docker.lic") + isRoot = func() bool { return true } + c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil}) + c.SetContainerizedEngineClient( + func(string) (clitypes.ContainerizedClient, error) { + return &fakeContainerizedEngineClient{}, nil + }, + ) + cmd := newActivateCommand(c) + cmd.SilenceUsage = true + cmd.SilenceErrors = true + cmd.Flags().Set("license", filename) + cmd.Flags().Set("display-only", "true") + c.OutBuffer().Reset() + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, c.OutBuffer().String(), "expired-license-display-only.golden") +} + +type mockLicenseClient struct{} + +func (c mockLicenseClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) { + return "", fmt.Errorf("not implemented") +} + +func (c mockLicenseClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) { + return nil, fmt.Errorf("not implemented") +} +func (c mockLicenseClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) { + return nil, fmt.Errorf("not implemented") +} +func (c mockLicenseClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) { + return nil, fmt.Errorf("not implemented") +} +func (c mockLicenseClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) { + return "", fmt.Errorf("not implemented") +} +func (c mockLicenseClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) { + expires := time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC) + return []*model.Subscription{ + { + State: "active", + Expires: &expires, + }, + }, nil 
+} +func (c mockLicenseClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) { + return nil, fmt.Errorf("not implemented") +} +func (c mockLicenseClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) { + return nil, fmt.Errorf("not implemented") +} +func (c mockLicenseClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) { + return nil, fmt.Errorf("not implemented") +} +func (c mockLicenseClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error { + return fmt.Errorf("not implemented") +} +func (c mockLicenseClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) { + return nil, fmt.Errorf("not implemented") +} +func (c mockLicenseClient) SummarizeLicense(res *model.CheckResponse, keyID string) *model.Subscription { + return nil +} +func TestActivateDisplayOnlyHub(t *testing.T) { + isRoot = func() bool { return true } + c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil}) + c.SetContainerizedEngineClient( + func(string) (clitypes.ContainerizedClient, error) { + return &fakeContainerizedEngineClient{}, nil + }, + ) + + hubUser := licenseutils.HubUser{ + Client: mockLicenseClient{}, + } + options := activateOptions{ + licenseLoginFunc: func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error) { + return hubUser, nil + }, + displayOnly: true, + } + c.OutBuffer().Reset() + err := runActivate(c, options) + + assert.NilError(t, err) + golden.Assert(t, c.OutBuffer().String(), "expired-hub-license-display-only.golden") +} diff --git a/cli/cli/command/engine/activate_unix.go b/cli/cli/command/engine/activate_unix.go new file mode 100644 index 00000000..ed4777ae --- /dev/null +++ 
b/cli/cli/command/engine/activate_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package engine + +import ( + "golang.org/x/sys/unix" +) + +var ( + isRoot = func() bool { + return unix.Geteuid() == 0 + } +) diff --git a/cli/cli/command/engine/activate_windows.go b/cli/cli/command/engine/activate_windows.go new file mode 100644 index 00000000..35a4e88a --- /dev/null +++ b/cli/cli/command/engine/activate_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package engine + +var ( + isRoot = func() bool { + return true + } +) diff --git a/cli/cli/command/engine/auth.go b/cli/cli/command/engine/auth.go new file mode 100644 index 00000000..fee3e7b2 --- /dev/null +++ b/cli/cli/command/engine/auth.go @@ -0,0 +1,34 @@ +package engine + +import ( + "context" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + clitypes "github.com/docker/cli/types" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/pkg/errors" +) + +func getRegistryAuth(cli command.Cli, registryPrefix string) (*types.AuthConfig, error) { + if registryPrefix == "" { + registryPrefix = clitypes.RegistryPrefix + } + distributionRef, err := reference.ParseNormalizedNamed(registryPrefix) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse image name: %s", registryPrefix) + } + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(context.Background(), nil, authResolver(cli), distributionRef.String()) + if err != nil { + return nil, errors.Wrap(err, "failed to get imgRefAndAuth") + } + return imgRefAndAuth.AuthConfig(), nil +} + +func authResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { + return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { + return command.ResolveAuthConfig(ctx, cli, index) + } +} diff --git a/cli/cli/command/engine/check.go b/cli/cli/command/engine/check.go new file 
mode 100644 index 00000000..07e4734a --- /dev/null +++ b/cli/cli/command/engine/check.go @@ -0,0 +1,125 @@ +package engine + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/internal/versions" + clitypes "github.com/docker/cli/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type checkOptions struct { + registryPrefix string + preReleases bool + engineImage string + downgrades bool + upgrades bool + format string + quiet bool + sockPath string +} + +func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command { + var options checkOptions + + cmd := &cobra.Command{ + Use: "check [OPTIONS]", + Short: "Check for available engine updates", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runCheck(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the existing location where engine images are pulled") + flags.BoolVar(&options.downgrades, "downgrades", false, "Report downgrades (default omits older versions)") + flags.BoolVar(&options.preReleases, "pre-releases", false, "Include pre-release versions") + flags.StringVar(&options.engineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)") + flags.BoolVar(&options.upgrades, "upgrades", true, "Report available upgrades") + flags.StringVar(&options.format, "format", "", "Pretty-print updates using a Go template") + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display available versions") + flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint") + + return cmd +} + +func runCheck(dockerCli command.Cli, options checkOptions) error { + if !isRoot() { + return errors.New("this command must be run as a privileged user") + } + ctx := 
context.Background() + client := dockerCli.Client() + serverVersion, err := client.ServerVersion(ctx) + if err != nil { + return err + } + + availVersions, err := versions.GetEngineVersions(ctx, dockerCli.RegistryClient(false), options.registryPrefix, options.engineImage, serverVersion.Version) + if err != nil { + return err + } + + availUpdates := []clitypes.Update{ + {Type: "current", Version: serverVersion.Version}, + } + if len(availVersions.Patches) > 0 { + availUpdates = append(availUpdates, + processVersions( + serverVersion.Version, + "patch", + options.preReleases, + availVersions.Patches)...) + } + if options.upgrades { + availUpdates = append(availUpdates, + processVersions( + serverVersion.Version, + "upgrade", + options.preReleases, + availVersions.Upgrades)...) + } + if options.downgrades { + availUpdates = append(availUpdates, + processVersions( + serverVersion.Version, + "downgrade", + options.preReleases, + availVersions.Downgrades)...) + } + + format := options.format + if len(format) == 0 { + format = formatter.TableFormatKey + } + + updatesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewUpdatesFormat(format, options.quiet), + Trunc: false, + } + return UpdatesWrite(updatesCtx, availUpdates) +} + +func processVersions(currentVersion, verType string, + includePrerelease bool, + availVersions []clitypes.DockerVersion) []clitypes.Update { + availUpdates := []clitypes.Update{} + for _, ver := range availVersions { + if !includePrerelease && ver.Prerelease() != "" { + continue + } + if ver.Tag != currentVersion { + availUpdates = append(availUpdates, clitypes.Update{ + Type: verType, + Version: ver.Tag, + Notes: fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, ver.Tag), + }) + } + } + return availUpdates +} diff --git a/cli/cli/command/engine/check_test.go b/cli/cli/command/engine/check_test.go new file mode 100644 index 00000000..d1bfd933 --- /dev/null +++ b/cli/cli/command/engine/check_test.go @@ -0,0 +1,114 @@ +package engine + 
// verClient embeds the real API client while overriding ServerVersion
// and Info with canned responses for tests.
type verClient struct {
	client.Client
	ver     types.Version // value returned by ServerVersion
	verErr  error         // error returned by ServerVersion
	info    types.Info    // value returned by Info
	infoErr error         // error returned by Info
}

// ServerVersion returns the stubbed version and error.
func (c *verClient) ServerVersion(ctx context.Context) (types.Version, error) {
	return c.ver, c.verErr
}

// Info returns the stubbed system info and error.
func (c *verClient) Info(ctx context.Context) (types.Info, error) {
	return c.info, c.infoErr
}
TestCheckForUpdatesGetEngineVersionsHappy(t *testing.T) { + c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil}) + c.SetRegistryClient(testRegistryClient{[]string{ + "1.0.1", "1.0.2", "1.0.3-beta1", + "1.1.1", "1.1.2", "1.1.3-beta1", + "1.2.0", "2.0.0", "2.1.0-beta1", + }}) + + isRoot = func() bool { return true } + cmd := newCheckForUpdatesCommand(c) + cmd.Flags().Set("pre-releases", "true") + cmd.Flags().Set("downgrades", "true") + cmd.Flags().Set("engine-image", "engine-community") + cmd.SilenceUsage = true + cmd.SilenceErrors = true + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, c.OutBuffer().String(), "check-all.golden") + + c.OutBuffer().Reset() + cmd.Flags().Set("pre-releases", "false") + cmd.Flags().Set("downgrades", "true") + err = cmd.Execute() + assert.NilError(t, err) + fmt.Println(c.OutBuffer().String()) + golden.Assert(t, c.OutBuffer().String(), "check-no-prerelease.golden") + + c.OutBuffer().Reset() + cmd.Flags().Set("pre-releases", "false") + cmd.Flags().Set("downgrades", "false") + err = cmd.Execute() + assert.NilError(t, err) + fmt.Println(c.OutBuffer().String()) + golden.Assert(t, c.OutBuffer().String(), "check-no-downgrades.golden") + + c.OutBuffer().Reset() + cmd.Flags().Set("pre-releases", "false") + cmd.Flags().Set("downgrades", "false") + cmd.Flags().Set("upgrades", "false") + err = cmd.Execute() + assert.NilError(t, err) + fmt.Println(c.OutBuffer().String()) + golden.Assert(t, c.OutBuffer().String(), "check-patches-only.golden") +} diff --git a/cli/cli/command/engine/client_test.go b/cli/cli/command/engine/client_test.go new file mode 100644 index 00000000..e646666f --- /dev/null +++ b/cli/cli/command/engine/client_test.go @@ -0,0 +1,101 @@ +package engine + +import ( + "context" + + "github.com/containerd/containerd" + registryclient "github.com/docker/cli/cli/registry/client" + clitypes "github.com/docker/cli/types" + 
"github.com/docker/docker/api/types" +) + +type ( + fakeContainerizedEngineClient struct { + closeFunc func() error + activateEngineFunc func(ctx context.Context, + opts clitypes.EngineInitOptions, + out clitypes.OutStream, + authConfig *types.AuthConfig) error + initEngineFunc func(ctx context.Context, + opts clitypes.EngineInitOptions, + out clitypes.OutStream, + authConfig *types.AuthConfig, + healthfn func(context.Context) error) error + doUpdateFunc func(ctx context.Context, + opts clitypes.EngineInitOptions, + out clitypes.OutStream, + authConfig *types.AuthConfig) error + getEngineVersionsFunc func(ctx context.Context, + registryClient registryclient.RegistryClient, + currentVersion, + imageName string) (clitypes.AvailableVersions, error) + + getEngineFunc func(ctx context.Context) (containerd.Container, error) + removeEngineFunc func(ctx context.Context) error + getCurrentEngineVersionFunc func(ctx context.Context) (clitypes.EngineInitOptions, error) + } +) + +func (w *fakeContainerizedEngineClient) Close() error { + if w.closeFunc != nil { + return w.closeFunc() + } + return nil +} + +func (w *fakeContainerizedEngineClient) ActivateEngine(ctx context.Context, + opts clitypes.EngineInitOptions, + out clitypes.OutStream, + authConfig *types.AuthConfig) error { + if w.activateEngineFunc != nil { + return w.activateEngineFunc(ctx, opts, out, authConfig) + } + return nil +} +func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context, + opts clitypes.EngineInitOptions, + out clitypes.OutStream, + authConfig *types.AuthConfig, + healthfn func(context.Context) error) error { + if w.initEngineFunc != nil { + return w.initEngineFunc(ctx, opts, out, authConfig, healthfn) + } + return nil +} +func (w *fakeContainerizedEngineClient) DoUpdate(ctx context.Context, + opts clitypes.EngineInitOptions, + out clitypes.OutStream, + authConfig *types.AuthConfig) error { + if w.doUpdateFunc != nil { + return w.doUpdateFunc(ctx, opts, out, authConfig) + } + return nil 
+} +func (w *fakeContainerizedEngineClient) GetEngineVersions(ctx context.Context, + registryClient registryclient.RegistryClient, + currentVersion, imageName string) (clitypes.AvailableVersions, error) { + + if w.getEngineVersionsFunc != nil { + return w.getEngineVersionsFunc(ctx, registryClient, currentVersion, imageName) + } + return clitypes.AvailableVersions{}, nil +} + +func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containerd.Container, error) { + if w.getEngineFunc != nil { + return w.getEngineFunc(ctx) + } + return nil, nil +} +func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context) error { + if w.removeEngineFunc != nil { + return w.removeEngineFunc(ctx) + } + return nil +} +func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (clitypes.EngineInitOptions, error) { + if w.getCurrentEngineVersionFunc != nil { + return w.getCurrentEngineVersionFunc(ctx) + } + return clitypes.EngineInitOptions{}, nil +} diff --git a/cli/cli/command/engine/cmd.go b/cli/cli/command/engine/cmd.go new file mode 100644 index 00000000..66e56631 --- /dev/null +++ b/cli/cli/command/engine/cmd.go @@ -0,0 +1,23 @@ +package engine + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewEngineCommand returns a cobra command for `engine` subcommands +func NewEngineCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "engine COMMAND", + Short: "Manage the docker engine", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newActivateCommand(dockerCli), + newCheckForUpdatesCommand(dockerCli), + newUpdateCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/engine/cmd_test.go b/cli/cli/command/engine/cmd_test.go new file mode 100644 index 00000000..30639cbf --- /dev/null +++ b/cli/cli/command/engine/cmd_test.go @@ -0,0 +1,14 @@ +package engine + +import ( + "testing" + + 
"gotest.tools/assert" +) + +func TestNewEngineCommand(t *testing.T) { + cmd := NewEngineCommand(testCli) + + subcommands := cmd.Commands() + assert.Assert(t, len(subcommands) == 3) +} diff --git a/cli/cli/command/engine/init.go b/cli/cli/command/engine/init.go new file mode 100644 index 00000000..f29001d0 --- /dev/null +++ b/cli/cli/command/engine/init.go @@ -0,0 +1,10 @@ +package engine + +import ( + clitypes "github.com/docker/cli/types" +) + +type extendedEngineInitOptions struct { + clitypes.EngineInitOptions + sockPath string +} diff --git a/cli/cli/command/engine/licenses.go b/cli/cli/command/engine/licenses.go new file mode 100644 index 00000000..37090ac2 --- /dev/null +++ b/cli/cli/command/engine/licenses.go @@ -0,0 +1,155 @@ +package engine + +import ( + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/internal/licenseutils" + "github.com/docker/licensing/model" +) + +const ( + defaultSubscriptionsTableFormat = "table {{.Num}}\t{{.Owner}}\t{{.ProductID}}\t{{.Expires}}\t{{.ComponentsString}}" + defaultSubscriptionsQuietFormat = "{{.Num}}:{{.Summary}}" + + numHeader = "NUM" + ownerHeader = "OWNER" + licenseNameHeader = "NAME" + idHeader = "ID" + dockerIDHeader = "DOCKER ID" + productIDHeader = "PRODUCT ID" + productRatePlanHeader = "PRODUCT RATE PLAN" + productRatePlanIDHeader = "PRODUCT RATE PLAN ID" + startHeader = "START" + expiresHeader = "EXPIRES" + stateHeader = "STATE" + eusaHeader = "EUSA" + pricingComponentsHeader = "PRICING COMPONENTS" +) + +// NewSubscriptionsFormat returns a Format for rendering using a license Context +func NewSubscriptionsFormat(source string, quiet bool) formatter.Format { + switch source { + case formatter.TableFormatKey: + if quiet { + return defaultSubscriptionsQuietFormat + } + return defaultSubscriptionsTableFormat + case formatter.RawFormatKey: + if quiet { + return `license: {{.ID}}` + } + return `license: {{.ID}}\nname: {{.Name}}\nowner: {{.Owner}}\ncomponents: {{.ComponentsString}}\n` 
+ } + return formatter.Format(source) +} + +// SubscriptionsWrite writes the context +func SubscriptionsWrite(ctx formatter.Context, subs []licenseutils.LicenseDisplay) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, sub := range subs { + licenseCtx := &licenseContext{trunc: ctx.Trunc, l: sub} + if err := format(licenseCtx); err != nil { + return err + } + } + return nil + } + licenseCtx := licenseContext{} + licenseCtx.Header = map[string]string{ + "Num": numHeader, + "Owner": ownerHeader, + "Name": licenseNameHeader, + "ID": idHeader, + "DockerID": dockerIDHeader, + "ProductID": productIDHeader, + "ProductRatePlan": productRatePlanHeader, + "ProductRatePlanID": productRatePlanIDHeader, + "Start": startHeader, + "Expires": expiresHeader, + "State": stateHeader, + "Eusa": eusaHeader, + "ComponentsString": pricingComponentsHeader, + } + return ctx.Write(&licenseCtx, render) +} + +type licenseContext struct { + formatter.HeaderContext + trunc bool + l licenseutils.LicenseDisplay +} + +func (c *licenseContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *licenseContext) Num() int { + return c.l.Num +} + +func (c *licenseContext) Owner() string { + return c.l.Owner +} + +func (c *licenseContext) ComponentsString() string { + return c.l.ComponentsString +} + +func (c *licenseContext) Summary() string { + return c.l.String() +} + +func (c *licenseContext) Name() string { + return c.l.Name +} + +func (c *licenseContext) ID() string { + return c.l.ID +} + +func (c *licenseContext) DockerID() string { + return c.l.DockerID +} + +func (c *licenseContext) ProductID() string { + return c.l.ProductID +} + +func (c *licenseContext) ProductRatePlan() string { + return c.l.ProductRatePlan +} + +func (c *licenseContext) ProductRatePlanID() string { + return c.l.ProductRatePlanID +} + +func (c *licenseContext) Start() *time.Time { + return c.l.Start +} + +func (c *licenseContext) Expires() *time.Time { 
+ return c.l.Expires +} + +func (c *licenseContext) State() string { + return c.l.State +} + +func (c *licenseContext) Eusa() *model.EusaState { + return c.l.Eusa +} + +func (c *licenseContext) PricingComponents() []model.SubscriptionPricingComponent { + // Dereference the pricing component pointers in the pricing components + // so it can be rendered properly with the template formatter + + var ret []model.SubscriptionPricingComponent + for _, spc := range c.l.PricingComponents { + if spc == nil { + continue + } + ret = append(ret, *spc) + } + return ret +} diff --git a/cli/cli/command/engine/licenses_test.go b/cli/cli/command/engine/licenses_test.go new file mode 100644 index 00000000..bacde70c --- /dev/null +++ b/cli/cli/command/engine/licenses_test.go @@ -0,0 +1,257 @@ +package engine + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/internal/licenseutils" + "github.com/docker/licensing/model" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSubscriptionContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + // Errors + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + formatter.Context{Format: NewSubscriptionsFormat("table", false)}, + `NUM OWNER PRODUCT ID EXPIRES PRICING COMPONENTS +1 owner1 productid1 2020-01-01 10:00:00 +0000 UTC compstring +2 owner2 productid2 2020-01-01 10:00:00 +0000 UTC compstring +`, + }, + { + formatter.Context{Format: NewSubscriptionsFormat("table", true)}, + `1:License Name: name1 Quantity: 10 nodes Expiration date: 2020-01-01 +2:License Name: name2 Quantity: 20 nodes Expiration date: 2020-01-01 +`, + }, + { + 
formatter.Context{Format: NewSubscriptionsFormat("table {{.Owner}}", false)}, + `OWNER +owner1 +owner2 +`, + }, + { + formatter.Context{Format: NewSubscriptionsFormat("table {{.Owner}}", true)}, + `OWNER +owner1 +owner2 +`, + }, + // Raw Format + { + formatter.Context{Format: NewSubscriptionsFormat("raw", false)}, + `license: id1 +name: name1 +owner: owner1 +components: compstring + +license: id2 +name: name2 +owner: owner2 +components: compstring + +`, + }, + { + formatter.Context{Format: NewSubscriptionsFormat("raw", true)}, + `license: id1 +license: id2 +`, + }, + // Custom Format + { + formatter.Context{Format: NewSubscriptionsFormat("{{.Owner}}", false)}, + `owner1 +owner2 +`, + }, + } + + expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC") + + for _, testcase := range cases { + subscriptions := []licenseutils.LicenseDisplay{ + { + Num: 1, + Owner: "owner1", + Subscription: model.Subscription{ + ID: "id1", + Name: "name1", + ProductID: "productid1", + Expires: &expiration, + PricingComponents: model.PricingComponents{ + &model.SubscriptionPricingComponent{ + Name: "nodes", + Value: 10, + }, + }, + }, + ComponentsString: "compstring", + }, + { + Num: 2, + Owner: "owner2", + Subscription: model.Subscription{ + ID: "id2", + Name: "name2", + ProductID: "productid2", + Expires: &expiration, + PricingComponents: model.PricingComponents{ + &model.SubscriptionPricingComponent{ + Name: "nodes", + Value: 20, + }, + }, + }, + ComponentsString: "compstring", + }, + } + out := &bytes.Buffer{} + testcase.context.Output = out + err := SubscriptionsWrite(testcase.context, subscriptions) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestSubscriptionContextWriteJSON(t *testing.T) { + expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC") + subscriptions := []licenseutils.LicenseDisplay{ + { + Num: 1, + Owner: "owner1", + Subscription: model.Subscription{ 
+ ID: "id1", + Name: "name1", + ProductID: "productid1", + Expires: &expiration, + PricingComponents: model.PricingComponents{ + &model.SubscriptionPricingComponent{ + Name: "nodes", + Value: 10, + }, + }, + }, + ComponentsString: "compstring", + }, + { + Num: 2, + Owner: "owner2", + Subscription: model.Subscription{ + ID: "id2", + Name: "name2", + ProductID: "productid2", + Expires: &expiration, + PricingComponents: model.PricingComponents{ + &model.SubscriptionPricingComponent{ + Name: "nodes", + Value: 20, + }, + }, + }, + ComponentsString: "compstring", + }, + } + expectedJSONs := []map[string]interface{}{ + { + "Owner": "owner1", + "ComponentsString": "compstring", + "Expires": "2020-01-01T10:00:00Z", + "DockerID": "", + "Eusa": nil, + "ID": "id1", + "Start": nil, + "Name": "name1", + "Num": float64(1), + "PricingComponents": []interface{}{ + map[string]interface{}{ + "name": "nodes", + "value": float64(10), + }, + }, + "ProductID": "productid1", + "ProductRatePlan": "", + "ProductRatePlanID": "", + "State": "", + "Summary": "License Name: name1\tQuantity: 10 nodes\tExpiration date: 2020-01-01", + }, + { + "Owner": "owner2", + "ComponentsString": "compstring", + "Expires": "2020-01-01T10:00:00Z", + "DockerID": "", + "Eusa": nil, + "ID": "id2", + "Start": nil, + "Name": "name2", + "Num": float64(2), + "PricingComponents": []interface{}{ + map[string]interface{}{ + "name": "nodes", + "value": float64(20), + }, + }, + "ProductID": "productid2", + "ProductRatePlan": "", + "ProductRatePlanID": "", + "State": "", + "Summary": "License Name: name2\tQuantity: 20 nodes\tExpiration date: 2020-01-01", + }, + } + + out := &bytes.Buffer{} + err := SubscriptionsWrite(formatter.Context{Format: "{{json .}}", Output: out}, subscriptions) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.Check(t, 
is.DeepEqual(expectedJSONs[i], m)) + } +} + +func TestSubscriptionContextWriteJSONField(t *testing.T) { + subscriptions := []licenseutils.LicenseDisplay{ + {Num: 1, Owner: "owner1"}, + {Num: 2, Owner: "owner2"}, + } + out := &bytes.Buffer{} + err := SubscriptionsWrite(formatter.Context{Format: "{{json .Owner}}", Output: out}, subscriptions) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal(subscriptions[i].Owner, s)) + } +} diff --git a/cli/cli/command/engine/testdata/check-all.golden b/cli/cli/command/engine/testdata/check-all.golden new file mode 100644 index 00000000..a6a5c70f --- /dev/null +++ b/cli/cli/command/engine/testdata/check-all.golden @@ -0,0 +1,11 @@ +TYPE VERSION NOTES +current 1.1.0 +patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1 +patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2 +patch 1.1.3-beta1 https://docs.docker.com/releasenotes/1.1.3-beta1 +upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0 +upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0 +upgrade 2.1.0-beta1 https://docs.docker.com/releasenotes/2.1.0-beta1 +downgrade 1.0.1 https://docs.docker.com/releasenotes/1.0.1 +downgrade 1.0.2 https://docs.docker.com/releasenotes/1.0.2 +downgrade 1.0.3-beta1 https://docs.docker.com/releasenotes/1.0.3-beta1 diff --git a/cli/cli/command/engine/testdata/check-no-downgrades.golden b/cli/cli/command/engine/testdata/check-no-downgrades.golden new file mode 100644 index 00000000..790b7dd1 --- /dev/null +++ b/cli/cli/command/engine/testdata/check-no-downgrades.golden @@ -0,0 +1,6 @@ +TYPE VERSION NOTES +current 1.1.0 +patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1 +patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2 +upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0 +upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0 diff --git 
a/cli/cli/command/engine/testdata/check-no-prerelease.golden b/cli/cli/command/engine/testdata/check-no-prerelease.golden new file mode 100644 index 00000000..acb50535 --- /dev/null +++ b/cli/cli/command/engine/testdata/check-no-prerelease.golden @@ -0,0 +1,8 @@ +TYPE VERSION NOTES +current 1.1.0 +patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1 +patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2 +upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0 +upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0 +downgrade 1.0.1 https://docs.docker.com/releasenotes/1.0.1 +downgrade 1.0.2 https://docs.docker.com/releasenotes/1.0.2 diff --git a/cli/cli/command/engine/testdata/check-patches-only.golden b/cli/cli/command/engine/testdata/check-patches-only.golden new file mode 100644 index 00000000..d5729703 --- /dev/null +++ b/cli/cli/command/engine/testdata/check-patches-only.golden @@ -0,0 +1,4 @@ +TYPE VERSION NOTES +current 1.1.0 +patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1 +patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2 diff --git a/cli/cli/command/engine/testdata/expired-hub-license-display-only.golden b/cli/cli/command/engine/testdata/expired-hub-license-display-only.golden new file mode 100644 index 00000000..71e97bbd --- /dev/null +++ b/cli/cli/command/engine/testdata/expired-hub-license-display-only.golden @@ -0,0 +1,3 @@ +Looking for existing licenses for ... +NUM OWNER PRODUCT ID EXPIRES PRICING COMPONENTS +0 2010-01-01 00:00:00 +0000 UTC diff --git a/cli/cli/command/engine/testdata/expired-license-display-only.golden b/cli/cli/command/engine/testdata/expired-license-display-only.golden new file mode 100644 index 00000000..a1bccce2 --- /dev/null +++ b/cli/cli/command/engine/testdata/expired-license-display-only.golden @@ -0,0 +1 @@ +License: Quantity: 1 Nodes Expiration date: 2018-03-18 Expired! You will no longer receive updates. 
Please renew at https://docker.com/licensing diff --git a/cli/cli/command/engine/update.go b/cli/cli/command/engine/update.go new file mode 100644 index 00000000..e28fc459 --- /dev/null +++ b/cli/cli/command/engine/update.go @@ -0,0 +1,55 @@ +package engine + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + clitypes "github.com/docker/cli/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +func newUpdateCommand(dockerCli command.Cli) *cobra.Command { + var options extendedEngineInitOptions + + cmd := &cobra.Command{ + Use: "update [OPTIONS]", + Short: "Update a local engine", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, options) + }, + } + flags := cmd.Flags() + + flags.StringVar(&options.EngineVersion, "version", "", "Specify engine version") + flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)") + flags.StringVar(&options.RegistryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the current location where engine images are pulled") + flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint") + + return cmd +} + +func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error { + if !isRoot() { + return errors.New("this command must be run as a privileged user") + } + ctx := context.Background() + client, err := dockerCli.NewContainerizedEngineClient(options.sockPath) + if err != nil { + return errors.Wrap(err, "unable to access local containerd") + } + defer client.Close() + authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix) + if err != nil { + return err + } + if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), `Successfully updated engine. 
+Restart docker with 'systemctl restart docker' to complete the update.`) + return nil +} diff --git a/cli/cli/command/engine/update_test.go b/cli/cli/command/engine/update_test.go new file mode 100644 index 00000000..641b8458 --- /dev/null +++ b/cli/cli/command/engine/update_test.go @@ -0,0 +1,40 @@ +package engine + +import ( + "fmt" + "testing" + + "github.com/docker/cli/internal/test" + clitypes "github.com/docker/cli/types" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "gotest.tools/assert" +) + +func TestUpdateNoContainerd(t *testing.T) { + testCli.SetContainerizedEngineClient( + func(string) (clitypes.ContainerizedClient, error) { + return nil, fmt.Errorf("some error") + }, + ) + cmd := newUpdateCommand(testCli) + cmd.SilenceUsage = true + cmd.SilenceErrors = true + err := cmd.Execute() + assert.ErrorContains(t, err, "unable to access local containerd") +} + +func TestUpdateHappy(t *testing.T) { + c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil}) + c.SetContainerizedEngineClient( + func(string) (clitypes.ContainerizedClient, error) { + return &fakeContainerizedEngineClient{}, nil + }, + ) + cmd := newUpdateCommand(c) + cmd.Flags().Set("registry-prefix", clitypes.RegistryPrefix) + cmd.Flags().Set("version", "someversion") + cmd.Flags().Set("engine-image", "someimage") + err := cmd.Execute() + assert.NilError(t, err) +} diff --git a/cli/cli/command/engine/updates.go b/cli/cli/command/engine/updates.go new file mode 100644 index 00000000..2ca4a552 --- /dev/null +++ b/cli/cli/command/engine/updates.go @@ -0,0 +1,74 @@ +package engine + +import ( + "github.com/docker/cli/cli/command/formatter" + clitypes "github.com/docker/cli/types" +) + +const ( + defaultUpdatesTableFormat = "table {{.Type}}\t{{.Version}}\t{{.Notes}}" + defaultUpdatesQuietFormat = "{{.Version}}" + + updatesTypeHeader = "TYPE" + versionHeader = "VERSION" + notesHeader = "NOTES" +) + +// 
NewUpdatesFormat returns a Format for rendering using a updates context +func NewUpdatesFormat(source string, quiet bool) formatter.Format { + switch source { + case formatter.TableFormatKey: + if quiet { + return defaultUpdatesQuietFormat + } + return defaultUpdatesTableFormat + case formatter.RawFormatKey: + if quiet { + return `update_version: {{.Version}}` + } + return `update_version: {{.Version}}\ntype: {{.Type}}\nnotes: {{.Notes}}\n` + } + return formatter.Format(source) +} + +// UpdatesWrite writes the context +func UpdatesWrite(ctx formatter.Context, availableUpdates []clitypes.Update) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, update := range availableUpdates { + updatesCtx := &updateContext{trunc: ctx.Trunc, u: update} + if err := format(updatesCtx); err != nil { + return err + } + } + return nil + } + updatesCtx := updateContext{} + updatesCtx.Header = map[string]string{ + "Type": updatesTypeHeader, + "Version": versionHeader, + "Notes": notesHeader, + } + return ctx.Write(&updatesCtx, render) +} + +type updateContext struct { + formatter.HeaderContext + trunc bool + u clitypes.Update +} + +func (c *updateContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *updateContext) Type() string { + return c.u.Type +} + +func (c *updateContext) Version() string { + return c.u.Version +} + +func (c *updateContext) Notes() string { + return c.u.Notes +} diff --git a/cli/cli/command/engine/updates_test.go b/cli/cli/command/engine/updates_test.go new file mode 100644 index 00000000..7fb2b0f1 --- /dev/null +++ b/cli/cli/command/engine/updates_test.go @@ -0,0 +1,144 @@ +package engine + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/cli/cli/command/formatter" + clitypes "github.com/docker/cli/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestUpdateContextWrite(t *testing.T) { + cases := []struct { + context 
formatter.Context + expected string + }{ + // Errors + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + formatter.Context{Format: NewUpdatesFormat("table", false)}, + `TYPE VERSION NOTES +updateType1 version1 description 1 +updateType2 version2 description 2 +`, + }, + { + formatter.Context{Format: NewUpdatesFormat("table", true)}, + `version1 +version2 +`, + }, + { + formatter.Context{Format: NewUpdatesFormat("table {{.Version}}", false)}, + `VERSION +version1 +version2 +`, + }, + { + formatter.Context{Format: NewUpdatesFormat("table {{.Version}}", true)}, + `VERSION +version1 +version2 +`, + }, + // Raw Format + { + formatter.Context{Format: NewUpdatesFormat("raw", false)}, + `update_version: version1 +type: updateType1 +notes: description 1 + +update_version: version2 +type: updateType2 +notes: description 2 + +`, + }, + { + formatter.Context{Format: NewUpdatesFormat("raw", true)}, + `update_version: version1 +update_version: version2 +`, + }, + // Custom Format + { + formatter.Context{Format: NewUpdatesFormat("{{.Version}}", false)}, + `version1 +version2 +`, + }, + } + + for _, testcase := range cases { + updates := []clitypes.Update{ + {Type: "updateType1", Version: "version1", Notes: "description 1"}, + {Type: "updateType2", Version: "version2", Notes: "description 2"}, + } + out := &bytes.Buffer{} + testcase.context.Output = out + err := UpdatesWrite(testcase.context, updates) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestUpdateContextWriteJSON(t *testing.T) { + updates := []clitypes.Update{ + {Type: "updateType1", Version: "version1", Notes: "note1"}, + {Type: "updateType2", Version: "version2", 
Notes: "note2"}, + } + expectedJSONs := []map[string]interface{}{ + {"Version": "version1", "Notes": "note1", "Type": "updateType1"}, + {"Version": "version2", "Notes": "note2", "Type": "updateType2"}, + } + + out := &bytes.Buffer{} + err := UpdatesWrite(formatter.Context{Format: "{{json .}}", Output: out}, updates) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.Check(t, is.DeepEqual(expectedJSONs[i], m)) + } +} + +func TestUpdateContextWriteJSONField(t *testing.T) { + updates := []clitypes.Update{ + {Type: "updateType1", Version: "version1"}, + {Type: "updateType2", Version: "version2"}, + } + out := &bytes.Buffer{} + err := UpdatesWrite(formatter.Context{Format: "{{json .Type}}", Output: out}, updates) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal(updates[i].Type, s)) + } +} diff --git a/cli/cli/command/events_utils.go b/cli/cli/command/events_utils.go new file mode 100644 index 00000000..16d76892 --- /dev/null +++ b/cli/cli/command/events_utils.go @@ -0,0 +1,47 @@ +package command + +import ( + "sync" + + eventtypes "github.com/docker/docker/api/types/events" + "github.com/sirupsen/logrus" +) + +// EventHandler is abstract interface for user to customize +// own handle functions of each type of events +type EventHandler interface { + Handle(action string, h func(eventtypes.Message)) + Watch(c <-chan eventtypes.Message) +} + +// InitEventHandler initializes and returns an EventHandler +func InitEventHandler() EventHandler { + return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} 
+ +func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan. +func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/cli/cli/command/formatter/buildcache.go b/cli/cli/command/formatter/buildcache.go new file mode 100644 index 00000000..c8c38481 --- /dev/null +++ b/cli/cli/command/formatter/buildcache.go @@ -0,0 +1,177 @@ +package formatter + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +const ( + defaultBuildCacheTableFormat = "table {{.ID}}\t{{.Type}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}\t{{.Description}}" + + cacheIDHeader = "CACHE ID" + cacheTypeHeader = "CACHE TYPE" + parentHeader = "PARENT" + lastUsedSinceHeader = "LAST USED" + usageCountHeader = "USAGE" + inUseHeader = "IN USE" + sharedHeader = "SHARED" +) + +// NewBuildCacheFormat returns a Format for rendering using a Context +func NewBuildCacheFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return DefaultQuietFormat + } + return Format(defaultBuildCacheTableFormat) + case RawFormatKey: + if quiet { + return `build_cache_id: {{.ID}}` + } + format := `build_cache_id: {{.ID}} +parent_id: {{.Parent}} +build_cache_type: {{.CacheType}} +description: {{.Description}} +created_at: {{.CreatedAt}} +created_since: {{.CreatedSince}} +last_used_at: {{.LastUsedAt}} +last_used_since: {{.LastUsedSince}} +usage_count: {{.UsageCount}} +in_use: {{.InUse}} +shared: 
{{.Shared}} +` + return Format(format) + } + return Format(source) +} + +func buildCacheSort(buildCache []*types.BuildCache) { + sort.Slice(buildCache, func(i, j int) bool { + lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt + switch { + case lui == nil && luj == nil: + return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0 + case lui == nil: + return true + case luj == nil: + return false + case lui.Equal(*luj): + return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0 + default: + return lui.Before(*luj) + } + }) +} + +// BuildCacheWrite renders the context for a list of containers +func BuildCacheWrite(ctx Context, buildCaches []*types.BuildCache) error { + render := func(format func(subContext SubContext) error) error { + buildCacheSort(buildCaches) + for _, bc := range buildCaches { + err := format(&buildCacheContext{trunc: ctx.Trunc, v: bc}) + if err != nil { + return err + } + } + return nil + } + return ctx.Write(newBuildCacheContext(), render) +} + +type buildCacheContext struct { + HeaderContext + trunc bool + v *types.BuildCache +} + +func newBuildCacheContext() *buildCacheContext { + buildCacheCtx := buildCacheContext{} + buildCacheCtx.Header = SubHeaderContext{ + "ID": cacheIDHeader, + "Parent": parentHeader, + "CacheType": cacheTypeHeader, + "Size": SizeHeader, + "CreatedSince": CreatedSinceHeader, + "LastUsedSince": lastUsedSinceHeader, + "UsageCount": usageCountHeader, + "InUse": inUseHeader, + "Shared": sharedHeader, + "Description": DescriptionHeader, + } + return &buildCacheCtx +} + +func (c *buildCacheContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *buildCacheContext) ID() string { + id := c.v.ID + if c.trunc { + id = stringid.TruncateID(c.v.ID) + } + if c.v.InUse { + return id + "*" + } + return id +} + +func (c *buildCacheContext) Parent() string { + if c.trunc { + return stringid.TruncateID(c.v.Parent) + } + return c.v.Parent +} + +func (c *buildCacheContext) CacheType() string { + 
return c.v.Type +} + +func (c *buildCacheContext) Description() string { + return c.v.Description +} + +func (c *buildCacheContext) Size() string { + return units.HumanSizeWithPrecision(float64(c.v.Size), 3) +} + +func (c *buildCacheContext) CreatedAt() string { + return c.v.CreatedAt.String() +} + +func (c *buildCacheContext) CreatedSince() string { + return units.HumanDuration(time.Now().UTC().Sub(c.v.CreatedAt)) + " ago" +} + +func (c *buildCacheContext) LastUsedAt() string { + if c.v.LastUsedAt == nil { + return "" + } + return c.v.LastUsedAt.String() +} + +func (c *buildCacheContext) LastUsedSince() string { + if c.v.LastUsedAt == nil { + return "" + } + return units.HumanDuration(time.Now().UTC().Sub(*c.v.LastUsedAt)) + " ago" +} + +func (c *buildCacheContext) UsageCount() string { + return fmt.Sprintf("%d", c.v.UsageCount) +} + +func (c *buildCacheContext) InUse() string { + return fmt.Sprintf("%t", c.v.InUse) +} + +func (c *buildCacheContext) Shared() string { + return fmt.Sprintf("%t", c.v.Shared) +} diff --git a/cli/cli/command/formatter/container.go b/cli/cli/command/formatter/container.go new file mode 100644 index 00000000..e7e98cb9 --- /dev/null +++ b/cli/cli/command/formatter/container.go @@ -0,0 +1,329 @@ +package formatter + +import ( + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +const ( + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + + namesHeader = "NAMES" + commandHeader = "COMMAND" + runningForHeader = "CREATED" + mountsHeader = "MOUNTS" + localVolumes = "LOCAL VOLUMES" + networksHeader = "NETWORKS" +) + +// NewContainerFormat returns a Format for rendering using a Context +func NewContainerFormat(source string, quiet bool, size bool) Format { + switch source { + case TableFormatKey: + if quiet { + 
return DefaultQuietFormat + } + format := defaultContainerTableFormat + if size { + format += `\t{{.Size}}` + } + return Format(format) + case RawFormatKey: + if quiet { + return `container_id: {{.ID}}` + } + format := `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{- pad .Status 1 0}} +names: {{.Names}} +labels: {{- pad .Labels 1 0}} +ports: {{- pad .Ports 1 0}} +` + if size { + format += `size: {{.Size}}\n` + } + return Format(format) + } + return Format(source) +} + +// ContainerWrite renders the context for a list of containers +func ContainerWrite(ctx Context, containers []types.Container) error { + render := func(format func(subContext SubContext) error) error { + for _, container := range containers { + err := format(&containerContext{trunc: ctx.Trunc, c: container}) + if err != nil { + return err + } + } + return nil + } + return ctx.Write(newContainerContext(), render) +} + +type containerContext struct { + HeaderContext + trunc bool + c types.Container +} + +func newContainerContext() *containerContext { + containerCtx := containerContext{} + containerCtx.Header = SubHeaderContext{ + "ID": ContainerIDHeader, + "Names": namesHeader, + "Image": ImageHeader, + "Command": commandHeader, + "CreatedAt": CreatedAtHeader, + "RunningFor": runningForHeader, + "Ports": PortsHeader, + "Status": StatusHeader, + "Size": SizeHeader, + "Labels": LabelsHeader, + "Mounts": mountsHeader, + "LocalVolumes": localVolumes, + "Networks": networksHeader, + } + return &containerCtx +} + +func (c *containerContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *containerContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return 
strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + // truncate digest if no-trunc option was not selected + ref, err := reference.ParseNormalizedNamed(c.c.Image) + if err == nil { + if nt, ok := ref.(reference.NamedTagged); ok { + // case for when a tag is provided + if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { + return reference.FamiliarString(namedTagged) + } + } else { + // case for when a tag is not provided + named := reference.TrimNamed(ref) + return reference.FamiliarString(named) + } + } + } + + return c.c.Image +} + +func (c *containerContext) Command() string { + command := c.c.Command + if c.trunc { + command = Ellipsis(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + return time.Unix(c.c.Created, 0).String() +} + +func (c *containerContext) RunningFor() string { + createdAt := time.Unix(c.c.Created, 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago" +} + +func (c *containerContext) Ports() string { + return DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + return c.c.Status +} + +func (c *containerContext) Size() string { + srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3) + sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + if c.c.Labels == nil { + return "" + } + return 
c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = Ellipsis(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +func (c *containerContext) LocalVolumes() string { + count := 0 + for _, m := range c.c.Mounts { + if m.Driver == "local" { + count++ + } + } + + return fmt.Sprintf("%d", count) +} + +func (c *containerContext) Networks() string { + if c.c.NetworkSettings == nil { + return "" + } + + networks := []string{} + for k := range c.c.NetworkSettings.Networks { + networks = append(networks, k) + } + + return strings.Join(networks, ",") +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first uint16 + last uint16 + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Slice(ports, func(i, j int) bool { + return comparePorts(ports[i], ports[j]) + }) + + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: 
current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) + return strings.Join(result, ", ") +} + +func formGroup(key string, start, last uint16) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +func comparePorts(i, j types.Port) bool { + if i.PrivatePort != j.PrivatePort { + return i.PrivatePort < j.PrivatePort + } + + if i.IP != j.IP { + return i.IP < j.IP + } + + if i.PublicPort != j.PublicPort { + return i.PublicPort < j.PublicPort + } + + return i.Type < j.Type +} diff --git a/cli/cli/command/formatter/container_test.go b/cli/cli/command/formatter/container_test.go new file mode 100644 index 00000000..cafb9abd --- /dev/null +++ b/cli/cli/command/formatter/container_test.go @@ -0,0 +1,658 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestContainerPsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + unix := time.Now().Add(-65 * time.Second).Unix() + + var ctx containerContext + cases := []struct { + container types.Container + trunc bool + expValue string + call func() string + }{ + {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), ctx.ID}, + {types.Container{ID: containerID}, false, containerID, ctx.ID}, + {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", ctx.Names}, + {types.Container{Image: 
"ubuntu"}, true, "ubuntu", ctx.Image}, + {types.Container{Image: "verylongimagename"}, true, "verylongimagename", ctx.Image}, + {types.Container{Image: "verylongimagename"}, false, "verylongimagename", ctx.Image}, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + true, + "a5a665ff33ec", + ctx.Image, + }, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + false, + "a5a665ff33eced1e0803148700880edab4", + ctx.Image, + }, + {types.Container{Image: ""}, true, "", ctx.Image}, + {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, ctx.Command}, + {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), ctx.CreatedAt}, + {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", ctx.Ports}, + {types.Container{Status: "RUNNING"}, true, "RUNNING", ctx.Status}, + {types.Container{SizeRw: 10}, true, "10B", ctx.Size}, + {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10B (virtual 20B)", ctx.Size}, + {types.Container{}, true, "", ctx.Labels}, + {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", ctx.Labels}, + {types.Container{Created: unix}, true, "About a minute ago", ctx.RunningFor}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "this-is-a-long-volume-name-and-will-be-truncated-if-trunc-is-set", + Driver: "local", + Source: "/a/path", + }, + }, + }, true, "this-is-a-long…", ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "/a/path", ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", + Driver: "local", + Source: "/a/path", + }, + }, + }, false, 
"733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", ctx.Mounts}, + } + + for _, c := range cases { + ctx = containerContext{c: c.container, trunc: c.trunc} + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } + + c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} + ctx = containerContext{c: c1, trunc: true} + + sid := ctx.Label("com.docker.swarm.swarm-id") + node := ctx.Label("com.docker.swarm.node_name") + if sid != "33" { + t.Fatalf("Expected 33, was %s\n", sid) + } + + if node != "ubuntu" { + t.Fatalf("Expected ubuntu, was %s\n", node) + } + + c2 := types.Container{} + ctx = containerContext{c: c2, trunc: true} + + label := ctx.Label("anything.really") + if label != "" { + t.Fatalf("Expected an empty string, was %s", label) + } +} + +func TestContainerContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + Context{Format: NewContainerFormat("table", false, true)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE +containerID1 ubuntu "" 24 hours ago foobar_baz 0B +containerID2 ubuntu "" 24 hours ago foobar_bar 0B +`, + }, + { + Context{Format: NewContainerFormat("table", false, false)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +containerID1 ubuntu "" 24 hours ago foobar_baz +containerID2 ubuntu "" 24 hours ago foobar_bar +`, + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, 
false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, true)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", true, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table", true, false)}, + "containerID1\ncontainerID2\n", + }, + // Raw Format + { + Context{Format: NewContainerFormat("raw", false, false)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", false, true)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: +size: 0B + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: +size: 0B + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", true, false)}, + "container_id: containerID1\ncontainer_id: containerID2\n", + }, + // Custom Format + { + Context{Format: "{{.Image}}"}, + "ubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("{{.Image}}", false, true)}, + "ubuntu\nubuntu\n", + }, + // Special headers for customized table format + { + Context{Format: NewContainerFormat(`table {{truncate .ID 5}}\t{{json .Image}} {{.RunningFor}}/{{title .Status}}/{{pad .Ports 2 2}}.{{upper .Names}} {{lower .Status}}`, false, true)}, + string(golden.Get(t, "container-context-write-special-headers.golden")), + }, + } + + for _, testcase := range cases { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, + 
} + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ContainerWrite(testcase.context, containers) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestContainerContextWriteWithNoContainers(t *testing.T) { + out := bytes.NewBufferString("") + containers := []types.Container{} + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Image}}", + Output: out, + }, + "", + }, + { + Context{ + Format: "table {{.Image}}", + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: NewContainerFormat("{{.Image}}", false, true), + Output: out, + }, + "", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}", false, true), + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: "table {{.Image}}\t{{.Size}}", + Output: out, + }, + "IMAGE SIZE\n", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}\t{{.Size}}", false, true), + Output: out, + }, + "IMAGE SIZE\n", + }, + } + + for _, context := range contexts { + ContainerWrite(context.context, containers) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} + +func TestContainerContextWriteJSON(t *testing.T) { + unix := time.Now().Add(-65 * time.Second).Unix() + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unix}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix}, + } + expectedCreated := time.Unix(unix, 0).String() + expectedJSONs := []map[string]interface{}{ + { + "Command": "\"\"", + "CreatedAt": expectedCreated, + "ID": "containerID1", + "Image": "ubuntu", + "Labels": "", + "LocalVolumes": "0", + "Mounts": "", + "Names": "foobar_baz", + "Networks": "", + "Ports": "", + "RunningFor": "About a minute ago", + "Size": "0B", + "Status": "", + }, + { + "Command": "\"\"", + 
"CreatedAt": expectedCreated, + "ID": "containerID2", + "Image": "ubuntu", + "Labels": "", + "LocalVolumes": "0", + "Mounts": "", + "Names": "foobar_bar", + "Networks": "", + "Ports": "", + "RunningFor": "About a minute ago", + "Size": "0B", + "Status": "", + }, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg) + } +} + +func TestContainerContextWriteJSONField(t *testing.T) { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"}, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .ID}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(containers[i].ID, s), msg) + } +} + +func TestContainerBackCompat(t *testing.T) { + containers := []types.Container{{ID: "brewhaha"}} + cases := []string{ + "ID", + "Names", + "Image", + "Command", + "CreatedAt", + "RunningFor", + "Ports", + "Status", + "Size", + "Labels", + "Mounts", + } + buf := bytes.NewBuffer(nil) + for _, c := range cases { + ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf} + if err := ContainerWrite(ctx, containers); err != nil { + t.Logf("could not render template for field '%s': %v", c, err) + t.Fail() + } + buf.Reset() + } +} + +type ports struct { + ports []types.Port + expected 
string +} + +// nolint: lll +func TestDisplayablePorts(t *testing.T) { + cases := []ports{ + { + []types.Port{ + { + PrivatePort: 9988, + Type: "tcp", + }, + }, + "9988/tcp"}, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp", + }, + { + []types.Port{ + { + IP: "0.0.0.0", + PrivatePort: 9988, + Type: "tcp", + }, + }, + "0.0.0.0:0->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "4.3.2.1:8899->9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 9988, + Type: "tcp", + }, + }, + "4.3.2.1:9988->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp, 9988/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 9998, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 9999, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:9998-9999->9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 8887, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 8888, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", + }, + { + []types.Port{ + { + PrivatePort: 9998, + Type: "udp", + }, { + PrivatePort: 9999, + Type: "udp", + }, + }, + "9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, + }, + "9988/udp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, 
+ }, + "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + PrivatePort: 80, + Type: "tcp", + }, { + PrivatePort: 1024, + Type: "tcp", + }, { + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 12345, + Type: "sctp", + }, + }, + "80/tcp, 80/udp, 1024/tcp, 1024/udp, 12345/sctp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", + }, + } + + for _, port := range cases { + actual := DisplayablePorts(port.ports) + assert.Check(t, is.Equal(port.expected, actual)) + } +} diff --git a/cli/cli/command/formatter/context.go b/cli/cli/command/formatter/context.go new file mode 100644 index 00000000..93f86f6a --- /dev/null +++ b/cli/cli/command/formatter/context.go @@ -0,0 +1,90 @@ +package formatter + +const ( + // ClientContextTableFormat is the default client context format + ClientContextTableFormat = "table {{.Name}}{{if 
.Current}} *{{end}}\t{{.Description}}\t{{.DockerEndpoint}}\t{{.KubernetesEndpoint}}\t{{.StackOrchestrator}}" + + dockerEndpointHeader = "DOCKER ENDPOINT" + kubernetesEndpointHeader = "KUBERNETES ENDPOINT" + stackOrchestrastorHeader = "ORCHESTRATOR" + quietContextFormat = "{{.Name}}" +) + +// NewClientContextFormat returns a Format for rendering using a Context +func NewClientContextFormat(source string, quiet bool) Format { + if quiet { + return Format(quietContextFormat) + } + if source == TableFormatKey { + return Format(ClientContextTableFormat) + } + return Format(source) +} + +// ClientContext is a context for display +type ClientContext struct { + Name string + Description string + DockerEndpoint string + KubernetesEndpoint string + StackOrchestrator string + Current bool +} + +// ClientContextWrite writes formatted contexts using the Context +func ClientContextWrite(ctx Context, contexts []*ClientContext) error { + render := func(format func(subContext SubContext) error) error { + for _, context := range contexts { + if err := format(&clientContextContext{c: context}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newClientContextContext(), render) +} + +type clientContextContext struct { + HeaderContext + c *ClientContext +} + +func newClientContextContext() *clientContextContext { + ctx := clientContextContext{} + ctx.Header = SubHeaderContext{ + "Name": NameHeader, + "Description": DescriptionHeader, + "DockerEndpoint": dockerEndpointHeader, + "KubernetesEndpoint": kubernetesEndpointHeader, + "StackOrchestrator": stackOrchestrastorHeader, + } + return &ctx +} + +func (c *clientContextContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *clientContextContext) Current() bool { + return c.c.Current +} + +func (c *clientContextContext) Name() string { + return c.c.Name +} + +func (c *clientContextContext) Description() string { + return c.c.Description +} + +func (c *clientContextContext) DockerEndpoint() 
string { + return c.c.DockerEndpoint +} + +func (c *clientContextContext) KubernetesEndpoint() string { + return c.c.KubernetesEndpoint +} + +func (c *clientContextContext) StackOrchestrator() string { + return c.c.StackOrchestrator +} diff --git a/cli/cli/command/formatter/custom.go b/cli/cli/command/formatter/custom.go new file mode 100644 index 00000000..6be9ed7a --- /dev/null +++ b/cli/cli/command/formatter/custom.go @@ -0,0 +1,55 @@ +package formatter + +import "strings" + +// Common header constants +const ( + CreatedSinceHeader = "CREATED" + CreatedAtHeader = "CREATED AT" + SizeHeader = "SIZE" + LabelsHeader = "LABELS" + NameHeader = "NAME" + DescriptionHeader = "DESCRIPTION" + DriverHeader = "DRIVER" + ScopeHeader = "SCOPE" + StatusHeader = "STATUS" + PortsHeader = "PORTS" + ImageHeader = "IMAGE" + ContainerIDHeader = "CONTAINER ID" +) + +// SubContext defines what Context implementation should provide +type SubContext interface { + FullHeader() interface{} +} + +// SubHeaderContext is a map destined to formatter header (table format) +type SubHeaderContext map[string]string + +// Label returns the header label for the specified string +func (c SubHeaderContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + return h +} + +// HeaderContext provides the subContext interface for managing headers +type HeaderContext struct { + Header interface{} +} + +// FullHeader returns the header as an interface +func (c *HeaderContext) FullHeader() interface{} { + return c.Header +} + +func stripNamePrefix(ss []string) []string { + sss := make([]string, len(ss)) + for i, s := range ss { + sss[i] = s[1:] + } + + return sss +} diff --git a/cli/cli/command/formatter/custom_test.go b/cli/cli/command/formatter/custom_test.go new file mode 100644 index 00000000..921bfd71 --- /dev/null +++ b/cli/cli/command/formatter/custom_test.go @@ -0,0 +1,12 @@ +package formatter + +import ( + 
"testing" + + "github.com/docker/cli/internal/test" +) + +// Deprecated: use internal/test.CompareMultipleValues instead +func compareMultipleValues(t *testing.T, value, expected string) { + test.CompareMultipleValues(t, value, expected) +} diff --git a/cli/cli/command/formatter/disk_usage.go b/cli/cli/command/formatter/disk_usage.go new file mode 100644 index 00000000..7ddb6e48 --- /dev/null +++ b/cli/cli/command/formatter/disk_usage.go @@ -0,0 +1,476 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + +const ( + defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" + defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}}\t{{.Status}}\t{{.Names}}" + defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" + defaultDiskUsageBuildCacheTableFormat = "table {{.ID}}\t{{.CacheType}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}" + defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" + + typeHeader = "TYPE" + totalHeader = "TOTAL" + activeHeader = "ACTIVE" + reclaimableHeader = "RECLAIMABLE" + containersHeader = "CONTAINERS" + sharedSizeHeader = "SHARED SIZE" + uniqueSizeHeader = "UNIQUE SIZE" +) + +// DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct. 
+type DiskUsageContext struct { + Context + Verbose bool + LayersSize int64 + Images []*types.ImageSummary + Containers []*types.Container + Volumes []*types.Volume + BuildCache []*types.BuildCache + BuilderSize int64 +} + +func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { + ctx.buffer = bytes.NewBufferString("") + ctx.header = "" + ctx.Format = Format(format) + ctx.preFormat() + + return ctx.parseFormat() +} + +// NewDiskUsageFormat returns a format for rendering an DiskUsageContext +func NewDiskUsageFormat(source string, verbose bool) Format { + switch { + case verbose && source == RawFormatKey: + format := `{{range .Images}}type: Image +` + NewImageFormat(source, false, true) + ` +{{end -}} +{{range .Containers}}type: Container +` + NewContainerFormat(source, false, true) + ` +{{end -}} +{{range .Volumes}}type: Volume +` + NewVolumeFormat(source, false) + ` +{{end -}} +{{range .BuildCache}}type: Build Cache +` + NewBuildCacheFormat(source, false) + ` +{{end -}}` + return format + case !verbose && source == TableFormatKey: + return Format(defaultDiskUsageTableFormat) + case !verbose && source == RawFormatKey: + format := `type: {{.Type}} +total: {{.TotalCount}} +active: {{.Active}} +size: {{.Size}} +reclaimable: {{.Reclaimable}} +` + return Format(format) + default: + return Format(source) + } +} + +func (ctx *DiskUsageContext) Write() (err error) { + if ctx.Verbose { + return ctx.verboseWrite() + } + ctx.buffer = bytes.NewBufferString("") + ctx.preFormat() + + tmpl, err := ctx.parseFormat() + if err != nil { + return err + } + + err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ + totalSize: ctx.LayersSize, + images: ctx.Images, + }) + if err != nil { + return err + } + err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ + containers: ctx.Containers, + }) + if err != nil { + return err + } + + err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ + volumes: ctx.Volumes, + }) + if err != nil { + return err + } 
+ + err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{ + builderSize: ctx.BuilderSize, + buildCache: ctx.BuildCache, + }) + if err != nil { + return err + } + + diskUsageContainersCtx := diskUsageContainersContext{containers: []*types.Container{}} + diskUsageContainersCtx.Header = SubHeaderContext{ + "Type": typeHeader, + "TotalCount": totalHeader, + "Active": activeHeader, + "Size": SizeHeader, + "Reclaimable": reclaimableHeader, + } + ctx.postFormat(tmpl, &diskUsageContainersCtx) + + return err +} + +type diskUsageContext struct { + Images []*imageContext + Containers []*containerContext + Volumes []*volumeContext + BuildCache []*buildCacheContext +} + +func (ctx *DiskUsageContext) verboseWrite() error { + duc := &diskUsageContext{ + Images: make([]*imageContext, 0, len(ctx.Images)), + Containers: make([]*containerContext, 0, len(ctx.Containers)), + Volumes: make([]*volumeContext, 0, len(ctx.Volumes)), + BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCache)), + } + trunc := ctx.Format.IsTable() + + // First images + for _, i := range ctx.Images { + repo := "" + tag := "" + if len(i.RepoTags) > 0 && !isDangling(*i) { + // Only show the first tag + ref, err := reference.ParseNormalizedNamed(i.RepoTags[0]) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + repo = reference.FamiliarName(ref) + tag = nt.Tag() + } + } + + duc.Images = append(duc.Images, &imageContext{ + repo: repo, + tag: tag, + trunc: trunc, + i: *i, + }) + } + + // Now containers + for _, c := range ctx.Containers { + // Don't display the virtual size + c.SizeRootFs = 0 + duc.Containers = append(duc.Containers, &containerContext{trunc: trunc, c: *c}) + } + + // And volumes + for _, v := range ctx.Volumes { + duc.Volumes = append(duc.Volumes, &volumeContext{v: *v}) + } + + // And build cache + buildCacheSort(ctx.BuildCache) + for _, v := range ctx.BuildCache { + duc.BuildCache = append(duc.BuildCache, &buildCacheContext{v: v, trunc: trunc}) + } + + if 
ctx.Format == TableFormatKey { + return ctx.verboseWriteTable(duc) + } + + ctx.preFormat() + tmpl, err := ctx.parseFormat() + if err != nil { + return err + } + return tmpl.Execute(ctx.Output, duc) +} + +func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error { + tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat) + if err != nil { + return err + } + ctx.Output.Write([]byte("Images space usage:\n\n")) + for _, img := range duc.Images { + if err := ctx.contextFormat(tmpl, img); err != nil { + return err + } + } + ctx.postFormat(tmpl, newImageContext()) + + tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat) + if err != nil { + return err + } + ctx.Output.Write([]byte("\nContainers space usage:\n\n")) + for _, c := range duc.Containers { + if err := ctx.contextFormat(tmpl, c); err != nil { + return err + } + } + ctx.postFormat(tmpl, newContainerContext()) + + tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat) + if err != nil { + return err + } + ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n")) + for _, v := range duc.Volumes { + if err := ctx.contextFormat(tmpl, v); err != nil { + return err + } + } + ctx.postFormat(tmpl, newVolumeContext()) + + tmpl, err = ctx.startSubsection(defaultDiskUsageBuildCacheTableFormat) + if err != nil { + return err + } + fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize))) + for _, v := range duc.BuildCache { + if err := ctx.contextFormat(tmpl, v); err != nil { + return err + } + } + ctx.postFormat(tmpl, newBuildCacheContext()) + + return nil +} + +type diskUsageImagesContext struct { + HeaderContext + totalSize int64 + images []*types.ImageSummary +} + +func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *diskUsageImagesContext) Type() string { + return "Images" +} + +func (c *diskUsageImagesContext) TotalCount() string { + return fmt.Sprintf("%d", 
len(c.images)) +} + +func (c *diskUsageImagesContext) Active() string { + used := 0 + for _, i := range c.images { + if i.Containers > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageImagesContext) Size() string { + return units.HumanSize(float64(c.totalSize)) + +} + +func (c *diskUsageImagesContext) Reclaimable() string { + var used int64 + + for _, i := range c.images { + if i.Containers != 0 { + if i.VirtualSize == -1 || i.SharedSize == -1 { + continue + } + used += i.VirtualSize - i.SharedSize + } + } + + reclaimable := c.totalSize - used + if c.totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) + } + return units.HumanSize(float64(reclaimable)) +} + +type diskUsageContainersContext struct { + HeaderContext + containers []*types.Container +} + +func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *diskUsageContainersContext) Type() string { + return "Containers" +} + +func (c *diskUsageContainersContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.containers)) +} + +func (c *diskUsageContainersContext) isActive(container types.Container) bool { + return strings.Contains(container.State, "running") || + strings.Contains(container.State, "paused") || + strings.Contains(container.State, "restarting") +} + +func (c *diskUsageContainersContext) Active() string { + used := 0 + for _, container := range c.containers { + if c.isActive(*container) { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageContainersContext) Size() string { + var size int64 + + for _, container := range c.containers { + size += container.SizeRw + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageContainersContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + for _, container := range c.containers { + if !c.isActive(*container) { + reclaimable += 
container.SizeRw + } + totalSize += container.SizeRw + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return units.HumanSize(float64(reclaimable)) +} + +type diskUsageVolumesContext struct { + HeaderContext + volumes []*types.Volume +} + +func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *diskUsageVolumesContext) Type() string { + return "Local Volumes" +} + +func (c *diskUsageVolumesContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.volumes)) +} + +func (c *diskUsageVolumesContext) Active() string { + + used := 0 + for _, v := range c.volumes { + if v.UsageData.RefCount > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageVolumesContext) Size() string { + var size int64 + + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + size += v.UsageData.Size + } + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageVolumesContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + if v.UsageData.RefCount == 0 { + reclaimable += v.UsageData.Size + } + totalSize += v.UsageData.Size + } + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return units.HumanSize(float64(reclaimable)) +} + +type diskUsageBuilderContext struct { + HeaderContext + builderSize int64 + buildCache []*types.BuildCache +} + +func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *diskUsageBuilderContext) Type() string { + return "Build Cache" +} + +func (c *diskUsageBuilderContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.buildCache)) +} + +func (c *diskUsageBuilderContext) Active() string { + numActive := 0 + for _, bc := range c.buildCache { + if bc.InUse { + 
numActive++ + } + } + return fmt.Sprintf("%d", numActive) +} + +func (c *diskUsageBuilderContext) Size() string { + return units.HumanSize(float64(c.builderSize)) +} + +func (c *diskUsageBuilderContext) Reclaimable() string { + var inUseBytes int64 + for _, bc := range c.buildCache { + if bc.InUse && !bc.Shared { + inUseBytes += bc.Size + } + } + + return units.HumanSize(float64(c.builderSize - inUseBytes)) +} diff --git a/cli/cli/command/formatter/disk_usage_test.go b/cli/cli/command/formatter/disk_usage_test.go new file mode 100644 index 00000000..9a0b829e --- /dev/null +++ b/cli/cli/command/formatter/disk_usage_test.go @@ -0,0 +1,119 @@ +package formatter + +import ( + "bytes" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestDiskUsageContextFormatWrite(t *testing.T) { + cases := []struct { + context DiskUsageContext + expected string + }{ + // Check default output format (verbose and non-verbose mode) for table headers + { + DiskUsageContext{ + Context: Context{ + Format: NewDiskUsageFormat("table", false), + }, + Verbose: false}, + `TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 0 0 0B 0B +Containers 0 0 0B 0B +Local Volumes 0 0 0B 0B +Build Cache 0 0 0B 0B +`, + }, + { + DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("table", true)}}, + `Images space usage: + +REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS + +Containers space usage: + +CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES + +Local Volumes space usage: + +VOLUME NAME LINKS SIZE + +Build cache usage: 0B + +CACHE ID CACHE TYPE SIZE CREATED LAST USED USAGE SHARED +`, + }, + { + DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("raw", true)}}, + ``, + }, + { + DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("{{json .}}", true)}}, + `{"Images":[],"Containers":[],"Volumes":[],"BuildCache":[]}`, + }, + // Errors + { + 
DiskUsageContext{ + Context: Context{ + Format: "{{InvalidFunction}}", + }, + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + DiskUsageContext{ + Context: Context{ + Format: "{{nil}}", + }, + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + DiskUsageContext{ + Context: Context{ + Format: NewDiskUsageFormat("table", false), + }, + }, + `TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 0 0 0B 0B +Containers 0 0 0B 0B +Local Volumes 0 0 0B 0B +Build Cache 0 0 0B 0B +`, + }, + { + DiskUsageContext{ + Context: Context{ + Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}", false), + }, + }, + string(golden.Get(t, "disk-usage-context-write-custom.golden")), + }, + // Raw Format + { + DiskUsageContext{ + Context: Context{ + Format: NewDiskUsageFormat("raw", false), + }, + }, + string(golden.Get(t, "disk-usage-raw-format.golden")), + }, + } + + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + if err := testcase.context.Write(); err != nil { + assert.Check(t, is.Equal(testcase.expected, err.Error())) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} diff --git a/cli/cli/command/formatter/displayutils.go b/cli/cli/command/formatter/displayutils.go new file mode 100644 index 00000000..0c3b6ebb --- /dev/null +++ b/cli/cli/command/formatter/displayutils.go @@ -0,0 +1,61 @@ +package formatter + +import ( + "unicode/utf8" + + "golang.org/x/text/width" +) + +// charWidth returns the number of horizontal positions a character occupies, +// and is used to account for wide characters when displaying strings. +// +// In a broad sense, wide characters include East Asian Wide, East Asian Full-width, +// (when not in East Asian context) see http://unicode.org/reports/tr11/. 
+func charWidth(r rune) int { + switch width.LookupRune(r).Kind() { + case width.EastAsianWide, width.EastAsianFullwidth: + return 2 + default: + return 1 + } +} + +// Ellipsis truncates a string to fit within maxDisplayWidth, and appends ellipsis (…). +// For maxDisplayWidth of 1 and lower, no ellipsis is appended. +// For maxDisplayWidth of 1, first char of string will return even if its width > 1. +func Ellipsis(s string, maxDisplayWidth int) string { + if maxDisplayWidth <= 0 { + return "" + } + rs := []rune(s) + if maxDisplayWidth == 1 { + return string(rs[0]) + } + + byteLen := len(s) + if byteLen == utf8.RuneCountInString(s) { + if byteLen <= maxDisplayWidth { + return s + } + return string(rs[:maxDisplayWidth-1]) + "…" + } + + var ( + display []int + displayWidth int + ) + for _, r := range rs { + cw := charWidth(r) + displayWidth += cw + display = append(display, displayWidth) + } + if displayWidth <= maxDisplayWidth { + return s + } + for i := range display { + if display[i] <= maxDisplayWidth-1 && display[i+1] > maxDisplayWidth-1 { + return string(rs[:i+1]) + "…" + } + } + return s +} diff --git a/cli/cli/command/formatter/displayutils_test.go b/cli/cli/command/formatter/displayutils_test.go new file mode 100644 index 00000000..db60610b --- /dev/null +++ b/cli/cli/command/formatter/displayutils_test.go @@ -0,0 +1,31 @@ +package formatter + +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestEllipsis(t *testing.T) { + var testcases = []struct { + source string + width int + expected string + }{ + {source: "t🐳ststring", width: 0, expected: ""}, + {source: "t🐳ststring", width: 1, expected: "t"}, + {source: "t🐳ststring", width: 2, expected: "t…"}, + {source: "t🐳ststring", width: 6, expected: "t🐳st…"}, + {source: "t🐳ststring", width: 20, expected: "t🐳ststring"}, + {source: "你好世界teststring", width: 0, expected: ""}, + {source: "你好世界teststring", width: 1, expected: "你"}, + {source: "你好世界teststring", width: 3, 
expected: "你…"}, + {source: "你好世界teststring", width: 6, expected: "你好…"}, + {source: "你好世界teststring", width: 20, expected: "你好世界teststring"}, + } + + for _, testcase := range testcases { + assert.Check(t, is.Equal(testcase.expected, Ellipsis(testcase.source, testcase.width))) + } +} diff --git a/cli/cli/command/formatter/formatter.go b/cli/cli/command/formatter/formatter.go new file mode 100644 index 00000000..ae828bfe --- /dev/null +++ b/cli/cli/command/formatter/formatter.go @@ -0,0 +1,119 @@ +package formatter + +import ( + "bytes" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/cli/templates" + "github.com/pkg/errors" +) + +// Format keys used to specify certain kinds of output formats +const ( + TableFormatKey = "table" + RawFormatKey = "raw" + PrettyFormatKey = "pretty" + + DefaultQuietFormat = "{{.ID}}" +) + +// Format is the format string rendered using the Context +type Format string + +// IsTable returns true if the format is a table-type format +func (f Format) IsTable() bool { + return strings.HasPrefix(string(f), TableFormatKey) +} + +// Contains returns true if the format contains the substring +func (f Format) Contains(sub string) bool { + return strings.Contains(string(f), sub) +} + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format Format + // Trunc when set to true will truncate the output of certain fields such as Container ID. 
+ Trunc bool + + // internal element + finalFormat string + header interface{} + buffer *bytes.Buffer +} + +func (c *Context) preFormat() { + c.finalFormat = string(c.Format) + + // TODO: handle this in the Format type + if c.Format.IsTable() { + c.finalFormat = c.finalFormat[len(TableFormatKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + return tmpl, errors.Errorf("Template parsing error: %v\n", err) + } + return tmpl, err +} + +func (c *Context) postFormat(tmpl *template.Template, subContext SubContext) { + if c.Format.IsTable() { + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + buffer := bytes.NewBufferString("") + tmpl.Funcs(templates.HeaderFunctions).Execute(buffer, subContext.FullHeader()) + buffer.WriteTo(t) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext SubContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + return errors.Errorf("Template parsing error: %v\n", err) + } + if c.Format.IsTable() && c.header != nil { + c.header = subContext.FullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// SubFormat is a function type accepted by Write() +type SubFormat func(func(SubContext) error) error + +// Write the template to the buffer using this Context +func (c *Context) Write(sub SubContext, f SubFormat) error { + c.buffer = bytes.NewBufferString("") + c.preFormat() + + tmpl, err := c.parseFormat() + if err != nil { + return err + } + + subFormat := func(subContext SubContext) error { + return c.contextFormat(tmpl, subContext) + } + if err := f(subFormat); err != nil { + return err + } + + c.postFormat(tmpl, sub) + return nil +} diff --git 
a/cli/cli/command/formatter/image.go b/cli/cli/command/formatter/image.go new file mode 100644 index 00000000..91893e8f --- /dev/null +++ b/cli/cli/command/formatter/image.go @@ -0,0 +1,272 @@ +package formatter + +import ( + "fmt" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" +) + +const ( + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}" + + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" +) + +// ImageContext contains image specific information required by the formatter, encapsulate a Context struct. +type ImageContext struct { + Context + Digest bool +} + +func isDangling(image types.ImageSummary) bool { + return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" +} + +// NewImageFormat returns a format for rendering an ImageContext +func NewImageFormat(source string, quiet bool, digest bool) Format { + switch source { + case TableFormatKey: + switch { + case quiet: + return DefaultQuietFormat + case digest: + return defaultImageTableFormatWithDigest + default: + return defaultImageTableFormat + } + case RawFormatKey: + switch { + case quiet: + return `image_id: {{.ID}}` + case digest: + return `repository: {{ .Repository }} +tag: {{.Tag}} +digest: {{.Digest}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + default: + return `repository: {{ .Repository }} +tag: {{.Tag}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } + } + + format := Format(source) + if format.IsTable() && digest && !format.Contains("{{.Digest}}") { + format += "\t{{.Digest}}" + } + return format +} + +// ImageWrite 
writes the formatter images using the ImageContext +func ImageWrite(ctx ImageContext, images []types.ImageSummary) error { + render := func(format func(subContext SubContext) error) error { + return imageFormat(ctx, images, format) + } + return ctx.Write(newImageContext(), render) +} + +// needDigest determines whether the image digest should be ignored or not when writing image context +func needDigest(ctx ImageContext) bool { + return ctx.Digest || ctx.Format.Contains("{{.Digest}}") +} + +func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext SubContext) error) error { + for _, image := range images { + formatted := []*imageContext{} + if isDangling(image) { + formatted = append(formatted, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: "", + tag: "", + digest: "", + }) + } else { + formatted = imageFormatTaggedAndDigest(ctx, image) + } + for _, imageCtx := range formatted { + if err := format(imageCtx); err != nil { + return err + } + } + } + return nil +} + +func imageFormatTaggedAndDigest(ctx ImageContext, image types.ImageSummary) []*imageContext { + repoTags := map[string][]string{} + repoDigests := map[string][]string{} + images := []*imageContext{} + + for _, refString := range image.RepoTags { + ref, err := reference.ParseNormalizedNamed(refString) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + familiarRef := reference.FamiliarName(ref) + repoTags[familiarRef] = append(repoTags[familiarRef], nt.Tag()) + } + } + for _, refString := range image.RepoDigests { + ref, err := reference.ParseNormalizedNamed(refString) + if err != nil { + continue + } + if c, ok := ref.(reference.Canonical); ok { + familiarRef := reference.FamiliarName(ref) + repoDigests[familiarRef] = append(repoDigests[familiarRef], c.Digest().String()) + } + } + + addImage := func(repo, tag, digest string) { + image := &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: digest, + } + 
images = append(images, image) + } + + for repo, tags := range repoTags { + digests := repoDigests[repo] + + // Do not display digests as their own row + delete(repoDigests, repo) + + if !needDigest(ctx) { + // Ignore digest references, just show tag once + digests = nil + } + + for _, tag := range tags { + if len(digests) == 0 { + addImage(repo, tag, "") + continue + } + // Display the digests for each tag + for _, dgst := range digests { + addImage(repo, tag, dgst) + } + + } + } + + // Show rows for remaining digest only references + for repo, digests := range repoDigests { + // If digests are displayed, show row per digest + if ctx.Digest { + for _, dgst := range digests { + addImage(repo, "", dgst) + } + } else { + addImage(repo, "", "") + + } + } + return images +} + +type imageContext struct { + HeaderContext + trunc bool + i types.ImageSummary + repo string + tag string + digest string +} + +func newImageContext() *imageContext { + imageCtx := imageContext{} + imageCtx.Header = SubHeaderContext{ + "ID": imageIDHeader, + "Repository": repositoryHeader, + "Tag": tagHeader, + "Digest": digestHeader, + "CreatedSince": CreatedSinceHeader, + "CreatedAt": CreatedAtHeader, + "Size": SizeHeader, + "Containers": containersHeader, + "VirtualSize": SizeHeader, + "SharedSize": sharedSizeHeader, + "UniqueSize": uniqueSizeHeader, + } + return &imageCtx +} + +func (c *imageContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *imageContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + return c.repo +} + +func (c *imageContext) Tag() string { + return c.tag +} + +func (c *imageContext) Digest() string { + return c.digest +} + +func (c *imageContext) CreatedSince() string { + createdAt := time.Unix(c.i.Created, 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago" +} + +func (c *imageContext) CreatedAt() string { + return 
time.Unix(c.i.Created, 0).String() +} + +func (c *imageContext) Size() string { + return units.HumanSizeWithPrecision(float64(c.i.Size), 3) +} + +func (c *imageContext) Containers() string { + if c.i.Containers == -1 { + return "N/A" + } + return fmt.Sprintf("%d", c.i.Containers) +} + +func (c *imageContext) VirtualSize() string { + return units.HumanSize(float64(c.i.VirtualSize)) +} + +func (c *imageContext) SharedSize() string { + if c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.SharedSize)) +} + +func (c *imageContext) UniqueSize() string { + if c.i.VirtualSize == -1 || c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize)) +} diff --git a/cli/cli/command/formatter/image_test.go b/cli/cli/command/formatter/image_test.go new file mode 100644 index 00000000..7efad0a7 --- /dev/null +++ b/cli/cli/command/formatter/image_test.go @@ -0,0 +1,356 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestImageContext(t *testing.T) { + imageID := stringid.GenerateRandomID() + unix := time.Now().Unix() + + var ctx imageContext + cases := []struct { + imageCtx imageContext + expValue string + call func() string + }{ + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: true, + }, stringid.TruncateID(imageID), ctx.ID}, + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: false, + }, imageID, ctx.ID}, + {imageContext{ + i: types.ImageSummary{Size: 10, VirtualSize: 10}, + trunc: true, + }, "10B", ctx.Size}, + {imageContext{ + i: types.ImageSummary{Created: unix}, + trunc: true, + }, time.Unix(unix, 0).String(), ctx.CreatedAt}, + // FIXME + // {imageContext{ + // i: types.ImageSummary{Created: unix}, + // trunc: true, + // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, 
ctx.CreatedSince}, + {imageContext{ + i: types.ImageSummary{}, + repo: "busybox", + }, "busybox", ctx.Repository}, + {imageContext{ + i: types.ImageSummary{}, + tag: "latest", + }, "latest", ctx.Tag}, + {imageContext{ + i: types.ImageSummary{}, + digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", + }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", ctx.Digest}, + { + imageContext{ + i: types.ImageSummary{Containers: 10}, + }, "10", ctx.Containers, + }, + { + imageContext{ + i: types.ImageSummary{VirtualSize: 10000}, + }, "10kB", ctx.VirtualSize, + }, + { + imageContext{ + i: types.ImageSummary{SharedSize: 10000}, + }, "10kB", ctx.SharedSize, + }, + { + imageContext{ + i: types.ImageSummary{SharedSize: 5000, VirtualSize: 20000}, + }, "15kB", ctx.UniqueSize, + }, + } + + for _, c := range cases { + ctx = c.imageCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else { + assert.Check(t, is.Equal(c.expValue, v)) + } + } +} + +func TestImageContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context ImageContext + expected string + }{ + // Errors + { + ImageContext{ + Context: Context{ + Format: "{{InvalidFunction}}", + }, + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + ImageContext{ + Context: Context{ + Format: "{{nil}}", + }, + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, false), + }, + }, + `REPOSITORY TAG IMAGE ID CREATED SIZE +image tag1 imageID1 24 hours ago 0B +image tag2 imageID2 24 hours ago 0B + imageID3 24 hours ago 0B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + }, + }, + 
"REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + }, + Digest: true, + }, + `REPOSITORY DIGEST +image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image + +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", true, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Digest}}", true, false), + }, + }, + "DIGEST\nsha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf\n\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, false), + }, + }, + "imageID1\nimageID2\nimageID3\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, true), + }, + Digest: true, + }, + `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0B +image tag2 imageID2 24 hours ago 0B + imageID3 24 hours ago 0B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, true), + }, + Digest: true, + }, + "imageID1\nimageID2\nimageID3\n", + }, + // Raw Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, false), + }, + }, + fmt.Sprintf(`repository: image +tag: tag1 +image_id: imageID1 +created_at: %s +virtual_size: 0B + +repository: image +tag: tag2 +image_id: imageID2 +created_at: %s +virtual_size: 0B + +repository: +tag: +image_id: imageID3 +created_at: %s +virtual_size: 0B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, true), + }, + Digest: true, + }, + fmt.Sprintf(`repository: image +tag: tag1 +digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image_id: imageID1 +created_at: %s +virtual_size: 0B + 
+repository: image +tag: tag2 +digest: +image_id: imageID2 +created_at: %s +virtual_size: 0B + +repository: +tag: +digest: +image_id: imageID3 +created_at: %s +virtual_size: 0B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", true, false), + }, + }, + `image_id: imageID1 +image_id: imageID2 +image_id: imageID3 +`, + }, + // Custom Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + }, + }, + "image\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + }, + Digest: true, + }, + "image\nimage\n\n", + }, + } + + for _, testcase := range cases { + images := []types.ImageSummary{ + {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, + {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, + {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ImageWrite(testcase.context, images) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestImageContextWriteWithNoImage(t *testing.T) { + out := bytes.NewBufferString("") + images := []types.ImageSummary{} + + contexts := []struct { + context ImageContext + expected string + }{ + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + Output: out, + }, + }, + "REPOSITORY\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + Output: out, + }, + }, + "", + 
}, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + Output: out, + }, + }, + "REPOSITORY DIGEST\n", + }, + } + + for _, context := range contexts { + ImageWrite(context.context, images) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} diff --git a/cli/cli/command/formatter/reflect.go b/cli/cli/command/formatter/reflect.go new file mode 100644 index 00000000..9d153bc9 --- /dev/null +++ b/cli/cli/command/formatter/reflect.go @@ -0,0 +1,68 @@ +package formatter + +import ( + "encoding/json" + "reflect" + "unicode" + + "github.com/pkg/errors" +) + +// MarshalJSON marshals x into json +// It differs a bit from encoding/json MarshalJSON function for formatter +func MarshalJSON(x interface{}) ([]byte, error) { + m, err := marshalMap(x) + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// marshalMap marshals x to map[string]interface{} +func marshalMap(x interface{}) (map[string]interface{}, error) { + val := reflect.ValueOf(x) + if val.Kind() != reflect.Ptr { + return nil, errors.Errorf("expected a pointer to a struct, got %v", val.Kind()) + } + if val.IsNil() { + return nil, errors.Errorf("expected a pointer to a struct, got nil pointer") + } + valElem := val.Elem() + if valElem.Kind() != reflect.Struct { + return nil, errors.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) + } + typ := val.Type() + m := make(map[string]interface{}) + for i := 0; i < val.NumMethod(); i++ { + k, v, err := marshalForMethod(typ.Method(i), val.Method(i)) + if err != nil { + return nil, err + } + if k != "" { + m[k] = v + } + } + return m, nil +} + +var unmarshallableNames = map[string]struct{}{"FullHeader": {}} + +// marshalForMethod returns the map key and the map value for marshalling the method. +// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. 
"unexportedFunc()") +func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) { + if val.Kind() != reflect.Func { + return "", nil, errors.Errorf("expected func, got %v", val.Kind()) + } + name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() + _, blackListed := unmarshallableNames[name] + // FIXME: In text/template, (numOut == 2) is marshallable, + // if the type of the second param is error. + marshallable := unicode.IsUpper(rune(name[0])) && !blackListed && + numIn == 0 && numOut == 1 + if !marshallable { + return "", nil, nil + } + result := val.Call(make([]reflect.Value, numIn)) + intf := result[0].Interface() + return name, intf, nil +} diff --git a/cli/cli/command/formatter/reflect_test.go b/cli/cli/command/formatter/reflect_test.go new file mode 100644 index 00000000..ffda51b8 --- /dev/null +++ b/cli/cli/command/formatter/reflect_test.go @@ -0,0 +1,66 @@ +package formatter + +import ( + "reflect" + "testing" +) + +type dummy struct { +} + +func (d *dummy) Func1() string { + return "Func1" +} + +func (d *dummy) func2() string { // nolint: unused + return "func2(should not be marshalled)" +} + +func (d *dummy) Func3() (string, int) { + return "Func3(should not be marshalled)", -42 +} + +func (d *dummy) Func4() int { + return 4 +} + +type dummyType string + +func (d *dummy) Func5() dummyType { + return dummyType("Func5") +} + +func (d *dummy) FullHeader() string { + return "FullHeader(should not be marshalled)" +} + +var dummyExpected = map[string]interface{}{ + "Func1": "Func1", + "Func4": 4, + "Func5": dummyType("Func5"), +} + +func TestMarshalMap(t *testing.T) { + d := dummy{} + m, err := marshalMap(&d) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(dummyExpected, m) { + t.Fatalf("expected %+v, got %+v", + dummyExpected, m) + } +} + +func TestMarshalMapBad(t *testing.T) { + if _, err := marshalMap(nil); err == nil { + t.Fatal("expected an error (argument is nil)") + } + if _, err := 
marshalMap(dummy{}); err == nil { + t.Fatal("expected an error (argument is non-pointer)") + } + x := 42 + if _, err := marshalMap(&x); err == nil { + t.Fatal("expected an error (argument is a pointer to non-struct)") + } +} diff --git a/cli/cli/command/formatter/testdata/container-context-write-special-headers.golden b/cli/cli/command/formatter/testdata/container-context-write-special-headers.golden new file mode 100644 index 00000000..3fe21c8e --- /dev/null +++ b/cli/cli/command/formatter/testdata/container-context-write-special-headers.golden @@ -0,0 +1,3 @@ +CONTAINER ID IMAGE CREATED/STATUS/ PORTS .NAMES STATUS +conta "ubuntu" 24 hours ago//.FOOBAR_BAZ +conta "ubuntu" 24 hours ago//.FOOBAR_BAR diff --git a/cli/cli/command/formatter/testdata/disk-usage-context-write-custom.golden b/cli/cli/command/formatter/testdata/disk-usage-context-write-custom.golden new file mode 100644 index 00000000..6f2d9a9b --- /dev/null +++ b/cli/cli/command/formatter/testdata/disk-usage-context-write-custom.golden @@ -0,0 +1,5 @@ +TYPE ACTIVE +Images 0 +Containers 0 +Local Volumes 0 +Build Cache 0 diff --git a/cli/cli/command/formatter/testdata/disk-usage-raw-format.golden b/cli/cli/command/formatter/testdata/disk-usage-raw-format.golden new file mode 100644 index 00000000..7b9d11eb --- /dev/null +++ b/cli/cli/command/formatter/testdata/disk-usage-raw-format.golden @@ -0,0 +1,24 @@ +type: Images +total: 0 +active: 0 +size: 0B +reclaimable: 0B + +type: Containers +total: 0 +active: 0 +size: 0B +reclaimable: 0B + +type: Local Volumes +total: 0 +active: 0 +size: 0B +reclaimable: 0B + +type: Build Cache +total: 0 +active: 0 +size: 0B +reclaimable: 0B + diff --git a/cli/cli/command/formatter/volume.go b/cli/cli/command/formatter/volume.go new file mode 100644 index 00000000..67144591 --- /dev/null +++ b/cli/cli/command/formatter/volume.go @@ -0,0 +1,121 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + 
+const ( + defaultVolumeQuietFormat = "{{.Name}}" + defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}" + + volumeNameHeader = "VOLUME NAME" + mountpointHeader = "MOUNTPOINT" + linksHeader = "LINKS" + // Status header ? +) + +// NewVolumeFormat returns a format for use with a volume Context +func NewVolumeFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultVolumeQuietFormat + } + return defaultVolumeTableFormat + case RawFormatKey: + if quiet { + return `name: {{.Name}}` + } + return `name: {{.Name}}\ndriver: {{.Driver}}\n` + } + return Format(source) +} + +// VolumeWrite writes formatted volumes using the Context +func VolumeWrite(ctx Context, volumes []*types.Volume) error { + render := func(format func(subContext SubContext) error) error { + for _, volume := range volumes { + if err := format(&volumeContext{v: *volume}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newVolumeContext(), render) +} + +type volumeContext struct { + HeaderContext + v types.Volume +} + +func newVolumeContext() *volumeContext { + volumeCtx := volumeContext{} + volumeCtx.Header = SubHeaderContext{ + "Name": volumeNameHeader, + "Driver": DriverHeader, + "Scope": ScopeHeader, + "Mountpoint": mountpointHeader, + "Labels": LabelsHeader, + "Links": linksHeader, + "Size": SizeHeader, + } + return &volumeCtx +} + +func (c *volumeContext) MarshalJSON() ([]byte, error) { + return MarshalJSON(c) +} + +func (c *volumeContext) Name() string { + return c.v.Name +} + +func (c *volumeContext) Driver() string { + return c.v.Driver +} + +func (c *volumeContext) Scope() string { + return c.v.Scope +} + +func (c *volumeContext) Mountpoint() string { + return c.v.Mountpoint +} + +func (c *volumeContext) Labels() string { + if c.v.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.v.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return 
strings.Join(joinLabels, ",") +} + +func (c *volumeContext) Label(name string) string { + if c.v.Labels == nil { + return "" + } + return c.v.Labels[name] +} + +func (c *volumeContext) Links() string { + if c.v.UsageData == nil { + return "N/A" + } + return fmt.Sprintf("%d", c.v.UsageData.RefCount) +} + +func (c *volumeContext) Size() string { + if c.v.UsageData == nil { + return "N/A" + } + return units.HumanSize(float64(c.v.UsageData.Size)) +} diff --git a/cli/cli/command/formatter/volume_test.go b/cli/cli/command/formatter/volume_test.go new file mode 100644 index 00000000..43c6061d --- /dev/null +++ b/cli/cli/command/formatter/volume_test.go @@ -0,0 +1,183 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestVolumeContext(t *testing.T) { + volumeName := stringid.GenerateRandomID() + + var ctx volumeContext + cases := []struct { + volumeCtx volumeContext + expValue string + call func() string + }{ + {volumeContext{ + v: types.Volume{Name: volumeName}, + }, volumeName, ctx.Name}, + {volumeContext{ + v: types.Volume{Driver: "driver_name"}, + }, "driver_name", ctx.Driver}, + {volumeContext{ + v: types.Volume{Scope: "local"}, + }, "local", ctx.Scope}, + {volumeContext{ + v: types.Volume{Mountpoint: "mountpoint"}, + }, "mountpoint", ctx.Mountpoint}, + {volumeContext{ + v: types.Volume{}, + }, "", ctx.Labels}, + {volumeContext{ + v: types.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", ctx.Labels}, + } + + for _, c := range cases { + ctx = c.volumeCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestVolumeContextWrite(t *testing.T) { + cases := []struct { + context Context 
+ expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewVolumeFormat("table", false)}, + `DRIVER VOLUME NAME +foo foobar_baz +bar foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table", true)}, + `foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", false)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", true)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewVolumeFormat("raw", false)}, + `name: foobar_baz +driver: foo + +name: foobar_bar +driver: bar + +`, + }, + { + Context{Format: NewVolumeFormat("raw", true)}, + `name: foobar_baz +name: foobar_bar +`, + }, + // Custom Format + { + Context{Format: NewVolumeFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + volumes := []*types.Volume{ + {Name: "foobar_baz", Driver: "foo"}, + {Name: "foobar_bar", Driver: "bar"}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := VolumeWrite(testcase.context, volumes) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestVolumeContextWriteJSON(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"}, + {"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"}, + } + out := 
bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg) + } +} + +func TestVolumeContextWriteJSONField(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .Name}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(volumes[i].Name, s), msg) + } +} diff --git a/cli/cli/command/idresolver/client_test.go b/cli/cli/command/idresolver/client_test.go new file mode 100644 index 00000000..c53cfc6a --- /dev/null +++ b/cli/cli/command/idresolver/client_test.go @@ -0,0 +1,29 @@ +package idresolver + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + nodeInspectFunc func(string) (swarm.Node, []byte, error) + serviceInspectFunc func(string) (swarm.Service, []byte, error) +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if cli.nodeInspectFunc != nil { + return cli.nodeInspectFunc(nodeID) + } + return swarm.Node{}, []byte{}, nil +} + +func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if 
cli.serviceInspectFunc != nil { + return cli.serviceInspectFunc(serviceID) + } + return swarm.Service{}, []byte{}, nil +} diff --git a/cli/cli/command/idresolver/idresolver.go b/cli/cli/command/idresolver/idresolver.go new file mode 100644 index 00000000..3d1f71a0 --- /dev/null +++ b/cli/cli/command/idresolver/idresolver.go @@ -0,0 +1,70 @@ +package idresolver + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/pkg/errors" +) + +// IDResolver provides ID to Name resolution. +type IDResolver struct { + client client.APIClient + noResolve bool + cache map[string]string +} + +// New creates a new IDResolver. +func New(client client.APIClient, noResolve bool) *IDResolver { + return &IDResolver{ + client: client, + noResolve: noResolve, + cache: make(map[string]string), + } +} + +func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { + switch t.(type) { + case swarm.Node: + node, _, err := r.client.NodeInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + if node.Spec.Annotations.Name != "" { + return node.Spec.Annotations.Name, nil + } + if node.Description.Hostname != "" { + return node.Description.Hostname, nil + } + return id, nil + case swarm.Service: + service, _, err := r.client.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{}) + if err != nil { + return id, nil + } + return service.Spec.Annotations.Name, nil + default: + return "", errors.Errorf("unsupported type") + } + +} + +// Resolve will attempt to resolve an ID to a Name by querying the manager. +// Results are stored into a cache. +// If the `-n` flag is used in the command-line, resolution is disabled. 
+func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { + if r.noResolve { + return id, nil + } + if name, ok := r.cache[id]; ok { + return name, nil + } + name, err := r.get(ctx, t, id) + if err != nil { + return "", err + } + r.cache[id] = name + return name, nil +} diff --git a/cli/cli/command/idresolver/idresolver_test.go b/cli/cli/command/idresolver/idresolver_test.go new file mode 100644 index 00000000..f667b106 --- /dev/null +++ b/cli/cli/command/idresolver/idresolver_test.go @@ -0,0 +1,146 @@ +package idresolver + +import ( + "testing" + + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + // Import builders to get the builder function as package function + "context" + + . "github.com/docker/cli/internal/test/builders" + "github.com/pkg/errors" +) + +func TestResolveError(t *testing.T) { + cli := &fakeClient{ + nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") + }, + } + + idResolver := New(cli, false) + _, err := idResolver.Resolve(context.Background(), struct{}{}, "nodeID") + + assert.Error(t, err, "unsupported type") +} + +func TestResolveWithNoResolveOption(t *testing.T) { + resolved := false + cli := &fakeClient{ + nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { + resolved = true + return swarm.Node{}, []byte{}, nil + }, + serviceInspectFunc: func(serviceID string) (swarm.Service, []byte, error) { + resolved = true + return swarm.Service{}, []byte{}, nil + }, + } + + idResolver := New(cli, true) + id, err := idResolver.Resolve(context.Background(), swarm.Node{}, "nodeID") + + assert.NilError(t, err) + assert.Check(t, is.Equal("nodeID", id)) + assert.Check(t, !resolved) +} + +func TestResolveWithCache(t *testing.T) { + inspectCounter := 0 + cli := &fakeClient{ + nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) { + inspectCounter++ + return 
*Node(NodeName("node-foo")), []byte{}, nil + }, + } + + idResolver := New(cli, false) + + ctx := context.Background() + for i := 0; i < 2; i++ { + id, err := idResolver.Resolve(ctx, swarm.Node{}, "nodeID") + assert.NilError(t, err) + assert.Check(t, is.Equal("node-foo", id)) + } + + assert.Check(t, is.Equal(1, inspectCounter)) +} + +func TestResolveNode(t *testing.T) { + testCases := []struct { + nodeID string + nodeInspectFunc func(string) (swarm.Node, []byte, error) + expectedID string + }{ + { + nodeID: "nodeID", + nodeInspectFunc: func(string) (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") + }, + expectedID: "nodeID", + }, + { + nodeID: "nodeID", + nodeInspectFunc: func(string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-foo")), []byte{}, nil + }, + expectedID: "node-foo", + }, + { + nodeID: "nodeID", + nodeInspectFunc: func(string) (swarm.Node, []byte, error) { + return *Node(NodeName(""), Hostname("node-hostname")), []byte{}, nil + }, + expectedID: "node-hostname", + }, + } + + ctx := context.Background() + for _, tc := range testCases { + cli := &fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + } + idResolver := New(cli, false) + id, err := idResolver.Resolve(ctx, swarm.Node{}, tc.nodeID) + + assert.NilError(t, err) + assert.Check(t, is.Equal(tc.expectedID, id)) + } +} + +func TestResolveService(t *testing.T) { + testCases := []struct { + serviceID string + serviceInspectFunc func(string) (swarm.Service, []byte, error) + expectedID string + }{ + { + serviceID: "serviceID", + serviceInspectFunc: func(string) (swarm.Service, []byte, error) { + return swarm.Service{}, []byte{}, errors.Errorf("error inspecting service") + }, + expectedID: "serviceID", + }, + { + serviceID: "serviceID", + serviceInspectFunc: func(string) (swarm.Service, []byte, error) { + return *Service(ServiceName("service-foo")), []byte{}, nil + }, + expectedID: "service-foo", + }, + } + + ctx := context.Background() + 
for _, tc := range testCases { + cli := &fakeClient{ + serviceInspectFunc: tc.serviceInspectFunc, + } + idResolver := New(cli, false) + id, err := idResolver.Resolve(ctx, swarm.Service{}, tc.serviceID) + + assert.NilError(t, err) + assert.Check(t, is.Equal(tc.expectedID, id)) + } +} diff --git a/cli/cli/command/image/build.go b/cli/cli/command/image/build.go new file mode 100644 index 00000000..ef50a291 --- /dev/null +++ b/cli/cli/command/image/build.go @@ -0,0 +1,711 @@ +package image + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "encoding/csv" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image/build" + "github.com/docker/cli/opts" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + units "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var errStdinConflict = errors.New("invalid argument: can't use stdin for both build context and dockerfile") + +type buildOptions struct { + context string + dockerfileName string + tags opts.ListOpts + labels opts.ListOpts + buildArgs opts.ListOpts + extraHosts opts.ListOpts + ulimits *opts.UlimitOpt + memory opts.MemBytes + memorySwap opts.MemSwapBytes + shmSize opts.MemBytes + cpuShares int64 + cpuPeriod int64 + cpuQuota int64 + cpuSetCpus string + cpuSetMems string + cgroupParent string + isolation string + quiet bool + noCache bool + progress string + rm bool + forceRm bool + pull bool + cacheFrom 
[]string + compress bool + securityOpt []string + networkMode string + squash bool + target string + imageIDFile string + stream bool + platform string + untrusted bool + secrets []string + ssh []string + outputs []string +} + +// dockerfileFromStdin returns true when the user specified that the Dockerfile +// should be read from stdin instead of a file +func (o buildOptions) dockerfileFromStdin() bool { + return o.dockerfileName == "-" +} + +// contextFromStdin returns true when the user specified that the build context +// should be read from stdin +func (o buildOptions) contextFromStdin() bool { + return o.context == "-" +} + +func newBuildOptions() buildOptions { + ulimits := make(map[string]*units.Ulimit) + return buildOptions{ + tags: opts.NewListOpts(validateTag), + buildArgs: opts.NewListOpts(opts.ValidateEnv), + ulimits: opts.NewUlimitOpt(&ulimits), + labels: opts.NewListOpts(opts.ValidateLabel), + extraHosts: opts.NewListOpts(opts.ValidateExtraHost), + } +} + +// NewBuildCommand creates a new `docker build` command +func NewBuildCommand(dockerCli command.Cli) *cobra.Command { + options := newBuildOptions() + + cmd := &cobra.Command{ + Use: "build [OPTIONS] PATH | URL | -", + Short: "Build an image from a Dockerfile", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.context = args[0] + return runBuild(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") + flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") + flags.Var(options.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flags.VarP(&options.memory, "memory", "m", "Memory limit") + flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Var(&options.shmSize, "shm-size", 
"Size of /dev/shm") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") + flags.Var(&options.labels, "label", "Set metadata for an image") + flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") + flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") + flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") + flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") + flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") + flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") + flags.SetAnnotation("compress", "no-buildkit", nil) + + flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") + flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") + flags.SetAnnotation("network", "version", []string{"1.25"}) + flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") + flags.StringVar(&options.target, "target", "", "Set the 
target build stage to build.") + flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file") + + command.AddTrustVerificationFlags(flags, &options.untrusted, dockerCli.ContentTrustEnabled()) + + flags.StringVar(&options.platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable") + // Platform is not experimental when BuildKit is used + buildkitEnabled, err := command.BuildKitEnabled(dockerCli.ServerInfo()) + if err == nil && buildkitEnabled { + flags.SetAnnotation("platform", "version", []string{"1.38"}) + } else { + flags.SetAnnotation("platform", "version", []string{"1.32"}) + flags.SetAnnotation("platform", "experimental", nil) + } + + flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") + flags.SetAnnotation("squash", "experimental", nil) + flags.SetAnnotation("squash", "version", []string{"1.25"}) + + flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context") + flags.SetAnnotation("stream", "experimental", nil) + flags.SetAnnotation("stream", "version", []string{"1.31"}) + flags.SetAnnotation("stream", "no-buildkit", nil) + + flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). 
Use plain to show container output") + flags.SetAnnotation("progress", "buildkit", nil) + + flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret") + flags.SetAnnotation("secret", "version", []string{"1.39"}) + flags.SetAnnotation("secret", "buildkit", nil) + + flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|[=|[,]])") + flags.SetAnnotation("ssh", "version", []string{"1.39"}) + flags.SetAnnotation("ssh", "buildkit", nil) + + flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)") + flags.SetAnnotation("output", "version", []string{"1.40"}) + flags.SetAnnotation("output", "buildkit", nil) + + return cmd +} + +// lastProgressOutput is the same as progress.Output except +// that it only output with the last update. It is used in +// non terminal scenarios to suppress verbose messages +type lastProgressOutput struct { + output progress.Output +} + +// WriteProgress formats progress information from a ProgressReader. 
+func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { + if !prog.LastUpdate { + return nil + } + + return out.output.WriteProgress(prog) +} + +// nolint: gocyclo +func runBuild(dockerCli command.Cli, options buildOptions) error { + buildkitEnabled, err := command.BuildKitEnabled(dockerCli.ServerInfo()) + if err != nil { + return err + } + if buildkitEnabled { + return runBuildBuildKit(dockerCli, options) + } + + var ( + buildCtx io.ReadCloser + dockerfileCtx io.ReadCloser + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + remote string + ) + + if options.compress && options.stream { + return errors.New("--compress conflicts with --stream options") + } + + if options.dockerfileFromStdin() { + if options.contextFromStdin() { + return errStdinConflict + } + dockerfileCtx = dockerCli.In() + } + + specifiedContext := options.context + progBuff = dockerCli.Out() + buildBuff = dockerCli.Out() + if options.quiet { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + if options.imageIDFile != "" { + // Avoid leaving a stale file if we eventually fail + if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "Removing image ID file") + } + } + + switch { + case options.contextFromStdin(): + // buildCtx is tar archive. 
if stdin was dockerfile then it is wrapped + buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName) + case isLocalDir(specifiedContext): + contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName) + if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + // Dockerfile is outside of build-context; read the Dockerfile and pass it as dockerfileCtx + dockerfileCtx, err = os.Open(options.dockerfileName) + if err != nil { + return errors.Errorf("unable to open Dockerfile: %v", err) + } + defer dockerfileCtx.Close() + } + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName) + case urlutil.IsURL(specifiedContext): + buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) + default: + return errors.Errorf("unable to prepare context: path %q not found", specifiedContext) + } + + if err != nil { + if options.quiet && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(dockerCli.Err(), progBuff) + } + return errors.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + contextDir = tempDir + } + + // read from a directory into tar archive + if buildCtx == nil && !options.stream { + excludes, err := build.ReadDockerignore(contextDir) + if err != nil { + return err + } + + if err := build.ValidateContextDirectory(contextDir, excludes); err != nil { + return errors.Errorf("error checking context: '%s'.", err) + } + + // And canonicalize dockerfile name to a platform-independent one + relDockerfile = archive.CanonicalTarNameForPath(relDockerfile) + + excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin()) + buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + ExcludePatterns: excludes, + ChownOpts: 
&idtools.Identity{UID: 0, GID: 0}, + }) + if err != nil { + return err + } + } + + // replace Dockerfile if it was added from stdin or a file outside the build-context, and there is archive context + if dockerfileCtx != nil && buildCtx != nil { + buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx) + if err != nil { + return err + } + } + + // if streaming and Dockerfile was not from stdin then read from file + // to the same reader that is usually stdin + if options.stream && dockerfileCtx == nil { + dockerfileCtx, err = os.Open(relDockerfile) + if err != nil { + return errors.Wrapf(err, "failed to open %s", relDockerfile) + } + defer dockerfileCtx.Close() + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var resolvedTags []*resolvedTag + if !options.untrusted { + translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { + return TrustedReference(ctx, dockerCli, ref, nil) + } + // if there is a tar wrapper, the dockerfile needs to be replaced inside it + if buildCtx != nil { + // Wrap the tar archive to replace the Dockerfile entry with the rewritten + // Dockerfile which uses trusted pulls. 
+ buildCtx = replaceDockerfileForContentTrust(ctx, buildCtx, relDockerfile, translator, &resolvedTags) + } else if dockerfileCtx != nil { + // if there was not archive context still do the possible replacements in Dockerfile + newDockerfile, _, err := rewriteDockerfileFromForContentTrust(ctx, dockerfileCtx, translator) + if err != nil { + return err + } + dockerfileCtx = ioutil.NopCloser(bytes.NewBuffer(newDockerfile)) + } + } + + if options.compress { + buildCtx, err = build.Compress(buildCtx) + if err != nil { + return err + } + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewProgressOutput(progBuff) + if !dockerCli.Out().IsTerminal() { + progressOutput = &lastProgressOutput{output: progressOutput} + } + + // if up to this point nothing has set the context then we must have another + // way for sending it(streaming) and set the context to the Dockerfile + if dockerfileCtx != nil && buildCtx == nil { + buildCtx = dockerfileCtx + } + + s, err := trySession(dockerCli, contextDir, true) + if err != nil { + return err + } + + var body io.Reader + if buildCtx != nil && !options.stream { + body = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") + } + + // add context stream to the session + if options.stream && s != nil { + syncDone := make(chan error) // used to signal first progress reporting completed. 
+ // progress would also send errors but don't need it here as errors + // are handled by session.Run() and ImageBuild() + if err := addDirToSession(s, contextDir, progressOutput, syncDone); err != nil { + return err + } + + buf := newBufferedWriter(syncDone, buildBuff) + defer func() { + select { + case <-buf.flushed: + case <-ctx.Done(): + } + }() + buildBuff = buf + + remote = clientSessionRemote + body = buildCtx + } + + configFile := dockerCli.ConfigFile() + creds, _ := configFile.GetAllCredentials() + authConfigs := make(map[string]types.AuthConfig, len(creds)) + for k, auth := range creds { + authConfigs[k] = types.AuthConfig(auth) + } + buildOptions := imageBuildOptions(dockerCli, options) + buildOptions.Version = types.BuilderV1 + buildOptions.Dockerfile = relDockerfile + buildOptions.AuthConfigs = authConfigs + buildOptions.RemoteContext = remote + + if s != nil { + go func() { + logrus.Debugf("running session: %v", s.ID()) + dialSession := func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + return dockerCli.Client().DialHijack(ctx, "/session", proto, meta) + } + if err := s.Run(ctx, dialSession); err != nil { + logrus.Error(err) + cancel() // cancel progress context + } + }() + buildOptions.SessionID = s.ID() + } + + response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) + if err != nil { + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s", progBuff) + } + cancel() + return err + } + defer response.Body.Close() + + imageID := "" + aux := func(msg jsonmessage.JSONMessage) { + var result types.BuildResult + if err := json.Unmarshal(*msg.Aux, &result); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err) + } else { + imageID = result.ID + } + } + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is 
set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) + } + return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + return err + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { + fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+ + "image from Windows against a non-Windows Docker host. All files and "+ + "directories added to build context will have '-rwxr-xr-x' permissions. "+ + "It is recommended to double check and reset permissions for sensitive "+ + "files and directories.") + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if options.quiet { + imageID = fmt.Sprintf("%s", buildBuff) + fmt.Fprintf(dockerCli.Out(), imageID) + } + + if options.imageIDFile != "" { + if imageID == "" { + return errors.Errorf("Server did not provide an image ID. Cannot write %s", options.imageIDFile) + } + if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil { + return err + } + } + if !options.untrusted { + // Since the build was successful, now we must tag any of the resolved + // images from the above Dockerfile rewrite. + for _, resolved := range resolvedTags { + if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { + return err + } + } + } + + return nil +} + +func isLocalDir(c string) bool { + _, err := os.Stat(c) + return err == nil +} + +type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) + +// validateTag checks if the given image name can be resolved. 
+func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNormalizedNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// rewriteDockerfileFromForContentTrust rewrites the given Dockerfile by resolving images in +// "FROM " instructions to a digest reference. `translator` is a +// function that takes a repository name and tag reference and returns a +// trusted digest reference. +// This should be called *only* when content trust is enabled +func rewriteDockerfileFromForContentTrust(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { + scanner := bufio.NewScanner(dockerfile) + buf := bytes.NewBuffer(nil) + + // Scan the lines of the Dockerfile, looking for a "FROM" line. 
+ for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != api.NoBaseImageSpecifier { + // Replace the line with a resolved "FROM repo@digest" + var ref reference.Named + ref, err = reference.ParseNormalizedNamed(matches[1]) + if err != nil { + return nil, nil, err + } + ref = reference.TagNameOnly(ref) + if ref, ok := ref.(reference.NamedTagged); ok { + trustedRef, err := translator(ctx, ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef))) + resolvedTags = append(resolvedTags, &resolvedTag{ + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + _, err := fmt.Fprintln(buf, line) + if err != nil { + return nil, nil, err + } + } + + return buf.Bytes(), resolvedTags, scanner.Err() +} + +// replaceDockerfileForContentTrust wraps the given input tar archive stream and +// uses the translator to replace the Dockerfile which uses a trusted reference. +// Returns a new tar archive stream with the replaced Dockerfile. +func replaceDockerfileForContentTrust(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + content := io.Reader(tarReader) + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive. 
+ var newDockerfile []byte + newDockerfile, *resolvedTags, err = rewriteDockerfileFromForContentTrust(ctx, content, translator) + if err != nil { + pipeWriter.CloseWithError(err) + return + } + hdr.Size = int64(len(newDockerfile)) + content = bytes.NewBuffer(newDockerfile) + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} + +func imageBuildOptions(dockerCli command.Cli, options buildOptions) types.ImageBuildOptions { + configFile := dockerCli.ConfigFile() + return types.ImageBuildOptions{ + Memory: options.memory.Value(), + MemorySwap: options.memorySwap.Value(), + Tags: options.tags.GetAll(), + SuppressOutput: options.quiet, + NoCache: options.noCache, + Remove: options.rm, + ForceRemove: options.forceRm, + PullParent: options.pull, + Isolation: container.Isolation(options.isolation), + CPUSetCPUs: options.cpuSetCpus, + CPUSetMems: options.cpuSetMems, + CPUShares: options.cpuShares, + CPUQuota: options.cpuQuota, + CPUPeriod: options.cpuPeriod, + CgroupParent: options.cgroupParent, + ShmSize: options.shmSize.Value(), + Ulimits: options.ulimits.GetList(), + BuildArgs: configFile.ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll())), + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + CacheFrom: options.cacheFrom, + SecurityOpt: options.securityOpt, + NetworkMode: options.networkMode, + Squash: options.squash, + ExtraHosts: options.extraHosts.GetAll(), + Target: options.target, + Platform: options.platform, + } +} + +func parseOutputs(inp []string) ([]types.ImageBuildOutput, error) { + var outs []types.ImageBuildOutput + if len(inp) == 0 { + return nil, nil + } + for _, s := range inp { + csvReader := csv.NewReader(strings.NewReader(s)) + fields, err := csvReader.Read() + if err != nil { + return nil, err 
+ } + if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") { + if s == "-" { + outs = append(outs, types.ImageBuildOutput{ + Type: "tar", + Attrs: map[string]string{ + "dest": s, + }, + }) + } else { + outs = append(outs, types.ImageBuildOutput{ + Type: "local", + Attrs: map[string]string{ + "dest": s, + }, + }) + } + continue + } + + out := types.ImageBuildOutput{ + Attrs: map[string]string{}, + } + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return nil, errors.Errorf("invalid value %s", field) + } + key := strings.ToLower(parts[0]) + value := parts[1] + switch key { + case "type": + out.Type = value + default: + out.Attrs[key] = value + } + } + if out.Type == "" { + return nil, errors.Errorf("type is required for output") + } + outs = append(outs, out) + } + return outs, nil +} diff --git a/cli/cli/command/image/build/context.go b/cli/cli/command/image/build/context.go new file mode 100644 index 00000000..e9af9bd3 --- /dev/null +++ b/cli/cli/command/image/build/context.go @@ -0,0 +1,440 @@ +package build + +import ( + "archive/tar" + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/docker/docker/builder/remotecontext/git" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" + // archiveHeaderSize is the number of bytes in an archive header + archiveHeaderSize = 512 +) + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if 
some files can't be read +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string, excludes []string) error { + contextRoot, err := getContextRoot(srcPath) + if err != nil { + return err + } + + pm, err := fileutils.NewPatternMatcher(excludes) + if err != nil { + return err + } + + return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + if os.IsPermission(err) { + return errors.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return errors.Errorf("file ('%s') not found or excluded by .dockerignore", filePath) + } + return err + } + + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { + return err + } else if skip, err := filepathMatches(pm, relFilePath); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hanging on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return errors.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +func filepathMatches(matcher *fileutils.PatternMatcher, file string) (bool, error) { + file = filepath.Clean(file) + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + return matcher.Matches(file) +} + +// DetectArchiveReader detects whether the input stream is an archive or a +// Dockerfile and returns a buffered version of input, safe to consume in lieu +// of input. 
If an archive is detected, isArchive is set to true, and to false +// otherwise, in which case it is safe to assume input represents the contents +// of a Dockerfile. +func DetectArchiveReader(input io.ReadCloser) (rc io.ReadCloser, isArchive bool, err error) { + buf := bufio.NewReader(input) + + magic, err := buf.Peek(archiveHeaderSize * 2) + if err != nil && err != io.EOF { + return nil, false, errors.Errorf("failed to peek context header from STDIN: %v", err) + } + + return ioutils.NewReadCloserWrapper(buf, func() error { return input.Close() }), IsArchive(magic), nil +} + +// WriteTempDockerfile writes a Dockerfile stream to a temporary file with a +// name specified by DefaultDockerfileName and returns the path to the +// temporary directory containing the Dockerfile. +func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) { + // err is a named return value, due to the defer call below. + dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-") + if err != nil { + return "", errors.Errorf("unable to create temporary context directory: %v", err) + } + defer func() { + if err != nil { + os.RemoveAll(dockerfileDir) + } + }() + + f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName)) + if err != nil { + return "", err + } + defer f.Close() + if _, err := io.Copy(f, rc); err != nil { + return "", err + } + return dockerfileDir, rc.Close() +} + +// GetContextFromReader will read the contents of the given reader as either a +// Dockerfile or tar archive. Returns a tar archive used as a context and a +// path to the Dockerfile inside the tar. +func GetContextFromReader(rc io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { + rc, isArchive, err := DetectArchiveReader(rc) + if err != nil { + return nil, "", err + } + + if isArchive { + return rc, dockerfileName, nil + } + + // Input should be read as a Dockerfile. 
+ + if dockerfileName == "-" { + return nil, "", errors.New("build context is not an archive") + } + + dockerfileDir, err := WriteTempDockerfile(rc) + if err != nil { + return nil, "", err + } + + tar, err := archive.Tar(dockerfileDir, archive.Uncompressed) + if err != nil { + return nil, "", err + } + + return ioutils.NewReadCloserWrapper(tar, func() error { + err := tar.Close() + os.RemoveAll(dockerfileDir) + return err + }), DefaultDockerfileName, nil +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := archive.DetectCompression(header) + if compression != archive.Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// GetContextFromGitURL uses a Git URL as context for a `docker build`. The +// git repo is cloned into a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a non-nil error on +// success. +func GetContextFromGitURL(gitURL, dockerfileName string) (string, string, error) { + if _, err := exec.LookPath("git"); err != nil { + return "", "", errors.Wrapf(err, "unable to find 'git'") + } + absContextDir, err := git.Clone(gitURL) + if err != nil { + return "", "", errors.Wrapf(err, "unable to 'git clone' to temporary context directory") + } + + absContextDir, err = ResolveAndValidateContextPath(absContextDir) + if err != nil { + return "", "", err + } + relDockerfile, err := getDockerfileRelPath(absContextDir, dockerfileName) + if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + return "", "", errors.Errorf("the Dockerfile (%s) must be within the build context", dockerfileName) + } + + return absContextDir, relDockerfile, err +} + +// GetContextFromURL uses a remote URL as context for a `docker build`. 
The +// remote resource is downloaded as either a Dockerfile or a tar archive. +// Returns the tar archive used for the context and a path of the +// dockerfile inside the tar. +func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { + response, err := getWithStatusError(remoteURL) + if err != nil { + return nil, "", errors.Errorf("unable to download remote context %s: %v", remoteURL, err) + } + progressOutput := streamformatter.NewProgressOutput(out) + + // Pass the response body through a progress reader. + progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) + + return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) +} + +// getWithStatusError does an http.Get() and returns an error if the +// status code is 4xx or 5xx. +func getWithStatusError(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode < 400 { + return resp, nil + } + msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status) + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, errors.Wrapf(err, msg+": error reading body") + } + return nil, errors.Errorf(msg+": %s", bytes.TrimSpace(body)) +} + +// GetContextFromLocalDir uses the given local directory as context for a +// `docker build`. Returns the absolute path to the local context directory, +// the relative path of the dockerfile in that context directory, and a non-nil +// error on success. 
+func GetContextFromLocalDir(localDir, dockerfileName string) (string, string, error) { + localDir, err := ResolveAndValidateContextPath(localDir) + if err != nil { + return "", "", err + } + + // When using a local context directory, and the Dockerfile is specified + // with the `-f/--file` option then it is considered relative to the + // current directory and not the context directory. + if dockerfileName != "" && dockerfileName != "-" { + if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { + return "", "", errors.Errorf("unable to get absolute path to Dockerfile: %v", err) + } + } + + relDockerfile, err := getDockerfileRelPath(localDir, dockerfileName) + return localDir, relDockerfile, err +} + +// ResolveAndValidateContextPath uses the given context directory for a `docker build` +// and returns the absolute path to the context directory. +func ResolveAndValidateContextPath(givenContextDir string) (string, error) { + absContextDir, err := filepath.Abs(givenContextDir) + if err != nil { + return "", errors.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) + } + + // The context dir might be a symbolic link, so follow it to the actual + // target directory. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. 
+ if !isUNC(absContextDir) { + absContextDir, err = filepath.EvalSymlinks(absContextDir) + if err != nil { + return "", errors.Errorf("unable to evaluate symlinks in context path: %v", err) + } + } + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", errors.Errorf("unable to stat context directory %q: %v", absContextDir, err) + } + + if !stat.IsDir() { + return "", errors.Errorf("context must be a directory: %s", absContextDir) + } + return absContextDir, err +} + +// getDockerfileRelPath returns the dockerfile path relative to the context +// directory +func getDockerfileRelPath(absContextDir, givenDockerfile string) (string, error) { + var err error + + if givenDockerfile == "-" { + return givenDockerfile, nil + } + + absDockerfile := givenDockerfile + if absDockerfile == "" { + // No -f/--file was specified so use the default relative to the + // context directory. + absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) + + // Just to be nice ;-) look for 'dockerfile' too but only + // use it if we found it, otherwise ignore this check + if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { + altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) + if _, err = os.Lstat(altPath); err == nil { + absDockerfile = altPath + } + } + } + + // If not already an absolute path, the Dockerfile path should be joined to + // the base directory. + if !filepath.IsAbs(absDockerfile) { + absDockerfile = filepath.Join(absContextDir, absDockerfile) + } + + // Evaluate symlinks in the path to the Dockerfile too. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. 
+ if !isUNC(absDockerfile) { + absDockerfile, err = filepath.EvalSymlinks(absDockerfile) + if err != nil { + return "", errors.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + + } + } + + if _, err := os.Lstat(absDockerfile); err != nil { + if os.IsNotExist(err) { + return "", errors.Errorf("Cannot locate Dockerfile: %q", absDockerfile) + } + return "", errors.Errorf("unable to stat Dockerfile: %v", err) + } + + relDockerfile, err := filepath.Rel(absContextDir, absDockerfile) + if err != nil { + return "", errors.Errorf("unable to get relative Dockerfile path: %v", err) + } + + return relDockerfile, nil +} + +// isUNC returns true if the path is UNC (one starting \\). It always returns +// false on Linux. +func isUNC(path string) bool { + return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) +} + +// AddDockerfileToBuildContext from a ReadCloser, returns a new archive and +// the relative path to the dockerfile in the context. +func AddDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCloser) (io.ReadCloser, string, error) { + file, err := ioutil.ReadAll(dockerfileCtx) + dockerfileCtx.Close() + if err != nil { + return nil, "", err + } + now := time.Now() + hdrTmpl := &tar.Header{ + Mode: 0600, + Uid: 0, + Gid: 0, + ModTime: now, + Typeflag: tar.TypeReg, + AccessTime: now, + ChangeTime: now, + } + randomName := ".dockerfile." 
+ stringid.GenerateRandomID()[:20] + + buildCtx = archive.ReplaceFileTarWrapper(buildCtx, map[string]archive.TarModifierFunc{ + // Add the dockerfile with a random filename + randomName: func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + return hdrTmpl, file, nil + }, + // Update .dockerignore to include the random filename + ".dockerignore": func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + if h == nil { + h = hdrTmpl + } + + b := &bytes.Buffer{} + if content != nil { + if _, err := b.ReadFrom(content); err != nil { + return nil, nil, err + } + } else { + b.WriteString(".dockerignore") + } + b.WriteString("\n" + randomName + "\n") + return h, b.Bytes(), nil + }, + }) + return buildCtx, randomName, nil +} + +// Compress the build context for sending to the API +func Compress(buildCtx io.ReadCloser) (io.ReadCloser, error) { + pipeReader, pipeWriter := io.Pipe() + + go func() { + compressWriter, err := archive.CompressStream(pipeWriter, archive.Gzip) + if err != nil { + pipeWriter.CloseWithError(err) + } + defer buildCtx.Close() + + if _, err := pools.Copy(compressWriter, buildCtx); err != nil { + pipeWriter.CloseWithError( + errors.Wrap(err, "failed to compress context")) + compressWriter.Close() + return + } + compressWriter.Close() + pipeWriter.Close() + }() + + return pipeReader, nil +} diff --git a/cli/cli/command/image/build/context_test.go b/cli/cli/command/image/build/context_test.go new file mode 100644 index 00000000..de4d38f7 --- /dev/null +++ b/cli/cli/command/image/build/context_test.go @@ -0,0 +1,416 @@ +package build + +import ( + "archive/tar" + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +const dockerfileContents = "FROM busybox" + +var prepareEmpty = func(t *testing.T) (string, func()) { + return 
"", func() {} +} + +var prepareNoFiles = func(t *testing.T) (string, func()) { + return createTestTempDir(t, "", "builder-context-test") +} + +var prepareOneFile = func(t *testing.T) (string, func()) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + return contextDir, cleanup +} + +func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) { + contextDir, cleanup := prepare(t) + defer cleanup() + + err := ValidateContextDirectory(contextDir, excludes) + assert.NilError(t, err) +} + +func TestGetContextFromLocalDirNoDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + _, _, err := GetContextFromLocalDir(contextDir, "") + assert.ErrorContains(t, err, "Dockerfile") +} + +func TestGetContextFromLocalDirNotExistingDir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + _, _, err := GetContextFromLocalDir(fakePath, "") + assert.ErrorContains(t, err, "fake") +} + +func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + _, _, err := GetContextFromLocalDir(contextDir, fakePath) + assert.ErrorContains(t, err, "fake") +} + +func TestGetContextFromLocalDirWithNoDirectory(t *testing.T) { + contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test") + defer dirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + assert.NilError(t, err) + + assert.Check(t, is.Equal(contextDir, 
absContextDir)) + assert.Check(t, is.Equal(DefaultDockerfileName, relDockerfile)) +} + +func TestGetContextFromLocalDirWithDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + assert.NilError(t, err) + + assert.Check(t, is.Equal(contextDir, absContextDir)) + assert.Check(t, is.Equal(DefaultDockerfileName, relDockerfile)) +} + +func TestGetContextFromLocalDirLocalFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName) + assert.NilError(t, err) + + assert.Check(t, is.Equal(contextDir, absContextDir)) + assert.Check(t, is.Equal(DefaultDockerfileName, relDockerfile)) +} + +func TestGetContextFromReaderString(t *testing.T) { + tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "") + + if err 
!= nil { + t.Fatalf("Error when executing GetContextFromReader: %s", err) + } + + tarReader := tar.NewReader(tarArchive) + + _, err = tarReader.Next() + + if err != nil { + t.Fatalf("Error when reading tar archive: %s", err) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + assert.NilError(t, tarArchive.Close()) + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromReaderTar(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + assert.NilError(t, err) + + tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName) + assert.NilError(t, err) + + tarReader := tar.NewReader(tarArchive) + + header, err := tarReader.Next() + assert.NilError(t, err) + + if header.Name != DefaultDockerfileName { + t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + assert.NilError(t, tarArchive.Close()) + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestValidateContextDirectoryEmptyContext(t 
*testing.T) { + // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81. + // The test will ultimately end up calling filepath.Abs(""). On Windows, + // golang will error. On Linux, golang will return /. Due to there being + // drive letters on Windows, this is probably the correct behaviour for + // Windows. + if runtime.GOOS == "windows" { + t.Skip("Invalid test on Windows") + } + testValidateContextDirectory(t, prepareEmpty, []string{}) +} + +func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) { + testValidateContextDirectory(t, prepareNoFiles, []string{}) +} + +func TestValidateContextDirectoryWithOneFile(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{}) +} + +func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName}) +} + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + assert.NilError(t, err) + return path, func() { assert.NilError(t, os.RemoveAll(path)) } +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + assert.NilError(t, err) + return filePath +} + +// chdir changes current working directory to dir. +// It returns a function which changes working directory back to the previous one. +// This function is meant to be executed as a deferred call. +// When an error occurs, it terminates the test. 
+func chdir(t *testing.T, dir string) func() { + workingDirectory, err := os.Getwd() + assert.NilError(t, err) + assert.NilError(t, os.Chdir(dir)) + return func() { assert.NilError(t, os.Chdir(workingDirectory)) } +} + +func TestIsArchive(t *testing.T) { + var testcases = []struct { + doc string + header []byte + expected bool + }{ + { + doc: "nil is not a valid header", + header: nil, + expected: false, + }, + { + doc: "invalid header bytes", + header: []byte{0x00, 0x01, 0x02}, + expected: false, + }, + { + doc: "header for bzip2 archive", + header: []byte{0x42, 0x5A, 0x68}, + expected: true, + }, + { + doc: "header for 7zip archive is not supported", + header: []byte{0x50, 0x4b, 0x03, 0x04}, + expected: false, + }, + } + for _, testcase := range testcases { + assert.Check(t, is.Equal(testcase.expected, IsArchive(testcase.header)), testcase.doc) + } +} + +func TestDetectArchiveReader(t *testing.T) { + var testcases = []struct { + file string + desc string + expected bool + }{ + { + file: "../testdata/tar.test", + desc: "tar file without pax headers", + expected: true, + }, + { + file: "../testdata/gittar.test", + desc: "tar file with pax headers", + expected: true, + }, + { + file: "../testdata/Dockerfile.test", + desc: "not a tar file", + expected: false, + }, + } + for _, testcase := range testcases { + content, err := os.Open(testcase.file) + assert.NilError(t, err) + defer content.Close() + + _, isArchive, err := DetectArchiveReader(content) + assert.NilError(t, err) + assert.Check(t, is.Equal(testcase.expected, isArchive), testcase.file) + } +} + +func mustPatternMatcher(t *testing.T, patterns []string) *fileutils.PatternMatcher { + t.Helper() + pm, err := fileutils.NewPatternMatcher(patterns) + if err != nil { + t.Fatal("failed to construct pattern matcher: ", err) + } + return pm +} + +func TestWildcardMatches(t *testing.T) { + match, _ := filepathMatches(mustPatternMatcher(t, []string{"*"}), "fileutils.go") + if !match { + t.Errorf("failed to get a 
wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := filepathMatches(mustPatternMatcher(t, []string{"*.go"}), "fileutils.go") + if !match { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := filepathMatches(mustPatternMatcher(t, []string{"!fileutils.go", "*.go"}), "fileutils.go") + if !match { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := filepathMatches(mustPatternMatcher(t, []string{"docs", "!docs/README.md"}), "docs/README.md") + if match { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := filepathMatches(mustPatternMatcher(t, []string{"docs/", "!docs/README.md"}), "docs/README.md") + if match { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := filepathMatches(mustPatternMatcher(t, []string{"docs/*", "!docs/README.md"}), "docs/README.md") + if match { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := filepathMatches(mustPatternMatcher(t, []string{"*.go", "!fileutils.go"}), "fileutils.go") + if match { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . 
should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := filepathMatches(mustPatternMatcher(t, []string{"*.go"}), ".") + if match { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := filepathMatches(mustPatternMatcher(t, []string{}), "/any/path/there") + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} diff --git a/cli/cli/command/image/build/context_unix.go b/cli/cli/command/image/build/context_unix.go new file mode 100644 index 00000000..cb2634f0 --- /dev/null +++ b/cli/cli/command/image/build/context_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package build + +import ( + "path/filepath" +) + +func getContextRoot(srcPath string) (string, error) { + return filepath.Join(srcPath, "."), nil +} diff --git a/cli/cli/command/image/build/context_windows.go b/cli/cli/command/image/build/context_windows.go new file mode 100644 index 00000000..c577cfa7 --- /dev/null +++ b/cli/cli/command/image/build/context_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package build + +import ( + "path/filepath" + + "github.com/docker/docker/pkg/longpath" +) + +func getContextRoot(srcPath string) (string, error) { + cr, err := filepath.Abs(srcPath) + if err != nil { + return "", err + } + return longpath.AddPrefix(cr), nil +} diff --git a/cli/cli/command/image/build/dockerignore.go b/cli/cli/command/image/build/dockerignore.go new file mode 100644 index 00000000..497c3f24 --- /dev/null +++ b/cli/cli/command/image/build/dockerignore.go @@ -0,0 +1,39 @@ +package build + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" +) + +// ReadDockerignore reads the .dockerignore file in the context directory and +// returns the list of paths to exclude +func ReadDockerignore(contextDir string) ([]string, error) 
{ + var excludes []string + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + switch { + case os.IsNotExist(err): + return excludes, nil + case err != nil: + return nil, err + } + defer f.Close() + + return dockerignore.ReadAll(f) +} + +// TrimBuildFilesFromExcludes removes the named Dockerfile and .dockerignore from +// the list of excluded files. The daemon will remove them from the final context +// but they must be in available in the context when passed to the API. +func TrimBuildFilesFromExcludes(excludes []string, dockerfile string, dockerfileFromStdin bool) []string { + if keep, _ := fileutils.Matches(".dockerignore", excludes); keep { + excludes = append(excludes, "!.dockerignore") + } + if keep, _ := fileutils.Matches(dockerfile, excludes); keep && !dockerfileFromStdin { + excludes = append(excludes, "!"+dockerfile) + } + return excludes +} diff --git a/cli/cli/command/image/build_buildkit.go b/cli/cli/command/image/build_buildkit.go new file mode 100644 index 00000000..b3b978c5 --- /dev/null +++ b/cli/cli/command/image/build_buildkit.go @@ -0,0 +1,497 @@ +package image + +import ( + "bytes" + "context" + "encoding/csv" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "strings" + + "github.com/containerd/console" + "github.com/containerd/containerd/platforms" + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image/build" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/urlutil" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/auth/authprovider" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/session/secrets/secretsprovider" + 
"github.com/moby/buildkit/session/sshforward/sshprovider" + "github.com/moby/buildkit/util/appcontext" + "github.com/moby/buildkit/util/progress/progressui" + "github.com/pkg/errors" + fsutiltypes "github.com/tonistiigi/fsutil/types" + "golang.org/x/sync/errgroup" +) + +const uploadRequestRemote = "upload-request" + +var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles") + +//nolint: gocyclo +func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error { + ctx := appcontext.Context() + + s, err := trySession(dockerCli, options.context, false) + if err != nil { + return err + } + if s == nil { + return errors.Errorf("buildkit not supported by daemon") + } + + if options.imageIDFile != "" { + // Avoid leaving a stale file if we eventually fail + if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "removing image ID file") + } + } + + var ( + remote string + body io.Reader + dockerfileName = options.dockerfileName + dockerfileReader io.ReadCloser + dockerfileDir string + contextDir string + ) + + stdoutUsed := false + + switch { + case options.contextFromStdin(): + if options.dockerfileFromStdin() { + return errStdinConflict + } + rc, isArchive, err := build.DetectArchiveReader(os.Stdin) + if err != nil { + return err + } + if isArchive { + body = rc + remote = uploadRequestRemote + } else { + if options.dockerfileName != "" { + return errDockerfileConflict + } + dockerfileReader = rc + remote = clientSessionRemote + // TODO: make fssync handle empty contextdir + contextDir, _ = ioutil.TempDir("", "empty-dir") + defer os.RemoveAll(contextDir) + } + case isLocalDir(options.context): + contextDir = options.context + if options.dockerfileFromStdin() { + dockerfileReader = os.Stdin + } else if options.dockerfileName != "" { + dockerfileName = filepath.Base(options.dockerfileName) + dockerfileDir = filepath.Dir(options.dockerfileName) + } else { + 
dockerfileDir = options.context + } + remote = clientSessionRemote + case urlutil.IsGitURL(options.context): + remote = options.context + case urlutil.IsURL(options.context): + remote = options.context + default: + return errors.Errorf("unable to prepare context: path %q not found", options.context) + } + + if dockerfileReader != nil { + dockerfileName = build.DefaultDockerfileName + dockerfileDir, err = build.WriteTempDockerfile(dockerfileReader) + if err != nil { + return err + } + defer os.RemoveAll(dockerfileDir) + } + + outputs, err := parseOutputs(options.outputs) + if err != nil { + return errors.Wrapf(err, "failed to parse outputs") + } + + for _, out := range outputs { + switch out.Type { + case "local": + // dest is handled on client side for local exporter + outDir, ok := out.Attrs["dest"] + if !ok { + return errors.Errorf("dest is required for local output") + } + delete(out.Attrs, "dest") + s.Allow(filesync.NewFSSyncTargetDir(outDir)) + case "tar": + // dest is handled on client side for tar exporter + outFile, ok := out.Attrs["dest"] + if !ok { + return errors.Errorf("dest is required for tar output") + } + var w io.WriteCloser + if outFile == "-" { + if _, err := console.ConsoleFromFile(os.Stdout); err == nil { + return errors.Errorf("refusing to write output to console") + } + w = os.Stdout + stdoutUsed = true + } else { + f, err := os.Create(outFile) + if err != nil { + return errors.Wrapf(err, "failed to open %s", outFile) + } + w = f + } + output := func(map[string]string) (io.WriteCloser, error) { return w, nil } + s.Allow(filesync.NewFSSyncTarget(output)) + } + } + + if dockerfileDir != "" { + s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{ + { + Name: "context", + Dir: contextDir, + Map: resetUIDAndGID, + }, + { + Name: "dockerfile", + Dir: dockerfileDir, + }, + })) + } + + s.Allow(authprovider.NewDockerAuthProvider(os.Stderr)) + if len(options.secrets) > 0 { + sp, err := parseSecretSpecs(options.secrets) + if err != nil { + return 
errors.Wrapf(err, "could not parse secrets: %v", options.secrets) + } + s.Allow(sp) + } + if len(options.ssh) > 0 { + sshp, err := parseSSHSpecs(options.ssh) + if err != nil { + return errors.Wrapf(err, "could not parse ssh: %v", options.ssh) + } + s.Allow(sshp) + } + + eg, ctx := errgroup.WithContext(ctx) + + dialSession := func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + return dockerCli.Client().DialHijack(ctx, "/session", proto, meta) + } + eg.Go(func() error { + return s.Run(context.TODO(), dialSession) + }) + + buildID := stringid.GenerateRandomID() + if body != nil { + eg.Go(func() error { + buildOptions := types.ImageBuildOptions{ + Version: types.BuilderBuildKit, + BuildID: uploadRequestRemote + ":" + buildID, + } + + response, err := dockerCli.Client().ImageBuild(context.Background(), body, buildOptions) + if err != nil { + return err + } + defer response.Body.Close() + return nil + }) + } + + if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && options.progress == "auto" { + options.progress = v + } + + if strings.EqualFold(options.platform, "local") { + options.platform = platforms.DefaultString() + } + + eg.Go(func() error { + defer func() { // make sure the Status ends cleanly on build errors + s.Close() + }() + + buildOptions := imageBuildOptions(dockerCli, options) + buildOptions.Version = types.BuilderBuildKit + buildOptions.Dockerfile = dockerfileName + //buildOptions.AuthConfigs = authConfigs // handled by session + buildOptions.RemoteContext = remote + buildOptions.SessionID = s.ID() + buildOptions.BuildID = buildID + buildOptions.Outputs = outputs + return doBuild(ctx, eg, dockerCli, stdoutUsed, options, buildOptions) + }) + + return eg.Wait() +} + +//nolint: gocyclo +func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, stdoutUsed bool, options buildOptions, buildOptions types.ImageBuildOptions) (finalErr error) { + response, err := dockerCli.Client().ImageBuild(context.Background(), 
nil, buildOptions) + if err != nil { + return err + } + defer response.Body.Close() + + done := make(chan struct{}) + defer close(done) + eg.Go(func() error { + select { + case <-ctx.Done(): + return dockerCli.Client().BuildCancel(context.TODO(), buildOptions.BuildID) + case <-done: + } + return nil + }) + + t := newTracer() + ssArr := []*client.SolveStatus{} + + if err := opts.ValidateProgressOutput(options.progress); err != nil { + return err + } + + displayStatus := func(out *os.File, displayCh chan *client.SolveStatus) { + var c console.Console + // TODO: Handle tty output in non-tty environment. + if cons, err := console.ConsoleFromFile(out); err == nil && (options.progress == "auto" || options.progress == "tty") { + c = cons + } + // not using shared context to not disrupt display but let is finish reporting errors + eg.Go(func() error { + return progressui.DisplaySolveStatus(context.TODO(), "", c, out, displayCh) + }) + } + + if options.quiet { + eg.Go(func() error { + // TODO: make sure t.displayCh closes + for ss := range t.displayCh { + ssArr = append(ssArr, ss) + } + <-done + // TODO: verify that finalErr is indeed set when error occurs + if finalErr != nil { + displayCh := make(chan *client.SolveStatus) + go func() { + for _, ss := range ssArr { + displayCh <- ss + } + close(displayCh) + }() + displayStatus(os.Stderr, displayCh) + } + return nil + }) + } else { + displayStatus(os.Stderr, t.displayCh) + } + defer close(t.displayCh) + + buf := bytes.NewBuffer(nil) + + imageID := "" + writeAux := func(msg jsonmessage.JSONMessage) { + if msg.ID == "moby.image.id" { + var result types.BuildResult + if err := json.Unmarshal(*msg.Aux, &result); err != nil { + fmt.Fprintf(dockerCli.Err(), "failed to parse aux message: %v", err) + } + imageID = result.ID + return + } + t.write(msg) + } + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buf, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), writeAux) + if err != nil { + if jerr, ok := 
err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + // + // TODO: we may want to use Aux messages with ID "moby.image.id" regardless of options.quiet (i.e. don't send HTTP param q=1) + // instead of assuming that output is image ID if options.quiet. + if options.quiet && !stdoutUsed { + imageID = buf.String() + fmt.Fprint(dockerCli.Out(), imageID) + } + + if options.imageIDFile != "" { + if imageID == "" { + return errors.Errorf("cannot write %s because server did not provide an image ID", options.imageIDFile) + } + imageID = strings.TrimSpace(imageID) + if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil { + return errors.Wrap(err, "cannot write image ID file") + } + } + return err +} + +func resetUIDAndGID(_ string, s *fsutiltypes.Stat) bool { + s.Uid = 0 + s.Gid = 0 + return true +} + +type tracer struct { + displayCh chan *client.SolveStatus +} + +func newTracer() *tracer { + return &tracer{ + displayCh: make(chan *client.SolveStatus), + } +} + +func (t *tracer) write(msg jsonmessage.JSONMessage) { + var resp controlapi.StatusResponse + + if msg.ID != "moby.buildkit.trace" { + return + } + + var dt []byte + // ignoring all messages that are not understood + if err := json.Unmarshal(*msg.Aux, &dt); err != nil { + return + } + if err := (&resp).Unmarshal(dt); err != nil { + return + } + + s := client.SolveStatus{} + for _, v := range resp.Vertexes { + s.Vertexes = append(s.Vertexes, &client.Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + }) + } + for _, v := range resp.Statuses { + s.Statuses = append(s.Statuses, &client.VertexStatus{ + ID: v.ID, + 
Vertex: v.Vertex, + Name: v.Name, + Total: v.Total, + Current: v.Current, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for _, v := range resp.Logs { + s.Logs = append(s.Logs, &client.VertexLog{ + Vertex: v.Vertex, + Stream: int(v.Stream), + Data: v.Msg, + Timestamp: v.Timestamp, + }) + } + + t.displayCh <- &s +} + +func parseSecretSpecs(sl []string) (session.Attachable, error) { + fs := make([]secretsprovider.FileSource, 0, len(sl)) + for _, v := range sl { + s, err := parseSecret(v) + if err != nil { + return nil, err + } + fs = append(fs, *s) + } + store, err := secretsprovider.NewFileStore(fs) + if err != nil { + return nil, err + } + return secretsprovider.NewSecretProvider(store), nil +} + +func parseSecret(value string) (*secretsprovider.FileSource, error) { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return nil, errors.Wrap(err, "failed to parse csv secret") + } + + fs := secretsprovider.FileSource{} + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) != 2 { + return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "type": + if value != "file" { + return nil, errors.Errorf("unsupported secret type %q", value) + } + case "id": + fs.ID = value + case "source", "src": + fs.FilePath = value + default: + return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + return &fs, nil +} + +func parseSSHSpecs(sl []string) (session.Attachable, error) { + configs := make([]sshprovider.AgentConfig, 0, len(sl)) + for _, v := range sl { + c, err := parseSSH(v) + if err != nil { + return nil, err + } + configs = append(configs, *c) + } + return sshprovider.NewSSHAgentProvider(configs) +} + +func parseSSH(value string) (*sshprovider.AgentConfig, error) { + parts := strings.SplitN(value, "=", 2) + cfg := 
sshprovider.AgentConfig{ + ID: parts[0], + } + if len(parts) > 1 { + cfg.Paths = strings.Split(parts[1], ",") + } + return &cfg, nil +} diff --git a/cli/cli/command/image/build_session.go b/cli/cli/command/image/build_session.go new file mode 100644 index 00000000..c1912075 --- /dev/null +++ b/cli/cli/command/image/build_session.go @@ -0,0 +1,161 @@ +package image + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image/build" + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/progress" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/pkg/errors" + "golang.org/x/time/rate" +) + +const clientSessionRemote = "client-session" + +func isSessionSupported(dockerCli command.Cli, forStream bool) bool { + if !forStream && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.39") { + return true + } + return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31") +} + +func trySession(dockerCli command.Cli, contextDir string, forStream bool) (*session.Session, error) { + var s *session.Session + if isSessionSupported(dockerCli, forStream) { + sharedKey, err := getBuildSharedKey(contextDir) + if err != nil { + return nil, errors.Wrap(err, "failed to get build shared key") + } + s, err = session.NewSession(context.Background(), filepath.Base(contextDir), sharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create session") + } + } + return s, nil +} + +func addDirToSession(session *session.Session, contextDir string, progressOutput progress.Output, done chan error) error { + excludes, err := build.ReadDockerignore(contextDir) + if err != nil { + return err + } + + p := 
&sizeProgress{out: progressOutput, action: "Streaming build context to Docker daemon"} + + workdirProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{ + {Dir: contextDir, Excludes: excludes}, + }) + session.Allow(workdirProvider) + + // this will be replaced on parallel build jobs. keep the current + // progressbar for now + if snpc, ok := workdirProvider.(interface { + SetNextProgressCallback(func(int, bool), chan error) + }); ok { + snpc.SetNextProgressCallback(p.update, done) + } + + return nil +} + +type sizeProgress struct { + out progress.Output + action string + limiter *rate.Limiter +} + +func (sp *sizeProgress) update(size int, last bool) { + if sp.limiter == nil { + sp.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1) + } + if last || sp.limiter.Allow() { + sp.out.WriteProgress(progress.Progress{Action: sp.action, Current: int64(size), LastUpdate: last}) + } +} + +type bufferedWriter struct { + done chan error + io.Writer + buf *bytes.Buffer + flushed chan struct{} + mu sync.Mutex +} + +func newBufferedWriter(done chan error, w io.Writer) *bufferedWriter { + bw := &bufferedWriter{done: done, Writer: w, buf: new(bytes.Buffer), flushed: make(chan struct{})} + go func() { + <-done + bw.flushBuffer() + }() + return bw +} + +func (bw *bufferedWriter) Write(dt []byte) (int, error) { + select { + case <-bw.done: + bw.flushBuffer() + return bw.Writer.Write(dt) + default: + return bw.buf.Write(dt) + } +} + +func (bw *bufferedWriter) flushBuffer() { + bw.mu.Lock() + select { + case <-bw.flushed: + default: + bw.Writer.Write(bw.buf.Bytes()) + close(bw.flushed) + } + bw.mu.Unlock() +} + +func (bw *bufferedWriter) String() string { + return fmt.Sprintf("%s", bw.Writer) +} + +func getBuildSharedKey(dir string) (string, error) { + // build session is hash of build dir with node based randomness + s := sha256.Sum256([]byte(fmt.Sprintf("%s:%s", tryNodeIdentifier(), dir))) + return hex.EncodeToString(s[:]), nil +} + +func tryNodeIdentifier() string { + 
out := cliconfig.Dir() // return config dir as default on permission error + if err := os.MkdirAll(cliconfig.Dir(), 0700); err == nil { + sessionFile := filepath.Join(cliconfig.Dir(), ".buildNodeID") + if _, err := os.Lstat(sessionFile); err != nil { + if os.IsNotExist(err) { // create a new file with stored randomness + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + return out + } + if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil { + return out + } + } + } + + dt, err := ioutil.ReadFile(sessionFile) + if err == nil { + return string(dt) + } + } + return out +} diff --git a/cli/cli/command/image/build_test.go b/cli/cli/command/image/build_test.go new file mode 100644 index 00000000..402aa3ae --- /dev/null +++ b/cli/cli/command/image/build_test.go @@ -0,0 +1,278 @@ +package image + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/docker/cli/cli/streams" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/google/go-cmp/cmp" + "github.com/moby/buildkit/session/secrets/secretsprovider" + "gotest.tools/assert" + "gotest.tools/fs" + "gotest.tools/skip" +) + +func TestRunBuildDockerfileFromStdinWithCompress(t *testing.T) { + buffer := new(bytes.Buffer) + fakeBuild := newFakeBuild() + fakeImageBuild := func(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + tee := io.TeeReader(context, buffer) + gzipReader, err := gzip.NewReader(tee) + assert.NilError(t, err) + return fakeBuild.build(ctx, gzipReader, options) + } + + cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeImageBuild}) + dockerfile := bytes.NewBufferString(` + FROM alpine:3.6 + COPY foo / + `) + cli.SetIn(streams.NewIn(ioutil.NopCloser(dockerfile))) + + dir := fs.NewDir(t, t.Name(), + fs.WithFile("foo", 
"some content")) + defer dir.Remove() + + options := newBuildOptions() + options.compress = true + options.dockerfileName = "-" + options.context = dir.Path() + options.untrusted = true + assert.NilError(t, runBuild(cli, options)) + + expected := []string{fakeBuild.options.Dockerfile, ".dockerignore", "foo"} + assert.DeepEqual(t, expected, fakeBuild.filenames(t)) + + header := buffer.Bytes()[:10] + assert.Equal(t, archive.Gzip, archive.DetectCompression(header)) +} + +func TestRunBuildResetsUidAndGidInContext(t *testing.T) { + skip.If(t, os.Getuid() != 0, "root is required to chown files") + fakeBuild := newFakeBuild() + cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build}) + + dir := fs.NewDir(t, "test-build-context", + fs.WithFile("foo", "some content", fs.AsUser(65534, 65534)), + fs.WithFile("Dockerfile", ` + FROM alpine:3.6 + COPY foo bar / + `), + ) + defer dir.Remove() + + options := newBuildOptions() + options.context = dir.Path() + options.untrusted = true + assert.NilError(t, runBuild(cli, options)) + + headers := fakeBuild.headers(t) + expected := []*tar.Header{ + {Name: "Dockerfile"}, + {Name: "foo"}, + } + var cmpTarHeaderNameAndOwner = cmp.Comparer(func(x, y tar.Header) bool { + return x.Name == y.Name && x.Uid == y.Uid && x.Gid == y.Gid + }) + assert.DeepEqual(t, expected, headers, cmpTarHeaderNameAndOwner) +} + +func TestRunBuildDockerfileOutsideContext(t *testing.T) { + dir := fs.NewDir(t, t.Name(), + fs.WithFile("data", "data file")) + defer dir.Remove() + + // Dockerfile outside of build-context + df := fs.NewFile(t, t.Name(), + fs.WithContent(` +FROM FOOBAR +COPY data /data + `), + ) + defer df.Remove() + + fakeBuild := newFakeBuild() + cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build}) + + options := newBuildOptions() + options.context = dir.Path() + options.dockerfileName = df.Path() + options.untrusted = true + assert.NilError(t, runBuild(cli, options)) + + expected := []string{fakeBuild.options.Dockerfile, 
".dockerignore", "data"} + assert.DeepEqual(t, expected, fakeBuild.filenames(t)) +} + +// TestRunBuildFromLocalGitHubDirNonExistingRepo tests that build contexts +// starting with `github.com/` are special-cased, and the build command attempts +// to clone the remote repo. +// TODO: test "context selection" logic directly when runBuild is refactored +// to support testing (ex: docker/cli#294) +func TestRunBuildFromGitHubSpecialCase(t *testing.T) { + cmd := NewBuildCommand(test.NewFakeCli(nil)) + // Clone a small repo that exists so git doesn't prompt for credentials + cmd.SetArgs([]string{"github.com/docker/for-win"}) + cmd.SetOutput(ioutil.Discard) + err := cmd.Execute() + assert.ErrorContains(t, err, "unable to prepare context") + assert.ErrorContains(t, err, "docker-build-git") +} + +// TestRunBuildFromLocalGitHubDirNonExistingRepo tests that a local directory +// starting with `github.com` takes precedence over the `github.com` special +// case. +func TestRunBuildFromLocalGitHubDir(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-build-from-local-dir-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + buildDir := filepath.Join(tmpDir, "github.com", "docker", "no-such-repository") + err = os.MkdirAll(buildDir, 0777) + assert.NilError(t, err) + err = ioutil.WriteFile(filepath.Join(buildDir, "Dockerfile"), []byte("FROM busybox\n"), 0644) + assert.NilError(t, err) + + client := test.NewFakeCli(&fakeClient{}) + cmd := NewBuildCommand(client) + cmd.SetArgs([]string{buildDir}) + cmd.SetOutput(ioutil.Discard) + err = cmd.Execute() + assert.NilError(t, err) +} + +func TestRunBuildWithSymlinkedContext(t *testing.T) { + dockerfile := ` +FROM alpine:3.6 +RUN echo hello world +` + + tmpDir := fs.NewDir(t, t.Name(), + fs.WithDir("context", + fs.WithFile("Dockerfile", dockerfile)), + fs.WithSymlink("context-link", "context")) + defer tmpDir.Remove() + + fakeBuild := newFakeBuild() + cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build}) + 
options := newBuildOptions() + options.context = tmpDir.Join("context-link") + options.untrusted = true + assert.NilError(t, runBuild(cli, options)) + + assert.DeepEqual(t, fakeBuild.filenames(t), []string{"Dockerfile"}) +} + +func TestParseSecret(t *testing.T) { + type testcase struct { + value string + errExpected bool + errMatch string + filesource *secretsprovider.FileSource + } + var testcases = []testcase{ + { + value: "", + errExpected: true, + }, { + value: "foobar", + errExpected: true, + errMatch: "must be a key=value pair", + }, { + value: "foo,bar", + errExpected: true, + errMatch: "must be a key=value pair", + }, { + value: "foo=bar", + errExpected: true, + errMatch: "unexpected key", + }, { + value: "src=somefile", + filesource: &secretsprovider.FileSource{FilePath: "somefile"}, + }, { + value: "source=somefile", + filesource: &secretsprovider.FileSource{FilePath: "somefile"}, + }, { + value: "id=mysecret", + filesource: &secretsprovider.FileSource{ID: "mysecret"}, + }, { + value: "id=mysecret,src=somefile", + filesource: &secretsprovider.FileSource{ID: "mysecret", FilePath: "somefile"}, + }, { + value: "id=mysecret,source=somefile,type=file", + filesource: &secretsprovider.FileSource{ID: "mysecret", FilePath: "somefile"}, + }, { + value: "id=mysecret,src=somefile,src=othersecretfile", + filesource: &secretsprovider.FileSource{ID: "mysecret", FilePath: "othersecretfile"}, + }, { + value: "type=invalid", + errExpected: true, + errMatch: "unsupported secret type", + }, + } + + for _, tc := range testcases { + t.Run(tc.value, func(t *testing.T) { + secret, err := parseSecret(tc.value) + assert.Equal(t, err != nil, tc.errExpected, fmt.Sprintf("err=%v errExpected=%t", err, tc.errExpected)) + if tc.errMatch != "" { + assert.ErrorContains(t, err, tc.errMatch) + } + assert.DeepEqual(t, secret, tc.filesource) + }) + } +} + +type fakeBuild struct { + context *tar.Reader + options types.ImageBuildOptions +} + +func newFakeBuild() *fakeBuild { + return 
&fakeBuild{} +} + +func (f *fakeBuild) build(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + f.context = tar.NewReader(context) + f.options = options + body := new(bytes.Buffer) + return types.ImageBuildResponse{Body: ioutil.NopCloser(body)}, nil +} + +func (f *fakeBuild) headers(t *testing.T) []*tar.Header { + t.Helper() + headers := []*tar.Header{} + for { + hdr, err := f.context.Next() + switch err { + case io.EOF: + return headers + case nil: + headers = append(headers, hdr) + default: + assert.NilError(t, err) + } + } +} + +func (f *fakeBuild) filenames(t *testing.T) []string { + t.Helper() + names := []string{} + for _, header := range f.headers(t) { + names = append(names, header.Name) + } + sort.Strings(names) + return names +} diff --git a/cli/cli/command/image/client_test.go b/cli/cli/command/image/client_test.go new file mode 100644 index 00000000..50e46f4e --- /dev/null +++ b/cli/cli/command/image/client_test.go @@ -0,0 +1,124 @@ +package image + +import ( + "context" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + imageTagFunc func(string, string) error + imageSaveFunc func(images []string) (io.ReadCloser, error) + imageRemoveFunc func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + imagePushFunc func(ref string, options types.ImagePushOptions) (io.ReadCloser, error) + infoFunc func() (types.Info, error) + imagePullFunc func(ref string, options types.ImagePullOptions) (io.ReadCloser, error) + imagesPruneFunc func(pruneFilter filters.Args) (types.ImagesPruneReport, error) + imageLoadFunc func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) + imageListFunc func(options types.ImageListOptions) ([]types.ImageSummary, error) + 
imageInspectFunc func(image string) (types.ImageInspect, []byte, error) + imageImportFunc func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + imageHistoryFunc func(image string) ([]image.HistoryResponseItem, error) + imageBuildFunc func(context.Context, io.Reader, types.ImageBuildOptions) (types.ImageBuildResponse, error) +} + +func (cli *fakeClient) ImageTag(_ context.Context, image, ref string) error { + if cli.imageTagFunc != nil { + return cli.imageTagFunc(image, ref) + } + return nil +} + +func (cli *fakeClient) ImageSave(_ context.Context, images []string) (io.ReadCloser, error) { + if cli.imageSaveFunc != nil { + return cli.imageSaveFunc(images) + } + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (cli *fakeClient) ImageRemove(_ context.Context, image string, + options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + if cli.imageRemoveFunc != nil { + return cli.imageRemoveFunc(image, options) + } + return []types.ImageDeleteResponseItem{}, nil +} + +func (cli *fakeClient) ImagePush(_ context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + if cli.imagePushFunc != nil { + return cli.imagePushFunc(ref, options) + } + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (cli *fakeClient) Info(_ context.Context) (types.Info, error) { + if cli.infoFunc != nil { + return cli.infoFunc() + } + return types.Info{}, nil +} + +func (cli *fakeClient) ImagePull(_ context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + if cli.imagePullFunc != nil { + cli.imagePullFunc(ref, options) + } + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (cli *fakeClient) ImagesPrune(_ context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) { + if cli.imagesPruneFunc != nil { + return cli.imagesPruneFunc(pruneFilter) + } + return types.ImagesPruneReport{}, nil +} + +func (cli *fakeClient) 
ImageLoad(_ context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + if cli.imageLoadFunc != nil { + return cli.imageLoadFunc(input, quiet) + } + return types.ImageLoadResponse{}, nil +} + +func (cli *fakeClient) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + if cli.imageListFunc != nil { + return cli.imageListFunc(options) + } + return []types.ImageSummary{{}}, nil +} + +func (cli *fakeClient) ImageInspectWithRaw(_ context.Context, image string) (types.ImageInspect, []byte, error) { + if cli.imageInspectFunc != nil { + return cli.imageInspectFunc(image) + } + return types.ImageInspect{}, nil, nil +} + +func (cli *fakeClient) ImageImport(_ context.Context, source types.ImageImportSource, ref string, + options types.ImageImportOptions) (io.ReadCloser, error) { + if cli.imageImportFunc != nil { + return cli.imageImportFunc(source, ref, options) + } + return ioutil.NopCloser(strings.NewReader("")), nil +} + +func (cli *fakeClient) ImageHistory(_ context.Context, img string) ([]image.HistoryResponseItem, error) { + if cli.imageHistoryFunc != nil { + return cli.imageHistoryFunc(img) + } + return []image.HistoryResponseItem{{ID: img, Created: time.Now().Unix()}}, nil +} + +func (cli *fakeClient) ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + if cli.imageBuildFunc != nil { + return cli.imageBuildFunc(ctx, context, options) + } + return types.ImageBuildResponse{Body: ioutil.NopCloser(strings.NewReader(""))}, nil +} diff --git a/cli/cli/command/image/cmd.go b/cli/cli/command/image/cmd.go new file mode 100644 index 00000000..a12bf339 --- /dev/null +++ b/cli/cli/command/image/cmd.go @@ -0,0 +1,33 @@ +package image + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewImageCommand returns a cobra command for `image` subcommands +func NewImageCommand(dockerCli 
command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "image", + Short: "Manage images", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewBuildCommand(dockerCli), + NewHistoryCommand(dockerCli), + NewImportCommand(dockerCli), + NewLoadCommand(dockerCli), + NewPullCommand(dockerCli), + NewPushCommand(dockerCli), + NewSaveCommand(dockerCli), + NewTagCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/image/formatter_history.go b/cli/cli/command/image/formatter_history.go new file mode 100644 index 00000000..723066a5 --- /dev/null +++ b/cli/cli/command/image/formatter_history.go @@ -0,0 +1,110 @@ +package image + +import ( + "strconv" + "strings" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" +) + +const ( + defaultHistoryTableFormat = "table {{.ID}}\t{{.CreatedSince}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}" + nonHumanHistoryTableFormat = "table {{.ID}}\t{{.CreatedAt}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}" + + historyIDHeader = "IMAGE" + createdByHeader = "CREATED BY" + commentHeader = "COMMENT" +) + +// NewHistoryFormat returns a format for rendering an HistoryContext +func NewHistoryFormat(source string, quiet bool, human bool) formatter.Format { + switch source { + case formatter.TableFormatKey: + switch { + case quiet: + return formatter.DefaultQuietFormat + case !human: + return nonHumanHistoryTableFormat + default: + return defaultHistoryTableFormat + } + } + + return formatter.Format(source) +} + +// HistoryWrite writes the context +func HistoryWrite(ctx formatter.Context, human bool, histories []image.HistoryResponseItem) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, history := range 
histories { + historyCtx := &historyContext{trunc: ctx.Trunc, h: history, human: human} + if err := format(historyCtx); err != nil { + return err + } + } + return nil + } + historyCtx := &historyContext{} + historyCtx.Header = formatter.SubHeaderContext{ + "ID": historyIDHeader, + "CreatedSince": formatter.CreatedSinceHeader, + "CreatedAt": formatter.CreatedAtHeader, + "CreatedBy": createdByHeader, + "Size": formatter.SizeHeader, + "Comment": commentHeader, + } + return ctx.Write(historyCtx, render) +} + +type historyContext struct { + formatter.HeaderContext + trunc bool + human bool + h image.HistoryResponseItem +} + +func (c *historyContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *historyContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.h.ID) + } + return c.h.ID +} + +func (c *historyContext) CreatedAt() string { + return time.Unix(c.h.Created, 0).Format(time.RFC3339) +} + +func (c *historyContext) CreatedSince() string { + if !c.human { + return c.CreatedAt() + } + created := units.HumanDuration(time.Now().UTC().Sub(time.Unix(c.h.Created, 0))) + return created + " ago" +} + +func (c *historyContext) CreatedBy() string { + createdBy := strings.Replace(c.h.CreatedBy, "\t", " ", -1) + if c.trunc { + return formatter.Ellipsis(createdBy, 45) + } + return createdBy +} + +func (c *historyContext) Size() string { + if c.human { + return units.HumanSizeWithPrecision(float64(c.h.Size), 3) + } + return strconv.FormatInt(c.h.Size, 10) +} + +func (c *historyContext) Comment() string { + return c.h.Comment +} diff --git a/cli/cli/command/image/formatter_history_test.go b/cli/cli/command/image/formatter_history_test.go new file mode 100644 index 00000000..fe8e88a8 --- /dev/null +++ b/cli/cli/command/image/formatter_history_test.go @@ -0,0 +1,228 @@ +package image + +import ( + "bytes" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/internal/test" 
+ "github.com/docker/docker/api/types/image" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +type historyCase struct { + historyCtx historyContext + expValue string + call func() string +} + +func TestHistoryContext_ID(t *testing.T) { + id := stringid.GenerateRandomID() + + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{ID: id}, + trunc: false, + }, id, ctx.ID, + }, + { + historyContext{ + h: image.HistoryResponseItem{ID: id}, + trunc: true, + }, stringid.TruncateID(id), ctx.ID, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_CreatedSince(t *testing.T) { + dateStr := "2009-11-10T23:00:00Z" + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{Created: time.Now().AddDate(0, 0, -7).Unix()}, + trunc: false, + human: true, + }, "7 days ago", ctx.CreatedSince, + }, + { + historyContext{ + h: image.HistoryResponseItem{Created: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()}, + trunc: false, + human: false, + }, dateStr, ctx.CreatedSince, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_CreatedBy(t *testing.T) { + withTabs := `/bin/sh -c apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 && echo "deb http://nginx.org/packages/mainline/debian/ jessie nginx" >> /etc/apt/sources.list && apt-get update && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates nginx=${NGINX_VERSION} 
nginx-module-xslt nginx-module-geoip nginx-module-image-filter nginx-module-perl nginx-module-njs gettext-base && rm -rf /var/lib/apt/lists/*` // nolint: lll + expected := `/bin/sh -c apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 && echo "deb http://nginx.org/packages/mainline/debian/ jessie nginx" >> /etc/apt/sources.list && apt-get update && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates nginx=${NGINX_VERSION} nginx-module-xslt nginx-module-geoip nginx-module-image-filter nginx-module-perl nginx-module-njs gettext-base && rm -rf /var/lib/apt/lists/*` // nolint: lll + + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{CreatedBy: withTabs}, + trunc: false, + }, expected, ctx.CreatedBy, + }, + { + historyContext{ + h: image.HistoryResponseItem{CreatedBy: withTabs}, + trunc: true, + }, formatter.Ellipsis(expected, 45), ctx.CreatedBy, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_Size(t *testing.T) { + size := int64(182964289) + expected := "183MB" + + var ctx historyContext + cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{Size: size}, + trunc: false, + human: true, + }, expected, ctx.Size, + }, { + historyContext{ + h: image.HistoryResponseItem{Size: size}, + trunc: false, + human: false, + }, strconv.Itoa(182964289), ctx.Size, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_Comment(t *testing.T) { + comment := "Some comment" + + var ctx historyContext + 
cases := []historyCase{ + { + historyContext{ + h: image.HistoryResponseItem{Comment: comment}, + trunc: false, + }, comment, ctx.Comment, + }, + } + + for _, c := range cases { + ctx = c.historyCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestHistoryContext_Table(t *testing.T) { + out := bytes.NewBufferString("") + unixTime := time.Now().AddDate(0, 0, -1).Unix() + histories := []image.HistoryResponseItem{ + { + ID: "imageID1", + Created: unixTime, + CreatedBy: "/bin/bash ls && npm i && npm run test && karma -c karma.conf.js start && npm start && more commands here && the list goes on", + Size: int64(182964289), + Comment: "Hi", + Tags: []string{"image:tag2"}, + }, + {ID: "imageID2", Created: unixTime, CreatedBy: "/bin/bash echo", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}}, + {ID: "imageID3", Created: unixTime, CreatedBy: "/bin/bash ls", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}}, + {ID: "imageID4", Created: unixTime, CreatedBy: "/bin/bash grep", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}}, + } + // nolint: lll + expectedNoTrunc := `IMAGE CREATED CREATED BY SIZE COMMENT +imageID1 24 hours ago /bin/bash ls && npm i && npm run test && karma -c karma.conf.js start && npm start && more commands here && the list goes on 183MB Hi +imageID2 24 hours ago /bin/bash echo 183MB Hi +imageID3 24 hours ago /bin/bash ls 183MB Hi +imageID4 24 hours ago /bin/bash grep 183MB Hi +` + expectedTrunc := `IMAGE CREATED CREATED BY SIZE COMMENT +imageID1 24 hours ago /bin/bash ls && npm i && npm run test && kar… 183MB Hi +imageID2 24 hours ago /bin/bash echo 183MB Hi +imageID3 24 hours ago /bin/bash ls 183MB Hi +imageID4 24 hours ago /bin/bash grep 183MB Hi +` + + contexts := []struct { + context formatter.Context + expected string + }{ + {formatter.Context{ + 
Format: NewHistoryFormat("table", false, true), + Trunc: true, + Output: out, + }, + expectedTrunc, + }, + {formatter.Context{ + Format: NewHistoryFormat("table", false, true), + Trunc: false, + Output: out, + }, + expectedNoTrunc, + }, + } + + for _, context := range contexts { + HistoryWrite(context.context, true, histories) + assert.Check(t, is.Equal(context.expected, out.String())) + // Clean buffer + out.Reset() + } +} diff --git a/cli/cli/command/image/history.go b/cli/cli/command/image/history.go new file mode 100644 index 00000000..f1559a37 --- /dev/null +++ b/cli/cli/command/image/history.go @@ -0,0 +1,64 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type historyOptions struct { + image string + + human bool + quiet bool + noTrunc bool + format string +} + +// NewHistoryCommand creates a new `docker history` command +func NewHistoryCommand(dockerCli command.Cli) *cobra.Command { + var opts historyOptions + + cmd := &cobra.Command{ + Use: "history [OPTIONS] IMAGE", + Short: "Show the history of an image", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + return runHistory(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + + return cmd +} + +func runHistory(dockerCli command.Cli, opts historyOptions) error { + ctx := context.Background() + + history, err := dockerCli.Client().ImageHistory(ctx, opts.image) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + format = 
formatter.TableFormatKey + } + + historyCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewHistoryFormat(format, opts.quiet, opts.human), + Trunc: !opts.noTrunc, + } + return HistoryWrite(historyCtx, opts.human, history) +} diff --git a/cli/cli/command/image/history_test.go b/cli/cli/command/image/history_test.go new file mode 100644 index 00000000..ad2beb9a --- /dev/null +++ b/cli/cli/command/image/history_test.go @@ -0,0 +1,105 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/image" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/golden" + "gotest.tools/skip" +) + +func TestNewHistoryCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imageHistoryFunc func(img string) ([]image.HistoryResponseItem, error) + }{ + { + name: "wrong-args", + args: []string{}, + expectedError: "requires exactly 1 argument.", + }, + { + name: "client-error", + args: []string{"image:tag"}, + expectedError: "something went wrong", + imageHistoryFunc: func(img string) ([]image.HistoryResponseItem, error) { + return []image.HistoryResponseItem{{}}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cmd := NewHistoryCommand(test.NewFakeCli(&fakeClient{imageHistoryFunc: tc.imageHistoryFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func notUTCTimezone() bool { + now := time.Now() + return now != now.UTC() +} + +func TestNewHistoryCommandSuccess(t *testing.T) { + skip.If(t, notUTCTimezone, "expected output requires UTC timezone") + testCases := []struct { + name string + args []string + imageHistoryFunc func(img string) ([]image.HistoryResponseItem, error) + }{ + { + name: "simple", + args: []string{"image:tag"}, + imageHistoryFunc: func(img string) ([]image.HistoryResponseItem, 
error) { + return []image.HistoryResponseItem{{ + ID: "1234567890123456789", + Created: time.Now().Unix(), + }}, nil + }, + }, + { + name: "quiet", + args: []string{"--quiet", "image:tag"}, + }, + { + name: "non-human", + args: []string{"--human=false", "image:tag"}, + imageHistoryFunc: func(img string) ([]image.HistoryResponseItem, error) { + return []image.HistoryResponseItem{{ + ID: "abcdef", + Created: time.Date(2017, 1, 1, 12, 0, 3, 0, time.UTC).Unix(), + CreatedBy: "rose", + Comment: "new history item!", + }}, nil + }, + }, + { + name: "quiet-no-trunc", + args: []string{"--quiet", "--no-trunc", "image:tag"}, + imageHistoryFunc: func(img string) ([]image.HistoryResponseItem, error) { + return []image.HistoryResponseItem{{ + ID: "1234567890123456789", + Created: time.Now().Unix(), + }}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageHistoryFunc: tc.imageHistoryFunc}) + cmd := NewHistoryCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + actual := cli.OutBuffer().String() + golden.Assert(t, actual, fmt.Sprintf("history-command-success.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/image/import.go b/cli/cli/command/image/import.go new file mode 100644 index 00000000..2d2b7efc --- /dev/null +++ b/cli/cli/command/image/import.go @@ -0,0 +1,90 @@ +package image + +import ( + "context" + "io" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + dockeropts "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/urlutil" + "github.com/spf13/cobra" +) + +type importOptions struct { + source string + reference string + changes dockeropts.ListOpts + message string + platform string +} + +// NewImportCommand creates a new `docker import` command +func NewImportCommand(dockerCli command.Cli) *cobra.Command { + var options importOptions + + 
cmd := &cobra.Command{ + Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", + Short: "Import the contents from a tarball to create a filesystem image", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.source = args[0] + if len(args) > 1 { + options.reference = args[1] + } + return runImport(dockerCli, options) + }, + } + + flags := cmd.Flags() + + options.changes = dockeropts.NewListOpts(nil) + flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image") + flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image") + command.AddPlatformFlag(flags, &options.platform) + + return cmd +} + +func runImport(dockerCli command.Cli, options importOptions) error { + var ( + in io.Reader + srcName = options.source + ) + + if options.source == "-" { + in = dockerCli.In() + } else if !urlutil.IsURL(options.source) { + srcName = "-" + file, err := os.Open(options.source) + if err != nil { + return err + } + defer file.Close() + in = file + } + + source := types.ImageImportSource{ + Source: in, + SourceName: srcName, + } + + importOptions := types.ImageImportOptions{ + Message: options.message, + Changes: options.changes.GetAll(), + Platform: options.platform, + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ImageImport(context.Background(), source, options.reference, importOptions) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/cli/cli/command/image/import_test.go b/cli/cli/command/image/import_test.go new file mode 100644 index 00000000..9e2fad61 --- /dev/null +++ b/cli/cli/command/image/import_test.go @@ -0,0 +1,97 @@ +package image + +import ( + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + 
is "gotest.tools/assert/cmp" +) + +func TestNewImportCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imageImportFunc func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + }{ + { + name: "wrong-args", + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + { + name: "import-failed", + args: []string{"testdata/import-command-success.input.txt"}, + expectedError: "something went wrong", + imageImportFunc: func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + return nil, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cmd := NewImportCommand(test.NewFakeCli(&fakeClient{imageImportFunc: tc.imageImportFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewImportCommandInvalidFile(t *testing.T) { + cmd := NewImportCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"testdata/import-command-success.unexistent-file"}) + assert.ErrorContains(t, cmd.Execute(), "testdata/import-command-success.unexistent-file") +} + +func TestNewImportCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imageImportFunc func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + }{ + { + name: "simple", + args: []string{"testdata/import-command-success.input.txt"}, + }, + { + name: "terminal-source", + args: []string{"-"}, + }, + { + name: "double", + args: []string{"-", "image:local"}, + imageImportFunc: func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + assert.Check(t, is.Equal("image:local", ref)) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + { + name: "message", + args: 
[]string{"--message", "test message", "-"}, + imageImportFunc: func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + assert.Check(t, is.Equal("test message", options.Message)) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + { + name: "change", + args: []string{"--change", "ENV DEBUG true", "-"}, + imageImportFunc: func(source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + assert.Check(t, is.Equal("ENV DEBUG true", options.Changes[0])) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + } + for _, tc := range testCases { + cmd := NewImportCommand(test.NewFakeCli(&fakeClient{imageImportFunc: tc.imageImportFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + } +} diff --git a/cli/cli/command/image/inspect.go b/cli/cli/command/image/inspect.go new file mode 100644 index 00000000..2044fcaf --- /dev/null +++ b/cli/cli/command/image/inspect.go @@ -0,0 +1,44 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker image inspect` +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] IMAGE [IMAGE...]", + Short: "Display detailed information on one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := 
dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ImageInspectWithRaw(ctx, ref) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/cli/cli/command/image/inspect_test.go b/cli/cli/command/image/inspect_test.go new file mode 100644 index 00000000..d881ae0a --- /dev/null +++ b/cli/cli/command/image/inspect_test.go @@ -0,0 +1,88 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNewInspectCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "wrong-args", + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + } + for _, tc := range testCases { + cmd := newInspectCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewInspectCommandSuccess(t *testing.T) { + imageInspectInvocationCount := 0 + testCases := []struct { + name string + args []string + imageCount int + imageInspectFunc func(image string) (types.ImageInspect, []byte, error) + }{ + { + name: "simple", + args: []string{"image"}, + imageCount: 1, + imageInspectFunc: func(image string) (types.ImageInspect, []byte, error) { + imageInspectInvocationCount++ + assert.Check(t, is.Equal("image", image)) + return types.ImageInspect{}, nil, nil + }, + }, + { + name: "format", + imageCount: 1, + args: []string{"--format='{{.ID}}'", "image"}, + imageInspectFunc: func(image string) (types.ImageInspect, []byte, error) { + imageInspectInvocationCount++ + return types.ImageInspect{ID: image}, nil, nil + }, + }, + { + name: "simple-many", + args: []string{"image1", "image2"}, + imageCount: 2, + 
imageInspectFunc: func(image string) (types.ImageInspect, []byte, error) { + imageInspectInvocationCount++ + if imageInspectInvocationCount == 1 { + assert.Check(t, is.Equal("image1", image)) + } else { + assert.Check(t, is.Equal("image2", image)) + } + return types.ImageInspect{}, nil, nil + }, + }, + } + for _, tc := range testCases { + imageInspectInvocationCount = 0 + cli := test.NewFakeCli(&fakeClient{imageInspectFunc: tc.imageInspectFunc}) + cmd := newInspectCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("inspect-command-success.%s.golden", tc.name)) + assert.Check(t, is.Equal(imageInspectInvocationCount, tc.imageCount)) + } +} diff --git a/cli/cli/command/image/list.go b/cli/cli/command/image/list.go new file mode 100644 index 00000000..2dd9786e --- /dev/null +++ b/cli/cli/command/image/list.go @@ -0,0 +1,96 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type imagesOptions struct { + matchName string + + quiet bool + all bool + noTrunc bool + showDigests bool + format string + filter opts.FilterOpt +} + +// NewImagesCommand creates a new `docker images` command +func NewImagesCommand(dockerCli command.Cli) *cobra.Command { + options := imagesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "images [OPTIONS] [REPOSITORY[:TAG]]", + Short: "List images", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + options.matchName = args[0] + } + return runImages(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVarP(&options.all, "all", "a", false, "Show 
all images (default hides intermediate images)") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVar(&options.showDigests, "digests", false, "Show digests") + flags.StringVar(&options.format, "format", "", "Pretty-print images using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + cmd := *NewImagesCommand(dockerCli) + cmd.Aliases = []string{"images", "list"} + cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" + return &cmd +} + +func runImages(dockerCli command.Cli, options imagesOptions) error { + ctx := context.Background() + + filters := options.filter.Value() + if options.matchName != "" { + filters.Add("reference", options.matchName) + } + + listOptions := types.ImageListOptions{ + All: options.all, + Filters: filters, + } + + images, err := dockerCli.Client().ImageList(ctx, listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().ImagesFormat + } else { + format = formatter.TableFormatKey + } + } + + imageCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewImageFormat(format, options.quiet, options.showDigests), + Trunc: !options.noTrunc, + }, + Digest: options.showDigests, + } + return formatter.ImageWrite(imageCtx, images) +} diff --git a/cli/cli/command/image/list_test.go b/cli/cli/command/image/list_test.go new file mode 100644 index 00000000..81394a79 --- /dev/null +++ b/cli/cli/command/image/list_test.go @@ -0,0 +1,98 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is 
"gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNewImagesCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imageListFunc func(options types.ImageListOptions) ([]types.ImageSummary, error) + }{ + { + name: "wrong-args", + args: []string{"arg1", "arg2"}, + expectedError: "requires at most 1 argument.", + }, + { + name: "failed-list", + expectedError: "something went wrong", + imageListFunc: func(options types.ImageListOptions) ([]types.ImageSummary, error) { + return []types.ImageSummary{{}}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cmd := NewImagesCommand(test.NewFakeCli(&fakeClient{imageListFunc: tc.imageListFunc})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewImagesCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imageFormat string + imageListFunc func(options types.ImageListOptions) ([]types.ImageSummary, error) + }{ + { + name: "simple", + }, + { + name: "format", + imageFormat: "raw", + }, + { + name: "quiet-format", + args: []string{"-q"}, + imageFormat: "table", + }, + { + name: "match-name", + args: []string{"image"}, + imageListFunc: func(options types.ImageListOptions) ([]types.ImageSummary, error) { + assert.Check(t, is.Equal("image", options.Filters.Get("reference")[0])) + return []types.ImageSummary{{}}, nil + }, + }, + { + name: "filters", + args: []string{"--filter", "name=value"}, + imageListFunc: func(options types.ImageListOptions) ([]types.ImageSummary, error) { + assert.Check(t, is.Equal("value", options.Filters.Get("name")[0])) + return []types.ImageSummary{{}}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageListFunc: tc.imageListFunc}) + cli.SetConfigFile(&configfile.ConfigFile{ImagesFormat: tc.imageFormat}) + cmd := NewImagesCommand(cli) + 
cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("list-command-success.%s.golden", tc.name)) + } +} + +func TestNewListCommandAlias(t *testing.T) { + cmd := newListCommand(test.NewFakeCli(&fakeClient{})) + assert.Check(t, cmd.HasAlias("images")) + assert.Check(t, cmd.HasAlias("list")) + assert.Check(t, !cmd.HasAlias("other")) +} diff --git a/cli/cli/command/image/load.go b/cli/cli/command/image/load.go new file mode 100644 index 00000000..6809c620 --- /dev/null +++ b/cli/cli/command/image/load.go @@ -0,0 +1,76 @@ +package image + +import ( + "context" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type loadOptions struct { + input string + quiet bool +} + +// NewLoadCommand creates a new `docker load` command +func NewLoadCommand(dockerCli command.Cli) *cobra.Command { + var opts loadOptions + + cmd := &cobra.Command{ + Use: "load [OPTIONS]", + Short: "Load an image from a tar archive or STDIN", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLoad(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") + + return cmd +} + +func runLoad(dockerCli command.Cli, opts loadOptions) error { + + var input io.Reader = dockerCli.In() + if opts.input != "" { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. 
+ file, err := system.OpenSequential(opts.input) + if err != nil { + return err + } + defer file.Close() + input = file + } + + // To avoid getting stuck, verify that a tar file is given either in + // the input flag or through stdin and if not display an error message and exit. + if opts.input == "" && dockerCli.In().IsTerminal() { + return errors.Errorf("requested load from stdin, but stdin is empty") + } + + if !dockerCli.Out().IsTerminal() { + opts.quiet = true + } + response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) + } + + _, err = io.Copy(dockerCli.Out(), response.Body) + return err +} diff --git a/cli/cli/command/image/load_test.go b/cli/cli/command/image/load_test.go new file mode 100644 index 00000000..5fe4344f --- /dev/null +++ b/cli/cli/command/image/load_test.go @@ -0,0 +1,101 @@ +package image + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestNewLoadCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + isTerminalIn bool + expectedError string + imageLoadFunc func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) + }{ + { + name: "wrong-args", + args: []string{"arg"}, + expectedError: "accepts no arguments.", + }, + { + name: "input-to-terminal", + isTerminalIn: true, + expectedError: "requested load from stdin, but stdin is empty", + }, + { + name: "pull-error", + expectedError: "something went wrong", + imageLoadFunc: func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + return types.ImageLoadResponse{}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range 
testCases { + cli := test.NewFakeCli(&fakeClient{imageLoadFunc: tc.imageLoadFunc}) + cli.In().SetIsTerminal(tc.isTerminalIn) + cmd := NewLoadCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewLoadCommandInvalidInput(t *testing.T) { + expectedError := "open *" + cmd := NewLoadCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"--input", "*"}) + err := cmd.Execute() + assert.ErrorContains(t, err, expectedError) +} + +func TestNewLoadCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imageLoadFunc func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) + }{ + { + name: "simple", + imageLoadFunc: func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + return types.ImageLoadResponse{Body: ioutil.NopCloser(strings.NewReader("Success"))}, nil + }, + }, + { + name: "json", + imageLoadFunc: func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + json := "{\"ID\": \"1\"}" + return types.ImageLoadResponse{ + Body: ioutil.NopCloser(strings.NewReader(json)), + JSON: true, + }, nil + }, + }, + { + name: "input-file", + args: []string{"--input", "testdata/load-command-success.input.txt"}, + imageLoadFunc: func(input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + return types.ImageLoadResponse{Body: ioutil.NopCloser(strings.NewReader("Success"))}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageLoadFunc: tc.imageLoadFunc}) + cmd := NewLoadCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("load-command-success.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/image/prune.go b/cli/cli/command/image/prune.go new file mode 100644 index 00000000..bb16bbcf --- /dev/null +++ 
b/cli/cli/command/image/prune.go @@ -0,0 +1,101 @@ +package image + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for images +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused images", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const ( + allImageWarning = `WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue?` + danglingWarning = `WARNING! This will remove all dangling images. 
+Are you sure you want to continue?` +) + +func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := options.filter.Value().Clone() + pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all)) + pruneFilters = command.PruneFilters(dockerCli, pruneFilters) + + warning := danglingWarning + if options.all { + warning = allImageWarning + } + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return 0, "", nil + } + + report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) + if err != nil { + return 0, "", err + } + + if len(report.ImagesDeleted) > 0 { + var sb strings.Builder + sb.WriteString("Deleted Images:\n") + for _, st := range report.ImagesDeleted { + if st.Untagged != "" { + sb.WriteString("untagged: ") + sb.WriteString(st.Untagged) + sb.WriteByte('\n') + } else { + sb.WriteString("deleted: ") + sb.WriteString(st.Deleted) + sb.WriteByte('\n') + } + } + output = sb.String() + spaceReclaimed = report.SpaceReclaimed + } + + return spaceReclaimed, output, nil +} + +// RunPrune calls the Image Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter}) +} diff --git a/cli/cli/command/image/prune_test.go b/cli/cli/command/image/prune_test.go new file mode 100644 index 00000000..884a4854 --- /dev/null +++ b/cli/cli/command/image/prune_test.go @@ -0,0 +1,102 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNewPruneCommandErrors(t *testing.T) { + testCases := 
[]struct { + name string + args []string + expectedError string + imagesPruneFunc func(pruneFilter filters.Args) (types.ImagesPruneReport, error) + }{ + { + name: "wrong-args", + args: []string{"something"}, + expectedError: "accepts no arguments.", + }, + { + name: "prune-error", + args: []string{"--force"}, + expectedError: "something went wrong", + imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) { + return types.ImagesPruneReport{}, errors.Errorf("something went wrong") + }, + }, + } + for _, tc := range testCases { + cmd := NewPruneCommand(test.NewFakeCli(&fakeClient{ + imagesPruneFunc: tc.imagesPruneFunc, + })) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewPruneCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imagesPruneFunc func(pruneFilter filters.Args) (types.ImagesPruneReport, error) + }{ + { + name: "all", + args: []string{"--all"}, + imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) { + assert.Check(t, is.Equal("false", pruneFilter.Get("dangling")[0])) + return types.ImagesPruneReport{}, nil + }, + }, + { + name: "force-deleted", + args: []string{"--force"}, + imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) { + assert.Check(t, is.Equal("true", pruneFilter.Get("dangling")[0])) + return types.ImagesPruneReport{ + ImagesDeleted: []types.ImageDeleteResponseItem{{Deleted: "image1"}}, + SpaceReclaimed: 1, + }, nil + }, + }, + { + name: "label-filter", + args: []string{"--force", "--filter", "label=foobar"}, + imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) { + assert.Check(t, is.Equal("foobar", pruneFilter.Get("label")[0])) + return types.ImagesPruneReport{}, nil + }, + }, + { + name: "force-untagged", + args: []string{"--force"}, + imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, 
error) { + assert.Check(t, is.Equal("true", pruneFilter.Get("dangling")[0])) + return types.ImagesPruneReport{ + ImagesDeleted: []types.ImageDeleteResponseItem{{Untagged: "image1"}}, + SpaceReclaimed: 2, + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imagesPruneFunc: tc.imagesPruneFunc}) + cmd := NewPruneCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("prune-command-success.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/image/pull.go b/cli/cli/command/image/pull.go new file mode 100644 index 00000000..cf15fca7 --- /dev/null +++ b/cli/cli/command/image/pull.go @@ -0,0 +1,86 @@ +package image + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// PullOptions defines what and how to pull +type PullOptions struct { + remote string + all bool + platform string + quiet bool + untrusted bool +} + +// NewPullCommand creates a new `docker pull` command +func NewPullCommand(dockerCli command.Cli) *cobra.Command { + var opts PullOptions + + cmd := &cobra.Command{ + Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", + Short: "Pull an image or a repository from a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return RunPull(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress verbose output") + + command.AddPlatformFlag(flags, &opts.platform) + command.AddTrustVerificationFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + + return cmd +} + +// RunPull performs a pull 
against the engine based on the specified options +func RunPull(cli command.Cli, opts PullOptions) error { + distributionRef, err := reference.ParseNormalizedNamed(opts.remote) + switch { + case err != nil: + return err + case opts.all && !reference.IsNameOnly(distributionRef): + return errors.New("tag can't be used with --all-tags/-a") + case !opts.all && reference.IsNameOnly(distributionRef): + distributionRef = reference.TagNameOnly(distributionRef) + if tagged, ok := distributionRef.(reference.Tagged); ok && !opts.quiet { + fmt.Fprintf(cli.Out(), "Using default tag: %s\n", tagged.Tag()) + } + } + + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, AuthResolver(cli), distributionRef.String()) + if err != nil { + return err + } + + // Check if reference has a digest + _, isCanonical := distributionRef.(reference.Canonical) + if !opts.untrusted && !isCanonical { + err = trustedPull(ctx, cli, imgRefAndAuth, opts) + } else { + err = imagePullPrivileged(ctx, cli, imgRefAndAuth, opts) + } + if err != nil { + if strings.Contains(err.Error(), "when fetching 'plugin'") { + return errors.New(err.Error() + " - Use `docker plugin install`") + } + return err + } + fmt.Fprintln(cli.Out(), imgRefAndAuth.Reference().String()) + return nil +} diff --git a/cli/cli/command/image/pull_test.go b/cli/cli/command/image/pull_test.go new file mode 100644 index 00000000..4cb1ca10 --- /dev/null +++ b/cli/cli/command/image/pull_test.go @@ -0,0 +1,130 @@ +package image + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNewPullCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "wrong-args", + expectedError: "requires exactly 1 argument.", + 
args: []string{}, + }, + { + name: "invalid-name", + expectedError: "invalid reference format: repository name must be lowercase", + args: []string{"UPPERCASE_REPO"}, + }, + { + name: "all-tags-with-tag", + expectedError: "tag can't be used with --all-tags/-a", + args: []string{"--all-tags", "image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cmd := NewPullCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewPullCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + expectedTag string + }{ + { + name: "simple", + args: []string{"image:tag"}, + expectedTag: "image:tag", + }, + { + name: "simple-no-tag", + args: []string{"image"}, + expectedTag: "image:latest", + }, + { + name: "simple-quiet", + args: []string{"image"}, + flags: map[string]string{ + "quiet": "true", + }, + expectedTag: "image:latest", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + imagePullFunc: func(ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + assert.Check(t, is.Equal(tc.expectedTag, ref), tc.name) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }) + cmd := NewPullCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("pull-command-success.%s.golden", tc.name)) + } +} + +func TestNewPullCommandWithContentTrustErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + notaryFunc test.NotaryClientFuncType + }{ + { + name: "offline-notary-server", + notaryFunc: notary.GetOfflineNotaryRepository, + expectedError: "client is offline", + args: []string{"image:tag"}, + }, + { + name: "uninitialized-notary-server", + notaryFunc: notary.GetUninitializedNotaryRepository, + 
expectedError: "remote trust data does not exist", + args: []string{"image:tag"}, + }, + { + name: "empty-notary-server", + notaryFunc: notary.GetEmptyTargetsNotaryRepository, + expectedError: "No valid trust data for tag", + args: []string{"image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + imagePullFunc: func(ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), fmt.Errorf("shouldn't try to pull image") + }, + }, test.EnableContentTrust) + cli.SetNotaryClient(tc.notaryFunc) + cmd := NewPullCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + err := cmd.Execute() + assert.ErrorContains(t, err, tc.expectedError) + } +} diff --git a/cli/cli/command/image/push.go b/cli/cli/command/image/push.go new file mode 100644 index 00000000..de6c2ec3 --- /dev/null +++ b/cli/cli/command/image/push.go @@ -0,0 +1,70 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type pushOptions struct { + remote string + untrusted bool +} + +// NewPushCommand creates a new `docker push` command +func NewPushCommand(dockerCli command.Cli) *cobra.Command { + var opts pushOptions + + cmd := &cobra.Command{ + Use: "push [OPTIONS] NAME[:TAG]", + Short: "Push an image or a repository to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return RunPush(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + command.AddTrustSigningFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) + + return cmd +} + +// RunPush performs a push against the engine based on the specified options +func RunPush(dockerCli command.Cli, opts pushOptions) error { + ref, err := 
reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + ctx := context.Background() + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") + + if !opts.untrusted { + return TrustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) + } + + responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/cli/cli/command/image/push_test.go b/cli/cli/command/image/push_test.go new file mode 100644 index 00000000..75798aaa --- /dev/null +++ b/cli/cli/command/image/push_test.go @@ -0,0 +1,71 @@ +package image + +import ( + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestNewPushCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + imagePushFunc func(ref string, options types.ImagePushOptions) (io.ReadCloser, error) + }{ + { + name: "wrong-args", + args: []string{}, + expectedError: "requires exactly 1 argument.", + }, + { + name: "invalid-name", + args: []string{"UPPERCASE_REPO"}, + expectedError: "invalid reference format: repository name must be lowercase", + }, + { + name: "push-failed", + args: []string{"image:repo"}, + expectedError: "Failed to push", + imagePushFunc: func(ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), errors.Errorf("Failed to push") 
+ }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imagePushFunc: tc.imagePushFunc}) + cmd := NewPushCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewPushCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + }{ + { + name: "simple", + args: []string{"image:tag"}, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + imagePushFunc: func(ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }) + cmd := NewPushCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + } +} diff --git a/cli/cli/command/image/remove.go b/cli/cli/command/image/remove.go new file mode 100644 index 00000000..a4c72e44 --- /dev/null +++ b/cli/cli/command/image/remove.go @@ -0,0 +1,86 @@ +package image + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + noPrune bool +} + +// NewRemoveCommand creates a new `docker remove` command +func NewRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rmi [OPTIONS] IMAGE [IMAGE...]", + Short: "Remove one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, opts, args) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") + flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") + + return cmd +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + cmd := 
*NewRemoveCommand(dockerCli) + cmd.Aliases = []string{"rmi", "remove"} + cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" + return &cmd +} + +func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error { + client := dockerCli.Client() + ctx := context.Background() + + options := types.ImageRemoveOptions{ + Force: opts.force, + PruneChildren: !opts.noPrune, + } + + var errs []string + var fatalErr = false + for _, img := range images { + dels, err := client.ImageRemove(ctx, img, options) + if err != nil { + if !apiclient.IsErrNotFound(err) { + fatalErr = true + } + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) + } + } + } + } + + if len(errs) > 0 { + msg := strings.Join(errs, "\n") + if !opts.force || fatalErr { + return errors.New(msg) + } + fmt.Fprintln(dockerCli.Err(), msg) + } + return nil +} diff --git a/cli/cli/command/image/remove_test.go b/cli/cli/command/image/remove_test.go new file mode 100644 index 00000000..6db2e031 --- /dev/null +++ b/cli/cli/command/image/remove_test.go @@ -0,0 +1,134 @@ +package image + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +type notFound struct { + imageID string +} + +func (n notFound) Error() string { + return fmt.Sprintf("Error: No such image: %s", n.imageID) +} + +func (n notFound) NotFound() bool { + return true +} + +func TestNewRemoveCommandAlias(t *testing.T) { + cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{})) + assert.Check(t, cmd.HasAlias("rmi")) + assert.Check(t, cmd.HasAlias("remove")) + assert.Check(t, !cmd.HasAlias("other")) +} + +func TestNewRemoveCommandErrors(t *testing.T) { + testCases := []struct { + name string + 
args []string + expectedError string + imageRemoveFunc func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + }{ + { + name: "wrong args", + expectedError: "requires at least 1 argument.", + }, + { + name: "ImageRemove fail with force option", + args: []string{"-f", "image1"}, + expectedError: "error removing image", + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, is.Equal("image1", image)) + return []types.ImageDeleteResponseItem{}, errors.Errorf("error removing image") + }, + }, + { + name: "ImageRemove fail", + args: []string{"arg1"}, + expectedError: "error removing image", + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, !options.Force) + assert.Check(t, options.PruneChildren) + return []types.ImageDeleteResponseItem{}, errors.Errorf("error removing image") + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cmd := NewRemoveCommand(test.NewFakeCli(&fakeClient{ + imageRemoveFunc: tc.imageRemoveFunc, + })) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + }) + } +} + +func TestNewRemoveCommandSuccess(t *testing.T) { + testCases := []struct { + name string + args []string + imageRemoveFunc func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + expectedStderr string + }{ + { + name: "Image Deleted", + args: []string{"image1"}, + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, is.Equal("image1", image)) + return []types.ImageDeleteResponseItem{{Deleted: image}}, nil + }, + }, + { + name: "Image not found with force option", + args: []string{"-f", "image1"}, + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) 
([]types.ImageDeleteResponseItem, error) { + assert.Check(t, is.Equal("image1", image)) + assert.Check(t, is.Equal(true, options.Force)) + return []types.ImageDeleteResponseItem{}, notFound{"image1"} + }, + expectedStderr: "Error: No such image: image1\n", + }, + + { + name: "Image Untagged", + args: []string{"image1"}, + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + assert.Check(t, is.Equal("image1", image)) + return []types.ImageDeleteResponseItem{{Untagged: image}}, nil + }, + }, + { + name: "Image Deleted and Untagged", + args: []string{"image1", "image2"}, + imageRemoveFunc: func(image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + if image == "image1" { + return []types.ImageDeleteResponseItem{{Untagged: image}}, nil + } + return []types.ImageDeleteResponseItem{{Deleted: image}}, nil + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{imageRemoveFunc: tc.imageRemoveFunc}) + cmd := NewRemoveCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(tc.expectedStderr, cli.ErrBuffer().String())) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("remove-command-success.%s.golden", tc.name)) + }) + } +} diff --git a/cli/cli/command/image/save.go b/cli/cli/command/image/save.go new file mode 100644 index 00000000..218a9a79 --- /dev/null +++ b/cli/cli/command/image/save.go @@ -0,0 +1,61 @@ +package image + +import ( + "context" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type saveOptions struct { + images []string + output string +} + +// NewSaveCommand creates a new `docker save` command +func NewSaveCommand(dockerCli command.Cli) *cobra.Command { + var opts saveOptions + + cmd := &cobra.Command{ + Use: "save [OPTIONS] 
IMAGE [IMAGE...]", + Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.images = args + return RunSave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +// RunSave performs a save against the engine based on the specified options +func RunSave(dockerCli command.Cli, opts saveOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("cowardly refusing to save to a terminal. Use the -o flag or redirect") + } + + if err := command.ValidateOutputPath(opts.output); err != nil { + return errors.Wrap(err, "failed to save image") + } + + responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/cli/cli/command/image/save_test.go b/cli/cli/command/image/save_test.go new file mode 100644 index 00000000..cc960ba0 --- /dev/null +++ b/cli/cli/command/image/save_test.go @@ -0,0 +1,108 @@ +package image + +import ( + "io" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNewSaveCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + isTerminal bool + expectedError string + imageSaveFunc func(images []string) (io.ReadCloser, error) + }{ + { + name: "wrong args", + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + { + name: "output to terminal", + args: []string{"output", "file", "arg1"}, + isTerminal: true, + expectedError: "cowardly refusing to save to a terminal. 
Use the -o flag or redirect", + }, + { + name: "ImageSave fail", + args: []string{"arg1"}, + isTerminal: false, + expectedError: "error saving image", + imageSaveFunc: func(images []string) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), errors.Errorf("error saving image") + }, + }, + { + name: "output directory does not exist", + args: []string{"-o", "fakedir/out.tar", "arg1"}, + expectedError: "failed to save image: invalid output path: directory \"fakedir\" does not exist", + }, + { + name: "output file is irregular", + args: []string{"-o", "/dev/null", "arg1"}, + expectedError: "failed to save image: invalid output path: \"/dev/null\" must be a directory or a regular file", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{imageSaveFunc: tc.imageSaveFunc}) + cli.Out().SetIsTerminal(tc.isTerminal) + cmd := NewSaveCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNewSaveCommandSuccess(t *testing.T) { + testCases := []struct { + args []string + isTerminal bool + imageSaveFunc func(images []string) (io.ReadCloser, error) + deferredFunc func() + }{ + { + args: []string{"-o", "save_tmp_file", "arg1"}, + isTerminal: true, + imageSaveFunc: func(images []string) (io.ReadCloser, error) { + assert.Assert(t, is.Len(images, 1)) + assert.Check(t, is.Equal("arg1", images[0])) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + deferredFunc: func() { + os.Remove("save_tmp_file") + }, + }, + { + args: []string{"arg1", "arg2"}, + isTerminal: false, + imageSaveFunc: func(images []string) (io.ReadCloser, error) { + assert.Assert(t, is.Len(images, 2)) + assert.Check(t, is.Equal("arg1", images[0])) + assert.Check(t, is.Equal("arg2", images[1])) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + } + for _, tc := range testCases { + cmd := NewSaveCommand(test.NewFakeCli(&fakeClient{ + imageSaveFunc: func(images 
[]string) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), nil + }, + })) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + if tc.deferredFunc != nil { + tc.deferredFunc() + } + } +} diff --git a/cli/cli/command/image/tag.go b/cli/cli/command/image/tag.go new file mode 100644 index 00000000..39d4caaf --- /dev/null +++ b/cli/cli/command/image/tag.go @@ -0,0 +1,41 @@ +package image + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type tagOptions struct { + image string + name string +} + +// NewTagCommand creates a new `docker tag` command +func NewTagCommand(dockerCli command.Cli) *cobra.Command { + var opts tagOptions + + cmd := &cobra.Command{ + Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", + Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + opts.name = args[1] + return runTag(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTag(dockerCli command.Cli, opts tagOptions) error { + ctx := context.Background() + + return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) +} diff --git a/cli/cli/command/image/tag_test.go b/cli/cli/command/image/tag_test.go new file mode 100644 index 00000000..9c43f3fe --- /dev/null +++ b/cli/cli/command/image/tag_test.go @@ -0,0 +1,41 @@ +package image + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCliNewTagCommandErrors(t *testing.T) { + testCases := [][]string{ + {}, + {"image1"}, + {"image1", "image2", "image3"}, + } + expectedError := "\"tag\" requires exactly 2 arguments." 
+ for _, args := range testCases { + cmd := NewTagCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), expectedError) + } +} + +func TestCliNewTagCommand(t *testing.T) { + cmd := NewTagCommand( + test.NewFakeCli(&fakeClient{ + imageTagFunc: func(image string, ref string) error { + assert.Check(t, is.Equal("image1", image)) + assert.Check(t, is.Equal("image2", ref)) + return nil + }, + })) + cmd.SetArgs([]string{"image1", "image2"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + value, _ := cmd.Flags().GetBool("interspersed") + assert.Check(t, !value) +} diff --git a/cli/cli/command/image/testdata/Dockerfile.test b/cli/cli/command/image/testdata/Dockerfile.test new file mode 100644 index 00000000..5b54d9b4 --- /dev/null +++ b/cli/cli/command/image/testdata/Dockerfile.test @@ -0,0 +1,3 @@ +FROM busybox +ADD ./README.md / +CMD ["cat", "/README.md"] diff --git a/cli/cli/command/image/testdata/gittar.test b/cli/cli/command/image/testdata/gittar.test new file mode 100644 index 0000000000000000000000000000000000000000..86ccb3187b4759770e6f472406e723fc822a0fb3 GIT binary patch literal 10240 zcmeIzO;5rw90u^6`4n|$16{ioF!4YfMh}Y#M~NYASCBZY8N=YWx2R#sIFJ!YFg(5N z<-d0AZ+o^6wVup!r&T_AwJNhkaQsGnlTrqot#9?MFUCQ{xnx{+nb_2$Sg(K9mBO2<>9`o_o}r;#?S z@1KaJaim#awa$7$m|P=)#F;l+Oy{|6)UDCQn`lU%@p?r5jw$U%_55=xL_ize<-m(C zJpcEjr9O`zc;?9-1tg!fvuS+@TCSZtPHP2Qnql*5SVW><&{Vtb)i8{_iCv zZxYg9d1#1>_p*9hu1a;WGQ0A_p=!rn{T_`;t+LMfTjx{joif&rMqY`z*o_Vj-qcNA ziKr^Z=3x5hv8#Wy;W{QBW>1;SH}&T{Zj*5c(jUwwp}*}eHP?JFY-p;h~9Vf zYjPd0$1OVqem-``x^voX=>N>WbNl?8w9UV9DfvIg{M5bA(@g4oS=@j BJ+J@( literal 0 HcmV?d00001 diff --git a/cli/cli/command/image/trust.go b/cli/cli/command/image/trust.go new file mode 100644 index 00000000..75f3ab1c --- /dev/null +++ b/cli/cli/command/image/trust.go @@ -0,0 +1,363 @@ +package image + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "sort" + + 
"github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/streams" + "github.com/docker/cli/cli/trust" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +type target struct { + name string + digest digest.Digest + size int64 +} + +// TrustedPush handles content trust pushing of an image +func TrustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) +} + +// PushTrustedReference pushes a canonical reference to the trust server. +// nolint: gocyclo +func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { + // If it is a trusted push we would like to find the target entry which match the + // tag provided in the function and then do an AddTarget later. + target := &client.Target{} + // Count the times of calling for handleTarget, + // if it is called more that once, that should be considered an error in a trusted push. + cnt := 0 + handleTarget := func(msg jsonmessage.JSONMessage) { + cnt++ + if cnt > 1 { + // handleTarget should only be called once. This will be treated as an error. 
+ return + } + + var pushResult types.PushResult + err := json.Unmarshal(*msg.Aux, &pushResult) + if err == nil && pushResult.Tag != "" { + if dgst, err := digest.Parse(pushResult.Digest); err == nil { + h, err := hex.DecodeString(dgst.Hex()) + if err != nil { + target = nil + return + } + target.Name = pushResult.Tag + target.Hashes = data.Hashes{string(dgst.Algorithm()): h} + target.Length = int64(pushResult.Size) + } + } + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + default: + // We want trust signatures to always take an explicit tag, + // otherwise it will act as an untrusted push. + if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil { + return err + } + fmt.Fprintln(streams.Err(), "No tag specified, skipping trust metadata push") + return nil + } + + if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil { + return err + } + + if cnt > 1 { + return errors.Errorf("internal error: only one call to handleTarget expected") + } + + if target == nil { + return errors.Errorf("no targets found, please provide a specific tag in order to sign it") + } + + fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata") + + repo, err := trust.GetNotaryRepository(streams.In(), streams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull") + if err != nil { + return errors.Wrap(err, "error establishing connection to trust repository") + } + + // get the latest repository metadata so we can figure out which roles to sign + _, err = repo.ListTargets() + + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + keys := repo.GetCryptoService().ListKeys(data.CanonicalRootRole) + var rootKeyID string + // always select the first root key + if len(keys) > 0 { + sort.Strings(keys) + rootKeyID = keys[0] + } else { + rootPublicKey, 
err := repo.GetCryptoService().Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return err + } + rootKeyID = rootPublicKey.ID() + } + + // Initialize the notary repository with a remotely managed snapshot key + if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return trust.NotaryError(repoInfo.Name.Name(), err) + } + fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name()) + err = repo.AddTarget(target, data.CanonicalTargetsRole) + case nil: + // already initialized and we have successfully downloaded the latest metadata + err = AddTargetToAllSignableRoles(repo, target) + default: + return trust.NotaryError(repoInfo.Name.Name(), err) + } + + if err == nil { + err = repo.Publish() + } + + if err != nil { + err = errors.Wrapf(err, "failed to sign %s:%s", repoInfo.Name.Name(), tag) + return trust.NotaryError(repoInfo.Name.Name(), err) + } + + fmt.Fprintf(streams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag) + return nil +} + +// AddTargetToAllSignableRoles attempts to add the image target to all the top level delegation roles we can +// (based on whether we have the signing key and whether the role's path allows +// us to). +// If there are no delegation roles, we add to the targets role. +func AddTargetToAllSignableRoles(repo client.Repository, target *client.Target) error { + signableRoles, err := trust.GetSignableRoles(repo, target) + if err != nil { + return err + } + + return repo.AddTarget(target, signableRoles...) 
+} + +// imagePushPrivileged push the image +func imagePushPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref reference.Reference, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + } + + return cli.Client().ImagePush(ctx, reference.FamiliarString(ref), options) +} + +// trustedPull handles content trust pulling of an image +func trustedPull(ctx context.Context, cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth, opts PullOptions) error { + refs, err := getTrustedPullTargets(cli, imgRefAndAuth) + if err != nil { + return err + } + + ref := imgRefAndAuth.Reference() + for i, r := range refs { + displayTag := r.name + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), reference.FamiliarName(ref), displayTag, r.digest) + + trustedRef, err := reference.WithDigest(reference.TrimNamed(ref), r.digest) + if err != nil { + return err + } + updatedImgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, AuthResolver(cli), trustedRef.String()) + if err != nil { + return err + } + if err := imagePullPrivileged(ctx, cli, updatedImgRefAndAuth, PullOptions{ + all: false, + platform: opts.platform, + quiet: opts.quiet, + remote: opts.remote, + }); err != nil { + return err + } + + tagged, err := reference.WithTag(reference.TrimNamed(ref), r.name) + if err != nil { + return err + } + + if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { + return err + } + } + return nil +} + +func getTrustedPullTargets(cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth) ([]target, error) { + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly) + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to 
trust repository") + } + + ref := imgRefAndAuth.Reference() + tagged, isTagged := ref.(reference.NamedTagged) + if !isTagged { + // List all targets + targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(ref.Name(), err) + } + var refs []target + for _, tgt := range targets { + t, err := convertTarget(tgt.Target) + if err != nil { + fmt.Fprintf(cli.Err(), "Skipping target for %q\n", reference.FamiliarName(ref)) + continue + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { + continue + } + refs = append(refs, t) + } + if len(refs) == 0 { + return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name())) + } + return refs, nil + } + + t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(ref.Name(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag())) + } + + logrus.Debugf("retrieving target for %s role", t.Role) + r, err := convertTarget(t.Target) + return []target{r}, err +} + +// imagePullPrivileged pulls the image and displays it to the output +func imagePullPrivileged(ctx context.Context, cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth, opts PullOptions) error { + ref := reference.FamiliarString(imgRefAndAuth.Reference()) + + encodedAuth, err := command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig()) + if err != nil { + return err + } + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, 
imgRefAndAuth.RepoInfo().Index, "pull") + options := types.ImagePullOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + All: opts.all, + Platform: opts.platform, + } + responseBody, err := cli.Client().ImagePull(ctx, ref, options) + if err != nil { + return err + } + defer responseBody.Close() + + out := cli.Out() + if opts.quiet { + out = streams.NewOut(ioutil.Discard) + } + return jsonmessage.DisplayJSONMessagesToStream(responseBody, out, nil) +} + +// TrustedReference returns the canonical trusted reference for an image reference +func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, rs, AuthResolver(cli), ref.String()) + if err != nil { + return nil, err + } + + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, []string{"pull"}) + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to trust repository") + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(imgRefAndAuth.RepoInfo().Name.Name(), err) + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(imgRefAndAuth.RepoInfo().Name.Name(), client.ErrNoSuchTarget(ref.Tag())) + } + r, err := convertTarget(t.Target) + if err != nil { + return nil, err + + } + return reference.WithDigest(reference.TrimNamed(ref), r.digest) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + name: t.Name, + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +// TagTrusted tags a trusted 
ref +// nolint: interfacer +func TagTrusted(ctx context.Context, cli command.Cli, trustedRef reference.Canonical, ref reference.NamedTagged) error { + // Use familiar references when interacting with client and output + familiarRef := reference.FamiliarString(ref) + trustedFamiliarRef := reference.FamiliarString(trustedRef) + + fmt.Fprintf(cli.Err(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef) + + return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef) +} + +// AuthResolver returns an auth resolver function from a command.Cli +func AuthResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { + return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { + return command.ResolveAuthConfig(ctx, cli, index) + } +} diff --git a/cli/cli/command/image/trust_test.go b/cli/cli/command/image/trust_test.go new file mode 100644 index 00000000..97585a72 --- /dev/null +++ b/cli/cli/command/image/trust_test.go @@ -0,0 +1,73 @@ +package image + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/cli/cli/trust" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustpinning" + "gotest.tools/assert" +) + +func unsetENV() { + os.Unsetenv("DOCKER_CONTENT_TRUST") + os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") +} + +func TestENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + output, err := trust.Server(indexInfo) + expectedStr := "https://notary-test.com:5000" + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} + +func TestHTTPENVTrustServer(t *testing.T) { + defer 
unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + _, err := trust.Server(indexInfo) + if err == nil { + t.Fatal("Expected error with invalid scheme") + } +} + +func TestOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: true} + output, err := trust.Server(indexInfo) + if err != nil || output != trust.NotaryServer { + t.Fatalf("Expected server to be %s, got %s", trust.NotaryServer, output) + } +} + +func TestNonOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: false} + output, err := trust.Server(indexInfo) + expectedStr := "https://" + indexInfo.Name + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} + +func TestAddTargetToAllSignableRolesError(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever("password"), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + target := client.Target{} + err = AddTargetToAllSignableRoles(notaryRepo, &target) + assert.Error(t, err, "client is offline") +} diff --git a/cli/cli/command/inspect/inspector.go b/cli/cli/command/inspect/inspector.go new file mode 100644 index 00000000..aef31e62 --- /dev/null +++ b/cli/cli/command/inspect/inspector.go @@ -0,0 +1,199 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "io" + "strings" + "text/template" + + "github.com/docker/cli/cli" + "github.com/docker/cli/templates" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Inspector defines an interface to implement to process elements +type Inspector interface { + Inspect(typedElement 
interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. +func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// NewTemplateInspectorFromString creates a new TemplateInspector from a string +// which is compiled into a template. +func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { + if tmplStr == "" { + return NewIndentedInspector(out), nil + } + + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, errors.Errorf("Template parsing error: %s", err) + } + return NewTemplateInspector(out, tmpl), nil +} + +// GetRefFunc is a function which used by Inspect to fetch an object from a +// reference +type GetRefFunc func(ref string) (interface{}, []byte, error) + +// Inspect fetches objects by reference using GetRefFunc and writes the json +// representation to the output writer. 
+func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { + inspector, err := NewTemplateInspectorFromString(out, tmplStr) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErrs []string + for _, ref := range references { + element, raw, err := getRef(ref) + if err != nil { + inspectErrs = append(inspectErrs, err.Error()) + continue + } + + if err := inspector.Inspect(element, raw); err != nil { + inspectErrs = append(inspectErrs, err.Error()) + } + } + + if err := inspector.Flush(); err != nil { + logrus.Errorf("%s\n", err) + } + + if len(inspectErrs) != 0 { + return cli.StatusError{ + StatusCode: 1, + Status: strings.Join(inspectErrs, "\n"), + } + } + return nil +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { + buffer := new(bytes.Buffer) + if err := i.tmpl.Execute(buffer, typedElement); err != nil { + if rawElement == nil { + return errors.Errorf("Template parsing error: %v", err) + } + return i.tryRawInspectFallback(rawElement) + } + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// tryRawInspectFallback executes the inspect template with a raw interface. +// This allows docker cli to parse inspect structs injected with Swarm fields. 
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { + var raw interface{} + buffer := new(bytes.Buffer) + rdr := bytes.NewReader(rawElement) + dec := json.NewDecoder(rdr) + dec.UseNumber() + + if rawErr := dec.Decode(&raw); rawErr != nil { + return errors.Errorf("unable to read inspect data: %v", rawErr) + } + + tmplMissingKey := i.tmpl.Option("missingkey=error") + if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { + return errors.Errorf("Template parsing error: %v", rawErr) + } + + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *TemplateInspector) Flush() error { + if i.buffer.Len() == 0 { + _, err := io.WriteString(i.outputStream, "\n") + return err + } + _, err := io.Copy(i.outputStream, i.buffer) + return err +} + +// IndentedInspector uses a buffer to stop the indented representation of an element. +type IndentedInspector struct { + outputStream io.Writer + elements []interface{} + rawElements [][]byte +} + +// NewIndentedInspector generates a new IndentedInspector. +func NewIndentedInspector(outputStream io.Writer) Inspector { + return &IndentedInspector{ + outputStream: outputStream, + } +} + +// Inspect writes the raw element with an indented json format. +func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { + if rawElement != nil { + i.rawElements = append(i.rawElements, rawElement) + } else { + i.elements = append(i.elements, typedElement) + } + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. 
+func (i *IndentedInspector) Flush() error { + if len(i.elements) == 0 && len(i.rawElements) == 0 { + _, err := io.WriteString(i.outputStream, "[]\n") + return err + } + + var buffer io.Reader + if len(i.rawElements) > 0 { + bytesBuffer := new(bytes.Buffer) + bytesBuffer.WriteString("[") + for idx, r := range i.rawElements { + bytesBuffer.Write(r) + if idx < len(i.rawElements)-1 { + bytesBuffer.WriteString(",") + } + } + bytesBuffer.WriteString("]") + indented := new(bytes.Buffer) + if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { + return err + } + buffer = indented + } else { + b, err := json.MarshalIndent(i.elements, "", " ") + if err != nil { + return err + } + buffer = bytes.NewReader(b) + } + + if _, err := io.Copy(i.outputStream, buffer); err != nil { + return err + } + _, err := io.WriteString(i.outputStream, "\n") + return err +} diff --git a/cli/cli/command/inspect/inspector_test.go b/cli/cli/command/inspect/inspector_test.go new file mode 100644 index 00000000..f4df3684 --- /dev/null +++ b/cli/cli/command/inspect/inspector_test.go @@ -0,0 +1,259 @@ +package inspect + +import ( + "bytes" + "strings" + "testing" + + "github.com/docker/cli/templates" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +type testElement struct { + DNS string `json:"Dns"` +} + +func TestTemplateInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Flush(); err != nil { + t.Fatal(err) 
+ } + if b.String() != "\n" { + t.Fatalf("Expected `\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorTemplateError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Foo}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + err = i.Inspect(testElement{"0.0.0.0"}, nil) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorRawFallback(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorRawFallbackError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n1.1.1.1\n" { + t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got `%s`", b.String()) + } +} + +func 
TestIndentedInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + }, + { + "Dns": "1.1.1.1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := "[]\n" + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorRawElements(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0", + "Node": "0" + }, + { + "Dns": "1.1.1.1", + "Node": "1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +// moby/moby#32235 +// This test verifies that even if `tryRawInspectFallback` is called the fields containing +// numerical values are displayed correctly. 
+// For example, `docker inspect --format "{{.Id}} {{.Size}} alpine` and +// `docker inspect --format "{{.ID}} {{.Size}} alpine" will have the same output which is +// sha256:651aa95985aa4a17a38ffcf71f598ec461924ca96865facc2c5782ef2d2be07f 3983636 +func TestTemplateInspectorRawFallbackNumber(t *testing.T) { + // Using typedElem to automatically fall to tryRawInspectFallback. + typedElem := struct { + ID string `json:"Id"` + }{"ad3"} + testcases := []struct { + raw []byte + exp string + }{ + {raw: []byte(`{"Id": "ad3", "Size": 53317}`), exp: "53317 ad3\n"}, + {raw: []byte(`{"Id": "ad3", "Size": 53317.102}`), exp: "53317.102 ad3\n"}, + {raw: []byte(`{"Id": "ad3", "Size": 53317.0}`), exp: "53317.0 ad3\n"}, + } + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Size}} {{.Id}}") + assert.NilError(t, err) + + i := NewTemplateInspector(b, tmpl) + for _, tc := range testcases { + err = i.Inspect(typedElem, tc.raw) + assert.NilError(t, err) + + err = i.Flush() + assert.NilError(t, err) + + assert.Check(t, is.Equal(tc.exp, b.String())) + b.Reset() + } +} diff --git a/cli/cli/command/manifest/annotate.go b/cli/cli/command/manifest/annotate.go new file mode 100644 index 00000000..e6c47394 --- /dev/null +++ b/cli/cli/command/manifest/annotate.go @@ -0,0 +1,97 @@ +package manifest + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/store" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type annotateOptions struct { + target string // the target manifest list name (also transaction ID) + image string // the manifest to annotate within the list + variant string // an architecture variant + os string + arch string + osFeatures []string +} + +// NewAnnotateCommand creates a new `docker manifest annotate` command +func newAnnotateCommand(dockerCli command.Cli) *cobra.Command { + var opts annotateOptions + + cmd := &cobra.Command{ 
+ Use: "annotate [OPTIONS] MANIFEST_LIST MANIFEST", + Short: "Add additional information to a local image manifest", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.target = args[0] + opts.image = args[1] + return runManifestAnnotate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVar(&opts.os, "os", "", "Set operating system") + flags.StringVar(&opts.arch, "arch", "", "Set architecture") + flags.StringSliceVar(&opts.osFeatures, "os-features", []string{}, "Set operating system feature") + flags.StringVar(&opts.variant, "variant", "", "Set architecture variant") + + return cmd +} + +func runManifestAnnotate(dockerCli command.Cli, opts annotateOptions) error { + targetRef, err := normalizeReference(opts.target) + if err != nil { + return errors.Wrapf(err, "annotate: error parsing name for manifest list %s", opts.target) + } + imgRef, err := normalizeReference(opts.image) + if err != nil { + return errors.Wrapf(err, "annotate: error parsing name for manifest %s", opts.image) + } + + manifestStore := dockerCli.ManifestStore() + imageManifest, err := manifestStore.Get(targetRef, imgRef) + switch { + case store.IsNotFound(err): + return fmt.Errorf("manifest for image %s does not exist in %s", opts.image, opts.target) + case err != nil: + return err + } + + // Update the mf + if imageManifest.Descriptor.Platform == nil { + imageManifest.Descriptor.Platform = new(ocispec.Platform) + } + if opts.os != "" { + imageManifest.Descriptor.Platform.OS = opts.os + } + if opts.arch != "" { + imageManifest.Descriptor.Platform.Architecture = opts.arch + } + for _, osFeature := range opts.osFeatures { + imageManifest.Descriptor.Platform.OSFeatures = appendIfUnique(imageManifest.Descriptor.Platform.OSFeatures, osFeature) + } + if opts.variant != "" { + imageManifest.Descriptor.Platform.Variant = opts.variant + } + + if !isValidOSArch(imageManifest.Descriptor.Platform.OS, imageManifest.Descriptor.Platform.Architecture) { + 
// appendIfUnique appends str to list unless an equal entry is already
// present, returning the (possibly unchanged) slice.
func appendIfUnique(list []string, str string) []string {
	for _, existing := range list {
		if existing == str {
			return list
		}
	}
	return append(list, str)
}
not exist" + assert.ErrorContains(t, cmd.Execute(), expectedError) + + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + cmd.Flags().Set("os", "freebsd") + cmd.Flags().Set("arch", "fake") + cmd.Flags().Set("os-features", "feature1") + cmd.Flags().Set("variant", "v7") + expectedError = "manifest entry for image has unsupported os/arch combination" + assert.ErrorContains(t, cmd.Execute(), expectedError) + + cmd.Flags().Set("arch", "arm") + assert.NilError(t, cmd.Execute()) + + cmd = newInspectCommand(cli) + err = cmd.Flags().Set("verbose", "true") + assert.NilError(t, err) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + assert.NilError(t, cmd.Execute()) + actual := cli.OutBuffer() + expected := golden.Get(t, "inspect-annotate.golden") + assert.Check(t, is.Equal(string(expected), actual.String())) +} diff --git a/cli/cli/command/manifest/client_test.go b/cli/cli/command/manifest/client_test.go new file mode 100644 index 00000000..c5cb9ea1 --- /dev/null +++ b/cli/cli/command/manifest/client_test.go @@ -0,0 +1,56 @@ +package manifest + +import ( + "context" + + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/cli/registry/client" + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" +) + +type fakeRegistryClient struct { + getManifestFunc func(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) + getManifestListFunc func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) + mountBlobFunc func(ctx context.Context, source reference.Canonical, target reference.Named) error + putManifestFunc func(ctx context.Context, source reference.Named, mf distribution.Manifest) (digest.Digest, error) + getTagsFunc func(ctx context.Context, ref reference.Named) ([]string, error) +} + +func (c *fakeRegistryClient) GetManifest(ctx context.Context, ref reference.Named) 
(manifesttypes.ImageManifest, error) { + if c.getManifestFunc != nil { + return c.getManifestFunc(ctx, ref) + } + return manifesttypes.ImageManifest{}, nil +} + +func (c *fakeRegistryClient) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) { + if c.getManifestListFunc != nil { + return c.getManifestListFunc(ctx, ref) + } + return nil, nil +} + +func (c *fakeRegistryClient) MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error { + if c.mountBlobFunc != nil { + return c.mountBlobFunc(ctx, source, target) + } + return nil +} + +func (c *fakeRegistryClient) PutManifest(ctx context.Context, ref reference.Named, mf distribution.Manifest) (digest.Digest, error) { + if c.putManifestFunc != nil { + return c.putManifestFunc(ctx, ref, mf) + } + return digest.Digest(""), nil +} + +func (c *fakeRegistryClient) GetTags(ctx context.Context, ref reference.Named) ([]string, error) { + if c.getTagsFunc != nil { + return c.getTagsFunc(ctx, ref) + } + return nil, nil +} + +var _ client.RegistryClient = &fakeRegistryClient{} diff --git a/cli/cli/command/manifest/cmd.go b/cli/cli/command/manifest/cmd.go new file mode 100644 index 00000000..8cc4987a --- /dev/null +++ b/cli/cli/command/manifest/cmd.go @@ -0,0 +1,45 @@ +package manifest + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + + "github.com/spf13/cobra" +) + +// NewManifestCommand returns a cobra command for `manifest` subcommands +func NewManifestCommand(dockerCli command.Cli) *cobra.Command { + // use dockerCli as command.Cli + cmd := &cobra.Command{ + Use: "manifest COMMAND", + Short: "Manage Docker image manifests and manifest lists", + Long: manifestDescription, + Args: cli.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) + }, + Annotations: map[string]string{"experimentalCLI": ""}, + } + cmd.AddCommand( + newCreateListCommand(dockerCli), + 
newInspectCommand(dockerCli), + newAnnotateCommand(dockerCli), + newPushListCommand(dockerCli), + ) + return cmd +} + +var manifestDescription = ` +The **docker manifest** command has subcommands for managing image manifests and +manifest lists. A manifest list allows you to use one name to refer to the same image +built for multiple architectures. + +To see help for a subcommand, use: + + docker manifest CMD --help + +For full details on using docker manifest lists, see the registry v2 specification. + +` diff --git a/cli/cli/command/manifest/create_list.go b/cli/cli/command/manifest/create_list.go new file mode 100644 index 00000000..f2e54dcf --- /dev/null +++ b/cli/cli/command/manifest/create_list.go @@ -0,0 +1,82 @@ +package manifest + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/store" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOpts struct { + amend bool + insecure bool +} + +func newCreateListCommand(dockerCli command.Cli) *cobra.Command { + opts := createOpts{} + + cmd := &cobra.Command{ + Use: "create MANIFEST_LIST MANIFEST [MANIFEST...]", + Short: "Create a local manifest list for annotating and pushing to a registry", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return createManifestList(dockerCli, args, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.insecure, "insecure", false, "Allow communication with an insecure registry") + flags.BoolVarP(&opts.amend, "amend", "a", false, "Amend an existing manifest list") + return cmd +} + +func createManifestList(dockerCli command.Cli, args []string, opts createOpts) error { + newRef := args[0] + targetRef, err := normalizeReference(newRef) + if err != nil { + return errors.Wrapf(err, "error parsing name for manifest list %s", newRef) + } + + _, err = registry.ParseRepositoryInfo(targetRef) + if err 
!= nil { + return errors.Wrapf(err, "error parsing repository name for manifest list %s", newRef) + } + + manifestStore := dockerCli.ManifestStore() + _, err = manifestStore.GetList(targetRef) + switch { + case store.IsNotFound(err): + // New manifest list + case err != nil: + return err + case !opts.amend: + return errors.Errorf("refusing to amend an existing manifest list with no --amend flag") + } + + ctx := context.Background() + // Now create the local manifest list transaction by looking up the manifest schemas + // for the constituent images: + manifests := args[1:] + for _, manifestRef := range manifests { + namedRef, err := normalizeReference(manifestRef) + if err != nil { + // TODO: wrap error? + return err + } + + manifest, err := getManifest(ctx, dockerCli, targetRef, namedRef, opts.insecure) + if err != nil { + return err + } + if err := manifestStore.Save(targetRef, namedRef, manifest); err != nil { + return err + } + } + fmt.Fprintf(dockerCli.Out(), "Created manifest list %s\n", targetRef.String()) + return nil +} diff --git a/cli/cli/command/manifest/create_test.go b/cli/cli/command/manifest/create_test.go new file mode 100644 index 00000000..fbf0ae7b --- /dev/null +++ b/cli/cli/command/manifest/create_test.go @@ -0,0 +1,116 @@ +package manifest + +import ( + "context" + "io/ioutil" + "testing" + + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/internal/test" + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestManifestCreateErrors(t *testing.T) { + testCases := []struct { + args []string + expectedError string + }{ + { + args: []string{"too-few-arguments"}, + expectedError: "requires at least 2 arguments", + }, + { + args: []string{"th!si'sa/fa!ke/li$t/name", "example.com/alpine:3.0"}, + expectedError: "error parsing name for manifest list", + }, + } + + for _, tc := range testCases { + cli := 
test.NewFakeCli(nil) + cmd := newCreateListCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +// create a manifest list, then overwrite it, and inspect to see if the old one is still there +func TestManifestCreateAmend(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + namedRef = ref(t, "alpine:3.1") + imageManifest = fullImageManifest(t, namedRef) + err = store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newCreateListCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.1"}) + cmd.Flags().Set("amend", "true") + cmd.SetOutput(ioutil.Discard) + err = cmd.Execute() + assert.NilError(t, err) + + // make a new cli to clear the buffers + cli = test.NewFakeCli(nil) + cli.SetManifestStore(store) + inspectCmd := newInspectCommand(cli) + inspectCmd.SetArgs([]string{"example.com/list:v1"}) + assert.NilError(t, inspectCmd.Execute()) + actual := cli.OutBuffer() + expected := golden.Get(t, "inspect-manifest-list.golden") + assert.Check(t, is.Equal(string(expected), actual.String())) +} + +// attempt to overwrite a saved manifest and get refused +func TestManifestCreateRefuseAmend(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newCreateListCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + cmd.SetOutput(ioutil.Discard) + err = cmd.Execute() + 
assert.Error(t, err, "refusing to amend an existing manifest list with no --amend flag") +} + +// attempt to make a manifest list without valid images +func TestManifestCreateNoManifest(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + cli.SetRegistryClient(&fakeRegistryClient{ + getManifestFunc: func(_ context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) { + return manifesttypes.ImageManifest{}, errors.Errorf("No such image: %v", ref) + }, + getManifestListFunc: func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) { + return nil, errors.Errorf("No such manifest: %s", ref) + }, + }) + + cmd := newCreateListCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + cmd.SetOutput(ioutil.Discard) + err := cmd.Execute() + assert.Error(t, err, "No such image: example.com/alpine:3.0") +} diff --git a/cli/cli/command/manifest/inspect.go b/cli/cli/command/manifest/inspect.go new file mode 100644 index 00000000..c270ee53 --- /dev/null +++ b/cli/cli/command/manifest/inspect.go @@ -0,0 +1,148 @@ +package manifest + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/types" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + ref string + list string + verbose bool + insecure bool +} + +// NewInspectCommand creates a new `docker manifest inspect` command +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] [MANIFEST_LIST] MANIFEST", + Short: "Display an image manifest, or manifest list", + Args: cli.RequiresRangeArgs(1, 2), + 
RunE: func(cmd *cobra.Command, args []string) error { + switch len(args) { + case 1: + opts.ref = args[0] + case 2: + opts.list = args[0] + opts.ref = args[1] + } + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.insecure, "insecure", false, "Allow communication with an insecure registry") + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Output additional info including layers and platform") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + namedRef, err := normalizeReference(opts.ref) + if err != nil { + return err + } + + // If list reference is provided, display the local manifest in a list + if opts.list != "" { + listRef, err := normalizeReference(opts.list) + if err != nil { + return err + } + + imageManifest, err := dockerCli.ManifestStore().Get(listRef, namedRef) + if err != nil { + return err + } + return printManifest(dockerCli, imageManifest, opts) + } + + // Try a local manifest list first + localManifestList, err := dockerCli.ManifestStore().GetList(namedRef) + if err == nil { + return printManifestList(dockerCli, namedRef, localManifestList, opts) + } + + // Next try a remote manifest + ctx := context.Background() + registryClient := dockerCli.RegistryClient(opts.insecure) + imageManifest, err := registryClient.GetManifest(ctx, namedRef) + if err == nil { + return printManifest(dockerCli, imageManifest, opts) + } + + // Finally try a remote manifest list + manifestList, err := registryClient.GetManifestList(ctx, namedRef) + if err != nil { + return err + } + return printManifestList(dockerCli, namedRef, manifestList, opts) +} + +func printManifest(dockerCli command.Cli, manifest types.ImageManifest, opts inspectOptions) error { + buffer := new(bytes.Buffer) + if !opts.verbose { + _, raw, err := manifest.Payload() + if err != nil { + return err + } + if err := json.Indent(buffer, raw, "", "\t"); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), 
buffer.String()) + return nil + } + jsonBytes, err := json.MarshalIndent(manifest, "", "\t") + if err != nil { + return err + } + dockerCli.Out().Write(append(jsonBytes, '\n')) + return nil +} + +func printManifestList(dockerCli command.Cli, namedRef reference.Named, list []types.ImageManifest, opts inspectOptions) error { + if !opts.verbose { + targetRepo, err := registry.ParseRepositoryInfo(namedRef) + if err != nil { + return err + } + + manifests := []manifestlist.ManifestDescriptor{} + // More than one response. This is a manifest list. + for _, img := range list { + mfd, err := buildManifestDescriptor(targetRepo, img) + if err != nil { + return errors.Wrap(err, "failed to assemble ManifestDescriptor") + } + manifests = append(manifests, mfd) + } + deserializedML, err := manifestlist.FromDescriptors(manifests) + if err != nil { + return err + } + jsonBytes, err := deserializedML.MarshalJSON() + if err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), string(jsonBytes)) + return nil + } + jsonBytes, err := json.MarshalIndent(list, "", "\t") + if err != nil { + return err + } + dockerCli.Out().Write(append(jsonBytes, '\n')) + return nil +} diff --git a/cli/cli/command/manifest/inspect_test.go b/cli/cli/command/manifest/inspect_test.go new file mode 100644 index 00000000..7abe06d2 --- /dev/null +++ b/cli/cli/command/manifest/inspect_test.go @@ -0,0 +1,146 @@ +package manifest + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/docker/cli/cli/manifest/store" + "github.com/docker/cli/cli/manifest/types" + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/internal/test" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + 
"gotest.tools/golden" +) + +func newTempManifestStore(t *testing.T) (store.Store, func()) { + tmpdir, err := ioutil.TempDir("", "test-manifest-storage") + assert.NilError(t, err) + + return store.NewStore(tmpdir), func() { os.RemoveAll(tmpdir) } +} + +func ref(t *testing.T, name string) reference.Named { + named, err := reference.ParseNamed("example.com/" + name) + assert.NilError(t, err) + return named +} + +func fullImageManifest(t *testing.T, ref reference.Named) types.ImageManifest { + man, err := schema2.FromStruct(schema2.Manifest{ + Versioned: schema2.SchemaVersion, + Config: distribution.Descriptor{ + Digest: "sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560", + Size: 1520, + MediaType: schema2.MediaTypeImageConfig, + }, + Layers: []distribution.Descriptor{ + { + MediaType: schema2.MediaTypeLayer, + Size: 1990402, + Digest: "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926", + }, + }, + }) + assert.NilError(t, err) + + // TODO: include image data for verbose inspect + mt, raw, err := man.Payload() + assert.NilError(t, err) + + desc := ocispec.Descriptor{ + Digest: digest.FromBytes(raw), + Size: int64(len(raw)), + MediaType: mt, + Platform: &ocispec.Platform{ + Architecture: "amd64", + OS: "linux", + }, + } + + return types.NewImageManifest(ref, desc, man) +} + +func TestInspectCommandLocalManifestNotFound(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + + cmd := newInspectCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + err := cmd.Execute() + assert.Error(t, err, "No such manifest: example.com/alpine:3.0") +} + +func TestInspectCommandNotFound(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + cli.SetRegistryClient(&fakeRegistryClient{ + getManifestFunc: func(_ 
context.Context, _ reference.Named) (manifesttypes.ImageManifest, error) { + return manifesttypes.ImageManifest{}, errors.New("missing") + }, + getManifestListFunc: func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) { + return nil, errors.Errorf("No such manifest: %s", ref) + }, + }) + + cmd := newInspectCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"example.com/alpine:3.0"}) + err := cmd.Execute() + assert.Error(t, err, "No such manifest: example.com/alpine:3.0") +} + +func TestInspectCommandLocalManifest(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1", "example.com/alpine:3.0"}) + assert.NilError(t, cmd.Execute()) + actual := cli.OutBuffer() + expected := golden.Get(t, "inspect-manifest.golden") + assert.Check(t, is.Equal(string(expected), actual.String())) +} + +func TestInspectcommandRemoteManifest(t *testing.T) { + store, cleanup := newTempManifestStore(t) + defer cleanup() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + cli.SetRegistryClient(&fakeRegistryClient{ + getManifestFunc: func(_ context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) { + return fullImageManifest(t, ref), nil + }, + }) + + cmd := newInspectCommand(cli) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"example.com/alpine:3.0"}) + assert.NilError(t, cmd.Execute()) + actual := cli.OutBuffer() + expected := golden.Get(t, "inspect-manifest.golden") + assert.Check(t, is.Equal(string(expected), actual.String())) +} diff --git a/cli/cli/command/manifest/push.go b/cli/cli/command/manifest/push.go new file mode 100644 index 00000000..fa734afa --- /dev/null 
+++ b/cli/cli/command/manifest/push.go @@ -0,0 +1,281 @@ +package manifest + +import ( + "context" + "encoding/json" + "fmt" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/types" + registryclient "github.com/docker/cli/cli/registry/client" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type pushOpts struct { + insecure bool + purge bool + target string +} + +type mountRequest struct { + ref reference.Named + manifest types.ImageManifest +} + +type manifestBlob struct { + canonical reference.Canonical + os string +} + +type pushRequest struct { + targetRef reference.Named + list *manifestlist.DeserializedManifestList + mountRequests []mountRequest + manifestBlobs []manifestBlob + insecure bool +} + +func newPushListCommand(dockerCli command.Cli) *cobra.Command { + opts := pushOpts{} + + cmd := &cobra.Command{ + Use: "push [OPTIONS] MANIFEST_LIST", + Short: "Push a manifest list to a repository", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.target = args[0] + return runPush(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.purge, "purge", "p", false, "Remove the local manifest list after push") + flags.BoolVar(&opts.insecure, "insecure", false, "Allow push to an insecure registry") + return cmd +} + +func runPush(dockerCli command.Cli, opts pushOpts) error { + + targetRef, err := normalizeReference(opts.target) + if err != nil { + return err + } + + manifests, err := dockerCli.ManifestStore().GetList(targetRef) + if err != nil { + return err + } + if len(manifests) == 0 { + return errors.Errorf("%s not found", targetRef) + } + + pushRequest, err := buildPushRequest(manifests, targetRef, 
opts.insecure) + if err != nil { + return err + } + + ctx := context.Background() + if err := pushList(ctx, dockerCli, pushRequest); err != nil { + return err + } + if opts.purge { + return dockerCli.ManifestStore().Remove(targetRef) + } + return nil +} + +func buildPushRequest(manifests []types.ImageManifest, targetRef reference.Named, insecure bool) (pushRequest, error) { + req := pushRequest{targetRef: targetRef, insecure: insecure} + + var err error + req.list, err = buildManifestList(manifests, targetRef) + if err != nil { + return req, err + } + + targetRepo, err := registry.ParseRepositoryInfo(targetRef) + if err != nil { + return req, err + } + targetRepoName, err := registryclient.RepoNameForReference(targetRepo.Name) + if err != nil { + return req, err + } + + for _, imageManifest := range manifests { + manifestRepoName, err := registryclient.RepoNameForReference(imageManifest.Ref) + if err != nil { + return req, err + } + + repoName, _ := reference.WithName(manifestRepoName) + if repoName.Name() != targetRepoName { + blobs, err := buildBlobRequestList(imageManifest, repoName) + if err != nil { + return req, err + } + req.manifestBlobs = append(req.manifestBlobs, blobs...) 
+ + manifestPush, err := buildPutManifestRequest(imageManifest, targetRef) + if err != nil { + return req, err + } + req.mountRequests = append(req.mountRequests, manifestPush) + } + } + return req, nil +} + +func buildManifestList(manifests []types.ImageManifest, targetRef reference.Named) (*manifestlist.DeserializedManifestList, error) { + targetRepoInfo, err := registry.ParseRepositoryInfo(targetRef) + if err != nil { + return nil, err + } + + descriptors := []manifestlist.ManifestDescriptor{} + for _, imageManifest := range manifests { + if imageManifest.Descriptor.Platform == nil || + imageManifest.Descriptor.Platform.Architecture == "" || + imageManifest.Descriptor.Platform.OS == "" { + return nil, errors.Errorf( + "manifest %s must have an OS and Architecture to be pushed to a registry", imageManifest.Ref) + } + descriptor, err := buildManifestDescriptor(targetRepoInfo, imageManifest) + if err != nil { + return nil, err + } + descriptors = append(descriptors, descriptor) + } + + return manifestlist.FromDescriptors(descriptors) +} + +func buildManifestDescriptor(targetRepo *registry.RepositoryInfo, imageManifest types.ImageManifest) (manifestlist.ManifestDescriptor, error) { + repoInfo, err := registry.ParseRepositoryInfo(imageManifest.Ref) + if err != nil { + return manifestlist.ManifestDescriptor{}, err + } + + manifestRepoHostname := reference.Domain(repoInfo.Name) + targetRepoHostname := reference.Domain(targetRepo.Name) + if manifestRepoHostname != targetRepoHostname { + return manifestlist.ManifestDescriptor{}, errors.Errorf("cannot use source images from a different registry than the target image: %s != %s", manifestRepoHostname, targetRepoHostname) + } + + manifest := manifestlist.ManifestDescriptor{ + Descriptor: distribution.Descriptor{ + Digest: imageManifest.Descriptor.Digest, + Size: imageManifest.Descriptor.Size, + MediaType: imageManifest.Descriptor.MediaType, + }, + } + + platform := types.PlatformSpecFromOCI(imageManifest.Descriptor.Platform) 
+ if platform != nil { + manifest.Platform = *platform + } + + if err = manifest.Descriptor.Digest.Validate(); err != nil { + return manifestlist.ManifestDescriptor{}, errors.Wrapf(err, + "digest parse of image %q failed", imageManifest.Ref) + } + + return manifest, nil +} + +func buildBlobRequestList(imageManifest types.ImageManifest, repoName reference.Named) ([]manifestBlob, error) { + var blobReqs []manifestBlob + + for _, blobDigest := range imageManifest.Blobs() { + canonical, err := reference.WithDigest(repoName, blobDigest) + if err != nil { + return nil, err + } + var os string + if imageManifest.Descriptor.Platform != nil { + os = imageManifest.Descriptor.Platform.OS + } + blobReqs = append(blobReqs, manifestBlob{canonical: canonical, os: os}) + } + return blobReqs, nil +} + +// nolint: interfacer +func buildPutManifestRequest(imageManifest types.ImageManifest, targetRef reference.Named) (mountRequest, error) { + refWithoutTag, err := reference.WithName(targetRef.Name()) + if err != nil { + return mountRequest{}, err + } + mountRef, err := reference.WithDigest(refWithoutTag, imageManifest.Descriptor.Digest) + if err != nil { + return mountRequest{}, err + } + + // This indentation has to be added to ensure sha parity with the registry + v2ManifestBytes, err := json.MarshalIndent(imageManifest.SchemaV2Manifest, "", " ") + if err != nil { + return mountRequest{}, err + } + // indent only the DeserializedManifest portion of this, in order to maintain parity with the registry + // and not alter the sha + var v2Manifest schema2.DeserializedManifest + if err = v2Manifest.UnmarshalJSON(v2ManifestBytes); err != nil { + return mountRequest{}, err + } + imageManifest.SchemaV2Manifest = &v2Manifest + + return mountRequest{ref: mountRef, manifest: imageManifest}, err +} + +func pushList(ctx context.Context, dockerCli command.Cli, req pushRequest) error { + rclient := dockerCli.RegistryClient(req.insecure) + + if err := mountBlobs(ctx, rclient, req.targetRef, 
req.manifestBlobs); err != nil { + return err + } + if err := pushReferences(ctx, dockerCli.Out(), rclient, req.mountRequests); err != nil { + return err + } + dgst, err := rclient.PutManifest(ctx, req.targetRef, req.list) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), dgst.String()) + return nil +} + +func pushReferences(ctx context.Context, out io.Writer, client registryclient.RegistryClient, mounts []mountRequest) error { + for _, mount := range mounts { + newDigest, err := client.PutManifest(ctx, mount.ref, mount.manifest) + if err != nil { + return err + } + fmt.Fprintf(out, "Pushed ref %s with digest: %s\n", mount.ref, newDigest) + } + return nil +} + +func mountBlobs(ctx context.Context, client registryclient.RegistryClient, ref reference.Named, blobs []manifestBlob) error { + for _, blob := range blobs { + err := client.MountBlob(ctx, blob.canonical, ref) + switch err.(type) { + case nil: + case registryclient.ErrBlobCreated: + if blob.os != "windows" { + return fmt.Errorf("error mounting %s to %s", blob.canonical, ref) + } + default: + return err + } + } + return nil +} diff --git a/cli/cli/command/manifest/push_test.go b/cli/cli/command/manifest/push_test.go new file mode 100644 index 00000000..3a2e9b8a --- /dev/null +++ b/cli/cli/command/manifest/push_test.go @@ -0,0 +1,69 @@ +package manifest + +import ( + "context" + "io/ioutil" + "testing" + + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/internal/test" + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func newFakeRegistryClient() *fakeRegistryClient { + return &fakeRegistryClient{ + getManifestFunc: func(_ context.Context, _ reference.Named) (manifesttypes.ImageManifest, error) { + return manifesttypes.ImageManifest{}, errors.New("") + }, + getManifestListFunc: func(_ context.Context, _ reference.Named) ([]manifesttypes.ImageManifest, error) { + return nil, errors.Errorf("") + }, + } +} + 
+func TestManifestPushErrors(t *testing.T) { + testCases := []struct { + args []string + expectedError string + }{ + { + args: []string{"one-arg", "extra-arg"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"th!si'sa/fa!ke/li$t/-name"}, + expectedError: "invalid reference format", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(nil) + cmd := newPushListCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestManifestPush(t *testing.T) { + store, sCleanup := newTempManifestStore(t) + defer sCleanup() + + registry := newFakeRegistryClient() + + cli := test.NewFakeCli(nil) + cli.SetManifestStore(store) + cli.SetRegistryClient(registry) + + namedRef := ref(t, "alpine:3.0") + imageManifest := fullImageManifest(t, namedRef) + err := store.Save(ref(t, "list:v1"), namedRef, imageManifest) + assert.NilError(t, err) + + cmd := newPushListCommand(cli) + cmd.SetArgs([]string{"example.com/list:v1"}) + err = cmd.Execute() + assert.NilError(t, err) +} diff --git a/cli/cli/command/manifest/testdata/inspect-annotate.golden b/cli/cli/command/manifest/testdata/inspect-annotate.golden new file mode 100644 index 00000000..4d65b729 --- /dev/null +++ b/cli/cli/command/manifest/testdata/inspect-annotate.golden @@ -0,0 +1,32 @@ +{ + "Ref": "example.com/alpine:3.0", + "Descriptor": { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe", + "size": 528, + "platform": { + "architecture": "arm", + "os": "freebsd", + "os.features": [ + "feature1" + ], + "variant": "v7" + } + }, + "SchemaV2Manifest": { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1520, + "digest": 
"sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 1990402, + "digest": "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926" + } + ] + } +} diff --git a/cli/cli/command/manifest/testdata/inspect-manifest-list.golden b/cli/cli/command/manifest/testdata/inspect-manifest-list.golden new file mode 100644 index 00000000..a0c2673e --- /dev/null +++ b/cli/cli/command/manifest/testdata/inspect-manifest-list.golden @@ -0,0 +1,24 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 528, + "digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe", + "platform": { + "architecture": "amd64", + "os": "linux" + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 528, + "digest": "sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe", + "platform": { + "architecture": "amd64", + "os": "linux" + } + } + ] +} diff --git a/cli/cli/command/manifest/testdata/inspect-manifest.golden b/cli/cli/command/manifest/testdata/inspect-manifest.golden new file mode 100644 index 00000000..7089d9bd --- /dev/null +++ b/cli/cli/command/manifest/testdata/inspect-manifest.golden @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1520, + "digest": "sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 1990402, + "digest": "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926" + } + ] +} diff --git a/cli/cli/command/manifest/util.go 
b/cli/cli/command/manifest/util.go new file mode 100644 index 00000000..7464a1d1 --- /dev/null +++ b/cli/cli/command/manifest/util.go @@ -0,0 +1,81 @@ +package manifest + +import ( + "context" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/manifest/store" + "github.com/docker/cli/cli/manifest/types" + "github.com/docker/distribution/reference" +) + +type osArch struct { + os string + arch string +} + +// Remove any unsupported os/arch combo +// list of valid os/arch values (see "Optional Environment Variables" section +// of https://golang.org/doc/install/source +// Added linux/s390x as we know System z support already exists +// Keep in sync with _docker_manifest_annotate in contrib/completion/bash/docker +var validOSArches = map[osArch]bool{ + {os: "darwin", arch: "386"}: true, + {os: "darwin", arch: "amd64"}: true, + {os: "darwin", arch: "arm"}: true, + {os: "darwin", arch: "arm64"}: true, + {os: "dragonfly", arch: "amd64"}: true, + {os: "freebsd", arch: "386"}: true, + {os: "freebsd", arch: "amd64"}: true, + {os: "freebsd", arch: "arm"}: true, + {os: "linux", arch: "386"}: true, + {os: "linux", arch: "amd64"}: true, + {os: "linux", arch: "arm"}: true, + {os: "linux", arch: "arm64"}: true, + {os: "linux", arch: "ppc64le"}: true, + {os: "linux", arch: "mips64"}: true, + {os: "linux", arch: "mips64le"}: true, + {os: "linux", arch: "s390x"}: true, + {os: "netbsd", arch: "386"}: true, + {os: "netbsd", arch: "amd64"}: true, + {os: "netbsd", arch: "arm"}: true, + {os: "openbsd", arch: "386"}: true, + {os: "openbsd", arch: "amd64"}: true, + {os: "openbsd", arch: "arm"}: true, + {os: "plan9", arch: "386"}: true, + {os: "plan9", arch: "amd64"}: true, + {os: "solaris", arch: "amd64"}: true, + {os: "windows", arch: "386"}: true, + {os: "windows", arch: "amd64"}: true, +} + +func isValidOSArch(os string, arch string) bool { + // check for existence of this combo + _, ok := validOSArches[osArch{os, arch}] + return ok +} + +func normalizeReference(ref 
string) (reference.Named, error) { + namedRef, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, isDigested := namedRef.(reference.Canonical); !isDigested { + return reference.TagNameOnly(namedRef), nil + } + return namedRef, nil +} + +// getManifest from the local store, and fallback to the remote registry if it +// doesn't exist locally +func getManifest(ctx context.Context, dockerCli command.Cli, listRef, namedRef reference.Named, insecure bool) (types.ImageManifest, error) { + data, err := dockerCli.ManifestStore().Get(listRef, namedRef) + switch { + case store.IsNotFound(err): + return dockerCli.RegistryClient(insecure).GetManifest(ctx, namedRef) + case err != nil: + return types.ImageManifest{}, err + default: + return data, nil + } +} diff --git a/cli/cli/command/network/client_test.go b/cli/cli/command/network/client_test.go new file mode 100644 index 00000000..33cec6e5 --- /dev/null +++ b/cli/cli/command/network/client_test.go @@ -0,0 +1,45 @@ +package network + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + networkCreateFunc func(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + networkConnectFunc func(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + networkDisconnectFunc func(ctx context.Context, networkID, container string, force bool) error + networkListFunc func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) +} + +func (c *fakeClient) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + if c.networkCreateFunc != nil { + return c.networkCreateFunc(ctx, name, options) + } + return types.NetworkCreateResponse{}, nil +} + +func (c *fakeClient) NetworkConnect(ctx 
context.Context, networkID, container string, config *network.EndpointSettings) error { + if c.networkConnectFunc != nil { + return c.networkConnectFunc(ctx, networkID, container, config) + } + return nil +} + +func (c *fakeClient) NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error { + if c.networkDisconnectFunc != nil { + return c.networkDisconnectFunc(ctx, networkID, container, force) + } + return nil +} + +func (c *fakeClient) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + if c.networkListFunc != nil { + return c.networkListFunc(ctx, options) + } + return []types.NetworkResource{}, nil +} diff --git a/cli/cli/command/network/cmd.go b/cli/cli/command/network/cmd.go new file mode 100644 index 00000000..028e1377 --- /dev/null +++ b/cli/cli/command/network/cmd.go @@ -0,0 +1,29 @@ +package network + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewNetworkCommand returns a cobra command for `network` subcommands +func NewNetworkCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "network", + Short: "Manage networks", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{"version": "1.21"}, + } + cmd.AddCommand( + newConnectCommand(dockerCli), + newCreateCommand(dockerCli), + newDisconnectCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/network/connect.go b/cli/cli/command/network/connect.go new file mode 100644 index 00000000..04108e20 --- /dev/null +++ b/cli/cli/command/network/connect.go @@ -0,0 +1,85 @@ +package network + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + 
"github.com/docker/docker/api/types/network" + "github.com/spf13/cobra" +) + +type connectOptions struct { + network string + container string + ipaddress string + ipv6address string + links opts.ListOpts + aliases []string + linklocalips []string + driverOpts []string +} + +func newConnectCommand(dockerCli command.Cli) *cobra.Command { + options := connectOptions{ + links: opts.NewListOpts(opts.ValidateLink), + } + + cmd := &cobra.Command{ + Use: "connect [OPTIONS] NETWORK CONTAINER", + Short: "Connect a container to a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.network = args[0] + options.container = args[1] + return runConnect(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.StringVar(&options.ipaddress, "ip", "", "IPv4 address (e.g., 172.30.100.104)") + flags.StringVar(&options.ipv6address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)") + flags.Var(&options.links, "link", "Add link to another container") + flags.StringSliceVar(&options.aliases, "alias", []string{}, "Add network-scoped alias for the container") + flags.StringSliceVar(&options.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") + flags.StringSliceVar(&options.driverOpts, "driver-opt", []string{}, "driver options for the network") + return cmd +} + +func runConnect(dockerCli command.Cli, options connectOptions) error { + client := dockerCli.Client() + + driverOpts, err := convertDriverOpt(options.driverOpts) + if err != nil { + return err + } + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: options.ipaddress, + IPv6Address: options.ipv6address, + LinkLocalIPs: options.linklocalips, + }, + Links: options.links.GetAll(), + Aliases: options.aliases, + DriverOpts: driverOpts, + } + + return client.NetworkConnect(context.Background(), options.network, options.container, epConfig) +} + +func convertDriverOpt(opts []string) (map[string]string, 
error) { + driverOpt := make(map[string]string) + for _, opt := range opts { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid key/value pair format in driver options") + } + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + driverOpt[key] = value + } + return driverOpt, nil +} diff --git a/cli/cli/command/network/connect_test.go b/cli/cli/command/network/connect_test.go new file mode 100644 index 00000000..2c1d0401 --- /dev/null +++ b/cli/cli/command/network/connect_test.go @@ -0,0 +1,70 @@ +package network + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/network" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNetworkConnectErrors(t *testing.T) { + testCases := []struct { + args []string + networkConnectFunc func(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + expectedError string + }{ + { + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"toto", "titi"}, + networkConnectFunc: func(ctx context.Context, networkID, container string, config *network.EndpointSettings) error { + return errors.Errorf("error connecting network") + }, + expectedError: "error connecting network", + }, + } + + for _, tc := range testCases { + cmd := newConnectCommand( + test.NewFakeCli(&fakeClient{ + networkConnectFunc: tc.networkConnectFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + + } +} + +func TestNetworkConnectWithFlags(t *testing.T) { + expectedOpts := []network.IPAMConfig{ + { + Subnet: "192.168.4.0/24", + IPRange: "192.168.4.0/24", + Gateway: "192.168.4.1/24", + AuxAddress: map[string]string{}, + }, + } + cli := test.NewFakeCli(&fakeClient{ + networkConnectFunc: func(ctx context.Context, networkID, container string, config 
*network.EndpointSettings) error { + assert.Check(t, is.DeepEqual(expectedOpts, config.IPAMConfig), "not expected driver error") + return nil + }, + }) + args := []string{"banana"} + cmd := newCreateCommand(cli) + + cmd.SetArgs(args) + cmd.Flags().Set("driver", "foo") + cmd.Flags().Set("ip-range", "192.168.4.0/24") + cmd.Flags().Set("gateway", "192.168.4.1/24") + cmd.Flags().Set("subnet", "192.168.4.0/24") + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/network/create.go b/cli/cli/command/network/create.go new file mode 100644 index 00000000..bcb37294 --- /dev/null +++ b/cli/cli/command/network/create.go @@ -0,0 +1,248 @@ +package network + +import ( + "context" + "fmt" + "net" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + scope string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts + internal bool + ipv6 bool + attachable bool + ingress bool + configOnly bool + configFrom string + + ipamDriver string + ipamSubnet []string + ipamIPRange []string + ipamGateway []string + ipamAux opts.MapOpts + ipamOpt opts.MapOpts +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + options := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(opts.ValidateLabel), + ipamAux: *opts.NewMapOpts(nil, nil), + ipamOpt: *opts.NewMapOpts(nil, nil), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] NETWORK", + Short: "Create a network", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.name = args[0] + return runCreate(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&options.driver, "driver", "d", "bridge", "Driver to manage the Network") + flags.VarP(&options.driverOpts, "opt", "o", 
"Set driver specific options") + flags.Var(&options.labels, "label", "Set metadata on a network") + flags.BoolVar(&options.internal, "internal", false, "Restrict external access to the network") + flags.BoolVar(&options.ipv6, "ipv6", false, "Enable IPv6 networking") + flags.BoolVar(&options.attachable, "attachable", false, "Enable manual container attachment") + flags.SetAnnotation("attachable", "version", []string{"1.25"}) + flags.BoolVar(&options.ingress, "ingress", false, "Create swarm routing-mesh network") + flags.SetAnnotation("ingress", "version", []string{"1.29"}) + flags.StringVar(&options.scope, "scope", "", "Control the network's scope") + flags.SetAnnotation("scope", "version", []string{"1.30"}) + flags.BoolVar(&options.configOnly, "config-only", false, "Create a configuration only network") + flags.SetAnnotation("config-only", "version", []string{"1.30"}) + flags.StringVar(&options.configFrom, "config-from", "", "The network from which copying the configuration") + flags.SetAnnotation("config-from", "version", []string{"1.30"}) + + flags.StringVar(&options.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") + flags.StringSliceVar(&options.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") + flags.StringSliceVar(&options.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") + flags.StringSliceVar(&options.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") + + flags.Var(&options.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") + flags.Var(&options.ipamOpt, "ipam-opt", "Set IPAM driver specific options") + + return cmd +} + +func runCreate(dockerCli command.Cli, options createOptions) error { + client := dockerCli.Client() + + ipamCfg, err := consolidateIpam(options.ipamSubnet, options.ipamIPRange, options.ipamGateway, options.ipamAux.GetAll()) + if err != nil { + return err + } + + // Construct network 
create request body + nc := types.NetworkCreate{ + Driver: options.driver, + Options: options.driverOpts.GetAll(), + IPAM: &network.IPAM{ + Driver: options.ipamDriver, + Config: ipamCfg, + Options: options.ipamOpt.GetAll(), + }, + CheckDuplicate: true, + Internal: options.internal, + EnableIPv6: options.ipv6, + Attachable: options.attachable, + Ingress: options.ingress, + Scope: options.scope, + ConfigOnly: options.configOnly, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + } + + if from := options.configFrom; from != "" { + nc.ConfigFrom = &network.ConfigReference{ + Network: from, + } + } + + resp, err := client.NetworkCreate(context.Background(), options.name, nc) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) + return nil +} + +// Consolidates the ipam configuration as a group from different related configurations +// user can configure network with multiple non-overlapping subnets and hence it is +// possible to correlate the various related parameters and consolidate them. +// consolidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into +// structured ipam data. 
+// nolint: gocyclo +func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, errors.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, errors.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, errors.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, errors.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { 
+ return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return false, errors.Wrap(err, "invalid subnet") + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, err + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} diff --git a/cli/cli/command/network/create_test.go b/cli/cli/command/network/create_test.go new file mode 100644 index 00000000..6bfa7b65 --- /dev/null +++ b/cli/cli/command/network/create_test.go @@ -0,0 +1,174 @@ +package network + +import ( + "context" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNetworkCreateErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + networkCreateFunc func(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + expectedError string + }{ + { + expectedError: "exactly 1 argument", + }, + { + args: []string{"toto"}, + networkCreateFunc: func(ctx context.Context, name string, createBody types.NetworkCreate) (types.NetworkCreateResponse, error) { + return types.NetworkCreateResponse{}, errors.Errorf("error creating network") + }, + expectedError: "error creating network", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.255.0.0/24", + "gateway": "255.0.255.0/24", + "subnet": "10.1.2.0.30.50", + }, + expectedError: "invalid CIDR 
address: 10.1.2.0.30.50", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.255.0.0.30/24", + "gateway": "255.0.255.0/24", + "subnet": "255.0.0.0/24", + }, + expectedError: "invalid CIDR address: 255.255.0.0.30/24", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "gateway": "255.0.0.0/24", + }, + expectedError: "every ip-range or gateway must have a corresponding subnet", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.0.0.0/24", + }, + expectedError: "every ip-range or gateway must have a corresponding subnet", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.0.0.0/24", + "gateway": "255.0.0.0/24", + }, + expectedError: "every ip-range or gateway must have a corresponding subnet", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.255.0.0/24", + "gateway": "255.0.255.0/24", + "subnet": "10.1.2.0/23,10.1.3.248/30", + }, + expectedError: "multiple overlapping subnet configuration is not supported", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "192.168.1.0/24,192.168.1.200/24", + "gateway": "192.168.1.1,192.168.1.4", + "subnet": "192.168.2.0/24,192.168.1.250/24", + }, + expectedError: "cannot configure multiple ranges (192.168.1.200/24, 192.168.1.0/24) on the same subnet (192.168.1.250/24)", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "255.255.200.0/24,255.255.120.0/24", + "gateway": "255.0.255.0/24", + "subnet": "255.255.255.0/24,255.255.0.255/24", + }, + expectedError: "no matching subnet for range 255.255.200.0/24", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "ip-range": "192.168.1.0/24", + "gateway": "192.168.1.1,192.168.1.4", + "subnet": "192.168.2.0/24,192.168.1.250/24", + }, + expectedError: "cannot configure multiple gateways (192.168.1.4, 192.168.1.1) for the same subnet (192.168.1.250/24)", + }, + { + args: []string{"toto"}, + 
flags: map[string]string{ + "ip-range": "192.168.1.0/24", + "gateway": "192.168.4.1,192.168.5.4", + "subnet": "192.168.2.0/24,192.168.1.250/24", + }, + expectedError: "no matching subnet for gateway 192.168.4.1", + }, + { + args: []string{"toto"}, + flags: map[string]string{ + "gateway": "255.255.0.0/24", + "subnet": "255.255.0.0/24", + "aux-address": "255.255.0.30/24", + }, + expectedError: "no matching subnet for aux-address", + }, + } + + for _, tc := range testCases { + cmd := newCreateCommand( + test.NewFakeCli(&fakeClient{ + networkCreateFunc: tc.networkCreateFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + assert.NilError(t, cmd.Flags().Set(key, value)) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + + } +} +func TestNetworkCreateWithFlags(t *testing.T) { + expectedDriver := "foo" + expectedOpts := []network.IPAMConfig{ + { + Subnet: "192.168.4.0/24", + IPRange: "192.168.4.0/24", + Gateway: "192.168.4.1/24", + AuxAddress: map[string]string{}, + }, + } + cli := test.NewFakeCli(&fakeClient{ + networkCreateFunc: func(ctx context.Context, name string, createBody types.NetworkCreate) (types.NetworkCreateResponse, error) { + assert.Check(t, is.Equal(expectedDriver, createBody.Driver), "not expected driver error") + assert.Check(t, is.DeepEqual(expectedOpts, createBody.IPAM.Config), "not expected driver error") + return types.NetworkCreateResponse{ + ID: name, + }, nil + }, + }) + args := []string{"banana"} + cmd := newCreateCommand(cli) + + cmd.SetArgs(args) + cmd.Flags().Set("driver", "foo") + cmd.Flags().Set("ip-range", "192.168.4.0/24") + cmd.Flags().Set("gateway", "192.168.4.1/24") + cmd.Flags().Set("subnet", "192.168.4.0/24") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("banana", strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/network/disconnect.go b/cli/cli/command/network/disconnect.go new file mode 100644 index 00000000..18bf4c7b 
--- /dev/null +++ b/cli/cli/command/network/disconnect.go @@ -0,0 +1,41 @@ +package network + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type disconnectOptions struct { + network string + container string + force bool +} + +func newDisconnectCommand(dockerCli command.Cli) *cobra.Command { + opts := disconnectOptions{} + + cmd := &cobra.Command{ + Use: "disconnect [OPTIONS] NETWORK CONTAINER", + Short: "Disconnect a container from a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runDisconnect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") + + return cmd +} + +func runDisconnect(dockerCli command.Cli, opts disconnectOptions) error { + client := dockerCli.Client() + + return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) +} diff --git a/cli/cli/command/network/disconnect_test.go b/cli/cli/command/network/disconnect_test.go new file mode 100644 index 00000000..9a552570 --- /dev/null +++ b/cli/cli/command/network/disconnect_test.go @@ -0,0 +1,41 @@ +package network + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestNetworkDisconnectErrors(t *testing.T) { + testCases := []struct { + args []string + networkDisconnectFunc func(ctx context.Context, networkID, container string, force bool) error + expectedError string + }{ + { + expectedError: "requires exactly 2 arguments", + }, + { + args: []string{"toto", "titi"}, + networkDisconnectFunc: func(ctx context.Context, networkID, container string, force bool) error { + return errors.Errorf("error disconnecting network") + }, + expectedError: "error disconnecting network", + }, + } + + for _, 
tc := range testCases { + cmd := newDisconnectCommand( + test.NewFakeCli(&fakeClient{ + networkDisconnectFunc: tc.networkDisconnectFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} diff --git a/cli/cli/command/network/formatter.go b/cli/cli/command/network/formatter.go new file mode 100644 index 00000000..ed8ac257 --- /dev/null +++ b/cli/cli/command/network/formatter.go @@ -0,0 +1,120 @@ +package network + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}" + + networkIDHeader = "NETWORK ID" + ipv6Header = "IPV6" + internalHeader = "INTERNAL" +) + +// NewFormat returns a Format for rendering using a network Context +func NewFormat(source string, quiet bool) formatter.Format { + switch source { + case formatter.TableFormatKey: + if quiet { + return formatter.DefaultQuietFormat + } + return defaultNetworkTableFormat + case formatter.RawFormatKey: + if quiet { + return `network_id: {{.ID}}` + } + return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n` + } + return formatter.Format(source) +} + +// FormatWrite writes the context +func FormatWrite(ctx formatter.Context, networks []types.NetworkResource) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, network := range networks { + networkCtx := &networkContext{trunc: ctx.Trunc, n: network} + if err := format(networkCtx); err != nil { + return err + } + } + return nil + } + networkCtx := networkContext{} + networkCtx.Header = formatter.SubHeaderContext{ + "ID": networkIDHeader, + "Name": formatter.NameHeader, + "Driver": formatter.DriverHeader, + "Scope": formatter.ScopeHeader, + "IPv6": ipv6Header, + "Internal": internalHeader, + "Labels": 
formatter.LabelsHeader, + "CreatedAt": formatter.CreatedAtHeader, + } + return ctx.Write(&networkCtx, render) +} + +type networkContext struct { + formatter.HeaderContext + trunc bool + n types.NetworkResource +} + +func (c *networkContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *networkContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.n.ID) + } + return c.n.ID +} + +func (c *networkContext) Name() string { + return c.n.Name +} + +func (c *networkContext) Driver() string { + return c.n.Driver +} + +func (c *networkContext) Scope() string { + return c.n.Scope +} + +func (c *networkContext) IPv6() string { + return fmt.Sprintf("%v", c.n.EnableIPv6) +} + +func (c *networkContext) Internal() string { + return fmt.Sprintf("%v", c.n.Internal) +} + +func (c *networkContext) Labels() string { + if c.n.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.n.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *networkContext) Label(name string) string { + if c.n.Labels == nil { + return "" + } + return c.n.Labels[name] +} + +func (c *networkContext) CreatedAt() string { + return c.n.Created.String() +} diff --git a/cli/cli/command/network/formatter_test.go b/cli/cli/command/network/formatter_test.go new file mode 100644 index 00000000..4fd86d8e --- /dev/null +++ b/cli/cli/command/network/formatter_test.go @@ -0,0 +1,215 @@ +package network + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNetworkContext(t *testing.T) { + networkID := stringid.GenerateRandomID() + + var ctx networkContext + cases := []struct { + networkCtx networkContext + expValue 
string + call func() string + }{ + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: false, + }, networkID, ctx.ID}, + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: true, + }, stringid.TruncateID(networkID), ctx.ID}, + {networkContext{ + n: types.NetworkResource{Name: "network_name"}, + }, "network_name", ctx.Name}, + {networkContext{ + n: types.NetworkResource{Driver: "driver_name"}, + }, "driver_name", ctx.Driver}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: true}, + }, "true", ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: false}, + }, "false", ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{Internal: true}, + }, "true", ctx.Internal}, + {networkContext{ + n: types.NetworkResource{Internal: false}, + }, "false", ctx.Internal}, + {networkContext{ + n: types.NetworkResource{}, + }, "", ctx.Labels}, + {networkContext{ + n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", ctx.Labels}, + } + + for _, c := range cases { + ctx = c.networkCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestNetworkContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + + // Errors + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + formatter.Context{Format: NewFormat("table", false)}, + `NETWORK ID NAME DRIVER SCOPE +networkID1 foobar_baz foo local +networkID2 foobar_bar bar local +`, + }, + { + formatter.Context{Format: NewFormat("table", true)}, + `networkID1 +networkID2 +`, + }, + 
{ + formatter.Context{Format: NewFormat("table {{.Name}}", false)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + formatter.Context{Format: NewFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + formatter.Context{Format: NewFormat("raw", false)}, + `network_id: networkID1 +name: foobar_baz +driver: foo +scope: local + +network_id: networkID2 +name: foobar_bar +driver: bar +scope: local + +`, + }, + { + formatter.Context{Format: NewFormat("raw", true)}, + `network_id: networkID1 +network_id: networkID2 +`, + }, + // Custom Format + { + formatter.Context{Format: NewFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + // Custom Format with CreatedAt + { + formatter.Context{Format: NewFormat("{{.Name}} {{.CreatedAt}}", false)}, + `foobar_baz 2016-01-01 00:00:00 +0000 UTC +foobar_bar 2017-01-01 00:00:00 +0000 UTC +`, + }, + } + + timestamp1, _ := time.Parse("2006-01-02", "2016-01-01") + timestamp2, _ := time.Parse("2006-01-02", "2017-01-01") + + for _, testcase := range cases { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz", Driver: "foo", Scope: "local", Created: timestamp1}, + {ID: "networkID2", Name: "foobar_bar", Driver: "bar", Scope: "local", Created: timestamp2}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := FormatWrite(testcase.context, networks) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestNetworkContextWriteJSON(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"}, + {"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": 
"false", "Labels": "", "Name": "foobar_bar", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"}, + } + + out := bytes.NewBufferString("") + err := FormatWrite(formatter.Context{Format: "{{json .}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg) + } +} + +func TestNetworkContextWriteJSONField(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := FormatWrite(formatter.Context{Format: "{{json .ID}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(networks[i].ID, s), msg) + } +} diff --git a/cli/cli/command/network/inspect.go b/cli/cli/command/network/inspect.go new file mode 100644 index 00000000..3d7543d9 --- /dev/null +++ b/cli/cli/command/network/inspect.go @@ -0,0 +1,48 @@ +package network + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string + verbose bool +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NETWORK [NETWORK...]", + Short: "Display detailed information on one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, 
args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + cmd.Flags().BoolVarP(&opts.verbose, "verbose", "v", false, "Verbose output for diagnostics") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getNetFunc := func(name string) (interface{}, []byte, error) { + return client.NetworkInspectWithRaw(ctx, name, types.NetworkInspectOptions{Verbose: opts.verbose}) + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) +} diff --git a/cli/cli/command/network/list.go b/cli/cli/command/network/list.go new file mode 100644 index 00000000..b2700552 --- /dev/null +++ b/cli/cli/command/network/list.go @@ -0,0 +1,72 @@ +package network + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type listOptions struct { + quiet bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display network IDs") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Do not truncate the output") + flags.StringVar(&options.format, "format", "", "Pretty-print networks using a Go template") + flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 
'driver=bridge')") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + listOptions := types.NetworkListOptions{Filters: options.filter.Value()} + networkResources, err := client.NetworkList(context.Background(), listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().NetworksFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Slice(networkResources, func(i, j int) bool { + return sortorder.NaturalLess(networkResources[i].Name, networkResources[j].Name) + }) + + networksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(format, options.quiet), + Trunc: !options.noTrunc, + } + return FormatWrite(networksCtx, networkResources) +} diff --git a/cli/cli/command/network/list_test.go b/cli/cli/command/network/list_test.go new file mode 100644 index 00000000..2f76e517 --- /dev/null +++ b/cli/cli/command/network/list_test.go @@ -0,0 +1,94 @@ +package network + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + . 
"github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestNetworkListErrors(t *testing.T) { + testCases := []struct { + networkListFunc func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + expectedError string + }{ + { + networkListFunc: func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + return []types.NetworkResource{}, errors.Errorf("error creating network") + }, + expectedError: "error creating network", + }, + } + + for _, tc := range testCases { + cmd := newListCommand( + test.NewFakeCli(&fakeClient{ + networkListFunc: tc.networkListFunc, + }), + ) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNetworkList(t *testing.T) { + testCases := []struct { + doc string + networkListFunc func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + flags map[string]string + golden string + }{ + { + doc: "network list with flags", + flags: map[string]string{ + "filter": "image.name=ubuntu", + }, + golden: "network-list.golden", + networkListFunc: func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + expectedOpts := types.NetworkListOptions{ + Filters: filters.NewArgs(filters.Arg("image.name", "ubuntu")), + } + assert.Check(t, is.DeepEqual(expectedOpts, options, cmp.AllowUnexported(filters.Args{}))) + + return []types.NetworkResource{*NetworkResource(NetworkResourceID("123454321"), + NetworkResourceName("network_1"), + NetworkResourceDriver("09.7.01"), + NetworkResourceScope("global"))}, nil + }, + }, + { + doc: "network list sort order", + flags: map[string]string{ + "format": "{{ .Name }}", + }, + golden: 
"network-list-sort.golden", + networkListFunc: func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + return []types.NetworkResource{ + *NetworkResource(NetworkResourceName("network-2-foo")), + *NetworkResource(NetworkResourceName("network-1-foo")), + *NetworkResource(NetworkResourceName("network-10-foo"))}, nil + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.doc, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{networkListFunc: tc.networkListFunc}) + cmd := newListCommand(cli) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), tc.golden) + }) + } +} diff --git a/cli/cli/command/network/prune.go b/cli/cli/command/network/prune.go new file mode 100644 index 00000000..462d3616 --- /dev/null +++ b/cli/cli/command/network/prune.go @@ -0,0 +1,76 @@ +package network + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for networks +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + return nil + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const warning = `WARNING! 
This will remove all networks not used by at least one container. +Are you sure you want to continue?` + +func runPrune(dockerCli command.Cli, options pruneOptions) (output string, err error) { + pruneFilters := command.PruneFilters(dockerCli, options.filter.Value()) + + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return "", nil + } + + report, err := dockerCli.Client().NetworksPrune(context.Background(), pruneFilters) + if err != nil { + return "", err + } + + if len(report.NetworksDeleted) > 0 { + output = "Deleted Networks:\n" + for _, id := range report.NetworksDeleted { + output += id + "\n" + } + } + + return output, nil +} + +// RunPrune calls the Network Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) { + output, err := runPrune(dockerCli, pruneOptions{force: true, filter: filter}) + return 0, output, err +} diff --git a/cli/cli/command/network/remove.go b/cli/cli/command/network/remove.go new file mode 100644 index 00000000..66f48197 --- /dev/null +++ b/cli/cli/command/network/remove.go @@ -0,0 +1,53 @@ +package network + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "rm NETWORK [NETWORK...]", + Aliases: []string{"remove"}, + Short: "Remove one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } +} + +const ingressWarning = "WARNING! Before removing the routing-mesh network, " + + "make sure all the nodes in your swarm run the same docker engine version. 
" + + "Otherwise, removal may not be effective and functionality of newly create " + + "ingress networks will be impaired.\nAre you sure you want to continue?" + +func runRemove(dockerCli command.Cli, networks []string) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range networks { + if nw, _, err := client.NetworkInspectWithRaw(ctx, name, types.NetworkInspectOptions{}); err == nil && + nw.Ingress && + !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), ingressWarning) { + continue + } + if err := client.NetworkRemove(ctx, name); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/cli/cli/command/network/testdata/network-list-sort.golden b/cli/cli/command/network/testdata/network-list-sort.golden new file mode 100644 index 00000000..165f9395 --- /dev/null +++ b/cli/cli/command/network/testdata/network-list-sort.golden @@ -0,0 +1,3 @@ +network-1-foo +network-2-foo +network-10-foo diff --git a/cli/cli/command/network/testdata/network-list.golden b/cli/cli/command/network/testdata/network-list.golden new file mode 100644 index 00000000..61d982e0 --- /dev/null +++ b/cli/cli/command/network/testdata/network-list.golden @@ -0,0 +1,2 @@ +NETWORK ID NAME DRIVER SCOPE +123454321 network_1 09.7.01 global diff --git a/cli/cli/command/node/client_test.go b/cli/cli/command/node/client_test.go new file mode 100644 index 00000000..3948c6a8 --- /dev/null +++ b/cli/cli/command/node/client_test.go @@ -0,0 +1,77 @@ +package node + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + infoFunc func() (types.Info, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeListFunc func() 
([]swarm.Node, error) + nodeRemoveFunc func() error + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + taskInspectFunc func(taskID string) (swarm.Task, []byte, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + serviceInspectFunc func(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectFunc != nil { + return cli.nodeInspectFunc() + } + return swarm.Node{}, []byte{}, nil +} + +func (cli *fakeClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + if cli.nodeListFunc != nil { + return cli.nodeListFunc() + } + return []swarm.Node{}, nil +} + +func (cli *fakeClient) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + if cli.nodeRemoveFunc != nil { + return cli.nodeRemoveFunc() + } + return nil +} + +func (cli *fakeClient) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if cli.nodeUpdateFunc != nil { + return cli.nodeUpdateFunc(nodeID, version, node) + } + return nil +} + +func (cli *fakeClient) Info(ctx context.Context) (types.Info, error) { + if cli.infoFunc != nil { + return cli.infoFunc() + } + return types.Info{}, nil +} + +func (cli *fakeClient) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if cli.taskInspectFunc != nil { + return cli.taskInspectFunc(taskID) + } + return swarm.Task{}, []byte{}, nil +} + +func (cli *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + if cli.taskListFunc != nil { + return cli.taskListFunc(options) + } + return []swarm.Task{}, nil +} + +func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, 
[]byte, error) { + if cli.serviceInspectFunc != nil { + return cli.serviceInspectFunc(ctx, serviceID, opts) + } + return swarm.Service{}, []byte{}, nil +} diff --git a/cli/cli/command/node/cmd.go b/cli/cli/command/node/cmd.go new file mode 100644 index 00000000..f96c9a6b --- /dev/null +++ b/cli/cli/command/node/cmd.go @@ -0,0 +1,60 @@ +package node + +import ( + "context" + "errors" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +// NewNodeCommand returns a cobra command for `node` subcommands +func NewNodeCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "node", + Short: "Manage Swarm nodes", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.24", + "swarm": "", + }, + } + cmd.AddCommand( + newDemoteCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newPromoteCommand(dockerCli), + newRemoveCommand(dockerCli), + newPsCommand(dockerCli), + newUpdateCommand(dockerCli), + ) + return cmd +} + +// Reference returns the reference of a node. The special value "self" for a node +// reference is mapped to the current node, hence the node ID is retrieved using +// the `/info` endpoint. +func Reference(ctx context.Context, client apiclient.APIClient, ref string) (string, error) { + if ref == "self" { + info, err := client.Info(ctx) + if err != nil { + return "", err + } + if info.Swarm.NodeID == "" { + // If there's no node ID in /info, the node probably + // isn't a manager. Call a swarm-specific endpoint to + // get a more specific error message. 
+ _, err = client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return "", err + } + return "", errors.New("node ID not found in /info") + } + return info.Swarm.NodeID, nil + } + return ref, nil +} diff --git a/cli/cli/command/node/demote.go b/cli/cli/command/node/demote.go new file mode 100644 index 00000000..5250dfc0 --- /dev/null +++ b/cli/cli/command/node/demote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" +) + +func newDemoteCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "demote NODE [NODE...]", + Short: "Demote one or more nodes from manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDemote(dockerCli, args) + }, + } +} + +func runDemote(dockerCli command.Cli, nodes []string) error { + demote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleWorker { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a worker.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleWorker + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, demote, success) +} diff --git a/cli/cli/command/node/demote_test.go b/cli/cli/command/node/demote_test.go new file mode 100644 index 00000000..3f18d63d --- /dev/null +++ b/cli/cli/command/node/demote_test.go @@ -0,0 +1,84 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" +) + +func TestNodeDemoteErrors(t *testing.T) { + testCases := []struct { + args []string + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"nodeID"}, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + return errors.Errorf("error updating the node") + }, + expectedError: "error updating the node", + }, + } + for _, tc := range testCases { + cmd := newDemoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + nodeUpdateFunc: tc.nodeUpdateFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeDemoteNoChange(t *testing.T) { + cmd := newDemoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleWorker { + return errors.Errorf("expected role worker, got %s", node.Role) + } + return nil + }, + })) + cmd.SetArgs([]string{"nodeID"}) + assert.NilError(t, cmd.Execute()) +} + +func TestNodeDemoteMultipleNode(t *testing.T) { + cmd := newDemoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleWorker { + return errors.Errorf("expected role worker, got %s", 
node.Role) + } + return nil + }, + })) + cmd.SetArgs([]string{"nodeID1", "nodeID2"}) + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/node/formatter.go b/cli/cli/command/node/formatter.go new file mode 100644 index 00000000..9cd9d015 --- /dev/null +++ b/cli/cli/command/node/formatter.go @@ -0,0 +1,334 @@ +package node + +import ( + "encoding/base64" + "fmt" + "reflect" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultNodeTableFormat = "table {{.ID}} {{if .Self}}*{{else}} {{ end }}\t{{.Hostname}}\t{{.Status}}\t{{.Availability}}\t{{.ManagerStatus}}\t{{.EngineVersion}}" + nodeInspectPrettyTemplate formatter.Format = `ID: {{.ID}} +{{- if .Name }} +Name: {{.Name}} +{{- end }} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Hostname: {{.Hostname}} +Joined at: {{.CreatedAt}} +Status: + State: {{.StatusState}} + {{- if .HasStatusMessage}} + Message: {{.StatusMessage}} + {{- end}} + Availability: {{.SpecAvailability}} + {{- if .Status.Addr}} + Address: {{.StatusAddr}} + {{- end}} +{{- if .HasManagerStatus}} +Manager Status: + Address: {{.ManagerStatusAddr}} + Raft Status: {{.ManagerStatusReachability}} + {{- if .IsManagerStatusLeader}} + Leader: Yes + {{- else}} + Leader: No + {{- end}} +{{- end}} +Platform: + Operating System: {{.PlatformOS}} + Architecture: {{.PlatformArchitecture}} +Resources: + CPUs: {{.ResourceNanoCPUs}} + Memory: {{.ResourceMemory}} +{{- if .HasEnginePlugins}} +Plugins: +{{- range $k, $v := .EnginePlugins }} + {{ $k }}:{{if $v }} {{ $v }}{{ end }} +{{- end }} +{{- end }} +Engine Version: {{.EngineVersion}} +{{- if .EngineLabels}} +Engine Labels: +{{- range $k, $v := .EngineLabels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} 
+{{- end }}{{- end }} +{{- if .HasTLSInfo}} +TLS Info: + TrustRoot: +{{.TLSInfoTrustRoot}} + Issuer Subject: {{.TLSInfoCertIssuerSubject}} + Issuer Public Key: {{.TLSInfoCertIssuerPublicKey}} +{{- end}}` + nodeIDHeader = "ID" + selfHeader = "" + hostnameHeader = "HOSTNAME" + availabilityHeader = "AVAILABILITY" + managerStatusHeader = "MANAGER STATUS" + engineVersionHeader = "ENGINE VERSION" + tlsStatusHeader = "TLS STATUS" +) + +// NewFormat returns a Format for rendering using a node Context +func NewFormat(source string, quiet bool) formatter.Format { + switch source { + case formatter.PrettyFormatKey: + return nodeInspectPrettyTemplate + case formatter.TableFormatKey: + if quiet { + return formatter.DefaultQuietFormat + } + return defaultNodeTableFormat + case formatter.RawFormatKey: + if quiet { + return `node_id: {{.ID}}` + } + return `node_id: {{.ID}}\nhostname: {{.Hostname}}\nstatus: {{.Status}}\navailability: {{.Availability}}\nmanager_status: {{.ManagerStatus}}\n` + } + return formatter.Format(source) +} + +// FormatWrite writes the context +func FormatWrite(ctx formatter.Context, nodes []swarm.Node, info types.Info) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, node := range nodes { + nodeCtx := &nodeContext{n: node, info: info} + if err := format(nodeCtx); err != nil { + return err + } + } + return nil + } + nodeCtx := nodeContext{} + nodeCtx.Header = formatter.SubHeaderContext{ + "ID": nodeIDHeader, + "Self": selfHeader, + "Hostname": hostnameHeader, + "Status": formatter.StatusHeader, + "Availability": availabilityHeader, + "ManagerStatus": managerStatusHeader, + "EngineVersion": engineVersionHeader, + "TLSStatus": tlsStatusHeader, + } + return ctx.Write(&nodeCtx, render) +} + +type nodeContext struct { + formatter.HeaderContext + n swarm.Node + info types.Info +} + +func (c *nodeContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *nodeContext) ID() string { + return 
c.n.ID +} + +func (c *nodeContext) Self() bool { + return c.n.ID == c.info.Swarm.NodeID +} + +func (c *nodeContext) Hostname() string { + return c.n.Description.Hostname +} + +func (c *nodeContext) Status() string { + return command.PrettyPrint(string(c.n.Status.State)) +} + +func (c *nodeContext) Availability() string { + return command.PrettyPrint(string(c.n.Spec.Availability)) +} + +func (c *nodeContext) ManagerStatus() string { + reachability := "" + if c.n.ManagerStatus != nil { + if c.n.ManagerStatus.Leader { + reachability = "Leader" + } else { + reachability = string(c.n.ManagerStatus.Reachability) + } + } + return command.PrettyPrint(reachability) +} + +func (c *nodeContext) TLSStatus() string { + if c.info.Swarm.Cluster == nil || reflect.DeepEqual(c.info.Swarm.Cluster.TLSInfo, swarm.TLSInfo{}) || reflect.DeepEqual(c.n.Description.TLSInfo, swarm.TLSInfo{}) { + return "Unknown" + } + if reflect.DeepEqual(c.n.Description.TLSInfo, c.info.Swarm.Cluster.TLSInfo) { + return "Ready" + } + return "Needs Rotation" +} + +func (c *nodeContext) EngineVersion() string { + return c.n.Description.Engine.EngineVersion +} + +// InspectFormatWrite renders the context for a list of nodes +func InspectFormatWrite(ctx formatter.Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != nodeInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext formatter.SubContext) error) error { + for _, ref := range refs { + nodeI, _, err := getRef(ref) + if err != nil { + return err + } + node, ok := nodeI.(swarm.Node) + if !ok { + return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&nodeInspectContext{Node: node}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&nodeInspectContext{}, render) +} + +type nodeInspectContext struct { + swarm.Node + formatter.SubContext +} + +func (ctx *nodeInspectContext) ID() string { + return ctx.Node.ID +} 
+ +func (ctx *nodeInspectContext) Name() string { + return ctx.Node.Spec.Name +} + +func (ctx *nodeInspectContext) Labels() map[string]string { + return ctx.Node.Spec.Labels +} + +func (ctx *nodeInspectContext) Hostname() string { + return ctx.Node.Description.Hostname +} + +func (ctx *nodeInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Node.CreatedAt) +} + +func (ctx *nodeInspectContext) StatusState() string { + return command.PrettyPrint(ctx.Node.Status.State) +} + +func (ctx *nodeInspectContext) HasStatusMessage() bool { + return ctx.Node.Status.Message != "" +} + +func (ctx *nodeInspectContext) StatusMessage() string { + return command.PrettyPrint(ctx.Node.Status.Message) +} + +func (ctx *nodeInspectContext) SpecAvailability() string { + return command.PrettyPrint(ctx.Node.Spec.Availability) +} + +func (ctx *nodeInspectContext) HasStatusAddr() bool { + return ctx.Node.Status.Addr != "" +} + +func (ctx *nodeInspectContext) StatusAddr() string { + return ctx.Node.Status.Addr +} + +func (ctx *nodeInspectContext) HasManagerStatus() bool { + return ctx.Node.ManagerStatus != nil +} + +func (ctx *nodeInspectContext) ManagerStatusAddr() string { + return ctx.Node.ManagerStatus.Addr +} + +func (ctx *nodeInspectContext) ManagerStatusReachability() string { + return command.PrettyPrint(ctx.Node.ManagerStatus.Reachability) +} + +func (ctx *nodeInspectContext) IsManagerStatusLeader() bool { + return ctx.Node.ManagerStatus.Leader +} + +func (ctx *nodeInspectContext) PlatformOS() string { + return ctx.Node.Description.Platform.OS +} + +func (ctx *nodeInspectContext) PlatformArchitecture() string { + return ctx.Node.Description.Platform.Architecture +} + +func (ctx *nodeInspectContext) ResourceNanoCPUs() int { + if ctx.Node.Description.Resources.NanoCPUs == 0 { + return int(0) + } + return int(ctx.Node.Description.Resources.NanoCPUs) / 1e9 +} + +func (ctx *nodeInspectContext) ResourceMemory() string { + if ctx.Node.Description.Resources.MemoryBytes == 0 { 
+ return "" + } + return units.BytesSize(float64(ctx.Node.Description.Resources.MemoryBytes)) +} + +func (ctx *nodeInspectContext) HasEnginePlugins() bool { + return len(ctx.Node.Description.Engine.Plugins) > 0 +} + +func (ctx *nodeInspectContext) EnginePlugins() map[string]string { + pluginMap := map[string][]string{} + for _, p := range ctx.Node.Description.Engine.Plugins { + pluginMap[p.Type] = append(pluginMap[p.Type], p.Name) + } + + pluginNamesByType := map[string]string{} + for k, v := range pluginMap { + pluginNamesByType[k] = strings.Join(v, ", ") + } + return pluginNamesByType +} + +func (ctx *nodeInspectContext) EngineLabels() map[string]string { + return ctx.Node.Description.Engine.Labels +} + +func (ctx *nodeInspectContext) EngineVersion() string { + return ctx.Node.Description.Engine.EngineVersion +} + +func (ctx *nodeInspectContext) HasTLSInfo() bool { + tlsInfo := ctx.Node.Description.TLSInfo + return !reflect.DeepEqual(tlsInfo, swarm.TLSInfo{}) +} + +func (ctx *nodeInspectContext) TLSInfoTrustRoot() string { + return ctx.Node.Description.TLSInfo.TrustRoot +} + +func (ctx *nodeInspectContext) TLSInfoCertIssuerPublicKey() string { + return base64.StdEncoding.EncodeToString(ctx.Node.Description.TLSInfo.CertIssuerPublicKey) +} + +func (ctx *nodeInspectContext) TLSInfoCertIssuerSubject() string { + return base64.StdEncoding.EncodeToString(ctx.Node.Description.TLSInfo.CertIssuerSubject) +} diff --git a/cli/cli/command/node/formatter_test.go b/cli/cli/command/node/formatter_test.go new file mode 100644 index 00000000..65e13c33 --- /dev/null +++ b/cli/cli/command/node/formatter_test.go @@ -0,0 +1,350 @@ +package node + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) 
+ +func TestNodeContext(t *testing.T) { + nodeID := stringid.GenerateRandomID() + + var ctx nodeContext + cases := []struct { + nodeCtx nodeContext + expValue string + call func() string + }{ + {nodeContext{ + n: swarm.Node{ID: nodeID}, + }, nodeID, ctx.ID}, + {nodeContext{ + n: swarm.Node{Description: swarm.NodeDescription{Hostname: "node_hostname"}}, + }, "node_hostname", ctx.Hostname}, + {nodeContext{ + n: swarm.Node{Status: swarm.NodeStatus{State: swarm.NodeState("foo")}}, + }, "Foo", ctx.Status}, + {nodeContext{ + n: swarm.Node{Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("drain")}}, + }, "Drain", ctx.Availability}, + {nodeContext{ + n: swarm.Node{ManagerStatus: &swarm.ManagerStatus{Leader: true}}, + }, "Leader", ctx.ManagerStatus}, + } + + for _, c := range cases { + ctx = c.nodeCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestNodeContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + clusterInfo swarm.ClusterInfo + }{ + + // Errors + { + context: formatter.Context{Format: "{{InvalidFunction}}"}, + expected: `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: formatter.Context{Format: "{{nil}}"}, + expected: `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + // Table format + { + context: formatter.Context{Format: NewFormat("table", false)}, + expected: `ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION +nodeID1 foobar_baz Foo Drain Leader 18.03.0-ce +nodeID2 foobar_bar Bar Active Reachable 1.2.3 +nodeID3 foobar_boo Boo Active ` + "\n", // (to preserve whitespace) + clusterInfo: swarm.ClusterInfo{TLSInfo: 
swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: formatter.Context{Format: NewFormat("table", true)}, + expected: `nodeID1 +nodeID2 +nodeID3 +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: formatter.Context{Format: NewFormat("table {{.Hostname}}", false)}, + expected: `HOSTNAME +foobar_baz +foobar_bar +foobar_boo +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: formatter.Context{Format: NewFormat("table {{.Hostname}}", true)}, + expected: `HOSTNAME +foobar_baz +foobar_bar +foobar_boo +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: formatter.Context{Format: NewFormat("table {{.ID}}\t{{.Hostname}}\t{{.TLSStatus}}", false)}, + expected: `ID HOSTNAME TLS STATUS +nodeID1 foobar_baz Needs Rotation +nodeID2 foobar_bar Ready +nodeID3 foobar_boo Unknown +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { // no cluster TLS status info, TLS status for all nodes is unknown + context: formatter.Context{Format: NewFormat("table {{.ID}}\t{{.Hostname}}\t{{.TLSStatus}}", false)}, + expected: `ID HOSTNAME TLS STATUS +nodeID1 foobar_baz Unknown +nodeID2 foobar_bar Unknown +nodeID3 foobar_boo Unknown +`, + clusterInfo: swarm.ClusterInfo{}, + }, + // Raw Format + { + context: formatter.Context{Format: NewFormat("raw", false)}, + expected: `node_id: nodeID1 +hostname: foobar_baz +status: Foo +availability: Drain +manager_status: Leader + +node_id: nodeID2 +hostname: foobar_bar +status: Bar +availability: Active +manager_status: Reachable + +node_id: nodeID3 +hostname: foobar_boo +status: Boo +availability: Active +manager_status: ` + "\n\n", // to preserve whitespace + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + { + context: formatter.Context{Format: NewFormat("raw", true)}, + expected: `node_id: nodeID1 +node_id: nodeID2 +node_id: nodeID3 +`, + clusterInfo: 
swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + // Custom Format + { + context: formatter.Context{Format: NewFormat("{{.Hostname}} {{.TLSStatus}}", false)}, + expected: `foobar_baz Needs Rotation +foobar_bar Ready +foobar_boo Unknown +`, + clusterInfo: swarm.ClusterInfo{TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}}, + }, + } + + for _, testcase := range cases { + nodes := []swarm.Node{ + { + ID: "nodeID1", + Description: swarm.NodeDescription{ + Hostname: "foobar_baz", + TLSInfo: swarm.TLSInfo{TrustRoot: "no"}, + Engine: swarm.EngineDescription{EngineVersion: "18.03.0-ce"}, + }, + Status: swarm.NodeStatus{State: swarm.NodeState("foo")}, + Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("drain")}, + ManagerStatus: &swarm.ManagerStatus{Leader: true}, + }, + { + ID: "nodeID2", + Description: swarm.NodeDescription{ + Hostname: "foobar_bar", + TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}, + Engine: swarm.EngineDescription{EngineVersion: "1.2.3"}, + }, + Status: swarm.NodeStatus{State: swarm.NodeState("bar")}, + Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("active")}, + ManagerStatus: &swarm.ManagerStatus{ + Leader: false, + Reachability: swarm.Reachability("Reachable"), + }, + }, + { + ID: "nodeID3", + Description: swarm.NodeDescription{Hostname: "foobar_boo"}, + Status: swarm.NodeStatus{State: swarm.NodeState("boo")}, + Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("active")}, + }, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := FormatWrite(testcase.context, nodes, types.Info{Swarm: swarm.Info{Cluster: &testcase.clusterInfo}}) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestNodeContextWriteJSON(t *testing.T) { + cases := []struct { + expected []map[string]interface{} + info types.Info + }{ + { + expected: []map[string]interface{}{ + {"Availability": "", "Hostname": "foobar_baz", "ID": 
"nodeID1", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "1.2.3"}, + {"Availability": "", "Hostname": "foobar_bar", "ID": "nodeID2", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": ""}, + {"Availability": "", "Hostname": "foobar_boo", "ID": "nodeID3", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "18.03.0-ce"}, + }, + info: types.Info{}, + }, + { + expected: []map[string]interface{}{ + {"Availability": "", "Hostname": "foobar_baz", "ID": "nodeID1", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Ready", "EngineVersion": "1.2.3"}, + {"Availability": "", "Hostname": "foobar_bar", "ID": "nodeID2", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Needs Rotation", "EngineVersion": ""}, + {"Availability": "", "Hostname": "foobar_boo", "ID": "nodeID3", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "18.03.0-ce"}, + }, + info: types.Info{ + Swarm: swarm.Info{ + Cluster: &swarm.ClusterInfo{ + TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}, + RootRotationInProgress: true, + }, + }, + }, + }, + } + + for _, testcase := range cases { + nodes := []swarm.Node{ + {ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz", TLSInfo: swarm.TLSInfo{TrustRoot: "hi"}, Engine: swarm.EngineDescription{EngineVersion: "1.2.3"}}}, + {ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar", TLSInfo: swarm.TLSInfo{TrustRoot: "no"}}}, + {ID: "nodeID3", Description: swarm.NodeDescription{Hostname: "foobar_boo", Engine: swarm.EngineDescription{EngineVersion: "18.03.0-ce"}}}, + } + out := bytes.NewBufferString("") + err := FormatWrite(formatter.Context{Format: "{{json .}}", Output: out}, nodes, testcase.info) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, 
line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(testcase.expected[i], m), msg) + } + } +} + +func TestNodeContextWriteJSONField(t *testing.T) { + nodes := []swarm.Node{ + {ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz"}}, + {ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar"}}, + } + out := bytes.NewBufferString("") + err := FormatWrite(formatter.Context{Format: "{{json .ID}}", Output: out}, nodes, types.Info{}) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(nodes[i].ID, s), msg) + } +} + +func TestNodeInspectWriteContext(t *testing.T) { + node := swarm.Node{ + ID: "nodeID1", + Description: swarm.NodeDescription{ + Hostname: "foobar_baz", + TLSInfo: swarm.TLSInfo{ + TrustRoot: "-----BEGIN CERTIFICATE-----\ndata\n-----END CERTIFICATE-----\n", + CertIssuerPublicKey: []byte("pubKey"), + CertIssuerSubject: []byte("subject"), + }, + Platform: swarm.Platform{ + OS: "linux", + Architecture: "amd64", + }, + Resources: swarm.Resources{ + MemoryBytes: 1, + }, + Engine: swarm.EngineDescription{ + EngineVersion: "0.1.1", + }, + }, + Status: swarm.NodeStatus{ + State: swarm.NodeState("ready"), + Addr: "1.1.1.1", + }, + Spec: swarm.NodeSpec{ + Availability: swarm.NodeAvailability("drain"), + Role: swarm.NodeRole("manager"), + }, + } + out := bytes.NewBufferString("") + context := formatter.Context{ + Format: NewFormat("pretty", false), + Output: out, + } + err := InspectFormatWrite(context, []string{"nodeID1"}, func(string) (interface{}, []byte, error) { + return node, nil, nil + }) + if err != nil { + t.Fatal(err) + } + expected := `ID: nodeID1 +Hostname: foobar_baz +Joined at: 0001-01-01 00:00:00 +0000 utc 
+Status: + State: Ready + Availability: Drain + Address: 1.1.1.1 +Platform: + Operating System: linux + Architecture: amd64 +Resources: + CPUs: 0 + Memory: 1B +Engine Version: 0.1.1 +TLS Info: + TrustRoot: +-----BEGIN CERTIFICATE----- +data +-----END CERTIFICATE----- + + Issuer Subject: c3ViamVjdA== + Issuer Public Key: cHViS2V5 +` + assert.Check(t, is.Equal(expected, out.String())) +} diff --git a/cli/cli/command/node/inspect.go b/cli/cli/command/node/inspect.go new file mode 100644 index 00000000..9c68f43b --- /dev/null +++ b/cli/cli/command/node/inspect.go @@ -0,0 +1,72 @@ +package node + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + nodeIds []string + format string + pretty bool +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] self|NODE [NODE...]", + Short: "Display detailed information on one or more nodes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.nodeIds = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(ref string) (interface{}, []byte, error) { + nodeRef, err := Reference(ctx, client, ref) + if err != nil { + return nil, nil, err + } + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + return node, nil, err + } + f := opts.format + + // check if the user is trying to apply a template to 
the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + nodeCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(f, false), + } + + if err := InspectFormatWrite(nodeCtx, opts.nodeIds, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/cli/cli/command/node/inspect_test.go b/cli/cli/command/node/inspect_test.go new file mode 100644 index 00000000..de343b0f --- /dev/null +++ b/cli/cli/command/node/inspect_test.go @@ -0,0 +1,118 @@ +package node + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestNodeInspectErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + nodeInspectFunc func() (swarm.Node, []byte, error) + infoFunc func() (types.Info, error) + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"self"}, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"self"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error 
inspecting the node") + }, + infoFunc: func() (types.Info, error) { + return types.Info{Swarm: swarm.Info{NodeID: "abc"}}, nil + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"self"}, + flags: map[string]string{ + "pretty": "true", + }, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + } + for _, tc := range testCases { + cmd := newInspectCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + infoFunc: tc.infoFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeInspectPretty(t *testing.T) { + testCases := []struct { + name string + nodeInspectFunc func() (swarm.Node, []byte, error) + }{ + { + name: "simple", + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(NodeLabels(map[string]string{ + "lbl1": "value1", + })), []byte{}, nil + }, + }, + { + name: "manager", + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + }, + { + name: "manager-leader", + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager(Leader())), []byte{}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + }) + cmd := newInspectCommand(cli) + cmd.SetArgs([]string{"nodeID"}) + cmd.Flags().Set("pretty", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("node-inspect-pretty.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/node/list.go b/cli/cli/command/node/list.go new file mode 100644 index 00000000..fdea329d --- /dev/null +++ b/cli/cli/command/node/list.go @@ -0,0 +1,78 @@ +package node + +import ( + "context" + "sort" + + 
"github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List nodes in the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display IDs") + flags.StringVar(&options.format, "format", "", "Pretty-print nodes using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + nodes, err := client.NodeList( + ctx, + types.NodeListOptions{Filters: options.filter.Value()}) + if err != nil { + return err + } + + info := types.Info{} + if len(nodes) > 0 && !options.quiet { + // only non-empty nodes and not quiet, should we call /info api + info, err = client.Info(ctx) + if err != nil { + return err + } + } + + format := options.format + if len(format) == 0 { + format = formatter.TableFormatKey + if len(dockerCli.ConfigFile().NodesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().NodesFormat + } + } + + nodesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(format, options.quiet), + } + sort.Slice(nodes, func(i, j int) bool { + return sortorder.NaturalLess(nodes[i].Description.Hostname, nodes[j].Description.Hostname) + }) + return FormatWrite(nodesCtx, nodes, info) +} diff --git a/cli/cli/command/node/list_test.go 
b/cli/cli/command/node/list_test.go new file mode 100644 index 00000000..5dc11c96 --- /dev/null +++ b/cli/cli/command/node/list_test.go @@ -0,0 +1,141 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" +) + +func TestNodeListErrorOnAPIFailure(t *testing.T) { + testCases := []struct { + nodeListFunc func() ([]swarm.Node, error) + infoFunc func() (types.Info, error) + expectedError string + }{ + { + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{}, errors.Errorf("error listing nodes") + }, + expectedError: "error listing nodes", + }, + { + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + { + ID: "nodeID", + }, + }, nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: tc.nodeListFunc, + infoFunc: tc.infoFunc, + }) + cmd := newListCommand(cli) + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeList(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + *Node(NodeID("nodeID1"), Hostname("node-2-foo"), Manager(Leader()), EngineVersion(".")), + *Node(NodeID("nodeID2"), Hostname("node-10-foo"), Manager(), EngineVersion("18.03.0-ce")), + *Node(NodeID("nodeID3"), Hostname("node-1-foo")), + }, nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID1", + }, 
+ }, nil + }, + }) + + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "node-list-sort.golden") +} + +func TestNodeListQuietShouldOnlyPrintIDs(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + *Node(NodeID("nodeID1")), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("quiet", "true") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(cli.OutBuffer().String(), "nodeID1\n")) +} + +func TestNodeListDefaultFormatFromConfig(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + *Node(NodeID("nodeID1"), Hostname("nodeHostname1"), Manager(Leader())), + *Node(NodeID("nodeID2"), Hostname("nodeHostname2"), Manager()), + *Node(NodeID("nodeID3"), Hostname("nodeHostname3")), + }, nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID1", + }, + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + NodesFormat: "{{.ID}}: {{.Hostname}} {{.Status}}/{{.ManagerStatus}}", + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "node-list-format-from-config.golden") +} + +func TestNodeListFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + nodeListFunc: func() ([]swarm.Node, error) { + return []swarm.Node{ + *Node(NodeID("nodeID1"), Hostname("nodeHostname1"), Manager(Leader())), + *Node(NodeID("nodeID2"), Hostname("nodeHostname2"), Manager()), + }, nil + }, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID1", + }, + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + NodesFormat: "{{.ID}}: {{.Hostname}} {{.Status}}/{{.ManagerStatus}}", + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{.Hostname}}: {{.ManagerStatus}}") + assert.NilError(t, 
cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "node-list-format-flag.golden") +} diff --git a/cli/cli/command/node/opts.go b/cli/cli/command/node/opts.go new file mode 100644 index 00000000..e30e5de9 --- /dev/null +++ b/cli/cli/command/node/opts.go @@ -0,0 +1,23 @@ +package node + +import ( + "github.com/docker/cli/opts" +) + +type nodeOptions struct { + annotations + role string + availability string +} + +type annotations struct { + labels opts.ListOpts +} + +func newNodeOptions() *nodeOptions { + return &nodeOptions{ + annotations: annotations{ + labels: opts.NewListOpts(nil), + }, + } +} diff --git a/cli/cli/command/node/promote.go b/cli/cli/command/node/promote.go new file mode 100644 index 00000000..4612cc13 --- /dev/null +++ b/cli/cli/command/node/promote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" +) + +func newPromoteCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "promote NODE [NODE...]", + Short: "Promote one or more nodes to manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPromote(dockerCli, args) + }, + } +} + +func runPromote(dockerCli command.Cli, nodes []string) error { + promote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleManager { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a manager.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleManager + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, promote, success) +} diff --git a/cli/cli/command/node/promote_test.go b/cli/cli/command/node/promote_test.go new file mode 100644 index 00000000..c6b53423 --- /dev/null +++ 
b/cli/cli/command/node/promote_test.go @@ -0,0 +1,84 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" +) + +func TestNodePromoteErrors(t *testing.T) { + testCases := []struct { + args []string + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"nodeID"}, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + return errors.Errorf("error updating the node") + }, + expectedError: "error updating the node", + }, + } + for _, tc := range testCases { + cmd := newPromoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + nodeUpdateFunc: tc.nodeUpdateFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodePromoteNoChange(t *testing.T) { + cmd := newPromoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleManager { + return errors.Errorf("expected role manager, got %s", node.Role) + } + return nil + }, + })) + cmd.SetArgs([]string{"nodeID"}) + assert.NilError(t, cmd.Execute()) +} + +func TestNodePromoteMultipleNode(t *testing.T) { + cmd := 
newPromoteCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleManager { + return errors.Errorf("expected role manager, got %s", node.Role) + } + return nil + }, + })) + cmd.SetArgs([]string{"nodeID1", "nodeID2"}) + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/node/ps.go b/cli/cli/command/node/ps.go new file mode 100644 index 00000000..2450e6af --- /dev/null +++ b/cli/cli/command/node/ps.go @@ -0,0 +1,104 @@ +package node + +import ( + "context" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/cli/command/task" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type psOptions struct { + nodeIDs []string + noResolve bool + noTrunc bool + quiet bool + format string + filter opts.FilterOpt +} + +func newPsCommand(dockerCli command.Cli) *cobra.Command { + options := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] [NODE...]", + Short: "List tasks running on one or more nodes, defaults to current node", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + options.nodeIDs = []string{"self"} + + if len(args) != 0 { + options.nodeIDs = args + } + + return runPs(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&options.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + flags.StringVar(&options.format, "format", "", "Pretty-print tasks using a Go template") + 
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display task IDs") + + return cmd +} + +func runPs(dockerCli command.Cli, options psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var ( + errs []string + tasks []swarm.Task + ) + + for _, nodeID := range options.nodeIDs { + nodeRef, err := Reference(ctx, client, nodeID) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + filter := options.filter.Value() + filter.Add("node", node.ID) + + nodeTasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + tasks = append(tasks, nodeTasks...) + } + + format := options.format + if len(format) == 0 { + format = task.DefaultFormat(dockerCli.ConfigFile(), options.quiet) + } + + if len(errs) == 0 || len(tasks) != 0 { + if err := task.Print(ctx, dockerCli, tasks, idresolver.New(client, options.noResolve), !options.noTrunc, options.quiet, format); err != nil { + errs = append(errs, err.Error()) + } + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/cli/cli/command/node/ps_test.go b/cli/cli/command/node/ps_test.go new file mode 100644 index 00000000..74a1779b --- /dev/null +++ b/cli/cli/command/node/ps_test.go @@ -0,0 +1,151 @@ +package node + +import ( + "context" + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestNodePsErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + infoFunc func() (types.Info, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + taskInspectFunc func(taskID string) (swarm.Task, []byte, error) + expectedError string + }{ + { + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"nodeID"}, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{}, errors.Errorf("error returning the task list") + }, + expectedError: "error returning the task list", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + infoFunc: tc.infoFunc, + nodeInspectFunc: tc.nodeInspectFunc, + taskInspectFunc: tc.taskInspectFunc, + taskListFunc: tc.taskListFunc, + }) + cmd := newPsCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodePs(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + infoFunc func() (types.Info, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + taskInspectFunc func(taskID string) (swarm.Task, []byte, error) + serviceInspectFunc func(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) + 
}{ + { + name: "simple", + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{ + *Task(WithStatus(Timestamp(time.Now().Add(-2*time.Hour)), PortStatus([]swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 80, + Protocol: "tcp", + }, + }))), + }, nil + }, + serviceInspectFunc: func(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return swarm.Service{ + ID: serviceID, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: serviceID, + }, + }, + }, []byte{}, nil + }, + }, + { + name: "with-errors", + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{ + *Task(TaskID("taskID1"), TaskServiceID("failure"), + WithStatus(Timestamp(time.Now().Add(-2*time.Hour)), StatusErr("a task error"))), + *Task(TaskID("taskID2"), TaskServiceID("failure"), + WithStatus(Timestamp(time.Now().Add(-3*time.Hour)), StatusErr("a task error"))), + *Task(TaskID("taskID3"), TaskServiceID("failure"), + WithStatus(Timestamp(time.Now().Add(-4*time.Hour)), StatusErr("a task error"))), + }, nil + }, + serviceInspectFunc: func(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return swarm.Service{ + ID: serviceID, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: serviceID, + }, + }, + }, []byte{}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + infoFunc: tc.infoFunc, + nodeInspectFunc: tc.nodeInspectFunc, + taskInspectFunc: tc.taskInspectFunc, + taskListFunc: tc.taskListFunc, + serviceInspectFunc: tc.serviceInspectFunc, + }) + cmd := newPsCommand(cli) + cmd.SetArgs(tc.args) + for 
key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("node-ps.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/node/remove.go b/cli/cli/command/node/remove.go new file mode 100644 index 00000000..65e3cdc3 --- /dev/null +++ b/cli/cli/command/node/remove.go @@ -0,0 +1,56 @@ +package node + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + opts := removeOptions{} + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] NODE [NODE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more nodes from the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force remove a node from the swarm") + return cmd +} + +func runRemove(dockerCli command.Cli, args []string, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, nodeID := range args { + err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID) + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/cli/cli/command/node/remove_test.go b/cli/cli/command/node/remove_test.go new file mode 100644 index 00000000..8ae01c7b --- /dev/null +++ b/cli/cli/command/node/remove_test.go @@ -0,0 +1,44 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + 
"gotest.tools/assert" +) + +func TestNodeRemoveErrors(t *testing.T) { + testCases := []struct { + args []string + nodeRemoveFunc func() error + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"nodeID"}, + nodeRemoveFunc: func() error { + return errors.Errorf("error removing the node") + }, + expectedError: "error removing the node", + }, + } + for _, tc := range testCases { + cmd := newRemoveCommand( + test.NewFakeCli(&fakeClient{ + nodeRemoveFunc: tc.nodeRemoveFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeRemoveMultiple(t *testing.T) { + cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{})) + cmd.SetArgs([]string{"nodeID1", "nodeID2"}) + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden b/cli/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden new file mode 100644 index 00000000..5cd95c5b --- /dev/null +++ b/cli/cli/command/node/testdata/node-inspect-pretty.manager-leader.golden @@ -0,0 +1,24 @@ +ID: nodeID +Name: defaultNodeName +Hostname: defaultNodeHostname +Joined at: 2009-11-10 23:00:00 +0000 utc +Status: + State: Ready + Availability: Active + Address: 127.0.0.1 +Manager Status: + Address: 127.0.0.1 + Raft Status: Reachable + Leader: Yes +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 0 + Memory: 20MiB +Plugins: + Network: bridge, overlay + Volume: local +Engine Version: 1.13.0 +Engine Labels: + - engine=label diff --git a/cli/cli/command/node/testdata/node-inspect-pretty.manager.golden b/cli/cli/command/node/testdata/node-inspect-pretty.manager.golden new file mode 100644 index 00000000..a6371829 --- /dev/null +++ b/cli/cli/command/node/testdata/node-inspect-pretty.manager.golden @@ -0,0 +1,24 @@ +ID: nodeID +Name: defaultNodeName +Hostname: defaultNodeHostname +Joined at: 
2009-11-10 23:00:00 +0000 utc +Status: + State: Ready + Availability: Active + Address: 127.0.0.1 +Manager Status: + Address: 127.0.0.1 + Raft Status: Reachable + Leader: No +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 0 + Memory: 20MiB +Plugins: + Network: bridge, overlay + Volume: local +Engine Version: 1.13.0 +Engine Labels: + - engine=label diff --git a/cli/cli/command/node/testdata/node-inspect-pretty.simple.golden b/cli/cli/command/node/testdata/node-inspect-pretty.simple.golden new file mode 100644 index 00000000..8aaf9089 --- /dev/null +++ b/cli/cli/command/node/testdata/node-inspect-pretty.simple.golden @@ -0,0 +1,22 @@ +ID: nodeID +Name: defaultNodeName +Labels: + - lbl1=value1 +Hostname: defaultNodeHostname +Joined at: 2009-11-10 23:00:00 +0000 utc +Status: + State: Ready + Availability: Active + Address: 127.0.0.1 +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 0 + Memory: 20MiB +Plugins: + Network: bridge, overlay + Volume: local +Engine Version: 1.13.0 +Engine Labels: + - engine=label diff --git a/cli/cli/command/node/testdata/node-list-format-flag.golden b/cli/cli/command/node/testdata/node-list-format-flag.golden new file mode 100644 index 00000000..c898df13 --- /dev/null +++ b/cli/cli/command/node/testdata/node-list-format-flag.golden @@ -0,0 +1,2 @@ +nodeHostname1: Leader +nodeHostname2: Reachable diff --git a/cli/cli/command/node/testdata/node-list-format-from-config.golden b/cli/cli/command/node/testdata/node-list-format-from-config.golden new file mode 100644 index 00000000..91beb4a2 --- /dev/null +++ b/cli/cli/command/node/testdata/node-list-format-from-config.golden @@ -0,0 +1,3 @@ +nodeID1: nodeHostname1 Ready/Leader +nodeID2: nodeHostname2 Ready/Reachable +nodeID3: nodeHostname3 Ready/ diff --git a/cli/cli/command/node/testdata/node-list-sort.golden b/cli/cli/command/node/testdata/node-list-sort.golden new file mode 100644 index 00000000..ffc09c92 --- /dev/null +++ 
b/cli/cli/command/node/testdata/node-list-sort.golden @@ -0,0 +1,4 @@ +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION +nodeID3 node-1-foo Ready Active 1.13.0 +nodeID1 * node-2-foo Ready Active Leader . +nodeID2 node-10-foo Ready Active Reachable 18.03.0-ce diff --git a/cli/cli/command/node/testdata/node-ps.simple.golden b/cli/cli/command/node/testdata/node-ps.simple.golden new file mode 100644 index 00000000..b1818b96 --- /dev/null +++ b/cli/cli/command/node/testdata/node-ps.simple.golden @@ -0,0 +1,2 @@ +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +taskID rl02d5gwz6chzu7il5fhtb8be.1 myimage:mytag defaultNodeName Ready Ready 2 hours ago *:80->80/tcp diff --git a/cli/cli/command/node/testdata/node-ps.with-errors.golden b/cli/cli/command/node/testdata/node-ps.with-errors.golden new file mode 100644 index 00000000..99e34931 --- /dev/null +++ b/cli/cli/command/node/testdata/node-ps.with-errors.golden @@ -0,0 +1,4 @@ +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +taskID1 failure.1 myimage:mytag defaultNodeName Ready Ready 2 hours ago "a task error" +taskID2 \_ failure.1 myimage:mytag defaultNodeName Ready Ready 3 hours ago "a task error" +taskID3 \_ failure.1 myimage:mytag defaultNodeName Ready Ready 4 hours ago "a task error" diff --git a/cli/cli/command/node/update.go b/cli/cli/command/node/update.go new file mode 100644 index 00000000..dbae49c6 --- /dev/null +++ b/cli/cli/command/node/update.go @@ -0,0 +1,120 @@ +package node + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +var ( + errNoRoleChange = errors.New("role was already set to the requested value") +) + +func newUpdateCommand(dockerCli command.Cli) *cobra.Command { + options := newNodeOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] NODE", + 
Short: "Update a node", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), args[0]) + }, + } + + flags := cmd.Flags() + flags.StringVar(&options.role, flagRole, "", `Role of the node ("worker"|"manager")`) + flags.StringVar(&options.availability, flagAvailability, "", `Availability of the node ("active"|"pause"|"drain")`) + flags.Var(&options.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)") + labelKeys := opts.NewListOpts(nil) + flags.Var(&labelKeys, flagLabelRemove, "Remove a node label if exists") + return cmd +} + +func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, nodeID string) error { + success := func(_ string) { + fmt.Fprintln(dockerCli.Out(), nodeID) + } + return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) +} + +func updateNodes(dockerCli command.Cli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error { + client := dockerCli.Client() + ctx := context.Background() + + for _, nodeID := range nodes { + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + err = mergeNode(&node) + if err != nil { + if err == errNoRoleChange { + continue + } + return err + } + err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) + if err != nil { + return err + } + success(nodeID) + } + return nil +} + +func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error { + return func(node *swarm.Node) error { + spec := &node.Spec + + if flags.Changed(flagRole) { + str, err := flags.GetString(flagRole) + if err != nil { + return err + } + spec.Role = swarm.NodeRole(str) + } + if flags.Changed(flagAvailability) { + str, err := flags.GetString(flagAvailability) + if err != nil { + return err + } + spec.Availability = swarm.NodeAvailability(str) + } + if spec.Annotations.Labels == nil { + spec.Annotations.Labels = make(map[string]string) + } + if 
flags.Changed(flagLabelAdd) { + labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for k, v := range opts.ConvertKVStringsToMap(labels) { + spec.Annotations.Labels[k] = v + } + } + if flags.Changed(flagLabelRemove) { + keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, k := range keys { + // if a key doesn't exist, fail the command explicitly + if _, exists := spec.Annotations.Labels[k]; !exists { + return errors.Errorf("key %s doesn't exist in node's labels", k) + } + delete(spec.Annotations.Labels, k) + } + } + return nil + } +} + +const ( + flagRole = "role" + flagAvailability = "availability" + flagLabelAdd = "label-add" + flagLabelRemove = "label-rm" +) diff --git a/cli/cli/command/node/update_test.go b/cli/cli/command/node/update_test.go new file mode 100644 index 00000000..8b6ae807 --- /dev/null +++ b/cli/cli/command/node/update_test.go @@ -0,0 +1,169 @@ +package node + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" +) + +func TestNodeUpdateErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + expectedError string + }{ + { + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"node1", "node2"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + args: []string{"nodeID"}, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + return errors.Errorf("error updating the node") + }, + expectedError: "error updating the node", + }, + { + args: []string{"nodeID"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(NodeLabels(map[string]string{ + "key": "value", + })), []byte{}, nil + }, + flags: map[string]string{ + "label-rm": "notpresent", + }, + expectedError: "key notpresent doesn't exist in node's labels", + }, + } + for _, tc := range testCases { + cmd := newUpdateCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + nodeUpdateFunc: tc.nodeUpdateFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestNodeUpdate(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + nodeInspectFunc func() (swarm.Node, []byte, error) + nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error + }{ + { + args: []string{"nodeID"}, + flags: map[string]string{ + "role": "manager", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { 
+ return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Role != swarm.NodeRoleManager { + return errors.Errorf("expected role manager, got %s", node.Role) + } + return nil + }, + }, + { + args: []string{"nodeID"}, + flags: map[string]string{ + "availability": "drain", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if node.Availability != swarm.NodeAvailabilityDrain { + return errors.Errorf("expected drain availability, got %s", node.Availability) + } + return nil + }, + }, + { + args: []string{"nodeID"}, + flags: map[string]string{ + "label-add": "lbl", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if _, present := node.Annotations.Labels["lbl"]; !present { + return errors.Errorf("expected 'lbl' label, got %v", node.Annotations.Labels) + } + return nil + }, + }, + { + args: []string{"nodeID"}, + flags: map[string]string{ + "label-add": "key=value", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if value, present := node.Annotations.Labels["key"]; !present || value != "value" { + return errors.Errorf("expected 'key' label to be 'value', got %v", node.Annotations.Labels) + } + return nil + }, + }, + { + args: []string{"nodeID"}, + flags: map[string]string{ + "label-rm": "key", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(NodeLabels(map[string]string{ + "key": "value", + })), []byte{}, nil + }, + nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { + if len(node.Annotations.Labels) > 0 { + return 
errors.Errorf("expected no labels, got %v", node.Annotations.Labels) + } + return nil + }, + }, + } + for _, tc := range testCases { + cmd := newUpdateCommand( + test.NewFakeCli(&fakeClient{ + nodeInspectFunc: tc.nodeInspectFunc, + nodeUpdateFunc: tc.nodeUpdateFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + } +} diff --git a/cli/cli/command/orchestrator.go b/cli/cli/command/orchestrator.go new file mode 100644 index 00000000..b051c4a2 --- /dev/null +++ b/cli/cli/command/orchestrator.go @@ -0,0 +1,84 @@ +package command + +import ( + "fmt" + "io" + "os" +) + +// Orchestrator type acts as an enum describing supported orchestrators. +type Orchestrator string + +const ( + // OrchestratorKubernetes orchestrator + OrchestratorKubernetes = Orchestrator("kubernetes") + // OrchestratorSwarm orchestrator + OrchestratorSwarm = Orchestrator("swarm") + // OrchestratorAll orchestrator + OrchestratorAll = Orchestrator("all") + orchestratorUnset = Orchestrator("") + + defaultOrchestrator = OrchestratorSwarm + envVarDockerStackOrchestrator = "DOCKER_STACK_ORCHESTRATOR" + envVarDockerOrchestrator = "DOCKER_ORCHESTRATOR" +) + +// HasKubernetes returns true if defined orchestrator has Kubernetes capabilities. +func (o Orchestrator) HasKubernetes() bool { + return o == OrchestratorKubernetes || o == OrchestratorAll +} + +// HasSwarm returns true if defined orchestrator has Swarm capabilities. +func (o Orchestrator) HasSwarm() bool { + return o == OrchestratorSwarm || o == OrchestratorAll +} + +// HasAll returns true if defined orchestrator has both Swarm and Kubernetes capabilities. 
+func (o Orchestrator) HasAll() bool { + return o == OrchestratorAll +} + +func normalize(value string) (Orchestrator, error) { + switch value { + case "kubernetes": + return OrchestratorKubernetes, nil + case "swarm": + return OrchestratorSwarm, nil + case "", "unset": // unset is the old value for orchestratorUnset. Keep accepting this for backward compat + return orchestratorUnset, nil + case "all": + return OrchestratorAll, nil + default: + return defaultOrchestrator, fmt.Errorf("specified orchestrator %q is invalid, please use either kubernetes, swarm or all", value) + } +} + +// NormalizeOrchestrator parses an orchestrator value and checks if it is valid +func NormalizeOrchestrator(value string) (Orchestrator, error) { + return normalize(value) +} + +// GetStackOrchestrator checks DOCKER_STACK_ORCHESTRATOR environment variable and configuration file +// orchestrator value and returns user defined Orchestrator. +func GetStackOrchestrator(flagValue, contextValue, globalDefault string, stderr io.Writer) (Orchestrator, error) { + // Check flag + if o, err := normalize(flagValue); o != orchestratorUnset { + return o, err + } + // Check environment variable + env := os.Getenv(envVarDockerStackOrchestrator) + if env == "" && os.Getenv(envVarDockerOrchestrator) != "" { + fmt.Fprintf(stderr, "WARNING: experimental environment variable %s is set. 
Please use %s instead\n", envVarDockerOrchestrator, envVarDockerStackOrchestrator) + } + if o, err := normalize(env); o != orchestratorUnset { + return o, err + } + if o, err := normalize(contextValue); o != orchestratorUnset { + return o, err + } + if o, err := normalize(globalDefault); o != orchestratorUnset { + return o, err + } + // Nothing set, use default orchestrator + return defaultOrchestrator, nil +} diff --git a/cli/cli/command/orchestrator_test.go b/cli/cli/command/orchestrator_test.go new file mode 100644 index 00000000..141c27e4 --- /dev/null +++ b/cli/cli/command/orchestrator_test.go @@ -0,0 +1,101 @@ +package command + +import ( + "io/ioutil" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/env" +) + +func TestOrchestratorSwitch(t *testing.T) { + var testcases = []struct { + doc string + globalOrchestrator string + envOrchestrator string + flagOrchestrator string + contextOrchestrator string + expectedOrchestrator string + expectedKubernetes bool + expectedSwarm bool + }{ + { + doc: "default", + expectedOrchestrator: "swarm", + expectedKubernetes: false, + expectedSwarm: true, + }, + { + doc: "kubernetesConfigFile", + globalOrchestrator: "kubernetes", + expectedOrchestrator: "kubernetes", + expectedKubernetes: true, + expectedSwarm: false, + }, + { + doc: "kubernetesEnv", + envOrchestrator: "kubernetes", + expectedOrchestrator: "kubernetes", + expectedKubernetes: true, + expectedSwarm: false, + }, + { + doc: "kubernetesFlag", + flagOrchestrator: "kubernetes", + expectedOrchestrator: "kubernetes", + expectedKubernetes: true, + expectedSwarm: false, + }, + { + doc: "allOrchestratorFlag", + flagOrchestrator: "all", + expectedOrchestrator: "all", + expectedKubernetes: true, + expectedSwarm: true, + }, + { + doc: "kubernetesContext", + contextOrchestrator: "kubernetes", + expectedOrchestrator: "kubernetes", + expectedKubernetes: true, + }, + { + doc: "contextOverridesConfigFile", + globalOrchestrator: "kubernetes", + 
contextOrchestrator: "swarm", + expectedOrchestrator: "swarm", + expectedKubernetes: false, + expectedSwarm: true, + }, + { + doc: "envOverridesConfigFile", + globalOrchestrator: "kubernetes", + envOrchestrator: "swarm", + expectedOrchestrator: "swarm", + expectedKubernetes: false, + expectedSwarm: true, + }, + { + doc: "flagOverridesEnv", + envOrchestrator: "kubernetes", + flagOrchestrator: "swarm", + expectedOrchestrator: "swarm", + expectedKubernetes: false, + expectedSwarm: true, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.doc, func(t *testing.T) { + if testcase.envOrchestrator != "" { + defer env.Patch(t, "DOCKER_STACK_ORCHESTRATOR", testcase.envOrchestrator)() + } + orchestrator, err := GetStackOrchestrator(testcase.flagOrchestrator, testcase.contextOrchestrator, testcase.globalOrchestrator, ioutil.Discard) + assert.NilError(t, err) + assert.Check(t, is.Equal(testcase.expectedKubernetes, orchestrator.HasKubernetes())) + assert.Check(t, is.Equal(testcase.expectedSwarm, orchestrator.HasSwarm())) + assert.Check(t, is.Equal(testcase.expectedOrchestrator, string(orchestrator))) + }) + } +} diff --git a/cli/cli/command/plugin/client_test.go b/cli/cli/command/plugin/client_test.go new file mode 100644 index 00000000..f52cefec --- /dev/null +++ b/cli/cli/command/plugin/client_test.go @@ -0,0 +1,76 @@ +package plugin + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + pluginCreateFunc func(createContext io.Reader, createOptions types.PluginCreateOptions) error + pluginDisableFunc func(name string, disableOptions types.PluginDisableOptions) error + pluginEnableFunc func(name string, options types.PluginEnableOptions) error + pluginRemoveFunc func(name string, options types.PluginRemoveOptions) error + pluginInstallFunc func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) + 
pluginListFunc func(filter filters.Args) (types.PluginsListResponse, error) + pluginInspectFunc func(name string) (*types.Plugin, []byte, error) +} + +func (c *fakeClient) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + if c.pluginCreateFunc != nil { + return c.pluginCreateFunc(createContext, createOptions) + } + return nil +} + +func (c *fakeClient) PluginEnable(ctx context.Context, name string, enableOptions types.PluginEnableOptions) error { + if c.pluginEnableFunc != nil { + return c.pluginEnableFunc(name, enableOptions) + } + return nil +} + +func (c *fakeClient) PluginDisable(context context.Context, name string, disableOptions types.PluginDisableOptions) error { + if c.pluginDisableFunc != nil { + return c.pluginDisableFunc(name, disableOptions) + } + return nil +} + +func (c *fakeClient) PluginRemove(context context.Context, name string, removeOptions types.PluginRemoveOptions) error { + if c.pluginRemoveFunc != nil { + return c.pluginRemoveFunc(name, removeOptions) + } + return nil +} + +func (c *fakeClient) PluginInstall(context context.Context, name string, installOptions types.PluginInstallOptions) (io.ReadCloser, error) { + if c.pluginInstallFunc != nil { + return c.pluginInstallFunc(name, installOptions) + } + return nil, nil +} + +func (c *fakeClient) PluginList(context context.Context, filter filters.Args) (types.PluginsListResponse, error) { + if c.pluginListFunc != nil { + return c.pluginListFunc(filter) + } + + return types.PluginsListResponse{}, nil +} + +func (c *fakeClient) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if c.pluginInspectFunc != nil { + return c.pluginInspectFunc(name) + } + + return nil, nil, nil +} + +func (c *fakeClient) Info(ctx context.Context) (types.Info, error) { + return types.Info{}, nil +} diff --git a/cli/cli/command/plugin/cmd.go b/cli/cli/command/plugin/cmd.go new file mode 100644 index 00000000..2e79ab1d --- 
/dev/null +++ b/cli/cli/command/plugin/cmd.go @@ -0,0 +1,32 @@ +package plugin + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewPluginCommand returns a cobra command for `plugin` subcommands +func NewPluginCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "plugin", + Short: "Manage plugins", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{"version": "1.25"}, + } + + cmd.AddCommand( + newDisableCommand(dockerCli), + newEnableCommand(dockerCli), + newInspectCommand(dockerCli), + newInstallCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newSetCommand(dockerCli), + newPushCommand(dockerCli), + newCreateCommand(dockerCli), + newUpgradeCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/plugin/create.go b/cli/cli/command/plugin/create.go new file mode 100644 index 00000000..d6550eda --- /dev/null +++ b/cli/cli/command/plugin/create.go @@ -0,0 +1,128 @@ +package plugin + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +// validateTag checks if the given repoName can be resolved. 
+func validateTag(rawRepo string) error { + _, err := reference.ParseNormalizedNamed(rawRepo) + + return err +} + +// validateConfig ensures that a valid config.json is available in the given path +func validateConfig(path string) error { + dt, err := os.Open(filepath.Join(path, "config.json")) + if err != nil { + return err + } + + m := types.PluginConfig{} + err = json.NewDecoder(dt).Decode(&m) + dt.Close() + + return err +} + +// validateContextDir validates the given dir and returns abs path on success. +func validateContextDir(contextDir string) (string, error) { + absContextDir, err := filepath.Abs(contextDir) + if err != nil { + return "", err + } + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", err + } + + if !stat.IsDir() { + return "", errors.Errorf("context must be a directory") + } + + return absContextDir, nil +} + +type pluginCreateOptions struct { + repoName string + context string + compress bool +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + options := pluginCreateOptions{} + + cmd := &cobra.Command{ + Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR", + Short: "Create a plugin from a rootfs and configuration. 
Plugin data directory must contain config.json and rootfs directory.", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.repoName = args[0] + options.context = args[1] + return runCreate(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&options.compress, "compress", false, "Compress the context using gzip") + + return cmd +} + +func runCreate(dockerCli command.Cli, options pluginCreateOptions) error { + var ( + createCtx io.ReadCloser + err error + ) + + if err := validateTag(options.repoName); err != nil { + return err + } + + absContextDir, err := validateContextDir(options.context) + if err != nil { + return err + } + + if err := validateConfig(options.context); err != nil { + return err + } + + compression := archive.Uncompressed + if options.compress { + logrus.Debugf("compression enabled") + compression = archive.Gzip + } + + createCtx, err = archive.TarWithOptions(absContextDir, &archive.TarOptions{ + Compression: compression, + }) + + if err != nil { + return err + } + + ctx := context.Background() + + createOptions := types.PluginCreateOptions{RepoName: options.repoName} + if err = dockerCli.Client().PluginCreate(ctx, createCtx, createOptions); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), options.repoName) + return nil +} diff --git a/cli/cli/command/plugin/create_test.go b/cli/cli/command/plugin/create_test.go new file mode 100644 index 00000000..bef002c0 --- /dev/null +++ b/cli/cli/command/plugin/create_test.go @@ -0,0 +1,123 @@ +package plugin + +import ( + "fmt" + "io" + "io/ioutil" + "runtime" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +func TestCreateErrors(t *testing.T) { + noSuchFile := "no such file or directory" + if runtime.GOOS == "windows" { + noSuchFile = "The system cannot find the file specified." 
+ } + testCases := []struct { + args []string + expectedError string + }{ + { + args: []string{}, + expectedError: "requires at least 2 arguments", + }, + { + args: []string{"INVALID_TAG", "context-dir"}, + expectedError: "invalid", + }, + { + args: []string{"plugin-foo", "nonexistent_context_dir"}, + expectedError: noSuchFile, + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cmd := newCreateCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestCreateErrorOnFileAsContextDir(t *testing.T) { + tmpFile := fs.NewFile(t, "file-as-context-dir") + defer tmpFile.Remove() + + cli := test.NewFakeCli(&fakeClient{}) + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpFile.Path()}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "context must be a directory") +} + +func TestCreateErrorOnContextDirWithoutConfig(t *testing.T) { + tmpDir := fs.NewDir(t, "plugin-create-test") + defer tmpDir.Remove() + + cli := test.NewFakeCli(&fakeClient{}) + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpDir.Path()}) + cmd.SetOutput(ioutil.Discard) + + expectedErr := "config.json: no such file or directory" + if runtime.GOOS == "windows" { + expectedErr = "config.json: The system cannot find the file specified." 
+ } + assert.ErrorContains(t, cmd.Execute(), expectedErr) +} + +func TestCreateErrorOnInvalidConfig(t *testing.T) { + tmpDir := fs.NewDir(t, "plugin-create-test", + fs.WithDir("rootfs"), + fs.WithFile("config.json", "invalid-config-contents")) + defer tmpDir.Remove() + + cli := test.NewFakeCli(&fakeClient{}) + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpDir.Path()}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "invalid") +} + +func TestCreateErrorFromDaemon(t *testing.T) { + tmpDir := fs.NewDir(t, "plugin-create-test", + fs.WithDir("rootfs"), + fs.WithFile("config.json", `{ "Name": "plugin-foo" }`)) + defer tmpDir.Remove() + + cli := test.NewFakeCli(&fakeClient{ + pluginCreateFunc: func(createContext io.Reader, createOptions types.PluginCreateOptions) error { + return fmt.Errorf("Error creating plugin") + }, + }) + + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpDir.Path()}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "Error creating plugin") +} + +func TestCreatePlugin(t *testing.T) { + tmpDir := fs.NewDir(t, "plugin-create-test", + fs.WithDir("rootfs"), + fs.WithFile("config.json", `{ "Name": "plugin-foo" }`)) + defer tmpDir.Remove() + + cli := test.NewFakeCli(&fakeClient{ + pluginCreateFunc: func(createContext io.Reader, createOptions types.PluginCreateOptions) error { + return nil + }, + }) + + cmd := newCreateCommand(cli) + cmd.SetArgs([]string{"plugin-foo", tmpDir.Path()}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} diff --git a/cli/cli/command/plugin/disable.go b/cli/cli/command/plugin/disable.go new file mode 100644 index 00000000..014d86b2 --- /dev/null +++ b/cli/cli/command/plugin/disable.go @@ -0,0 +1,36 @@ +package plugin + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" 
+) + +func newDisableCommand(dockerCli command.Cli) *cobra.Command { + var force bool + + cmd := &cobra.Command{ + Use: "disable [OPTIONS] PLUGIN", + Short: "Disable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDisable(dockerCli, args[0], force) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Force the disable of an active plugin") + return cmd +} + +func runDisable(dockerCli command.Cli, name string, force bool) error { + if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/cli/cli/command/plugin/disable_test.go b/cli/cli/command/plugin/disable_test.go new file mode 100644 index 00000000..c9292965 --- /dev/null +++ b/cli/cli/command/plugin/disable_test.go @@ -0,0 +1,58 @@ +package plugin + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPluginDisableErrors(t *testing.T) { + testCases := []struct { + args []string + expectedError string + pluginDisableFunc func(name string, disableOptions types.PluginDisableOptions) error + }{ + { + args: []string{}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"too", "many", "arguments"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"plugin-foo"}, + expectedError: "Error disabling plugin", + pluginDisableFunc: func(name string, disableOptions types.PluginDisableOptions) error { + return fmt.Errorf("Error disabling plugin") + }, + }, + } + + for _, tc := range testCases { + cmd := newDisableCommand( + test.NewFakeCli(&fakeClient{ + pluginDisableFunc: tc.pluginDisableFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), 
tc.expectedError) + } +} + +func TestPluginDisable(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + pluginDisableFunc: func(name string, disableOptions types.PluginDisableOptions) error { + return nil + }, + }) + cmd := newDisableCommand(cli) + cmd.SetArgs([]string{"plugin-foo"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} diff --git a/cli/cli/command/plugin/enable.go b/cli/cli/command/plugin/enable.go new file mode 100644 index 00000000..19df1e7b --- /dev/null +++ b/cli/cli/command/plugin/enable.go @@ -0,0 +1,48 @@ +package plugin + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type enableOpts struct { + timeout int + name string +} + +func newEnableCommand(dockerCli command.Cli) *cobra.Command { + var opts enableOpts + + cmd := &cobra.Command{ + Use: "enable [OPTIONS] PLUGIN", + Short: "Enable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runEnable(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVar(&opts.timeout, "timeout", 30, "HTTP client timeout (in seconds)") + return cmd +} + +func runEnable(dockerCli command.Cli, opts *enableOpts) error { + name := opts.name + if opts.timeout < 0 { + return errors.Errorf("negative timeout %d is invalid", opts.timeout) + } + + if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/cli/cli/command/plugin/enable_test.go b/cli/cli/command/plugin/enable_test.go new file mode 100644 index 00000000..933ff5de --- /dev/null +++ b/cli/cli/command/plugin/enable_test.go @@ -0,0 +1,70 @@ +package plugin + +import ( + "fmt" + "io/ioutil" + "testing" + + 
"github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPluginEnableErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + pluginEnableFunc func(name string, options types.PluginEnableOptions) error + expectedError string + }{ + { + args: []string{}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"too-many", "arguments"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"plugin-foo"}, + pluginEnableFunc: func(name string, options types.PluginEnableOptions) error { + return fmt.Errorf("failed to enable plugin") + }, + expectedError: "failed to enable plugin", + }, + { + args: []string{"plugin-foo"}, + flags: map[string]string{ + "timeout": "-1", + }, + expectedError: "negative timeout -1 is invalid", + }, + } + + for _, tc := range testCases { + cmd := newEnableCommand( + test.NewFakeCli(&fakeClient{ + pluginEnableFunc: tc.pluginEnableFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestPluginEnable(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + pluginEnableFunc: func(name string, options types.PluginEnableOptions) error { + return nil + }, + }) + + cmd := newEnableCommand(cli) + cmd.SetArgs([]string{"plugin-foo"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} diff --git a/cli/cli/command/plugin/formatter.go b/cli/cli/command/plugin/formatter.go new file mode 100644 index 00000000..438bf951 --- /dev/null +++ b/cli/cli/command/plugin/formatter.go @@ -0,0 +1,94 @@ +package plugin + +import ( + "strings" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +const ( + 
defaultPluginTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Description}}\t{{.Enabled}}" + + enabledHeader = "ENABLED" + pluginIDHeader = "ID" +) + +// NewFormat returns a Format for rendering using a plugin Context +func NewFormat(source string, quiet bool) formatter.Format { + switch source { + case formatter.TableFormatKey: + if quiet { + return formatter.DefaultQuietFormat + } + return defaultPluginTableFormat + case formatter.RawFormatKey: + if quiet { + return `plugin_id: {{.ID}}` + } + return `plugin_id: {{.ID}}\nname: {{.Name}}\ndescription: {{.Description}}\nenabled: {{.Enabled}}\n` + } + return formatter.Format(source) +} + +// FormatWrite writes the context +func FormatWrite(ctx formatter.Context, plugins []*types.Plugin) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, plugin := range plugins { + pluginCtx := &pluginContext{trunc: ctx.Trunc, p: *plugin} + if err := format(pluginCtx); err != nil { + return err + } + } + return nil + } + pluginCtx := pluginContext{} + pluginCtx.Header = formatter.SubHeaderContext{ + "ID": pluginIDHeader, + "Name": formatter.NameHeader, + "Description": formatter.DescriptionHeader, + "Enabled": enabledHeader, + "PluginReference": formatter.ImageHeader, + } + return ctx.Write(&pluginCtx, render) +} + +type pluginContext struct { + formatter.HeaderContext + trunc bool + p types.Plugin +} + +func (c *pluginContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *pluginContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.p.ID) + } + return c.p.ID +} + +func (c *pluginContext) Name() string { + return c.p.Name +} + +func (c *pluginContext) Description() string { + desc := strings.Replace(c.p.Config.Description, "\n", "", -1) + desc = strings.Replace(desc, "\r", "", -1) + if c.trunc { + desc = formatter.Ellipsis(desc, 45) + } + + return desc +} + +func (c *pluginContext) Enabled() bool { + return c.p.Enabled +} + +func (c *pluginContext) 
PluginReference() string { + return c.p.PluginReference +} diff --git a/cli/cli/command/plugin/formatter_test.go b/cli/cli/command/plugin/formatter_test.go new file mode 100644 index 00000000..66853eb9 --- /dev/null +++ b/cli/cli/command/plugin/formatter_test.go @@ -0,0 +1,185 @@ +package plugin + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPluginContext(t *testing.T) { + pluginID := stringid.GenerateRandomID() + + var ctx pluginContext + cases := []struct { + pluginCtx pluginContext + expValue string + call func() string + }{ + {pluginContext{ + p: types.Plugin{ID: pluginID}, + trunc: false, + }, pluginID, ctx.ID}, + {pluginContext{ + p: types.Plugin{ID: pluginID}, + trunc: true, + }, stringid.TruncateID(pluginID), ctx.ID}, + {pluginContext{ + p: types.Plugin{Name: "plugin_name"}, + }, "plugin_name", ctx.Name}, + {pluginContext{ + p: types.Plugin{Config: types.PluginConfig{Description: "plugin_description"}}, + }, "plugin_description", ctx.Description}, + } + + for _, c := range cases { + ctx = c.pluginCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestPluginContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + + // Errors + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + formatter.Context{Format: NewFormat("table", false)}, + `ID NAME DESCRIPTION ENABLED 
+pluginID1 foobar_baz description 1 true +pluginID2 foobar_bar description 2 false +`, + }, + { + formatter.Context{Format: NewFormat("table", true)}, + `pluginID1 +pluginID2 +`, + }, + { + formatter.Context{Format: NewFormat("table {{.Name}}", false)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + formatter.Context{Format: NewFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + formatter.Context{Format: NewFormat("raw", false)}, + `plugin_id: pluginID1 +name: foobar_baz +description: description 1 +enabled: true + +plugin_id: pluginID2 +name: foobar_bar +description: description 2 +enabled: false + +`, + }, + { + formatter.Context{Format: NewFormat("raw", true)}, + `plugin_id: pluginID1 +plugin_id: pluginID2 +`, + }, + // Custom Format + { + formatter.Context{Format: NewFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + plugins := []*types.Plugin{ + {ID: "pluginID1", Name: "foobar_baz", Config: types.PluginConfig{Description: "description 1"}, Enabled: true}, + {ID: "pluginID2", Name: "foobar_bar", Config: types.PluginConfig{Description: "description 2"}, Enabled: false}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := FormatWrite(testcase.context, plugins) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestPluginContextWriteJSON(t *testing.T) { + plugins := []*types.Plugin{ + {ID: "pluginID1", Name: "foobar_baz"}, + {ID: "pluginID2", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Description": "", "Enabled": false, "ID": "pluginID1", "Name": "foobar_baz", "PluginReference": ""}, + {"Description": "", "Enabled": false, "ID": "pluginID2", "Name": "foobar_bar", "PluginReference": ""}, + } + + out := bytes.NewBufferString("") + err := FormatWrite(formatter.Context{Format: "{{json .}}", Output: out}, plugins) 
+ if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.Check(t, is.DeepEqual(expectedJSONs[i], m)) + } +} + +func TestPluginContextWriteJSONField(t *testing.T) { + plugins := []*types.Plugin{ + {ID: "pluginID1", Name: "foobar_baz"}, + {ID: "pluginID2", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := FormatWrite(formatter.Context{Format: "{{json .ID}}", Output: out}, plugins) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal(plugins[i].ID, s)) + } +} diff --git a/cli/cli/command/plugin/inspect.go b/cli/cli/command/plugin/inspect.go new file mode 100644 index 00000000..9ce49eb9 --- /dev/null +++ b/cli/cli/command/plugin/inspect.go @@ -0,0 +1,43 @@ +package plugin + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + pluginNames []string + format string +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Display detailed information on one or more plugins", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.pluginNames = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + getRef := func(ref string) 
(interface{}, []byte, error) { + return client.PluginInspectWithRaw(ctx, ref) + } + + return inspect.Inspect(dockerCli.Out(), opts.pluginNames, opts.format, getRef) +} diff --git a/cli/cli/command/plugin/inspect_test.go b/cli/cli/command/plugin/inspect_test.go new file mode 100644 index 00000000..ac892b50 --- /dev/null +++ b/cli/cli/command/plugin/inspect_test.go @@ -0,0 +1,150 @@ +package plugin + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + + "gotest.tools/assert" + "gotest.tools/golden" +) + +var pluginFoo = &types.Plugin{ + ID: "id-foo", + Name: "name-foo", + Config: types.PluginConfig{ + Description: "plugin foo description", + DockerVersion: "17.12.1-ce", + Documentation: "plugin foo documentation", + Entrypoint: []string{"/foo"}, + Interface: types.PluginConfigInterface{ + Socket: "pluginfoo.sock", + }, + Linux: types.PluginConfigLinux{ + Capabilities: []string{"CAP_SYS_ADMIN"}, + }, + WorkDir: "workdir-foo", + Rootfs: &types.PluginConfigRootfs{ + DiffIds: []string{"sha256:8603eedd4ea52cebb2f22b45405a3dc8f78ba3e31bf18f27b4547a9ff930e0bd"}, + Type: "layers", + }, + }, +} + +func TestInspectErrors(t *testing.T) { + testCases := []struct { + description string + args []string + flags map[string]string + expectedError string + inspectFunc func(name string) (*types.Plugin, []byte, error) + }{ + { + description: "too few arguments", + args: []string{}, + expectedError: "requires at least 1 argument", + }, + { + description: "error inspecting plugin", + args: []string{"foo"}, + expectedError: "error inspecting plugin", + inspectFunc: func(name string) (*types.Plugin, []byte, error) { + return nil, nil, fmt.Errorf("error inspecting plugin") + }, + }, + { + description: "invalid format", + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template parsing error", + }, + } + + for _, tc := range testCases { + t.Run(tc.description, 
func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{pluginInspectFunc: tc.inspectFunc}) + cmd := newInspectCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + }) + } +} + +func TestInspect(t *testing.T) { + testCases := []struct { + description string + args []string + flags map[string]string + golden string + inspectFunc func(name string) (*types.Plugin, []byte, error) + }{ + { + description: "inspect single plugin with format", + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{ .Name }}", + }, + golden: "plugin-inspect-single-with-format.golden", + inspectFunc: func(name string) (*types.Plugin, []byte, error) { + return &types.Plugin{ + ID: "id-foo", + Name: "name-foo", + }, []byte{}, nil + }, + }, + { + description: "inspect single plugin without format", + args: []string{"foo"}, + golden: "plugin-inspect-single-without-format.golden", + inspectFunc: func(name string) (*types.Plugin, []byte, error) { + return pluginFoo, nil, nil + }, + }, + { + description: "inspect multiple plugins with format", + args: []string{"foo", "bar"}, + flags: map[string]string{ + "format": "{{ .Name }}", + }, + golden: "plugin-inspect-multiple-with-format.golden", + inspectFunc: func(name string) (*types.Plugin, []byte, error) { + switch name { + case "foo": + return &types.Plugin{ + ID: "id-foo", + Name: "name-foo", + }, []byte{}, nil + case "bar": + return &types.Plugin{ + ID: "id-bar", + Name: "name-bar", + }, []byte{}, nil + default: + return nil, nil, fmt.Errorf("unexpected plugin name: %s", name) + } + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{pluginInspectFunc: tc.inspectFunc}) + cmd := newInspectCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, 
cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), tc.golden) + }) + } +} diff --git a/cli/cli/command/plugin/install.go b/cli/cli/command/plugin/install.go new file mode 100644 index 00000000..9a9e443b --- /dev/null +++ b/cli/cli/command/plugin/install.go @@ -0,0 +1,174 @@ +package plugin + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type pluginOptions struct { + remote string + localName string + grantPerms bool + disable bool + args []string + skipRemoteCheck bool + untrusted bool +} + +func loadPullFlags(dockerCli command.Cli, opts *pluginOptions, flags *pflag.FlagSet) { + flags.BoolVar(&opts.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin") + command.AddTrustVerificationFlags(flags, &opts.untrusted, dockerCli.ContentTrustEnabled()) +} + +func newInstallCommand(dockerCli command.Cli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "install [OPTIONS] PLUGIN [KEY=VALUE...]", + Short: "Install a plugin", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.remote = args[0] + if len(args) > 1 { + options.args = args[1:] + } + return runInstall(dockerCli, options) + }, + } + + flags := cmd.Flags() + loadPullFlags(dockerCli, &options, flags) + flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install") + flags.StringVar(&options.localName, "alias", "", "Local name for plugin") + return cmd +} + +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (*registry.RepositoryInfo, 
error) { + repoInfo, err := s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return repoInfo, err +} + +func newRegistryService() (registry.Service, error) { + svc, err := registry.NewService(registry.ServiceOptions{}) + if err != nil { + return nil, err + } + return pluginRegistryService{Service: svc}, nil +} + +func buildPullConfig(ctx context.Context, dockerCli command.Cli, opts pluginOptions, cmdName string) (types.PluginInstallOptions, error) { + // Names with both tag and digest will be treated by the daemon + // as a pull by digest with a local name for the tag + // (if no local name is provided). + ref, err := reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return types.PluginInstallOptions{}, err + } + + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return types.PluginInstallOptions{}, err + } + + remote := ref.String() + + _, isCanonical := ref.(reference.Canonical) + if !opts.untrusted && !isCanonical { + ref = reference.TagNameOnly(ref) + nt, ok := ref.(reference.NamedTagged) + if !ok { + return types.PluginInstallOptions{}, errors.Errorf("invalid name: %s", ref.String()) + } + + ctx := context.Background() + svc, err := newRegistryService() + if err != nil { + return types.PluginInstallOptions{}, err + } + trusted, err := image.TrustedReference(ctx, dockerCli, nt, svc) + if err != nil { + return types.PluginInstallOptions{}, err + } + remote = reference.FamiliarString(trusted) + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return types.PluginInstallOptions{}, err + } + registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, cmdName) + + options := types.PluginInstallOptions{ + RegistryAuth: encodedAuth, + RemoteRef: remote, + Disabled: opts.disable, + AcceptAllPermissions: opts.grantPerms, + AcceptPermissionsFunc: 
acceptPrivileges(dockerCli, opts.remote), + PrivilegeFunc: registryAuthFunc, + Args: opts.args, + } + return options, nil +} + +func runInstall(dockerCli command.Cli, opts pluginOptions) error { + var localName string + if opts.localName != "" { + aref, err := reference.ParseNormalizedNamed(opts.localName) + if err != nil { + return err + } + if _, ok := aref.(reference.Canonical); ok { + return errors.Errorf("invalid name: %s", opts.localName) + } + localName = reference.FamiliarString(reference.TagNameOnly(aref)) + } + + ctx := context.Background() + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin install") + if err != nil { + return err + } + responseBody, err := dockerCli.Client().PluginInstall(ctx, localName, options) + if err != nil { + if strings.Contains(err.Error(), "(image) when fetching") { + return errors.New(err.Error() + " - Use \"docker image pull\"") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.remote) // todo: return proper values from the API for this result + return nil +} + +func acceptPrivileges(dockerCli command.Cli, name string) func(privileges types.PluginPrivileges) (bool, error) { + return func(privileges types.PluginPrivileges) (bool, error) { + fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name) + for _, privilege := range privileges { + fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value) + } + return command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), "Do you grant the above permissions?"), nil + } +} diff --git a/cli/cli/command/plugin/install_test.go b/cli/cli/command/plugin/install_test.go new file mode 100644 index 00000000..ccd5294e --- /dev/null +++ b/cli/cli/command/plugin/install_test.go @@ -0,0 +1,141 @@ +package plugin + +import ( + "fmt" + "io" + "io/ioutil" 
+ "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/docker/docker/api/types" + + "gotest.tools/assert" +) + +func TestInstallErrors(t *testing.T) { + testCases := []struct { + description string + args []string + expectedError string + installFunc func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) + }{ + { + description: "insufficient number of arguments", + args: []string{}, + expectedError: "requires at least 1 argument", + }, + { + description: "invalid alias", + args: []string{"foo", "--alias", "UPPERCASE_ALIAS"}, + expectedError: "invalid", + }, + { + description: "invalid plugin name", + args: []string{"UPPERCASE_REPONAME"}, + expectedError: "invalid", + }, + { + description: "installation error", + args: []string{"foo"}, + expectedError: "Error installing plugin", + installFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) { + return nil, fmt.Errorf("Error installing plugin") + }, + }, + { + description: "installation error due to missing image", + args: []string{"foo"}, + expectedError: "docker image pull", + installFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) { + return nil, fmt.Errorf("(image) when fetching") + }, + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{pluginInstallFunc: tc.installFunc}) + cmd := newInstallCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestInstallContentTrustErrors(t *testing.T) { + testCases := []struct { + description string + args []string + expectedError string + notaryFunc test.NotaryClientFuncType + }{ + { + description: "install plugin, offline notary server", + args: []string{"plugin:tag"}, + expectedError: "client is offline", + notaryFunc: notary.GetOfflineNotaryRepository, + }, + { + description: "install plugin, 
uninitialized notary server", + args: []string{"plugin:tag"}, + expectedError: "remote trust data does not exist", + notaryFunc: notary.GetUninitializedNotaryRepository, + }, + { + description: "install plugin, empty notary server", + args: []string{"plugin:tag"}, + expectedError: "No valid trust data for tag", + notaryFunc: notary.GetEmptyTargetsNotaryRepository, + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + pluginInstallFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) { + return nil, fmt.Errorf("should not try to install plugin") + + }, + }, test.EnableContentTrust) + cli.SetNotaryClient(tc.notaryFunc) + cmd := newInstallCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestInstall(t *testing.T) { + testCases := []struct { + description string + args []string + expectedOutput string + installFunc func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) + }{ + { + description: "install with no additional flags", + args: []string{"foo"}, + expectedOutput: "Installed plugin foo\n", + installFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + { + description: "install with disable flag", + args: []string{"--disable", "foo"}, + expectedOutput: "Installed plugin foo\n", + installFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) { + assert.Check(t, options.Disabled) + return ioutil.NopCloser(strings.NewReader("")), nil + }, + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{pluginInstallFunc: tc.installFunc}) + cmd := newInstallCommand(cli) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + assert.Check(t, strings.Contains(cli.OutBuffer().String(), tc.expectedOutput)) + } +} diff --git a/cli/cli/command/plugin/list.go 
b/cli/cli/command/plugin/list.go new file mode 100644 index 00000000..16c7db6c --- /dev/null +++ b/cli/cli/command/plugin/list.go @@ -0,0 +1,70 @@ +package plugin + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type listOptions struct { + quiet bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Short: "List plugins", + Aliases: []string{"list"}, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display plugin IDs") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.StringVar(&options.format, "format", "", "Pretty-print plugins using a Go template") + flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 
'enabled=true')") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + plugins, err := dockerCli.Client().PluginList(context.Background(), options.filter.Value()) + if err != nil { + return err + } + + sort.Slice(plugins, func(i, j int) bool { + return sortorder.NaturalLess(plugins[i].Name, plugins[j].Name) + }) + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().PluginsFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().PluginsFormat + } else { + format = formatter.TableFormatKey + } + } + + pluginsCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(format, options.quiet), + Trunc: !options.noTrunc, + } + return FormatWrite(pluginsCtx, plugins) +} diff --git a/cli/cli/command/plugin/list_test.go b/cli/cli/command/plugin/list_test.go new file mode 100644 index 00000000..fbde6553 --- /dev/null +++ b/cli/cli/command/plugin/list_test.go @@ -0,0 +1,174 @@ +package plugin + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestListErrors(t *testing.T) { + testCases := []struct { + description string + args []string + flags map[string]string + expectedError string + listFunc func(filter filters.Args) (types.PluginsListResponse, error) + }{ + { + description: "too many arguments", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + description: "error listing plugins", + args: []string{}, + expectedError: "error listing plugins", + listFunc: func(filter filters.Args) (types.PluginsListResponse, error) { + return types.PluginsListResponse{}, fmt.Errorf("error listing plugins") + }, + }, + { + description: "invalid format", + args: []string{}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template 
parsing error", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{pluginListFunc: tc.listFunc}) + cmd := newListCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestList(t *testing.T) { + singlePluginListFunc := func(filter filters.Args) (types.PluginsListResponse, error) { + return types.PluginsListResponse{ + { + ID: "id-foo", + Name: "name-foo", + Enabled: true, + Config: types.PluginConfig{ + Description: "desc-bar", + }, + }, + }, nil + } + + testCases := []struct { + description string + args []string + flags map[string]string + golden string + listFunc func(filter filters.Args) (types.PluginsListResponse, error) + }{ + { + description: "list with no additional flags", + args: []string{}, + golden: "plugin-list-without-format.golden", + listFunc: singlePluginListFunc, + }, + { + description: "list with filters", + args: []string{}, + flags: map[string]string{ + "filter": "foo=bar", + }, + golden: "plugin-list-without-format.golden", + listFunc: func(filter filters.Args) (types.PluginsListResponse, error) { + assert.Check(t, is.Equal("bar", filter.Get("foo")[0])) + return singlePluginListFunc(filter) + }, + }, + { + description: "list with quiet option", + args: []string{}, + flags: map[string]string{ + "quiet": "true", + }, + golden: "plugin-list-with-quiet-option.golden", + listFunc: singlePluginListFunc, + }, + { + description: "list with no-trunc option", + args: []string{}, + flags: map[string]string{ + "no-trunc": "true", + "format": "{{ .ID }}", + }, + golden: "plugin-list-with-no-trunc-option.golden", + listFunc: func(filter filters.Args) (types.PluginsListResponse, error) { + return types.PluginsListResponse{ + { + ID: "xyg4z2hiSLO5yTnBJfg4OYia9gKA6Qjd", + Name: "name-foo", + Enabled: true, + Config: types.PluginConfig{ + Description: "desc-bar", + }, + }, + 
}, nil + }, + }, + { + description: "list with format", + args: []string{}, + flags: map[string]string{ + "format": "{{ .Name }}", + }, + golden: "plugin-list-with-format.golden", + listFunc: singlePluginListFunc, + }, + { + description: "list output is sorted based on plugin name", + args: []string{}, + flags: map[string]string{ + "format": "{{ .Name }}", + }, + golden: "plugin-list-sort.golden", + listFunc: func(filter filters.Args) (types.PluginsListResponse, error) { + return types.PluginsListResponse{ + { + ID: "id-1", + Name: "plugin-1-foo", + }, + { + ID: "id-2", + Name: "plugin-10-foo", + }, + { + ID: "id-3", + Name: "plugin-2-foo", + }, + }, nil + }, + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{pluginListFunc: tc.listFunc}) + cmd := newListCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), tc.golden) + } +} diff --git a/cli/cli/command/plugin/push.go b/cli/cli/command/plugin/push.go new file mode 100644 index 00000000..7df5a89d --- /dev/null +++ b/cli/cli/command/plugin/push.go @@ -0,0 +1,76 @@ +package plugin + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type pushOptions struct { + name string + untrusted bool +} + +func newPushCommand(dockerCli command.Cli) *cobra.Command { + var opts pushOptions + cmd := &cobra.Command{ + Use: "push [OPTIONS] PLUGIN[:TAG]", + Short: "Push a plugin to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runPush(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + command.AddTrustSigningFlags(flags, 
&opts.untrusted, dockerCli.ContentTrustEnabled()) + + return cmd +} + +func runPush(dockerCli command.Cli, opts pushOptions) error { + named, err := reference.ParseNormalizedNamed(opts.name) + if err != nil { + return err + } + if _, ok := named.(reference.Canonical); ok { + return errors.Errorf("invalid name: %s", opts.name) + } + + named = reference.TagNameOnly(named) + + ctx := context.Background() + + repoInfo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return err + } + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + responseBody, err := dockerCli.Client().PluginPush(ctx, reference.FamiliarString(named), encodedAuth) + if err != nil { + return err + } + defer responseBody.Close() + + if !opts.untrusted { + repoInfo.Class = "plugin" + return image.PushTrustedReference(dockerCli, repoInfo, named, authConfig, responseBody) + } + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/cli/cli/command/plugin/remove.go b/cli/cli/command/plugin/remove.go new file mode 100644 index 00000000..a2092bd7 --- /dev/null +++ b/cli/cli/command/plugin/remove.go @@ -0,0 +1,54 @@ +package plugin + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type rmOptions struct { + force bool + + plugins []string +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Remove one or more plugins", + Aliases: []string{"remove"}, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.plugins = args + return runRemove(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal 
of an active plugin") + return cmd +} + +func runRemove(dockerCli command.Cli, opts *rmOptions) error { + ctx := context.Background() + + var errs cli.Errors + for _, name := range opts.plugins { + if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil { + errs = append(errs, err) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + // Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value. + if errs != nil { + return errs + } + return nil +} diff --git a/cli/cli/command/plugin/remove_test.go b/cli/cli/command/plugin/remove_test.go new file mode 100644 index 00000000..4cfec433 --- /dev/null +++ b/cli/cli/command/plugin/remove_test.go @@ -0,0 +1,71 @@ +package plugin + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestRemoveErrors(t *testing.T) { + + testCases := []struct { + args []string + pluginRemoveFunc func(name string, options types.PluginRemoveOptions) error + expectedError string + }{ + { + args: []string{}, + expectedError: "requires at least 1 argument", + }, + { + args: []string{"plugin-foo"}, + pluginRemoveFunc: func(name string, options types.PluginRemoveOptions) error { + return fmt.Errorf("Error removing plugin") + }, + expectedError: "Error removing plugin", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + pluginRemoveFunc: tc.pluginRemoveFunc, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestRemove(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + pluginRemoveFunc: func(name string, options types.PluginRemoveOptions) error { + return nil + }, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs([]string{"plugin-foo"}) + assert.NilError(t, 
cmd.Execute()) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} + +func TestRemoveWithForceOption(t *testing.T) { + force := false + cli := test.NewFakeCli(&fakeClient{ + pluginRemoveFunc: func(name string, options types.PluginRemoveOptions) error { + force = options.Force + return nil + }, + }) + cmd := newRemoveCommand(cli) + cmd.SetArgs([]string{"plugin-foo"}) + cmd.Flags().Set("force", "true") + assert.NilError(t, cmd.Execute()) + assert.Check(t, force) + assert.Check(t, is.Equal("plugin-foo\n", cli.OutBuffer().String())) +} diff --git a/cli/cli/command/plugin/set.go b/cli/cli/command/plugin/set.go new file mode 100644 index 00000000..724fdebf --- /dev/null +++ b/cli/cli/command/plugin/set.go @@ -0,0 +1,22 @@ +package plugin + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +func newSetCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "set PLUGIN KEY=VALUE [KEY=VALUE...]", + Short: "Change settings for a plugin", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:]) + }, + } + + return cmd +} diff --git a/cli/cli/command/plugin/testdata/plugin-inspect-multiple-with-format.golden b/cli/cli/command/plugin/testdata/plugin-inspect-multiple-with-format.golden new file mode 100644 index 00000000..3adb038a --- /dev/null +++ b/cli/cli/command/plugin/testdata/plugin-inspect-multiple-with-format.golden @@ -0,0 +1,2 @@ +name-foo +name-bar diff --git a/cli/cli/command/plugin/testdata/plugin-inspect-single-with-format.golden b/cli/cli/command/plugin/testdata/plugin-inspect-single-with-format.golden new file mode 100644 index 00000000..ab347b69 --- /dev/null +++ b/cli/cli/command/plugin/testdata/plugin-inspect-single-with-format.golden @@ -0,0 +1 @@ +name-foo diff --git 
a/cli/cli/command/plugin/testdata/plugin-inspect-single-without-format.golden b/cli/cli/command/plugin/testdata/plugin-inspect-single-without-format.golden new file mode 100644 index 00000000..65c8d39c --- /dev/null +++ b/cli/cli/command/plugin/testdata/plugin-inspect-single-without-format.golden @@ -0,0 +1,54 @@ +[ + { + "Config": { + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "plugin foo description", + "DockerVersion": "17.12.1-ce", + "Documentation": "plugin foo documentation", + "Entrypoint": [ + "/foo" + ], + "Env": null, + "Interface": { + "Socket": "pluginfoo.sock", + "Types": null + }, + "IpcHost": false, + "Linux": { + "AllowAllDevices": false, + "Capabilities": [ + "CAP_SYS_ADMIN" + ], + "Devices": null + }, + "Mounts": null, + "Network": { + "Type": "" + }, + "PidHost": false, + "PropagatedMount": "", + "User": {}, + "WorkDir": "workdir-foo", + "rootfs": { + "diff_ids": [ + "sha256:8603eedd4ea52cebb2f22b45405a3dc8f78ba3e31bf18f27b4547a9ff930e0bd" + ], + "type": "layers" + } + }, + "Enabled": false, + "Id": "id-foo", + "Name": "name-foo", + "Settings": { + "Args": null, + "Devices": null, + "Env": null, + "Mounts": null + } + } +] diff --git a/cli/cli/command/plugin/testdata/plugin-list-sort.golden b/cli/cli/command/plugin/testdata/plugin-list-sort.golden new file mode 100644 index 00000000..62c4a098 --- /dev/null +++ b/cli/cli/command/plugin/testdata/plugin-list-sort.golden @@ -0,0 +1,3 @@ +plugin-1-foo +plugin-2-foo +plugin-10-foo diff --git a/cli/cli/command/plugin/testdata/plugin-list-with-format.golden b/cli/cli/command/plugin/testdata/plugin-list-with-format.golden new file mode 100644 index 00000000..ab347b69 --- /dev/null +++ b/cli/cli/command/plugin/testdata/plugin-list-with-format.golden @@ -0,0 +1 @@ +name-foo diff --git a/cli/cli/command/plugin/testdata/plugin-list-with-no-trunc-option.golden b/cli/cli/command/plugin/testdata/plugin-list-with-no-trunc-option.golden new file mode 
100644 index 00000000..ac88922c --- /dev/null +++ b/cli/cli/command/plugin/testdata/plugin-list-with-no-trunc-option.golden @@ -0,0 +1 @@ +xyg4z2hiSLO5yTnBJfg4OYia9gKA6Qjd diff --git a/cli/cli/command/plugin/testdata/plugin-list-with-quiet-option.golden b/cli/cli/command/plugin/testdata/plugin-list-with-quiet-option.golden new file mode 100644 index 00000000..e2faeb60 --- /dev/null +++ b/cli/cli/command/plugin/testdata/plugin-list-with-quiet-option.golden @@ -0,0 +1 @@ +id-foo diff --git a/cli/cli/command/plugin/testdata/plugin-list-without-format.golden b/cli/cli/command/plugin/testdata/plugin-list-without-format.golden new file mode 100644 index 00000000..a8398872 --- /dev/null +++ b/cli/cli/command/plugin/testdata/plugin-list-without-format.golden @@ -0,0 +1,2 @@ +ID NAME DESCRIPTION ENABLED +id-foo name-foo desc-bar true diff --git a/cli/cli/command/plugin/upgrade.go b/cli/cli/command/plugin/upgrade.go new file mode 100644 index 00000000..f5afb509 --- /dev/null +++ b/cli/cli/command/plugin/upgrade.go @@ -0,0 +1,90 @@ +package plugin + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +func newUpgradeCommand(dockerCli command.Cli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "upgrade [OPTIONS] PLUGIN [REMOTE]", + Short: "Upgrade an existing plugin", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.localName = args[0] + if len(args) == 2 { + options.remote = args[1] + } + return runUpgrade(dockerCli, options) + }, + Annotations: map[string]string{"version": "1.26"}, + } + + flags := cmd.Flags() + loadPullFlags(dockerCli, &options, flags) + flags.BoolVar(&options.skipRemoteCheck, "skip-remote-check", false, "Do not check if specified remote plugin matches existing plugin 
image") + return cmd +} + +func runUpgrade(dockerCli command.Cli, opts pluginOptions) error { + ctx := context.Background() + p, _, err := dockerCli.Client().PluginInspectWithRaw(ctx, opts.localName) + if err != nil { + return errors.Errorf("error reading plugin data: %v", err) + } + + if p.Enabled { + return errors.Errorf("the plugin must be disabled before upgrading") + } + + opts.localName = p.Name + if opts.remote == "" { + opts.remote = p.PluginReference + } + remote, err := reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return errors.Wrap(err, "error parsing remote upgrade image reference") + } + remote = reference.TagNameOnly(remote) + + old, err := reference.ParseNormalizedNamed(p.PluginReference) + if err != nil { + return errors.Wrap(err, "error parsing current image reference") + } + old = reference.TagNameOnly(old) + + fmt.Fprintf(dockerCli.Out(), "Upgrading plugin %s from %s to %s\n", p.Name, reference.FamiliarString(old), reference.FamiliarString(remote)) + if !opts.skipRemoteCheck && remote.String() != old.String() { + if !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), "Plugin images do not match, are you sure?") { + return errors.New("canceling upgrade request") + } + } + + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin upgrade") + if err != nil { + return err + } + + responseBody, err := dockerCli.Client().PluginUpgrade(ctx, opts.localName, options) + if err != nil { + if strings.Contains(err.Error(), "target is image") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Upgraded plugin %s to %s\n", opts.localName, opts.remote) // todo: return proper values from the API for this result + return nil +} diff --git a/cli/cli/command/registry.go b/cli/cli/command/registry.go new file mode 
100644 index 00000000..16196263 --- /dev/null +++ b/cli/cli/command/registry.go @@ -0,0 +1,210 @@ +package command + +import ( + "bufio" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + + configtypes "github.com/docker/cli/cli/config/types" + "github.com/docker/cli/cli/debug" + "github.com/docker/cli/cli/streams" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +// ElectAuthServer returns the default registry to use (by asking the daemon) +func ElectAuthServer(ctx context.Context, cli Cli) string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + info, err := cli.Client().Info(ctx) + if err != nil { + // Daemon is not responding so use system default. + if debug.IsEnabled() { + // Only report the warning if we're in debug mode to prevent nagging during engine initialization workflows + fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, registry.IndexServer) + } + return registry.IndexServer + } + if info.IndexServerAddress == "" { + if debug.IsEnabled() { + fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. 
Using system default: %s\n", registry.IndexServer) + } + return registry.IndexServer + } + return info.IndexServerAddress +} + +// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info +// for the given command. +func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) + authConfig, err := GetDefaultAuthConfig(cli, true, indexServer, isDefaultRegistry) + if err != nil { + fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err) + } + err = ConfigureAuth(cli, "", "", authConfig, isDefaultRegistry) + if err != nil { + return "", err + } + return EncodeAuthToBase64(*authConfig) + } +} + +// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. 
+func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = ElectAuthServer(ctx, cli) + } + + a, _ := cli.ConfigFile().GetAuthConfig(configKey) + return types.AuthConfig(a) +} + +// GetDefaultAuthConfig gets the default auth config given a serverAddress +// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it +func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (*types.AuthConfig, error) { + if !isDefaultRegistry { + serverAddress = registry.ConvertToHostname(serverAddress) + } + var authconfig configtypes.AuthConfig + var err error + if checkCredStore { + authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress) + } else { + authconfig = configtypes.AuthConfig{} + } + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + res := types.AuthConfig(authconfig) + return &res, err +} + +// ConfigureAuth handles prompting of user's username and password if needed +func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *types.AuthConfig, isDefaultRegistry bool) error { + // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.SetIn(streams.NewIn(os.Stdin)) + } + + // Some links documenting this: + // - https://code.google.com/archive/p/mintty/issues/56 + // - https://github.com/docker/docker/issues/15272 + // - https://mintty.github.io/ (compatibility) + // Linux will hit this if you attempt `cat | docker login`, and Windows + // will hit this if you attempt docker login from mintty where stdin + // is a pipe, not a character based console. 
+ if flPassword == "" && !cli.In().IsTerminal() { + return errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a default registry (docker hub), then display the following message. + fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + promptWithDefault(cli.Out(), "Username", authconfig.Username) + flUser = readInput(cli.In(), cli.Out()) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + if flUser == "" { + return errors.Errorf("Error: Non-null Username Required") + } + if flPassword == "" { + oldState, err := term.SaveState(cli.In().FD()) + if err != nil { + return err + } + fmt.Fprintf(cli.Out(), "Password: ") + term.DisableEcho(cli.In().FD(), oldState) + + flPassword = readInput(cli.In(), cli.Out()) + fmt.Fprint(cli.Out(), "\n") + + term.RestoreTerminal(cli.In().FD(), oldState) + if flPassword == "" { + return errors.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + + return nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +func promptWithDefault(out io.Writer, prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(out, "%s: ", prompt) + } else { + fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) + } +} + +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image +func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) { + // Retrieve encoded auth token from the image 
reference + authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) + if err != nil { + return "", err + } + encodedAuth, err := EncodeAuthToBase64(authConfig) + if err != nil { + return "", err + } + return encodedAuth, nil +} + +// resolveAuthConfigFromImage retrieves that AuthConfig using the image string +func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) { + registryRef, err := reference.ParseNormalizedNamed(image) + if err != nil { + return types.AuthConfig{}, err + } + repoInfo, err := registry.ParseRepositoryInfo(registryRef) + if err != nil { + return types.AuthConfig{}, err + } + return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil +} diff --git a/cli/cli/command/registry/formatter_search.go b/cli/cli/command/registry/formatter_search.go new file mode 100644 index 00000000..c536a511 --- /dev/null +++ b/cli/cli/command/registry/formatter_search.go @@ -0,0 +1,104 @@ +package registry + +import ( + "strconv" + "strings" + + "github.com/docker/cli/cli/command/formatter" + registry "github.com/docker/docker/api/types/registry" +) + +const ( + defaultSearchTableFormat = "table {{.Name}}\t{{.Description}}\t{{.StarCount}}\t{{.IsOfficial}}\t{{.IsAutomated}}" + + starsHeader = "STARS" + officialHeader = "OFFICIAL" + automatedHeader = "AUTOMATED" +) + +// NewSearchFormat returns a Format for rendering using a network Context +func NewSearchFormat(source string) formatter.Format { + switch source { + case "": + return defaultSearchTableFormat + case formatter.TableFormatKey: + return defaultSearchTableFormat + } + return formatter.Format(source) +} + +// SearchWrite writes the context +func SearchWrite(ctx formatter.Context, results []registry.SearchResult, auto bool, stars int) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, result := range results { + // --automated and -s, --stars are deprecated since Docker 1.12 + if (auto && !result.IsAutomated) || (stars > 
result.StarCount) { + continue + } + searchCtx := &searchContext{trunc: ctx.Trunc, s: result} + if err := format(searchCtx); err != nil { + return err + } + } + return nil + } + searchCtx := searchContext{} + searchCtx.Header = formatter.SubHeaderContext{ + "Name": formatter.NameHeader, + "Description": formatter.DescriptionHeader, + "StarCount": starsHeader, + "IsOfficial": officialHeader, + "IsAutomated": automatedHeader, + } + return ctx.Write(&searchCtx, render) +} + +type searchContext struct { + formatter.HeaderContext + trunc bool + json bool + s registry.SearchResult +} + +func (c *searchContext) MarshalJSON() ([]byte, error) { + c.json = true + return formatter.MarshalJSON(c) +} + +func (c *searchContext) Name() string { + return c.s.Name +} + +func (c *searchContext) Description() string { + desc := strings.Replace(c.s.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if c.trunc { + desc = formatter.Ellipsis(desc, 45) + } + return desc +} + +func (c *searchContext) StarCount() string { + return strconv.Itoa(c.s.StarCount) +} + +func (c *searchContext) formatBool(value bool) string { + switch { + case value && c.json: + return "true" + case value: + return "[OK]" + case c.json: + return "false" + default: + return "" + } +} + +func (c *searchContext) IsOfficial() string { + return c.formatBool(c.s.IsOfficial) +} + +func (c *searchContext) IsAutomated() string { + return c.formatBool(c.s.IsAutomated) +} diff --git a/cli/cli/command/registry/formatter_search_test.go b/cli/cli/command/registry/formatter_search_test.go new file mode 100644 index 00000000..0feb3ba5 --- /dev/null +++ b/cli/cli/command/registry/formatter_search_test.go @@ -0,0 +1,282 @@ +package registry + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/internal/test" + registrytypes "github.com/docker/docker/api/types/registry" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" 
+ "gotest.tools/golden" +) + +func TestSearchContext(t *testing.T) { + name := "nginx" + starCount := 5000 + + var ctx searchContext + cases := []struct { + searchCtx searchContext + expValue string + call func() string + }{ + {searchContext{ + s: registrytypes.SearchResult{Name: name}, + }, name, ctx.Name}, + {searchContext{ + s: registrytypes.SearchResult{StarCount: starCount}, + }, "5000", ctx.StarCount}, + {searchContext{ + s: registrytypes.SearchResult{IsOfficial: true}, + }, "[OK]", ctx.IsOfficial}, + {searchContext{ + s: registrytypes.SearchResult{IsOfficial: false}, + }, "", ctx.IsOfficial}, + {searchContext{ + s: registrytypes.SearchResult{IsAutomated: true}, + }, "[OK]", ctx.IsAutomated}, + {searchContext{ + s: registrytypes.SearchResult{IsAutomated: false}, + }, "", ctx.IsAutomated}, + } + + for _, c := range cases { + ctx = c.searchCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestSearchContextDescription(t *testing.T) { + shortDescription := "Official build of Nginx." 
+ longDescription := "Automated Nginx reverse proxy for docker containers" + descriptionWReturns := "Automated\nNginx reverse\rproxy\rfor docker\ncontainers" + + var ctx searchContext + cases := []struct { + searchCtx searchContext + expValue string + call func() string + }{ + {searchContext{ + s: registrytypes.SearchResult{Description: shortDescription}, + trunc: true, + }, shortDescription, ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: shortDescription}, + trunc: false, + }, shortDescription, ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: longDescription}, + trunc: false, + }, longDescription, ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: longDescription}, + trunc: true, + }, formatter.Ellipsis(longDescription, 45), ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: descriptionWReturns}, + trunc: false, + }, longDescription, ctx.Description}, + {searchContext{ + s: registrytypes.SearchResult{Description: descriptionWReturns}, + trunc: true, + }, formatter.Ellipsis(longDescription, 45), ctx.Description}, + } + + for _, c := range cases { + ctx = c.searchCtx + v := c.call() + if strings.Contains(v, ",") { + test.CompareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestSearchContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + + // Errors + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + formatter.Context{Format: NewSearchFormat("table")}, + string(golden.Get(t, "search-context-write-table.golden")), + }, + { + formatter.Context{Format: 
NewSearchFormat("table {{.Name}}")}, + `NAME +result1 +result2 +`, + }, + // Custom Format + { + formatter.Context{Format: NewSearchFormat("{{.Name}}")}, + `result1 +result2 +`, + }, + // Custom Format with CreatedAt + { + formatter.Context{Format: NewSearchFormat("{{.Name}} {{.StarCount}}")}, + `result1 5000 +result2 5 +`, + }, + } + + for _, testcase := range cases { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := SearchWrite(testcase.context, results, false, 0) + if err != nil { + assert.Check(t, is.ErrorContains(err, testcase.expected)) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} + +func TestSearchContextWriteAutomated(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + + // Table format + { + formatter.Context{Format: NewSearchFormat("table")}, + `NAME DESCRIPTION STARS OFFICIAL AUTOMATED +result2 Not official 5 [OK] +`, + }, + { + formatter.Context{Format: NewSearchFormat("table {{.Name}}")}, + `NAME +result2 +`, + }, + } + + for _, testcase := range cases { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := SearchWrite(testcase.context, results, true, 0) + if err != nil { + assert.Check(t, is.ErrorContains(err, testcase.expected)) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} + +func TestSearchContextWriteStars(t *testing.T) { + cases := []struct { + context formatter.Context + expected 
string + }{ + + // Table format + { + formatter.Context{Format: NewSearchFormat("table")}, + string(golden.Get(t, "search-context-write-stars-table.golden")), + }, + { + formatter.Context{Format: NewSearchFormat("table {{.Name}}")}, + `NAME +result1 +`, + }, + } + + for _, testcase := range cases { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := SearchWrite(testcase.context, results, false, 6) + if err != nil { + assert.Check(t, is.ErrorContains(err, testcase.expected)) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} + +func TestSearchContextWriteJSON(t *testing.T) { + results := []registrytypes.SearchResult{ + {Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + expectedJSONs := []map[string]interface{}{ + {"Name": "result1", "Description": "Official build", "StarCount": "5000", "IsOfficial": "true", "IsAutomated": "false"}, + {"Name": "result2", "Description": "Not official", "StarCount": "5", "IsOfficial": "false", "IsAutomated": "true"}, + } + + out := bytes.NewBufferString("") + err := SearchWrite(formatter.Context{Format: "{{json .}}", Output: out}, results, false, 0) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.Check(t, is.DeepEqual(m, expectedJSONs[i])) + } +} + +func TestSearchContextWriteJSONField(t *testing.T) { + results := []registrytypes.SearchResult{ + 
{Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true, IsAutomated: false}, + {Name: "result2", Description: "Not official", StarCount: 5, IsOfficial: false, IsAutomated: true}, + } + out := bytes.NewBufferString("") + err := SearchWrite(formatter.Context{Format: "{{json .Name}}", Output: out}, results, false, 0) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal(s, results[i].Name)) + } +} diff --git a/cli/cli/command/registry/login.go b/cli/cli/command/registry/login.go new file mode 100644 index 00000000..f6cb579e --- /dev/null +++ b/cli/cli/command/registry/login.go @@ -0,0 +1,190 @@ +package registry + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + configtypes "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/client" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +const unencryptedWarning = `WARNING! Your password will be stored unencrypted in %s. +Configure a credential helper to remove this warning. 
See +https://docs.docker.com/engine/reference/commandline/login/#credentials-store +` + +type loginOptions struct { + serverAddress string + user string + password string + passwordStdin bool +} + +// NewLoginCommand creates a new `docker login` command +func NewLoginCommand(dockerCli command.Cli) *cobra.Command { + var opts loginOptions + + cmd := &cobra.Command{ + Use: "login [OPTIONS] [SERVER]", + Short: "Log in to a Docker registry", + Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.serverAddress = args[0] + } + return runLogin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.user, "username", "u", "", "Username") + flags.StringVarP(&opts.password, "password", "p", "", "Password") + flags.BoolVarP(&opts.passwordStdin, "password-stdin", "", false, "Take the password from stdin") + + return cmd +} + +// displayUnencryptedWarning warns the user when using an insecure credential storage. +// After a deprecation period, user will get prompted if stdin and stderr are a terminal. +// Otherwise, we'll assume they want it (sadly), because people may have been scripting +// insecure logins and we don't want to break them. Maybe they'll see the warning in their +// logs and fix things. +func displayUnencryptedWarning(dockerCli command.Streams, filename string) error { + _, err := fmt.Fprintln(dockerCli.Err(), fmt.Sprintf(unencryptedWarning, filename)) + + return err +} + +type isFileStore interface { + IsFileStore() bool + GetFilename() string +} + +func verifyloginOptions(dockerCli command.Cli, opts *loginOptions) error { + if opts.password != "" { + fmt.Fprintln(dockerCli.Err(), "WARNING! Using --password via the CLI is insecure. 
Use --password-stdin.") + if opts.passwordStdin { + return errors.New("--password and --password-stdin are mutually exclusive") + } + } + + if opts.passwordStdin { + if opts.user == "" { + return errors.New("Must provide --username with --password-stdin") + } + + contents, err := ioutil.ReadAll(dockerCli.In()) + if err != nil { + return err + } + + opts.password = strings.TrimSuffix(string(contents), "\n") + opts.password = strings.TrimSuffix(opts.password, "\r") + } + return nil +} + +func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocyclo + ctx := context.Background() + clnt := dockerCli.Client() + if err := verifyloginOptions(dockerCli, &opts); err != nil { + return err + } + var ( + serverAddress string + authServer = command.ElectAuthServer(ctx, dockerCli) + ) + if opts.serverAddress != "" && opts.serverAddress != registry.DefaultNamespace { + serverAddress = opts.serverAddress + } else { + serverAddress = authServer + } + + var err error + var authConfig *types.AuthConfig + var response registrytypes.AuthenticateOKBody + isDefaultRegistry := serverAddress == authServer + authConfig, err = command.GetDefaultAuthConfig(dockerCli, opts.user == "" && opts.password == "", serverAddress, isDefaultRegistry) + if err == nil && authConfig.Username != "" && authConfig.Password != "" { + response, err = loginWithCredStoreCreds(ctx, dockerCli, authConfig) + } + if err != nil || authConfig.Username == "" || authConfig.Password == "" { + err = command.ConfigureAuth(dockerCli, opts.user, opts.password, authConfig, isDefaultRegistry) + if err != nil { + return err + } + + response, err = clnt.RegistryLogin(ctx, *authConfig) + if err != nil && client.IsErrConnectionFailed(err) { + // If the server isn't responding (yet) attempt to login purely client side + response, err = loginClientSide(ctx, *authConfig) + } + // If we (still) have an error, give up + if err != nil { + return err + } + } + if response.IdentityToken != "" { + authConfig.Password = 
"" + authConfig.IdentityToken = response.IdentityToken + } + + creds := dockerCli.ConfigFile().GetCredentialsStore(serverAddress) + + store, isDefault := creds.(isFileStore) + // Display a warning if we're storing the users password (not a token) + if isDefault && authConfig.Password != "" { + err = displayUnencryptedWarning(dockerCli, store.GetFilename()) + if err != nil { + return err + } + } + + if err := creds.Store(configtypes.AuthConfig(*authConfig)); err != nil { + return errors.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(dockerCli.Out(), response.Status) + } + return nil +} + +func loginWithCredStoreCreds(ctx context.Context, dockerCli command.Cli, authConfig *types.AuthConfig) (registrytypes.AuthenticateOKBody, error) { + fmt.Fprintf(dockerCli.Out(), "Authenticating with existing credentials...\n") + cliClient := dockerCli.Client() + response, err := cliClient.RegistryLogin(ctx, *authConfig) + if err != nil { + if client.IsErrUnauthorized(err) { + fmt.Fprintf(dockerCli.Err(), "Stored credentials invalid or expired\n") + } else { + fmt.Fprintf(dockerCli.Err(), "Login did not succeed, error: %s\n", err) + } + } + return response, err +} + +func loginClientSide(ctx context.Context, auth types.AuthConfig) (registrytypes.AuthenticateOKBody, error) { + svc, err := registry.NewService(registry.ServiceOptions{}) + if err != nil { + return registrytypes.AuthenticateOKBody{}, err + } + + status, token, err := svc.Auth(ctx, &auth, command.UserAgent()) + + return registrytypes.AuthenticateOKBody{ + Status: status, + IdentityToken: token, + }, err +} diff --git a/cli/cli/command/registry/login_test.go b/cli/cli/command/registry/login_test.go new file mode 100644 index 00000000..0f1374dd --- /dev/null +++ b/cli/cli/command/registry/login_test.go @@ -0,0 +1,182 @@ +package registry + +import ( + "bytes" + "context" + "fmt" + "testing" + + configtypes "github.com/docker/cli/cli/config/types" + 
"github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/client" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +const userErr = "userunknownError" +const testAuthErrMsg = "UNKNOWN_ERR" + +var testAuthErrors = map[string]error{ + userErr: fmt.Errorf(testAuthErrMsg), +} + +var expiredPassword = "I_M_EXPIRED" +var useToken = "I_M_TOKEN" + +type fakeClient struct { + client.Client +} + +func (c fakeClient) Info(ctx context.Context) (types.Info, error) { + return types.Info{}, nil +} + +func (c fakeClient) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registrytypes.AuthenticateOKBody, error) { + if auth.Password == expiredPassword { + return registrytypes.AuthenticateOKBody{}, fmt.Errorf("Invalid Username or Password") + } + if auth.Password == useToken { + return registrytypes.AuthenticateOKBody{ + IdentityToken: auth.Password, + }, nil + } + err := testAuthErrors[auth.Username] + return registrytypes.AuthenticateOKBody{}, err +} + +func TestLoginWithCredStoreCreds(t *testing.T) { + testCases := []struct { + inputAuthConfig types.AuthConfig + expectedMsg string + expectedErr string + }{ + { + inputAuthConfig: types.AuthConfig{}, + expectedMsg: "Authenticating with existing credentials...\n", + }, + { + inputAuthConfig: types.AuthConfig{ + Username: userErr, + }, + expectedMsg: "Authenticating with existing credentials...\n", + expectedErr: fmt.Sprintf("Login did not succeed, error: %s\n", testAuthErrMsg), + }, + // can't easily test the 401 case because client.IsErrUnauthorized(err) involving + // creating an error of a private type + } + ctx := context.Background() + for _, tc := range testCases { + cli := (*test.FakeCli)(test.NewFakeCli(&fakeClient{})) + errBuf := new(bytes.Buffer) + cli.SetErr(errBuf) + loginWithCredStoreCreds(ctx, cli, &tc.inputAuthConfig) + outputString := cli.OutBuffer().String() + 
assert.Check(t, is.Equal(tc.expectedMsg, outputString)) + errorString := errBuf.String() + assert.Check(t, is.Equal(tc.expectedErr, errorString)) + } +} + +func TestRunLogin(t *testing.T) { + const storedServerAddress = "reg1" + const validUsername = "u1" + const validPassword = "p1" + const validPassword2 = "p2" + + validAuthConfig := configtypes.AuthConfig{ + ServerAddress: storedServerAddress, + Username: validUsername, + Password: validPassword, + } + expiredAuthConfig := configtypes.AuthConfig{ + ServerAddress: storedServerAddress, + Username: validUsername, + Password: expiredPassword, + } + validIdentityToken := configtypes.AuthConfig{ + ServerAddress: storedServerAddress, + Username: validUsername, + IdentityToken: useToken, + } + testCases := []struct { + inputLoginOption loginOptions + inputStoredCred *configtypes.AuthConfig + expectedErr string + expectedSavedCred configtypes.AuthConfig + }{ + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + }, + inputStoredCred: &validAuthConfig, + expectedErr: "", + expectedSavedCred: validAuthConfig, + }, + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + }, + inputStoredCred: &expiredAuthConfig, + expectedErr: "Error: Cannot perform an interactive login from a non TTY device", + }, + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + user: validUsername, + password: validPassword2, + }, + inputStoredCred: &validAuthConfig, + expectedErr: "", + expectedSavedCred: configtypes.AuthConfig{ + ServerAddress: storedServerAddress, + Username: validUsername, + Password: validPassword2, + }, + }, + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + user: userErr, + password: validPassword, + }, + inputStoredCred: &validAuthConfig, + expectedErr: testAuthErrMsg, + }, + { + inputLoginOption: loginOptions{ + serverAddress: storedServerAddress, + user: validUsername, + password: useToken, + }, + inputStoredCred: 
&validIdentityToken, + expectedErr: "", + expectedSavedCred: validIdentityToken, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + tmpFile := fs.NewFile(t, "test-run-login") + defer tmpFile.Remove() + cli := test.NewFakeCli(&fakeClient{}) + configfile := cli.ConfigFile() + configfile.Filename = tmpFile.Path() + + if tc.inputStoredCred != nil { + cred := *tc.inputStoredCred + configfile.GetCredentialsStore(cred.ServerAddress).Store(cred) + } + loginErr := runLogin(cli, tc.inputLoginOption) + if tc.expectedErr != "" { + assert.Error(t, loginErr, tc.expectedErr) + return + } + assert.NilError(t, loginErr) + savedCred, credStoreErr := configfile.GetCredentialsStore(tc.inputStoredCred.ServerAddress).Get(tc.inputStoredCred.ServerAddress) + assert.Check(t, credStoreErr) + assert.DeepEqual(t, tc.expectedSavedCred, savedCred) + }) + } +} diff --git a/cli/cli/command/registry/logout.go b/cli/cli/command/registry/logout.go new file mode 100644 index 00000000..ac84139f --- /dev/null +++ b/cli/cli/command/registry/logout.go @@ -0,0 +1,76 @@ +package registry + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewLogoutCommand creates a new `docker logout` command +func NewLogoutCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "logout [SERVER]", + Short: "Log out from a Docker registry", + Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var serverAddress string + if len(args) > 0 { + serverAddress = args[0] + } + return runLogout(dockerCli, serverAddress) + }, + } + + return cmd +} + +func runLogout(dockerCli command.Cli, serverAddress string) error { + ctx := context.Background() + var isDefaultRegistry bool + + if serverAddress == "" 
{ + serverAddress = command.ElectAuthServer(ctx, dockerCli) + isDefaultRegistry = true + } + + var ( + loggedIn bool + regsToLogout []string + hostnameAddress = serverAddress + regsToTry = []string{serverAddress} + ) + if !isDefaultRegistry { + hostnameAddress = registry.ConvertToHostname(serverAddress) + // the tries below are kept for backward compatibility where a user could have + // saved the registry in one of the following format. + regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress) + } + + // check if we're logged in based on the records in the config file + // which means it couldn't have user/pass cause they may be in the creds store + for _, s := range regsToTry { + if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok { + loggedIn = true + regsToLogout = append(regsToLogout, s) + } + } + + if !loggedIn { + fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress) + return nil + } + + fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress) + for _, r := range regsToLogout { + if err := dockerCli.ConfigFile().GetCredentialsStore(r).Erase(r); err != nil { + fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) + } + } + + return nil +} diff --git a/cli/cli/command/registry/search.go b/cli/cli/command/registry/search.go new file mode 100644 index 00000000..7b9b8224 --- /dev/null +++ b/cli/cli/command/registry/search.go @@ -0,0 +1,97 @@ +package registry + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type searchOptions struct { + format string + term string + noTrunc bool + limit int + filter opts.FilterOpt + + // Deprecated + stars uint + automated bool +} + +// NewSearchCommand creates a new `docker 
search` command +func NewSearchCommand(dockerCli command.Cli) *cobra.Command { + options := searchOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "search [OPTIONS] TERM", + Short: "Search the Docker Hub for images", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.term = args[0] + return runSearch(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + flags.IntVar(&options.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") + flags.StringVar(&options.format, "format", "", "Pretty-print search using a Go template") + + flags.BoolVar(&options.automated, "automated", false, "Only show automated builds") + flags.UintVarP(&options.stars, "stars", "s", 0, "Only displays with at least x stars") + + flags.MarkDeprecated("automated", "use --filter=is-automated=true instead") + flags.MarkDeprecated("stars", "use --filter=stars=3 instead") + + return cmd +} + +func runSearch(dockerCli command.Cli, options searchOptions) error { + indexInfo, err := registry.ParseSearchIndexInfo(options.term) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, indexInfo) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, indexInfo, "search") + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + searchOptions := types.ImageSearchOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + Filters: options.filter.Value(), + Limit: options.limit, + } + + clnt := dockerCli.Client() + + results, err := clnt.ImageSearch(ctx, options.term, searchOptions) + if err != nil { + return err + } + + sort.Slice(results, func(i, j int) bool { + return results[j].StarCount < 
results[i].StarCount + }) + searchCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewSearchFormat(options.format), + Trunc: !options.noTrunc, + } + return SearchWrite(searchCtx, results, options.automated, int(options.stars)) +} diff --git a/cli/cli/command/registry/testdata/search-context-write-stars-table.golden b/cli/cli/command/registry/testdata/search-context-write-stars-table.golden new file mode 100644 index 00000000..1a66b429 --- /dev/null +++ b/cli/cli/command/registry/testdata/search-context-write-stars-table.golden @@ -0,0 +1,2 @@ +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +result1 Official build 5000 [OK] diff --git a/cli/cli/command/registry/testdata/search-context-write-table.golden b/cli/cli/command/registry/testdata/search-context-write-table.golden new file mode 100644 index 00000000..72784fd0 --- /dev/null +++ b/cli/cli/command/registry/testdata/search-context-write-table.golden @@ -0,0 +1,3 @@ +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +result1 Official build 5000 [OK] +result2 Not official 5 [OK] diff --git a/cli/cli/command/registry_test.go b/cli/cli/command/registry_test.go new file mode 100644 index 00000000..a40c10e0 --- /dev/null +++ b/cli/cli/command/registry_test.go @@ -0,0 +1,151 @@ +package command_test + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + + // Prevents a circular import with "github.com/docker/cli/internal/test" + + . 
"github.com/docker/cli/cli/command" + configtypes "github.com/docker/cli/cli/config/types" + "github.com/docker/cli/cli/debug" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + infoFunc func() (types.Info, error) +} + +var testAuthConfigs = []types.AuthConfig{ + { + ServerAddress: "https://index.docker.io/v1/", + Username: "u0", + Password: "p0", + }, + { + ServerAddress: "server1.io", + Username: "u1", + Password: "p1", + }, +} + +func (cli *fakeClient) Info(_ context.Context) (types.Info, error) { + if cli.infoFunc != nil { + return cli.infoFunc() + } + return types.Info{}, nil +} + +func TestElectAuthServer(t *testing.T) { + testCases := []struct { + expectedAuthServer string + expectedWarning string + infoFunc func() (types.Info, error) + }{ + { + expectedAuthServer: "https://index.docker.io/v1/", + expectedWarning: "", + infoFunc: func() (types.Info, error) { + return types.Info{IndexServerAddress: "https://index.docker.io/v1/"}, nil + }, + }, + { + expectedAuthServer: "https://index.docker.io/v1/", + expectedWarning: "Empty registry endpoint from daemon", + infoFunc: func() (types.Info, error) { + return types.Info{IndexServerAddress: ""}, nil + }, + }, + { + expectedAuthServer: "https://foo.bar", + expectedWarning: "", + infoFunc: func() (types.Info, error) { + return types.Info{IndexServerAddress: "https://foo.bar"}, nil + }, + }, + { + expectedAuthServer: "https://index.docker.io/v1/", + expectedWarning: "failed to get default registry endpoint from daemon", + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error getting info") + }, + }, + } + // Enable debug to see warnings we're checking for + debug.Enable() + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{infoFunc: tc.infoFunc}) + server := ElectAuthServer(context.Background(), cli) + assert.Check(t, is.Equal(tc.expectedAuthServer, server)) + 
actual := cli.ErrBuffer().String() + if tc.expectedWarning == "" { + assert.Check(t, is.Len(actual, 0)) + } else { + assert.Check(t, is.Contains(actual, tc.expectedWarning)) + } + } +} + +func TestGetDefaultAuthConfig(t *testing.T) { + testCases := []struct { + checkCredStore bool + inputServerAddress string + expectedErr string + expectedAuthConfig types.AuthConfig + }{ + { + checkCredStore: false, + inputServerAddress: "", + expectedErr: "", + expectedAuthConfig: types.AuthConfig{ + ServerAddress: "", + Username: "", + Password: "", + }, + }, + { + checkCredStore: true, + inputServerAddress: testAuthConfigs[0].ServerAddress, + expectedErr: "", + expectedAuthConfig: testAuthConfigs[0], + }, + { + checkCredStore: true, + inputServerAddress: testAuthConfigs[1].ServerAddress, + expectedErr: "", + expectedAuthConfig: testAuthConfigs[1], + }, + { + checkCredStore: true, + inputServerAddress: fmt.Sprintf("https://%s", testAuthConfigs[1].ServerAddress), + expectedErr: "", + expectedAuthConfig: testAuthConfigs[1], + }, + } + cli := test.NewFakeCli(&fakeClient{}) + errBuf := new(bytes.Buffer) + cli.SetErr(errBuf) + for _, authconfig := range testAuthConfigs { + cli.ConfigFile().GetCredentialsStore(authconfig.ServerAddress).Store(configtypes.AuthConfig(authconfig)) + } + for _, tc := range testCases { + serverAddress := tc.inputServerAddress + authconfig, err := GetDefaultAuthConfig(cli, tc.checkCredStore, serverAddress, serverAddress == "https://index.docker.io/v1/") + if tc.expectedErr != "" { + assert.Check(t, err != nil) + assert.Check(t, is.Equal(tc.expectedErr, err.Error())) + } else { + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, *authconfig)) + } + } +} diff --git a/cli/cli/command/secret/client_test.go b/cli/cli/command/secret/client_test.go new file mode 100644 index 00000000..ea672fa4 --- /dev/null +++ b/cli/cli/command/secret/client_test.go @@ -0,0 +1,45 @@ +package secret + +import ( + "context" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + secretCreateFunc func(swarm.SecretSpec) (types.SecretCreateResponse, error) + secretInspectFunc func(string) (swarm.Secret, []byte, error) + secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error) + secretRemoveFunc func(string) error +} + +func (c *fakeClient) SecretCreate(ctx context.Context, spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if c.secretCreateFunc != nil { + return c.secretCreateFunc(spec) + } + return types.SecretCreateResponse{}, nil +} + +func (c *fakeClient) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if c.secretInspectFunc != nil { + return c.secretInspectFunc(id) + } + return swarm.Secret{}, nil, nil +} + +func (c *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if c.secretListFunc != nil { + return c.secretListFunc(options) + } + return []swarm.Secret{}, nil +} + +func (c *fakeClient) SecretRemove(ctx context.Context, name string) error { + if c.secretRemoveFunc != nil { + return c.secretRemoveFunc(name) + } + return nil +} diff --git a/cli/cli/command/secret/cmd.go b/cli/cli/command/secret/cmd.go new file mode 100644 index 00000000..a29d2def --- /dev/null +++ b/cli/cli/command/secret/cmd.go @@ -0,0 +1,29 @@ +package secret + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewSecretCommand returns a cobra command for `secret` subcommands +func NewSecretCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "secret", + Short: "Manage Docker secrets", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.25", + "swarm": "", + }, + } + cmd.AddCommand( + newSecretListCommand(dockerCli), + 
newSecretCreateCommand(dockerCli), + newSecretInspectCommand(dockerCli), + newSecretRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/secret/create.go b/cli/cli/command/secret/create.go new file mode 100644 index 00000000..79456fdf --- /dev/null +++ b/cli/cli/command/secret/create.go @@ -0,0 +1,109 @@ +package secret + +import ( + "context" + "fmt" + "io" + "io/ioutil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + templateDriver string + file string + labels opts.ListOpts +} + +func newSecretCreateCommand(dockerCli command.Cli) *cobra.Command { + options := createOptions{ + labels: opts.NewListOpts(opts.ValidateLabel), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] SECRET [file|-]", + Short: "Create a secret from a file or STDIN as content", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.name = args[0] + if len(args) == 2 { + options.file = args[1] + } + return runSecretCreate(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.VarP(&options.labels, "label", "l", "Secret labels") + flags.StringVarP(&options.driver, "driver", "d", "", "Secret driver") + flags.SetAnnotation("driver", "version", []string{"1.31"}) + flags.StringVar(&options.templateDriver, "template-driver", "", "Template driver") + flags.SetAnnotation("template-driver", "version", []string{"1.37"}) + + return cmd +} + +func runSecretCreate(dockerCli command.Cli, options createOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if options.driver != "" && options.file != "" { + return errors.Errorf("When using secret driver secret data must be empty") + } + + secretData, err := readSecretData(dockerCli.In(), options.file) + 
if err != nil { + return errors.Errorf("Error reading content from %q: %v", options.file, err) + } + spec := swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: options.name, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + }, + Data: secretData, + } + if options.driver != "" { + spec.Driver = &swarm.Driver{ + Name: options.driver, + } + } + if options.templateDriver != "" { + spec.Templating = &swarm.Driver{ + Name: options.templateDriver, + } + } + r, err := client.SecretCreate(ctx, spec) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), r.ID) + return nil +} + +func readSecretData(in io.ReadCloser, file string) ([]byte, error) { + // Read secret value from external driver + if file == "" { + return nil, nil + } + if file != "-" { + var err error + in, err = system.OpenSequential(file) + if err != nil { + return nil, err + } + defer in.Close() + } + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, err + } + return data, nil +} diff --git a/cli/cli/command/secret/create_test.go b/cli/cli/command/secret/create_test.go new file mode 100644 index 00000000..eb9c0898 --- /dev/null +++ b/cli/cli/command/secret/create_test.go @@ -0,0 +1,169 @@ +package secret + +import ( + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +const secretDataFile = "secret-create-with-name.golden" + +func TestSecretCreateErrors(t *testing.T) { + testCases := []struct { + args []string + secretCreateFunc func(swarm.SecretSpec) (types.SecretCreateResponse, error) + expectedError string + }{ + {args: []string{"too", "many", "arguments"}, + expectedError: "requires at least 1 and at most 2 arguments", + }, + {args: []string{"create", "--driver", "driver", "-"}, + expectedError: "secret data must be empty", + }, + 
{ + args: []string{"name", filepath.Join("testdata", secretDataFile)}, + secretCreateFunc: func(secretSpec swarm.SecretSpec) (types.SecretCreateResponse, error) { + return types.SecretCreateResponse{}, errors.Errorf("error creating secret") + }, + expectedError: "error creating secret", + }, + } + for _, tc := range testCases { + cmd := newSecretCreateCommand( + test.NewFakeCli(&fakeClient{ + secretCreateFunc: tc.secretCreateFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSecretCreateWithName(t *testing.T) { + name := "foo" + data, err := ioutil.ReadFile(filepath.Join("testdata", secretDataFile)) + assert.NilError(t, err) + + expected := swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: make(map[string]string), + }, + Data: data, + } + + cli := test.NewFakeCli(&fakeClient{ + secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if !reflect.DeepEqual(spec, expected) { + return types.SecretCreateResponse{}, errors.Errorf("expected %+v, got %+v", expected, spec) + } + return types.SecretCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newSecretCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", secretDataFile)}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestSecretCreateWithDriver(t *testing.T) { + expectedDriver := &swarm.Driver{ + Name: "secret-driver", + } + name := "foo" + + cli := test.NewFakeCli(&fakeClient{ + secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if spec.Name != name { + return types.SecretCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + if spec.Driver.Name != expectedDriver.Name { + return types.SecretCreateResponse{}, errors.Errorf("expected driver %v, got %v", expectedDriver, spec.Labels) 
+ } + + return types.SecretCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newSecretCreateCommand(cli) + cmd.SetArgs([]string{name}) + cmd.Flags().Set("driver", expectedDriver.Name) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestSecretCreateWithTemplatingDriver(t *testing.T) { + expectedDriver := &swarm.Driver{ + Name: "template-driver", + } + name := "foo" + + cli := test.NewFakeCli(&fakeClient{ + secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if spec.Name != name { + return types.SecretCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + if spec.Templating.Name != expectedDriver.Name { + return types.SecretCreateResponse{}, errors.Errorf("expected driver %v, got %v", expectedDriver, spec.Labels) + } + + return types.SecretCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newSecretCreateCommand(cli) + cmd.SetArgs([]string{name}) + cmd.Flags().Set("template-driver", expectedDriver.Name) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} + +func TestSecretCreateWithLabels(t *testing.T) { + expectedLabels := map[string]string{ + "lbl1": "Label-foo", + "lbl2": "Label-bar", + } + name := "foo" + + cli := test.NewFakeCli(&fakeClient{ + secretCreateFunc: func(spec swarm.SecretSpec) (types.SecretCreateResponse, error) { + if spec.Name != name { + return types.SecretCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name) + } + + if !reflect.DeepEqual(spec.Labels, expectedLabels) { + return types.SecretCreateResponse{}, errors.Errorf("expected labels %v, got %v", expectedLabels, spec.Labels) + } + + return types.SecretCreateResponse{ + ID: "ID-" + spec.Name, + }, nil + }, + }) + + cmd := newSecretCreateCommand(cli) + cmd.SetArgs([]string{name, filepath.Join("testdata", 
secretDataFile)}) + cmd.Flags().Set("label", "lbl1=Label-foo") + cmd.Flags().Set("label", "lbl2=Label-bar") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/secret/formatter.go b/cli/cli/command/secret/formatter.go new file mode 100644 index 00000000..24a56bda --- /dev/null +++ b/cli/cli/command/secret/formatter.go @@ -0,0 +1,179 @@ +package secret + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultSecretTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.CreatedAt}}\t{{.UpdatedAt}}" + secretIDHeader = "ID" + secretCreatedHeader = "CREATED" + secretUpdatedHeader = "UPDATED" + secretInspectPrettyTemplate formatter.Format = `ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Driver: {{.Driver}} +Created at: {{.CreatedAt}} +Updated at: {{.UpdatedAt}}` +) + +// NewFormat returns a Format for rendering using a secret Context +func NewFormat(source string, quiet bool) formatter.Format { + switch source { + case formatter.PrettyFormatKey: + return secretInspectPrettyTemplate + case formatter.TableFormatKey: + if quiet { + return formatter.DefaultQuietFormat + } + return defaultSecretTableFormat + } + return formatter.Format(source) +} + +// FormatWrite writes the context +func FormatWrite(ctx formatter.Context, secrets []swarm.Secret) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, secret := range secrets { + secretCtx := &secretContext{s: secret} + if err := format(secretCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(newSecretContext(), render) +} + +func 
newSecretContext() *secretContext { + sCtx := &secretContext{} + + sCtx.Header = formatter.SubHeaderContext{ + "ID": secretIDHeader, + "Name": formatter.NameHeader, + "Driver": formatter.DriverHeader, + "CreatedAt": secretCreatedHeader, + "UpdatedAt": secretUpdatedHeader, + "Labels": formatter.LabelsHeader, + } + return sCtx +} + +type secretContext struct { + formatter.HeaderContext + s swarm.Secret +} + +func (c *secretContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *secretContext) ID() string { + return c.s.ID +} + +func (c *secretContext) Name() string { + return c.s.Spec.Annotations.Name +} + +func (c *secretContext) CreatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.CreatedAt)) + " ago" +} + +func (c *secretContext) Driver() string { + if c.s.Spec.Driver == nil { + return "" + } + return c.s.Spec.Driver.Name +} + +func (c *secretContext) UpdatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.UpdatedAt)) + " ago" +} + +func (c *secretContext) Labels() string { + mapLabels := c.s.Spec.Annotations.Labels + if mapLabels == nil { + return "" + } + var joinLabels []string + for k, v := range mapLabels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *secretContext) Label(name string) string { + if c.s.Spec.Annotations.Labels == nil { + return "" + } + return c.s.Spec.Annotations.Labels[name] +} + +// InspectFormatWrite renders the context for a list of secrets +func InspectFormatWrite(ctx formatter.Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != secretInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext formatter.SubContext) error) error { + for _, ref := range refs { + secretI, _, err := getRef(ref) + if err != nil { + return err + } + secret, ok := secretI.(swarm.Secret) + if !ok { + 
return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&secretInspectContext{Secret: secret}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&secretInspectContext{}, render) +} + +type secretInspectContext struct { + swarm.Secret + formatter.SubContext +} + +func (ctx *secretInspectContext) ID() string { + return ctx.Secret.ID +} + +func (ctx *secretInspectContext) Name() string { + return ctx.Secret.Spec.Name +} + +func (ctx *secretInspectContext) Labels() map[string]string { + return ctx.Secret.Spec.Labels +} + +func (ctx *secretInspectContext) Driver() string { + if ctx.Secret.Spec.Driver == nil { + return "" + } + return ctx.Secret.Spec.Driver.Name +} + +func (ctx *secretInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Secret.CreatedAt) +} + +func (ctx *secretInspectContext) UpdatedAt() string { + return command.PrettyPrint(ctx.Secret.UpdatedAt) +} diff --git a/cli/cli/command/secret/formatter_test.go b/cli/cli/command/secret/formatter_test.go new file mode 100644 index 00000000..751e7c3d --- /dev/null +++ b/cli/cli/command/secret/formatter_test.go @@ -0,0 +1,65 @@ +package secret + +import ( + "bytes" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSecretContextFormatWrite(t *testing.T) { + // Check default output format (verbose and non-verbose mode) for table headers + cases := []struct { + context formatter.Context + expected string + }{ + // Errors + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + {formatter.Context{Format: NewFormat("table", false)}, + `ID NAME DRIVER CREATED UPDATED +1 passwords Less than 
a second ago Less than a second ago +2 id_rsa Less than a second ago Less than a second ago +`}, + {formatter.Context{Format: NewFormat("table {{.Name}}", true)}, + `NAME +passwords +id_rsa +`}, + {formatter.Context{Format: NewFormat("{{.ID}}-{{.Name}}", false)}, + `1-passwords +2-id_rsa +`}, + } + + secrets := []swarm.Secret{ + {ID: "1", + Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, + Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "passwords"}}}, + {ID: "2", + Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()}, + Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "id_rsa"}}}, + } + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + if err := FormatWrite(testcase.context, secrets); err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} diff --git a/cli/cli/command/secret/inspect.go b/cli/cli/command/secret/inspect.go new file mode 100644 index 00000000..f7866de1 --- /dev/null +++ b/cli/cli/command/secret/inspect.go @@ -0,0 +1,65 @@ +package secret + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + names []string + format string + pretty bool +} + +func newSecretInspectCommand(dockerCli command.Cli) *cobra.Command { + opts := inspectOptions{} + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SECRET [SECRET...]", + Short: "Display detailed information on one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runSecretInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + cmd.Flags().BoolVar(&opts.pretty, "pretty", false, "Print the 
information in a human friendly format") + return cmd +} + +func runSecretInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(id string) (interface{}, []byte, error) { + return client.SecretInspectWithRaw(ctx, id) + } + f := opts.format + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + secretCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(f, false), + } + + if err := InspectFormatWrite(secretCtx, opts.names, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/cli/cli/command/secret/inspect_test.go b/cli/cli/command/secret/inspect_test.go new file mode 100644 index 00000000..67addaea --- /dev/null +++ b/cli/cli/command/secret/inspect_test.go @@ -0,0 +1,173 @@ +package secret + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSecretInspectErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + secretInspectFunc func(secretID string) (swarm.Secret, []byte, error) + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"foo"}, + secretInspectFunc: func(secretID string) (swarm.Secret, []byte, error) { + return swarm.Secret{}, nil, errors.Errorf("error while inspecting the secret") + }, + expectedError: "error while inspecting the secret", + }, + { + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template parsing error", + }, + { + args: []string{"foo", "bar"}, + secretInspectFunc: func(secretID string) (swarm.Secret, []byte, error) { + if secretID == "foo" { + return *Secret(SecretName("foo")), nil, nil + } + return swarm.Secret{}, nil, errors.Errorf("error while inspecting the secret") + }, + expectedError: "error while inspecting the secret", + }, + } + for _, tc := range testCases { + cmd := newSecretInspectCommand( + test.NewFakeCli(&fakeClient{ + secretInspectFunc: tc.secretInspectFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSecretInspectWithoutFormat(t *testing.T) { + testCases := []struct { + name string + args []string + secretInspectFunc func(secretID string) (swarm.Secret, []byte, error) + }{ + { + name: "single-secret", + args: []string{"foo"}, + secretInspectFunc: func(name string) (swarm.Secret, []byte, error) { + if name != "foo" { + return swarm.Secret{}, nil, errors.Errorf("Invalid name, expected %s, got %s", "foo", name) + } + return *Secret(SecretID("ID-foo"), SecretName("foo")), nil, nil + }, + }, + { + name: "multiple-secrets-with-labels", + 
args: []string{"foo", "bar"}, + secretInspectFunc: func(name string) (swarm.Secret, []byte, error) { + return *Secret(SecretID("ID-"+name), SecretName(name), SecretLabels(map[string]string{ + "label1": "label-foo", + })), nil, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + secretInspectFunc: tc.secretInspectFunc, + }) + cmd := newSecretInspectCommand(cli) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("secret-inspect-without-format.%s.golden", tc.name)) + } +} + +func TestSecretInspectWithFormat(t *testing.T) { + secretInspectFunc := func(name string) (swarm.Secret, []byte, error) { + return *Secret(SecretName("foo"), SecretLabels(map[string]string{ + "label1": "label-foo", + })), nil, nil + } + testCases := []struct { + name string + format string + args []string + secretInspectFunc func(name string) (swarm.Secret, []byte, error) + }{ + { + name: "simple-template", + format: "{{.Spec.Name}}", + args: []string{"foo"}, + secretInspectFunc: secretInspectFunc, + }, + { + name: "json-template", + format: "{{json .Spec.Labels}}", + args: []string{"foo"}, + secretInspectFunc: secretInspectFunc, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + secretInspectFunc: tc.secretInspectFunc, + }) + cmd := newSecretInspectCommand(cli) + cmd.SetArgs(tc.args) + cmd.Flags().Set("format", tc.format) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("secret-inspect-with-format.%s.golden", tc.name)) + } +} + +func TestSecretInspectPretty(t *testing.T) { + testCases := []struct { + name string + secretInspectFunc func(string) (swarm.Secret, []byte, error) + }{ + { + name: "simple", + secretInspectFunc: func(id string) (swarm.Secret, []byte, error) { + return *Secret( + SecretLabels(map[string]string{ + "lbl1": "value1", + }), + SecretID("secretID"), + SecretName("secretName"), + SecretDriver("driver"), 
+ SecretCreatedAt(time.Time{}), + SecretUpdatedAt(time.Time{}), + ), []byte{}, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + secretInspectFunc: tc.secretInspectFunc, + }) + cmd := newSecretInspectCommand(cli) + cmd.SetArgs([]string{"secretID"}) + cmd.Flags().Set("pretty", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("secret-inspect-pretty.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/secret/ls.go b/cli/cli/command/secret/ls.go new file mode 100644 index 00000000..878f6fe9 --- /dev/null +++ b/cli/cli/command/secret/ls.go @@ -0,0 +1,69 @@ +package secret + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newSecretListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List secrets", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runSecretList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display IDs") + flags.StringVarP(&options.format, "format", "", "", "Pretty-print secrets using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runSecretList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + secrets, err := client.SecretList(ctx, types.SecretListOptions{Filters: options.filter.Value()}) + if err != nil { + return err + } + format := options.format + if 
len(format) == 0 { + if len(dockerCli.ConfigFile().SecretFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().SecretFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Slice(secrets, func(i, j int) bool { + return sortorder.NaturalLess(secrets[i].Spec.Name, secrets[j].Spec.Name) + }) + + secretCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(format, options.quiet), + } + return FormatWrite(secretCtx, secrets) +} diff --git a/cli/cli/command/secret/ls_test.go b/cli/cli/command/secret/ls_test.go new file mode 100644 index 00000000..e1417115 --- /dev/null +++ b/cli/cli/command/secret/ls_test.go @@ -0,0 +1,160 @@ +package secret + +import ( + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestSecretListErrors(t *testing.T) { + testCases := []struct { + args []string + secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error) + expectedError string + }{ + { + args: []string{"foo"}, + expectedError: "accepts no argument", + }, + { + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{}, errors.Errorf("error listing secrets") + }, + expectedError: "error listing secrets", + }, + } + for _, tc := range testCases { + cmd := newSecretListCommand( + test.NewFakeCli(&fakeClient{ + secretListFunc: tc.secretListFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSecretList(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{ + *Secret(SecretID("ID-1-foo"), + SecretName("1-foo"), + SecretVersion(swarm.Version{Index: 10}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Secret(SecretID("ID-10-foo"), + SecretName("10-foo"), + SecretVersion(swarm.Version{Index: 11}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + SecretDriver("driver"), + ), + *Secret(SecretID("ID-2-foo"), + SecretName("2-foo"), + SecretVersion(swarm.Version{Index: 11}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + SecretDriver("driver"), + ), + }, nil + }, + }) + cmd := newSecretListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-sort.golden") +} + +func TestSecretListWithQuietOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options 
types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{ + *Secret(SecretID("ID-foo"), SecretName("foo")), + *Secret(SecretID("ID-bar"), SecretName("bar"), SecretLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cmd := newSecretListCommand(cli) + cmd.Flags().Set("quiet", "true") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-with-quiet-option.golden") +} + +func TestSecretListWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{ + *Secret(SecretID("ID-foo"), SecretName("foo")), + *Secret(SecretID("ID-bar"), SecretName("bar"), SecretLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + SecretFormat: "{{ .Name }} {{ .Labels }}", + }) + cmd := newSecretListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-with-config-format.golden") +} + +func TestSecretListWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + return []swarm.Secret{ + *Secret(SecretID("ID-foo"), SecretName("foo")), + *Secret(SecretID("ID-bar"), SecretName("bar"), SecretLabels(map[string]string{ + "label": "label-bar", + })), + }, nil + }, + }) + cmd := newSecretListCommand(cli) + cmd.Flags().Set("format", "{{ .Name }} {{ .Labels }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-with-format.golden") +} + +func TestSecretListWithFilter(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + secretListFunc: func(options types.SecretListOptions) ([]swarm.Secret, error) { + assert.Check(t, is.Equal("foo", options.Filters.Get("name")[0]), "foo") + assert.Check(t, is.Equal("lbl1=Label-bar", options.Filters.Get("label")[0])) + 
return []swarm.Secret{ + *Secret(SecretID("ID-foo"), + SecretName("foo"), + SecretVersion(swarm.Version{Index: 10}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + *Secret(SecretID("ID-bar"), + SecretName("bar"), + SecretVersion(swarm.Version{Index: 11}), + SecretCreatedAt(time.Now().Add(-2*time.Hour)), + SecretUpdatedAt(time.Now().Add(-1*time.Hour)), + ), + }, nil + }, + }) + cmd := newSecretListCommand(cli) + cmd.Flags().Set("filter", "name=foo") + cmd.Flags().Set("filter", "label=lbl1=Label-bar") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "secret-list-with-filter.golden") +} diff --git a/cli/cli/command/secret/remove.go b/cli/cli/command/secret/remove.go new file mode 100644 index 00000000..bdf47b77 --- /dev/null +++ b/cli/cli/command/secret/remove.go @@ -0,0 +1,53 @@ +package secret + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + names []string +} + +func newSecretRemoveCommand(dockerCli command.Cli) *cobra.Command { + return &cobra.Command{ + Use: "rm SECRET [SECRET...]", + Aliases: []string{"remove"}, + Short: "Remove one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts := removeOptions{ + names: args, + } + return runSecretRemove(dockerCli, opts) + }, + } +} + +func runSecretRemove(dockerCli command.Cli, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, name := range opts.names { + if err := client.SecretRemove(ctx, name); err != nil { + errs = append(errs, err.Error()) + continue + } + + fmt.Fprintln(dockerCli.Out(), name) + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/cli/cli/command/secret/remove_test.go 
b/cli/cli/command/secret/remove_test.go new file mode 100644 index 00000000..d2fc8ad0 --- /dev/null +++ b/cli/cli/command/secret/remove_test.go @@ -0,0 +1,79 @@ +package secret + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSecretRemoveErrors(t *testing.T) { + testCases := []struct { + args []string + secretRemoveFunc func(string) error + expectedError string + }{ + { + args: []string{}, + expectedError: "requires at least 1 argument.", + }, + { + args: []string{"foo"}, + secretRemoveFunc: func(name string) error { + return errors.Errorf("error removing secret") + }, + expectedError: "error removing secret", + }, + } + for _, tc := range testCases { + cmd := newSecretRemoveCommand( + test.NewFakeCli(&fakeClient{ + secretRemoveFunc: tc.secretRemoveFunc, + }), + ) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSecretRemoveWithName(t *testing.T) { + names := []string{"foo", "bar"} + var removedSecrets []string + cli := test.NewFakeCli(&fakeClient{ + secretRemoveFunc: func(name string) error { + removedSecrets = append(removedSecrets, name) + return nil + }, + }) + cmd := newSecretRemoveCommand(cli) + cmd.SetArgs(names) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(names, strings.Split(strings.TrimSpace(cli.OutBuffer().String()), "\n"))) + assert.Check(t, is.DeepEqual(names, removedSecrets)) +} + +func TestSecretRemoveContinueAfterError(t *testing.T) { + names := []string{"foo", "bar"} + var removedSecrets []string + + cli := test.NewFakeCli(&fakeClient{ + secretRemoveFunc: func(name string) error { + removedSecrets = append(removedSecrets, name) + if name == "foo" { + return errors.Errorf("error removing secret: %s", name) + } + return nil + }, + }) + + cmd := newSecretRemoveCommand(cli) + cmd.SetOutput(ioutil.Discard) + 
cmd.SetArgs(names) + assert.Error(t, cmd.Execute(), "error removing secret: foo") + assert.Check(t, is.DeepEqual(names, removedSecrets)) +} diff --git a/cli/cli/command/secret/testdata/secret-create-with-name.golden b/cli/cli/command/secret/testdata/secret-create-with-name.golden new file mode 100644 index 00000000..788642a9 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-create-with-name.golden @@ -0,0 +1 @@ +secret_foo_bar diff --git a/cli/cli/command/secret/testdata/secret-inspect-pretty.simple.golden b/cli/cli/command/secret/testdata/secret-inspect-pretty.simple.golden new file mode 100644 index 00000000..37234eff --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-pretty.simple.golden @@ -0,0 +1,7 @@ +ID: secretID +Name: secretName +Labels: + - lbl1=value1 +Driver: driver +Created at: 0001-01-01 00:00:00 +0000 utc +Updated at: 0001-01-01 00:00:00 +0000 utc diff --git a/cli/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden b/cli/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden new file mode 100644 index 00000000..aab678f8 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-with-format.json-template.golden @@ -0,0 +1 @@ +{"label1":"label-foo"} diff --git a/cli/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden b/cli/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden new file mode 100644 index 00000000..257cc564 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-with-format.simple-template.golden @@ -0,0 +1 @@ +foo diff --git a/cli/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden b/cli/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden new file mode 100644 index 00000000..b01a400c --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-without-format.multiple-secrets-with-labels.golden @@ -0,0 +1,26 @@ +[ 
+ { + "ID": "ID-foo", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "foo", + "Labels": { + "label1": "label-foo" + } + } + }, + { + "ID": "ID-bar", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "bar", + "Labels": { + "label1": "label-foo" + } + } + } +] diff --git a/cli/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden b/cli/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden new file mode 100644 index 00000000..c4f41c10 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-inspect-without-format.single-secret.golden @@ -0,0 +1,12 @@ +[ + { + "ID": "ID-foo", + "Version": {}, + "CreatedAt": "0001-01-01T00:00:00Z", + "UpdatedAt": "0001-01-01T00:00:00Z", + "Spec": { + "Name": "foo", + "Labels": null + } + } +] diff --git a/cli/cli/command/secret/testdata/secret-list-sort.golden b/cli/cli/command/secret/testdata/secret-list-sort.golden new file mode 100644 index 00000000..805d26f3 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-sort.golden @@ -0,0 +1,4 @@ +ID NAME DRIVER CREATED UPDATED +ID-1-foo 1-foo 2 hours ago About an hour ago +ID-2-foo 2-foo driver 2 hours ago About an hour ago +ID-10-foo 10-foo driver 2 hours ago About an hour ago diff --git a/cli/cli/command/secret/testdata/secret-list-with-config-format.golden b/cli/cli/command/secret/testdata/secret-list-with-config-format.golden new file mode 100644 index 00000000..a64bb595 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-with-config-format.golden @@ -0,0 +1,2 @@ +bar label=label-bar +foo diff --git a/cli/cli/command/secret/testdata/secret-list-with-filter.golden b/cli/cli/command/secret/testdata/secret-list-with-filter.golden new file mode 100644 index 00000000..388d2874 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-with-filter.golden @@ -0,0 +1,3 @@ +ID NAME 
DRIVER CREATED UPDATED +ID-bar bar 2 hours ago About an hour ago +ID-foo foo 2 hours ago About an hour ago diff --git a/cli/cli/command/secret/testdata/secret-list-with-format.golden b/cli/cli/command/secret/testdata/secret-list-with-format.golden new file mode 100644 index 00000000..a64bb595 --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-with-format.golden @@ -0,0 +1,2 @@ +bar label=label-bar +foo diff --git a/cli/cli/command/secret/testdata/secret-list-with-quiet-option.golden b/cli/cli/command/secret/testdata/secret-list-with-quiet-option.golden new file mode 100644 index 00000000..145fc38d --- /dev/null +++ b/cli/cli/command/secret/testdata/secret-list-with-quiet-option.golden @@ -0,0 +1,2 @@ +ID-bar +ID-foo diff --git a/cli/cli/command/service/client_test.go b/cli/cli/command/service/client_test.go new file mode 100644 index 00000000..8d0d592c --- /dev/null +++ b/cli/cli/command/service/client_test.go @@ -0,0 +1,77 @@ +package service + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" +) + +type fakeClient struct { + client.Client + serviceInspectWithRawFunc func(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + serviceUpdateFunc func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + serviceListFunc func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) + taskListFunc func(context.Context, types.TaskListOptions) ([]swarm.Task, error) + infoFunc func(ctx context.Context) (types.Info, error) + networkInspectFunc func(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) +} + +func (f *fakeClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + return nil, nil +} + +func (f *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + if f.taskListFunc != nil { + return f.taskListFunc(ctx, options) + } + return nil, nil +} + +func (f *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if f.serviceInspectWithRawFunc != nil { + return f.serviceInspectWithRawFunc(ctx, serviceID, options) + } + + return *Service(ServiceID(serviceID)), []byte{}, nil +} + +func (f *fakeClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + if f.serviceListFunc != nil { + return f.serviceListFunc(ctx, options) + } + + return nil, nil +} + +func (f *fakeClient) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + if f.serviceUpdateFunc != nil { + return f.serviceUpdateFunc(ctx, serviceID, version, service, options) + } + + return 
types.ServiceUpdateResponse{}, nil +} + +func (f *fakeClient) Info(ctx context.Context) (types.Info, error) { + if f.infoFunc == nil { + return types.Info{}, nil + } + return f.infoFunc(ctx) +} + +func (f *fakeClient) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + if f.networkInspectFunc != nil { + return f.networkInspectFunc(ctx, networkID, options) + } + return types.NetworkResource{}, nil +} + +func newService(id string, name string) swarm.Service { + return swarm.Service{ + ID: id, + Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: name}}, + } +} diff --git a/cli/cli/command/service/cmd.go b/cli/cli/command/service/cmd.go new file mode 100644 index 00000000..98af9852 --- /dev/null +++ b/cli/cli/command/service/cmd.go @@ -0,0 +1,34 @@ +package service + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewServiceCommand returns a cobra command for `service` subcommands +func NewServiceCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "service", + Short: "Manage services", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.24", + "swarm": "", + }, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newPsCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newScaleCommand(dockerCli), + newUpdateCommand(dockerCli), + newLogsCommand(dockerCli), + newRollbackCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/service/create.go b/cli/cli/command/service/create.go new file mode 100644 index 00000000..4c709eb3 --- /dev/null +++ b/cli/cli/command/service/create.go @@ -0,0 +1,181 @@ +package service + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + cliopts "github.com/docker/cli/opts" + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + opts := newServiceOptions() + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new service", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + if len(args) > 1 { + opts.args = args[1:] + } + return runCreate(dockerCli, cmd.Flags(), opts) + }, + } + flags := cmd.Flags() + flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)") + flags.StringVar(&opts.name, flagName, "", "Service name") + + addServiceFlags(flags, opts, buildServiceDefaultFlagMapping()) + + flags.VarP(&opts.labels, flagLabel, "l", "Service labels") + flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels") + flags.VarP(&opts.env, flagEnv, "e", "Set environment variables") + flags.Var(&opts.envFile, flagEnvFile, "Read in a file of environment variables") + flags.Var(&opts.mounts, flagMount, "Attach a filesystem mount to the service") + flags.Var(&opts.constraints, flagConstraint, "Placement constraints") + flags.Var(&opts.placementPrefs, flagPlacementPref, "Add a placement preference") + flags.SetAnnotation(flagPlacementPref, "version", []string{"1.28"}) + flags.Var(&opts.networks, flagNetwork, "Network attachments") + flags.Var(&opts.secrets, flagSecret, "Specify secrets to expose to the service") + flags.SetAnnotation(flagSecret, "version", []string{"1.25"}) + flags.Var(&opts.configs, flagConfig, "Specify configurations to expose to the service") + flags.SetAnnotation(flagConfig, "version", []string{"1.30"}) + flags.VarP(&opts.endpoint.publishPorts, flagPublish, "p", "Publish a port as a node port") + flags.Var(&opts.groups, flagGroup, "Set one or more 
supplementary user groups for the container") + flags.SetAnnotation(flagGroup, "version", []string{"1.25"}) + flags.Var(&opts.dns, flagDNS, "Set custom DNS servers") + flags.SetAnnotation(flagDNS, "version", []string{"1.25"}) + flags.Var(&opts.dnsOption, flagDNSOption, "Set DNS options") + flags.SetAnnotation(flagDNSOption, "version", []string{"1.25"}) + flags.Var(&opts.dnsSearch, flagDNSSearch, "Set custom DNS search domains") + flags.SetAnnotation(flagDNSSearch, "version", []string{"1.25"}) + flags.Var(&opts.hosts, flagHost, "Set one or more custom host-to-IP mappings (host:ip)") + flags.SetAnnotation(flagHost, "version", []string{"1.25"}) + flags.BoolVar(&opts.init, flagInit, false, "Use an init inside each service container to forward signals and reap processes") + flags.SetAnnotation(flagInit, "version", []string{"1.37"}) + flags.Var(&opts.sysctls, flagSysCtl, "Sysctl options") + flags.SetAnnotation(flagSysCtl, "version", []string{"1.40"}) + + flags.Var(cliopts.NewListOptsRef(&opts.resources.resGenericResources, ValidateSingleGenericResource), "generic-resource", "User defined resources") + flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) + + flags.SetInterspersed(false) + return cmd +} + +func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, opts *serviceOptions) error { + apiClient := dockerCli.Client() + createOpts := types.ServiceCreateOptions{} + + ctx := context.Background() + + service, err := opts.ToService(ctx, apiClient, flags) + if err != nil { + return err + } + + if err = validateAPIVersion(service, dockerCli.Client().ClientVersion()); err != nil { + return err + } + + specifiedSecrets := opts.secrets.Value() + if len(specifiedSecrets) > 0 { + // parse and validate secrets + secrets, err := ParseSecrets(apiClient, specifiedSecrets) + if err != nil { + return err + } + service.TaskTemplate.ContainerSpec.Secrets = secrets + } + + if err := setConfigs(apiClient, &service, opts); err != nil { + return err + } + + if err := 
resolveServiceImageDigestContentTrust(dockerCli, &service); err != nil { + return err + } + + // only send auth if flag was set + if opts.registryAuth { + // Retrieve encoded auth token from the image reference + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, opts.image) + if err != nil { + return err + } + createOpts.EncodedRegistryAuth = encodedAuth + } + + // query registry if flag disabling it was not set + if !opts.noResolveImage && versions.GreaterThanOrEqualTo(apiClient.ClientVersion(), "1.30") { + createOpts.QueryRegistry = true + } + + response, err := apiClient.ServiceCreate(ctx, service, createOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) + + if opts.detach || versions.LessThan(apiClient.ClientVersion(), "1.29") { + return nil + } + + return waitOnService(ctx, dockerCli, response.ID, opts.quiet) +} + +// setConfigs does double duty: it both sets the ConfigReferences of the +// service, and it sets the service CredentialSpec. This is because there is an +// interplay between the CredentialSpec and the Config it depends on. +func setConfigs(apiClient client.ConfigAPIClient, service *swarm.ServiceSpec, opts *serviceOptions) error { + specifiedConfigs := opts.configs.Value() + // if the user has requested to use a Config, for the CredentialSpec add it + // to the specifiedConfigs as a RuntimeTarget. 
+ if cs := opts.credentialSpec.Value(); cs != nil && cs.Config != "" { + specifiedConfigs = append(specifiedConfigs, &swarm.ConfigReference{ + ConfigName: cs.Config, + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }) + } + if len(specifiedConfigs) > 0 { + // parse and validate configs + configs, err := ParseConfigs(apiClient, specifiedConfigs) + if err != nil { + return err + } + service.TaskTemplate.ContainerSpec.Configs = configs + // if we have a CredentialSpec Config, find its ID and rewrite the + // field on the spec + // + // we check the opts instead of the service directly because there are + // a few layers of nullable objects in the service, which is a PITA + // to traverse, but the existence of the option implies that those are + // non-null. + if cs := opts.credentialSpec.Value(); cs != nil && cs.Config != "" { + for _, config := range configs { + if config.ConfigName == cs.Config { + service.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config = config.ConfigID + // we've found the right config, no need to keep iterating + // through the rest of them. + break + } + } + } + } + + return nil +} diff --git a/cli/cli/command/service/create_test.go b/cli/cli/command/service/create_test.go new file mode 100644 index 00000000..cd675c79 --- /dev/null +++ b/cli/cli/command/service/create_test.go @@ -0,0 +1,271 @@ +package service + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + + cliopts "github.com/docker/cli/opts" +) + +// fakeConfigAPIClientList is used to let us pass a closure as a +// ConfigAPIClient, to use as ConfigList. 
for all the other methods in the +// interface, it does nothing, not even return an error, so don't use them +type fakeConfigAPIClientList func(context.Context, types.ConfigListOptions) ([]swarm.Config, error) + +func (f fakeConfigAPIClientList) ConfigList(ctx context.Context, opts types.ConfigListOptions) ([]swarm.Config, error) { + return f(ctx, opts) +} + +func (f fakeConfigAPIClientList) ConfigCreate(_ context.Context, _ swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + return types.ConfigCreateResponse{}, nil +} + +func (f fakeConfigAPIClientList) ConfigRemove(_ context.Context, _ string) error { + return nil +} + +func (f fakeConfigAPIClientList) ConfigInspectWithRaw(_ context.Context, _ string) (swarm.Config, []byte, error) { + return swarm.Config{}, nil, nil +} + +func (f fakeConfigAPIClientList) ConfigUpdate(_ context.Context, _ string, _ swarm.Version, _ swarm.ConfigSpec) error { + return nil +} + +// TestSetConfigsWithCredSpecAndConfigs tests that the setConfigs function for +// create correctly looks up the right configs, and correctly handles the +// credentialSpec +func TestSetConfigsWithCredSpecAndConfigs(t *testing.T) { + // we can't directly access the internal fields of the ConfigOpt struct, so + // we need to let it do the parsing + configOpt := &cliopts.ConfigOpt{} + configOpt.Set("bar") + opts := &serviceOptions{ + credentialSpec: credentialSpecOpt{ + value: &swarm.CredentialSpec{ + Config: "foo", + }, + source: "config://foo", + }, + configs: *configOpt, + } + + // create a service spec. 
we need to be sure to fill in the nullable + // fields, like the code expects + service := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Privileges: &swarm.Privileges{ + CredentialSpec: opts.credentialSpec.value, + }, + }, + }, + } + + // set up a function to use as the list function + var fakeClient fakeConfigAPIClientList = func(_ context.Context, opts types.ConfigListOptions) ([]swarm.Config, error) { + f := opts.Filters + + // we're expecting the filter to have names "foo" and "bar" + names := f.Get("name") + assert.Equal(t, len(names), 2) + assert.Assert(t, is.Contains(names, "foo")) + assert.Assert(t, is.Contains(names, "bar")) + + return []swarm.Config{ + { + ID: "fooID", + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "foo", + }, + }, + }, { + ID: "barID", + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "bar", + }, + }, + }, + }, nil + } + + // now call setConfigs + err := setConfigs(fakeClient, service, opts) + // verify no error is returned + assert.NilError(t, err) + + credSpecConfigValue := service.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config + assert.Equal(t, credSpecConfigValue, "fooID") + + configRefs := service.TaskTemplate.ContainerSpec.Configs + assert.Assert(t, is.Contains(configRefs, &swarm.ConfigReference{ + ConfigID: "fooID", + ConfigName: "foo", + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }), "expected configRefs to contain foo config") + assert.Assert(t, is.Contains(configRefs, &swarm.ConfigReference{ + ConfigID: "barID", + ConfigName: "bar", + File: &swarm.ConfigReferenceFileTarget{ + Name: "bar", + // these are the default field values + UID: "0", + GID: "0", + Mode: 0444, + }, + }), "expected configRefs to contain bar config") +} + +// TestSetConfigsOnlyCredSpec tests that even if a CredentialSpec is the only +// config needed, setConfigs still works +func TestSetConfigsOnlyCredSpec(t *testing.T) { + opts := &serviceOptions{ + 
credentialSpec: credentialSpecOpt{ + value: &swarm.CredentialSpec{ + Config: "foo", + }, + source: "config://foo", + }, + } + + service := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Privileges: &swarm.Privileges{ + CredentialSpec: opts.credentialSpec.value, + }, + }, + }, + } + + // set up a function to use as the list function + var fakeClient fakeConfigAPIClientList = func(_ context.Context, opts types.ConfigListOptions) ([]swarm.Config, error) { + f := opts.Filters + + names := f.Get("name") + assert.Equal(t, len(names), 1) + assert.Assert(t, is.Contains(names, "foo")) + + return []swarm.Config{ + { + ID: "fooID", + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "foo", + }, + }, + }, + }, nil + } + + // now call setConfigs + err := setConfigs(fakeClient, service, opts) + // verify no error is returned + assert.NilError(t, err) + + credSpecConfigValue := service.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config + assert.Equal(t, credSpecConfigValue, "fooID") + + configRefs := service.TaskTemplate.ContainerSpec.Configs + assert.Assert(t, is.Contains(configRefs, &swarm.ConfigReference{ + ConfigID: "fooID", + ConfigName: "foo", + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + })) +} + +// TestSetConfigsOnlyConfigs verifies setConfigs when only configs (and not a +// CredentialSpec) is needed. 
+func TestSetConfigsOnlyConfigs(t *testing.T) { + configOpt := &cliopts.ConfigOpt{} + configOpt.Set("bar") + opts := &serviceOptions{ + configs: *configOpt, + } + + service := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + + var fakeClient fakeConfigAPIClientList = func(_ context.Context, opts types.ConfigListOptions) ([]swarm.Config, error) { + f := opts.Filters + + names := f.Get("name") + assert.Equal(t, len(names), 1) + assert.Assert(t, is.Contains(names, "bar")) + + return []swarm.Config{ + { + ID: "barID", + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "bar", + }, + }, + }, + }, nil + } + + // now call setConfigs + err := setConfigs(fakeClient, service, opts) + // verify no error is returned + assert.NilError(t, err) + + configRefs := service.TaskTemplate.ContainerSpec.Configs + assert.Assert(t, is.Contains(configRefs, &swarm.ConfigReference{ + ConfigID: "barID", + ConfigName: "bar", + File: &swarm.ConfigReferenceFileTarget{ + Name: "bar", + // these are the default field values + UID: "0", + GID: "0", + Mode: 0444, + }, + })) +} + +// TestSetConfigsNoConfigs checks that setConfigs works when there are no +// configs of any kind needed +func TestSetConfigsNoConfigs(t *testing.T) { + // add a credentialSpec that isn't a config + opts := &serviceOptions{ + credentialSpec: credentialSpecOpt{ + value: &swarm.CredentialSpec{ + File: "foo", + }, + source: "file://foo", + }, + } + service := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Privileges: &swarm.Privileges{ + CredentialSpec: opts.credentialSpec.value, + }, + }, + }, + } + + var fakeClient fakeConfigAPIClientList = func(_ context.Context, opts types.ConfigListOptions) ([]swarm.Config, error) { + // assert false -- we should never call this function + assert.Assert(t, false, "we should not be listing configs") + return nil, nil + } + + err := setConfigs(fakeClient, service, opts) + 
assert.NilError(t, err) + + // ensure that the value of the credentialspec has not changed + assert.Equal(t, service.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.File, "foo") + assert.Equal(t, service.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config, "") +} diff --git a/cli/cli/command/service/formatter.go b/cli/cli/command/service/formatter.go new file mode 100644 index 00000000..3ebe52d5 --- /dev/null +++ b/cli/cli/command/service/formatter.go @@ -0,0 +1,682 @@ +package service + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +const serviceInspectPrettyTemplate formatter.Format = ` +ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Service Mode: +{{- if .IsModeGlobal }} Global +{{- else if .IsModeReplicated }} Replicated +{{- if .ModeReplicatedReplicas }} + Replicas: {{ .ModeReplicatedReplicas }} +{{- end }}{{ end }} +{{- if .HasUpdateStatus }} +UpdateStatus: + State: {{ .UpdateStatusState }} +{{- if .HasUpdateStatusStarted }} + Started: {{ .UpdateStatusStarted }} +{{- end }} +{{- if .UpdateIsCompleted }} + Completed: {{ .UpdateStatusCompleted }} +{{- end }} + Message: {{ .UpdateStatusMessage }} +{{- end }} +Placement: +{{- if .TaskPlacementConstraints }} + Constraints: {{ .TaskPlacementConstraints }} +{{- end }} +{{- if .TaskPlacementPreferences }} + Preferences: {{ .TaskPlacementPreferences }} +{{- end }} +{{- if .MaxReplicas }} + Max Replicas Per Node: {{ .MaxReplicas }} +{{- end }} +{{- if .HasUpdateConfig }} 
+UpdateConfig: + Parallelism: {{ .UpdateParallelism }} +{{- if .HasUpdateDelay}} + Delay: {{ .UpdateDelay }} +{{- end }} + On failure: {{ .UpdateOnFailure }} +{{- if .HasUpdateMonitor}} + Monitoring Period: {{ .UpdateMonitor }} +{{- end }} + Max failure ratio: {{ .UpdateMaxFailureRatio }} + Update order: {{ .UpdateOrder }} +{{- end }} +{{- if .HasRollbackConfig }} +RollbackConfig: + Parallelism: {{ .RollbackParallelism }} +{{- if .HasRollbackDelay}} + Delay: {{ .RollbackDelay }} +{{- end }} + On failure: {{ .RollbackOnFailure }} +{{- if .HasRollbackMonitor}} + Monitoring Period: {{ .RollbackMonitor }} +{{- end }} + Max failure ratio: {{ .RollbackMaxFailureRatio }} + Rollback order: {{ .RollbackOrder }} +{{- end }} +ContainerSpec: + Image: {{ .ContainerImage }} +{{- if .ContainerArgs }} + Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }} +{{- end -}} +{{- if .ContainerEnv }} + Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }} +{{- end -}} +{{- if .ContainerWorkDir }} + Dir: {{ .ContainerWorkDir }} +{{- end -}} +{{- if .HasContainerInit }} + Init: {{ .ContainerInit }} +{{- end -}} +{{- if .ContainerUser }} + User: {{ .ContainerUser }} +{{- end }} +{{- if .ContainerSysCtls }} +SysCtls: +{{- range $k, $v := .ContainerSysCtls }} + {{ $k }}{{if $v }}: {{ $v }}{{ end }} +{{- end }}{{ end }} +{{- if .ContainerMounts }} +Mounts: +{{- end }} +{{- range $mount := .ContainerMounts }} + Target: {{ $mount.Target }} + Source: {{ $mount.Source }} + ReadOnly: {{ $mount.ReadOnly }} + Type: {{ $mount.Type }} +{{- end -}} +{{- if .Configs}} +Configs: +{{- range $config := .Configs }} + Target: {{$config.File.Name}} + Source: {{$config.ConfigName}} +{{- end }}{{ end }} +{{- if .Secrets }} +Secrets: +{{- range $secret := .Secrets }} + Target: {{$secret.File.Name}} + Source: {{$secret.SecretName}} +{{- end }}{{ end }} +{{- if .HasResources }} +Resources: +{{- if .HasResourceReservations }} + Reservations: +{{- if gt .ResourceReservationNanoCPUs 0.0 }} + CPU: {{ 
.ResourceReservationNanoCPUs }} +{{- end }} +{{- if .ResourceReservationMemory }} + Memory: {{ .ResourceReservationMemory }} +{{- end }}{{ end }} +{{- if .HasResourceLimits }} + Limits: +{{- if gt .ResourceLimitsNanoCPUs 0.0 }} + CPU: {{ .ResourceLimitsNanoCPUs }} +{{- end }} +{{- if .ResourceLimitMemory }} + Memory: {{ .ResourceLimitMemory }} +{{- end }}{{ end }}{{ end }} +{{- if .Networks }} +Networks: +{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }} +Endpoint Mode: {{ .EndpointMode }} +{{- if .Ports }} +Ports: +{{- range $port := .Ports }} + PublishedPort = {{ $port.PublishedPort }} + Protocol = {{ $port.Protocol }} + TargetPort = {{ $port.TargetPort }} + PublishMode = {{ $port.PublishMode }} +{{- end }} {{ end -}} +{{- if .Healthcheck }} + Healthcheck: + Interval = {{ .Healthcheck.Interval }} + Retries = {{ .Healthcheck.Retries }} + StartPeriod = {{ .Healthcheck.StartPeriod }} + Timeout = {{ .Healthcheck.Timeout }} + {{- if .Healthcheck.Test }} + Tests: + {{- range $test := .Healthcheck.Test }} + Test = {{ $test }} + {{- end }} {{ end -}} +{{- end }} +` + +// NewFormat returns a Format for rendering using a Context +func NewFormat(source string) formatter.Format { + switch source { + case formatter.PrettyFormatKey: + return serviceInspectPrettyTemplate + default: + return formatter.Format(strings.TrimPrefix(source, formatter.RawFormatKey)) + } +} + +func resolveNetworks(service swarm.Service, getNetwork inspect.GetRefFunc) map[string]string { + networkNames := make(map[string]string) + for _, network := range service.Spec.TaskTemplate.Networks { + if resolved, _, err := getNetwork(network.Target); err == nil { + if resolvedNetwork, ok := resolved.(types.NetworkResource); ok { + networkNames[resolvedNetwork.ID] = resolvedNetwork.Name + } + } + } + return networkNames +} + +// InspectFormatWrite renders the context for a list of services +func InspectFormatWrite(ctx formatter.Context, refs []string, getRef, getNetwork inspect.GetRefFunc) 
error { + if ctx.Format != serviceInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext formatter.SubContext) error) error { + for _, ref := range refs { + serviceI, _, err := getRef(ref) + if err != nil { + return err + } + service, ok := serviceI.(swarm.Service) + if !ok { + return errors.Errorf("got wrong object to inspect") + } + if err := format(&serviceInspectContext{Service: service, networkNames: resolveNetworks(service, getNetwork)}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&serviceInspectContext{}, render) +} + +type serviceInspectContext struct { + swarm.Service + formatter.SubContext + + // networkNames is a map from network IDs (as found in + // Networks[x].Target) to network names. + networkNames map[string]string +} + +func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(ctx) +} + +func (ctx *serviceInspectContext) ID() string { + return ctx.Service.ID +} + +func (ctx *serviceInspectContext) Name() string { + return ctx.Service.Spec.Name +} + +func (ctx *serviceInspectContext) Labels() map[string]string { + return ctx.Service.Spec.Labels +} + +func (ctx *serviceInspectContext) Configs() []*swarm.ConfigReference { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Configs +} + +func (ctx *serviceInspectContext) Secrets() []*swarm.SecretReference { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Secrets +} + +func (ctx *serviceInspectContext) Healthcheck() *container.HealthConfig { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Healthcheck +} + +func (ctx *serviceInspectContext) IsModeGlobal() bool { + return ctx.Service.Spec.Mode.Global != nil +} + +func (ctx *serviceInspectContext) IsModeReplicated() bool { + return ctx.Service.Spec.Mode.Replicated != nil +} + +func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 { + return ctx.Service.Spec.Mode.Replicated.Replicas +} 
+ +func (ctx *serviceInspectContext) HasUpdateStatus() bool { + return ctx.Service.UpdateStatus != nil && ctx.Service.UpdateStatus.State != "" +} + +func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState { + return ctx.Service.UpdateStatus.State +} + +func (ctx *serviceInspectContext) HasUpdateStatusStarted() bool { + return ctx.Service.UpdateStatus.StartedAt != nil +} + +func (ctx *serviceInspectContext) UpdateStatusStarted() string { + return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.StartedAt)) + " ago" +} + +func (ctx *serviceInspectContext) UpdateIsCompleted() bool { + return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted && ctx.Service.UpdateStatus.CompletedAt != nil +} + +func (ctx *serviceInspectContext) UpdateStatusCompleted() string { + return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.CompletedAt)) + " ago" +} + +func (ctx *serviceInspectContext) UpdateStatusMessage() string { + return ctx.Service.UpdateStatus.Message +} + +func (ctx *serviceInspectContext) TaskPlacementConstraints() []string { + if ctx.Service.Spec.TaskTemplate.Placement != nil { + return ctx.Service.Spec.TaskTemplate.Placement.Constraints + } + return nil +} + +func (ctx *serviceInspectContext) TaskPlacementPreferences() []string { + if ctx.Service.Spec.TaskTemplate.Placement == nil { + return nil + } + var strings []string + for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences { + if pref.Spread != nil { + strings = append(strings, "spread="+pref.Spread.SpreadDescriptor) + } + } + return strings +} + +func (ctx *serviceInspectContext) MaxReplicas() uint64 { + if ctx.Service.Spec.TaskTemplate.Placement != nil { + return ctx.Service.Spec.TaskTemplate.Placement.MaxReplicas + } + return 0 +} + +func (ctx *serviceInspectContext) HasUpdateConfig() bool { + return ctx.Service.Spec.UpdateConfig != nil +} + +func (ctx *serviceInspectContext) UpdateParallelism() uint64 { + return 
ctx.Service.Spec.UpdateConfig.Parallelism +} + +func (ctx *serviceInspectContext) HasUpdateDelay() bool { + return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateDelay() time.Duration { + return ctx.Service.Spec.UpdateConfig.Delay +} + +func (ctx *serviceInspectContext) UpdateOnFailure() string { + return ctx.Service.Spec.UpdateConfig.FailureAction +} + +func (ctx *serviceInspectContext) UpdateOrder() string { + return ctx.Service.Spec.UpdateConfig.Order +} + +func (ctx *serviceInspectContext) HasUpdateMonitor() bool { + return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateMonitor() time.Duration { + return ctx.Service.Spec.UpdateConfig.Monitor +} + +func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 { + return ctx.Service.Spec.UpdateConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) HasRollbackConfig() bool { + return ctx.Service.Spec.RollbackConfig != nil +} + +func (ctx *serviceInspectContext) RollbackParallelism() uint64 { + return ctx.Service.Spec.RollbackConfig.Parallelism +} + +func (ctx *serviceInspectContext) HasRollbackDelay() bool { + return ctx.Service.Spec.RollbackConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) RollbackDelay() time.Duration { + return ctx.Service.Spec.RollbackConfig.Delay +} + +func (ctx *serviceInspectContext) RollbackOnFailure() string { + return ctx.Service.Spec.RollbackConfig.FailureAction +} + +func (ctx *serviceInspectContext) HasRollbackMonitor() bool { + return ctx.Service.Spec.RollbackConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) RollbackMonitor() time.Duration { + return ctx.Service.Spec.RollbackConfig.Monitor +} + +func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 { + return ctx.Service.Spec.RollbackConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) RollbackOrder() string { + return 
ctx.Service.Spec.RollbackConfig.Order +} + +func (ctx *serviceInspectContext) ContainerImage() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image +} + +func (ctx *serviceInspectContext) ContainerArgs() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args +} + +func (ctx *serviceInspectContext) ContainerEnv() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env +} + +func (ctx *serviceInspectContext) ContainerWorkDir() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir +} + +func (ctx *serviceInspectContext) ContainerUser() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.User +} + +func (ctx *serviceInspectContext) HasContainerInit() bool { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Init != nil +} + +func (ctx *serviceInspectContext) ContainerInit() bool { + return *ctx.Service.Spec.TaskTemplate.ContainerSpec.Init +} + +func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts +} + +func (ctx *serviceInspectContext) ContainerSysCtls() map[string]string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Sysctls +} + +func (ctx *serviceInspectContext) HasContainerSysCtls() bool { + return len(ctx.Service.Spec.TaskTemplate.ContainerSpec.Sysctls) > 0 +} + +func (ctx *serviceInspectContext) HasResources() bool { + return ctx.Service.Spec.TaskTemplate.Resources != nil +} + +func (ctx *serviceInspectContext) HasResourceReservations() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 { + return float64(0) + } + return 
float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceReservationMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes)) +} + +func (ctx *serviceInspectContext) HasResourceLimits() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 { + return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceLimitMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes)) +} + +func (ctx *serviceInspectContext) Networks() []string { + var out []string + for _, n := range ctx.Service.Spec.TaskTemplate.Networks { + if name, ok := ctx.networkNames[n.Target]; ok { + out = append(out, name) + } else { + out = append(out, n.Target) + } + } + return out +} + +func (ctx *serviceInspectContext) EndpointMode() string { + if ctx.Service.Spec.EndpointSpec == nil { + return "" + } + + return string(ctx.Service.Spec.EndpointSpec.Mode) +} + +func (ctx *serviceInspectContext) Ports() []swarm.PortConfig { + return ctx.Service.Endpoint.Ports +} + +const ( + defaultServiceTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Mode}}\t{{.Replicas}}\t{{.Image}}\t{{.Ports}}" + + serviceIDHeader = "ID" + modeHeader = "MODE" + replicasHeader = "REPLICAS" +) + +// NewListFormat returns a Format for rendering using a service Context +func NewListFormat(source string, quiet bool) formatter.Format { + 
switch source { + case formatter.TableFormatKey: + if quiet { + return formatter.DefaultQuietFormat + } + return defaultServiceTableFormat + case formatter.RawFormatKey: + if quiet { + return `id: {{.ID}}` + } + return `id: {{.ID}}\nname: {{.Name}}\nmode: {{.Mode}}\nreplicas: {{.Replicas}}\nimage: {{.Image}}\nports: {{.Ports}}\n` + } + return formatter.Format(source) +} + +// ListInfo stores the information about mode and replicas to be used by template +type ListInfo struct { + Mode string + Replicas string +} + +// ListFormatWrite writes the context +func ListFormatWrite(ctx formatter.Context, services []swarm.Service, info map[string]ListInfo) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, service := range services { + serviceCtx := &serviceContext{service: service, mode: info[service.ID].Mode, replicas: info[service.ID].Replicas} + if err := format(serviceCtx); err != nil { + return err + } + } + return nil + } + serviceCtx := serviceContext{} + serviceCtx.Header = formatter.SubHeaderContext{ + "ID": serviceIDHeader, + "Name": formatter.NameHeader, + "Mode": modeHeader, + "Replicas": replicasHeader, + "Image": formatter.ImageHeader, + "Ports": formatter.PortsHeader, + } + return ctx.Write(&serviceCtx, render) +} + +type serviceContext struct { + formatter.HeaderContext + service swarm.Service + mode string + replicas string +} + +func (c *serviceContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *serviceContext) ID() string { + return stringid.TruncateID(c.service.ID) +} + +func (c *serviceContext) Name() string { + return c.service.Spec.Name +} + +func (c *serviceContext) Mode() string { + return c.mode +} + +func (c *serviceContext) Replicas() string { + return c.replicas +} + +func (c *serviceContext) Image() string { + var image string + if c.service.Spec.TaskTemplate.ContainerSpec != nil { + image = c.service.Spec.TaskTemplate.ContainerSpec.Image + } + if ref, err := 
reference.ParseNormalizedNamed(image); err == nil { + // update image string for display, (strips any digest) + if nt, ok := ref.(reference.NamedTagged); ok { + if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { + image = reference.FamiliarString(namedTagged) + } + } + } + + return image +} + +type portRange struct { + pStart uint32 + pEnd uint32 + tStart uint32 + tEnd uint32 + protocol swarm.PortConfigProtocol +} + +func (pr portRange) String() string { + var ( + pub string + tgt string + ) + + if pr.pEnd > pr.pStart { + pub = fmt.Sprintf("%d-%d", pr.pStart, pr.pEnd) + } else { + pub = fmt.Sprintf("%d", pr.pStart) + } + if pr.tEnd > pr.tStart { + tgt = fmt.Sprintf("%d-%d", pr.tStart, pr.tEnd) + } else { + tgt = fmt.Sprintf("%d", pr.tStart) + } + return fmt.Sprintf("*:%s->%s/%s", pub, tgt, pr.protocol) +} + +// Ports formats published ports on the ingress network for output. +// +// Where possible, ranges are grouped to produce a compact output: +// - multiple ports mapped to a single port (80->80, 81->80); is formatted as *:80-81->80 +// - multiple consecutive ports on both sides; (80->80, 81->81) are formatted as: *:80-81->80-81 +// +// The above should not be grouped together, i.e.: +// - 80->80, 81->81, 82->80 should be presented as : *:80-81->80-81, *:82->80 +// +// TODO improve: +// - combine non-consecutive ports mapped to a single port (80->80, 81->80, 84->80, 86->80, 87->80); to be printed as *:80-81,84,86-87->80 +// - combine tcp and udp mappings if their port-mapping is exactly the same (*:80-81->80-81/tcp+udp instead of *:80-81->80-81/tcp, *:80-81->80-81/udp) +func (c *serviceContext) Ports() string { + if c.service.Endpoint.Ports == nil { + return "" + } + + pr := portRange{} + ports := []string{} + + servicePorts := c.service.Endpoint.Ports + sort.Slice(servicePorts, func(i, j int) bool { + if servicePorts[i].Protocol == servicePorts[j].Protocol { + return servicePorts[i].PublishedPort < 
servicePorts[j].PublishedPort + } + return servicePorts[i].Protocol < servicePorts[j].Protocol + }) + + for _, p := range c.service.Endpoint.Ports { + if p.PublishMode == swarm.PortConfigPublishModeIngress { + prIsRange := pr.tEnd != pr.tStart + tOverlaps := p.TargetPort <= pr.tEnd + + // Start a new port-range if: + // - the protocol is different from the current port-range + // - published or target port are not consecutive to the current port-range + // - the current port-range is a _range_, and the target port overlaps with the current range's target-ports + if p.Protocol != pr.protocol || p.PublishedPort-pr.pEnd > 1 || p.TargetPort-pr.tEnd > 1 || prIsRange && tOverlaps { + // start a new port-range, and print the previous port-range (if any) + if pr.pStart > 0 { + ports = append(ports, pr.String()) + } + pr = portRange{ + pStart: p.PublishedPort, + pEnd: p.PublishedPort, + tStart: p.TargetPort, + tEnd: p.TargetPort, + protocol: p.Protocol, + } + continue + } + pr.pEnd = p.PublishedPort + pr.tEnd = p.TargetPort + } + } + if pr.pStart > 0 { + ports = append(ports, pr.String()) + } + return strings.Join(ports, ", ") +} diff --git a/cli/cli/command/service/formatter_test.go b/cli/cli/command/service/formatter_test.go new file mode 100644 index 00000000..8addeba0 --- /dev/null +++ b/cli/cli/command/service/formatter_test.go @@ -0,0 +1,360 @@ +package service + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestServiceContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + // Errors + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: 
executing "" at : nil is not a command +`, + }, + // Table format + { + formatter.Context{Format: NewListFormat("table", false)}, + `ID NAME MODE REPLICAS IMAGE PORTS +id_baz baz global 2/4 *:80->8080/tcp +id_bar bar replicated 2/4 *:80->8080/tcp +`, + }, + { + formatter.Context{Format: NewListFormat("table", true)}, + `id_baz +id_bar +`, + }, + { + formatter.Context{Format: NewListFormat("table {{.Name}}", false)}, + `NAME +baz +bar +`, + }, + { + formatter.Context{Format: NewListFormat("table {{.Name}}", true)}, + `NAME +baz +bar +`, + }, + // Raw Format + { + formatter.Context{Format: NewListFormat("raw", false)}, + string(golden.Get(t, "service-context-write-raw.golden")), + }, + { + formatter.Context{Format: NewListFormat("raw", true)}, + `id: id_baz +id: id_bar +`, + }, + // Custom Format + { + formatter.Context{Format: NewListFormat("{{.Name}}", false)}, + `baz +bar +`, + }, + } + + for _, testcase := range cases { + services := []swarm.Service{ + { + ID: "id_baz", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: "baz"}, + }, + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + PublishMode: "ingress", + PublishedPort: 80, + TargetPort: 8080, + Protocol: "tcp", + }, + }, + }, + }, + { + ID: "id_bar", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: "bar"}, + }, + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + PublishMode: "ingress", + PublishedPort: 80, + TargetPort: 8080, + Protocol: "tcp", + }, + }, + }, + }, + } + info := map[string]ListInfo{ + "id_baz": { + Mode: "global", + Replicas: "2/4", + }, + "id_bar": { + Mode: "replicated", + Replicas: "2/4", + }, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ListFormatWrite(testcase.context, services, info) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestServiceContextWriteJSON(t *testing.T) { + services := 
[]swarm.Service{ + { + ID: "id_baz", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: "baz"}, + }, + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + PublishMode: "ingress", + PublishedPort: 80, + TargetPort: 8080, + Protocol: "tcp", + }, + }, + }, + }, + { + ID: "id_bar", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: "bar"}, + }, + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + PublishMode: "ingress", + PublishedPort: 80, + TargetPort: 8080, + Protocol: "tcp", + }, + }, + }, + }, + } + info := map[string]ListInfo{ + "id_baz": { + Mode: "global", + Replicas: "2/4", + }, + "id_bar": { + Mode: "replicated", + Replicas: "2/4", + }, + } + expectedJSONs := []map[string]interface{}{ + {"ID": "id_baz", "Name": "baz", "Mode": "global", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"}, + {"ID": "id_bar", "Name": "bar", "Mode": "replicated", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"}, + } + + out := bytes.NewBufferString("") + err := ListFormatWrite(formatter.Context{Format: "{{json .}}", Output: out}, services, info) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var m map[string]interface{} + err := json.Unmarshal([]byte(line), &m) + assert.NilError(t, err, msg) + assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg) + } +} +func TestServiceContextWriteJSONField(t *testing.T) { + services := []swarm.Service{ + {ID: "id_baz", Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "baz"}}}, + {ID: "id_bar", Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "bar"}}}, + } + info := map[string]ListInfo{ + "id_baz": { + Mode: "global", + Replicas: "2/4", + }, + "id_bar": { + Mode: "replicated", + Replicas: "2/4", + }, + } + out := bytes.NewBufferString("") + err := ListFormatWrite(formatter.Context{Format: "{{json .Name}}", Output: out}, services, 
info) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + msg := fmt.Sprintf("Output: line %d: %s", i, line) + var s string + err := json.Unmarshal([]byte(line), &s) + assert.NilError(t, err, msg) + assert.Check(t, is.Equal(services[i].Spec.Name, s), msg) + } +} + +func TestServiceContext_Ports(t *testing.T) { + c := serviceContext{ + service: swarm.Service{ + Endpoint: swarm.Endpoint{ + Ports: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 81, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 80, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 95, + PublishedPort: 95, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 90, + PublishedPort: 90, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 91, + PublishedPort: 91, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 92, + PublishedPort: 92, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 93, + PublishedPort: 93, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 94, + PublishedPort: 94, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 95, + PublishedPort: 95, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 90, + PublishedPort: 90, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 96, + PublishedPort: 96, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 91, + PublishedPort: 91, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 92, + PublishedPort: 92, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 93, + PublishedPort: 93, + PublishMode: "ingress", + }, + { + Protocol: "udp", + TargetPort: 94, + PublishedPort: 94, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 60, + PublishedPort: 60, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + 
TargetPort: 61, + PublishedPort: 61, + PublishMode: "ingress", + }, + { + Protocol: "tcp", + TargetPort: 61, + PublishedPort: 62, + PublishMode: "ingress", + }, + { + Protocol: "sctp", + TargetPort: 97, + PublishedPort: 97, + PublishMode: "ingress", + }, + { + Protocol: "sctp", + TargetPort: 98, + PublishedPort: 98, + PublishMode: "ingress", + }, + }, + }, + }, + } + + assert.Check(t, is.Equal("*:97-98->97-98/sctp, *:60-61->60-61/tcp, *:62->61/tcp, *:80-81->80/tcp, *:90-95->90-95/tcp, *:90-96->90-96/udp", c.Ports())) +} diff --git a/cli/cli/command/service/generic_resource_opts.go b/cli/cli/command/service/generic_resource_opts.go new file mode 100644 index 00000000..66385888 --- /dev/null +++ b/cli/cli/command/service/generic_resource_opts.go @@ -0,0 +1,105 @@ +package service + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" +) + +// GenericResource is a concept that a user can use to advertise user-defined +// resources on a node and thus better place services based on these resources. +// E.g: NVIDIA GPUs, Intel FPGAs, ... +// See https://github.com/docker/swarmkit/blob/master/design/generic_resources.md + +// ValidateSingleGenericResource validates that a single entry in the +// generic resource list is valid. +// i.e 'GPU=UID1' is valid however 'GPU:UID1' or 'UID1' isn't +func ValidateSingleGenericResource(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("invalid generic-resource format `%s` expected `name=value`", val) + } + + return val, nil +} + +// ParseGenericResources parses an array of Generic resourceResources +// Requesting Named Generic Resources for a service is not supported this +// is filtered here. 
+func ParseGenericResources(value []string) ([]swarm.GenericResource, error) { + if len(value) == 0 { + return nil, nil + } + + resources, err := genericresource.Parse(value) + if err != nil { + return nil, errors.Wrapf(err, "invalid generic resource specification") + } + + swarmResources := genericResourcesFromGRPC(resources) + for _, res := range swarmResources { + if res.NamedResourceSpec != nil { + return nil, fmt.Errorf("invalid generic-resource request `%s=%s`, Named Generic Resources is not supported for service create or update", res.NamedResourceSpec.Kind, res.NamedResourceSpec.Value) + } + } + + return swarmResources, nil +} + +// genericResourcesFromGRPC converts a GRPC GenericResource to a GenericResource +func genericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []swarm.GenericResource { + var generic []swarm.GenericResource + for _, res := range genericRes { + var current swarm.GenericResource + + switch r := res.Resource.(type) { + case *swarmapi.GenericResource_DiscreteResourceSpec: + current.DiscreteResourceSpec = &swarm.DiscreteGenericResource{ + Kind: r.DiscreteResourceSpec.Kind, + Value: r.DiscreteResourceSpec.Value, + } + case *swarmapi.GenericResource_NamedResourceSpec: + current.NamedResourceSpec = &swarm.NamedGenericResource{ + Kind: r.NamedResourceSpec.Kind, + Value: r.NamedResourceSpec.Value, + } + } + + generic = append(generic, current) + } + + return generic +} + +func buildGenericResourceMap(genericRes []swarm.GenericResource) (map[string]swarm.GenericResource, error) { + m := make(map[string]swarm.GenericResource) + + for _, res := range genericRes { + if res.DiscreteResourceSpec == nil { + return nil, fmt.Errorf("invalid generic-resource `%+v` for service task", res) + } + + _, ok := m[res.DiscreteResourceSpec.Kind] + if ok { + return nil, fmt.Errorf("duplicate generic-resource `%+v` for service task", res.DiscreteResourceSpec.Kind) + } + + m[res.DiscreteResourceSpec.Kind] = res + } + + return m, nil +} + +func 
buildGenericResourceList(genericRes map[string]swarm.GenericResource) []swarm.GenericResource { + var l []swarm.GenericResource + + for _, res := range genericRes { + l = append(l, res) + } + + return l +} diff --git a/cli/cli/command/service/generic_resource_opts_test.go b/cli/cli/command/service/generic_resource_opts_test.go new file mode 100644 index 00000000..c750f1dc --- /dev/null +++ b/cli/cli/command/service/generic_resource_opts_test.go @@ -0,0 +1,23 @@ +package service + +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestValidateSingleGenericResource(t *testing.T) { + incorrect := []string{"foo", "fooo-bar"} + correct := []string{"foo=bar", "bar=1", "foo=barbar"} + + for _, v := range incorrect { + _, err := ValidateSingleGenericResource(v) + assert.Check(t, is.ErrorContains(err, "")) + } + + for _, v := range correct { + _, err := ValidateSingleGenericResource(v) + assert.NilError(t, err) + } +} diff --git a/cli/cli/command/service/helpers.go b/cli/cli/command/service/helpers.go new file mode 100644 index 00000000..eb508e85 --- /dev/null +++ b/cli/cli/command/service/helpers.go @@ -0,0 +1,33 @@ +package service + +import ( + "context" + "io" + "io/ioutil" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/service/progress" + "github.com/docker/docker/pkg/jsonmessage" +) + +// waitOnService waits for the service to converge. It outputs a progress bar, +// if appropriate based on the CLI flags. 
+func waitOnService(ctx context.Context, dockerCli command.Cli, serviceID string, quiet bool) error { + errChan := make(chan error, 1) + pipeReader, pipeWriter := io.Pipe() + + go func() { + errChan <- progress.ServiceProgress(ctx, dockerCli.Client(), serviceID, pipeWriter) + }() + + if quiet { + go io.Copy(ioutil.Discard, pipeReader) + return <-errChan + } + + err := jsonmessage.DisplayJSONMessagesToStream(pipeReader, dockerCli.Out(), nil) + if err == nil { + err = <-errChan + } + return err +} diff --git a/cli/cli/command/service/inspect.go b/cli/cli/command/service/inspect.go new file mode 100644 index 00000000..38709963 --- /dev/null +++ b/cli/cli/command/service/inspect.go @@ -0,0 +1,93 @@ +package service + +import ( + "context" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + refs []string + format string + pretty bool +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SERVICE [SERVICE...]", + Short: "Display detailed information on one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + + if opts.pretty && len(opts.format) > 0 { + return errors.Errorf("--format is incompatible with human friendly format") + } + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if 
opts.pretty { + opts.format = "pretty" + } + + getRef := func(ref string) (interface{}, []byte, error) { + // Service inspect shows defaults values in empty fields. + service, _, err := client.ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true}) + if err == nil || !apiclient.IsErrNotFound(err) { + return service, nil, err + } + return nil, nil, errors.Errorf("Error: no such service: %s", ref) + } + + getNetwork := func(ref string) (interface{}, []byte, error) { + network, _, err := client.NetworkInspectWithRaw(ctx, ref, types.NetworkInspectOptions{Scope: "swarm"}) + if err == nil || !apiclient.IsErrNotFound(err) { + return network, nil, err + } + return nil, nil, errors.Errorf("Error: no such network: %s", ref) + } + + f := opts.format + if len(f) == 0 { + f = "raw" + if len(dockerCli.ConfigFile().ServiceInspectFormat) > 0 { + f = dockerCli.ConfigFile().ServiceInspectFormat + } + } + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return errors.Errorf("Cannot supply extra formatting options to the pretty template") + } + + serviceCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewFormat(f), + } + + if err := InspectFormatWrite(serviceCtx, opts.refs, getRef, getNetwork); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/cli/cli/command/service/inspect_test.go b/cli/cli/command/service/inspect_test.go new file mode 100644 index 00000000..fe662ba3 --- /dev/null +++ b/cli/cli/command/service/inspect_test.go @@ -0,0 +1,170 @@ +package service + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func 
formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string { + b := new(bytes.Buffer) + + endpointSpec := &swarm.EndpointSpec{ + Mode: "vip", + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + }, + }, + } + + two := uint64(2) + + s := swarm.Service{ + ID: "de179gar9d0o7ltdybungplod", + Meta: swarm.Meta{ + Version: swarm.Version{Index: 315}, + CreatedAt: now, + UpdatedAt: now, + }, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "my_service", + Labels: map[string]string{"com.label": "foo"}, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "foo/bar@sha256:this_is_a_test", + Configs: []*swarm.ConfigReference{ + { + ConfigID: "mtc3i44r1awdoziy2iceg73z8", + ConfigName: "configtest.conf", + File: &swarm.ConfigReferenceFileTarget{ + Name: "/configtest.conf", + }, + }, + }, + Secrets: []*swarm.SecretReference{ + { + SecretID: "3hv39ehbbb4hdozo7spod9ftn", + SecretName: "secrettest.conf", + File: &swarm.SecretReferenceFileTarget{ + Name: "/secrettest.conf", + }, + }, + }, + + Healthcheck: &container.HealthConfig{ + Test: []string{"CMD-SHELL", "curl"}, + Interval: 4, + Retries: 3, + StartPeriod: 2, + Timeout: 1, + }, + }, + Networks: []swarm.NetworkAttachmentConfig{ + { + Target: "5vpyomhb6ievnk0i0o60gcnei", + Aliases: []string{"web"}, + }, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &two, + }, + }, + EndpointSpec: endpointSpec, + }, + Endpoint: swarm.Endpoint{ + Spec: *endpointSpec, + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + PublishedPort: 30000, + }, + }, + VirtualIPs: []swarm.EndpointVirtualIP{ + { + NetworkID: "6o4107cj2jx9tihgb0jyts6pj", + Addr: "10.255.0.4/16", + }, + }, + }, + UpdateStatus: &swarm.UpdateStatus{ + StartedAt: &now, + CompletedAt: &now, + }, + } + + ctx := formatter.Context{ + Output: b, + Format: format, + } + + err := 
InspectFormatWrite(ctx, []string{"de179gar9d0o7ltdybungplod"}, + func(ref string) (interface{}, []byte, error) { + return s, nil, nil + }, + func(ref string) (interface{}, []byte, error) { + return types.NetworkResource{ + ID: "5vpyomhb6ievnk0i0o60gcnei", + Name: "mynetwork", + }, nil, nil + }, + ) + if err != nil { + t.Fatal(err) + } + return b.String() +} + +func TestPrettyPrintWithNoUpdateConfig(t *testing.T) { + s := formatServiceInspect(t, NewFormat("pretty"), time.Now()) + if strings.Contains(s, "UpdateStatus") { + t.Fatal("Pretty print failed before parsing UpdateStatus") + } + if !strings.Contains(s, "mynetwork") { + t.Fatal("network name not found in inspect output") + } +} + +func TestJSONFormatWithNoUpdateConfig(t *testing.T) { + now := time.Now() + // s1: [{"ID":..}] + // s2: {"ID":..} + s1 := formatServiceInspect(t, NewFormat(""), now) + s2 := formatServiceInspect(t, NewFormat("{{json .}}"), now) + var m1Wrap []map[string]interface{} + if err := json.Unmarshal([]byte(s1), &m1Wrap); err != nil { + t.Fatal(err) + } + if len(m1Wrap) != 1 { + t.Fatalf("strange s1=%s", s1) + } + m1 := m1Wrap[0] + var m2 map[string]interface{} + if err := json.Unmarshal([]byte(s2), &m2); err != nil { + t.Fatal(err) + } + assert.Check(t, is.DeepEqual(m1, m2)) +} + +func TestPrettyPrintWithConfigsAndSecrets(t *testing.T) { + s := formatServiceInspect(t, NewFormat("pretty"), time.Now()) + + assert.Check(t, is.Contains(s, "Configs:"), "Pretty print missing configs") + assert.Check(t, is.Contains(s, "Secrets:"), "Pretty print missing secrets") + assert.Check(t, is.Contains(s, "Healthcheck:"), "Pretty print missing healthcheck") +} diff --git a/cli/cli/command/service/list.go b/cli/cli/command/service/list.go new file mode 100644 index 00000000..90fe3301 --- /dev/null +++ b/cli/cli/command/service/list.go @@ -0,0 +1,142 @@ +package service + +import ( + "context" + "fmt" + "sort" + + "vbom.ml/util/sortorder" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + 
"github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/spf13/cobra" +) + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List services", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display IDs") + flags.StringVar(&options.format, "format", "", "Pretty-print services using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + serviceFilters := options.filter.Value() + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceFilters}) + if err != nil { + return err + } + + sort.Slice(services, func(i, j int) bool { + return sortorder.NaturalLess(services[i].Spec.Name, services[j].Spec.Name) + }) + info := map[string]ListInfo{} + if len(services) > 0 && !options.quiet { + // only non-empty services and not quiet, should we call TaskList and NodeList api + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + info = GetServicesStatus(services, nodes, tasks) + } + + format := options.format + if len(format) == 
0 { + if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().ServicesFormat + } else { + format = formatter.TableFormatKey + } + } + + servicesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewListFormat(format, options.quiet), + } + return ListFormatWrite(servicesCtx, services, info) +} + +// GetServicesStatus returns a map of mode and replicas +func GetServicesStatus(services []swarm.Service, nodes []swarm.Node, tasks []swarm.Task) map[string]ListInfo { + running := map[string]int{} + tasksNoShutdown := map[string]int{} + + activeNodes := make(map[string]struct{}) + for _, n := range nodes { + if n.Status.State != swarm.NodeStateDown { + activeNodes[n.ID] = struct{}{} + } + } + + for _, task := range tasks { + if task.DesiredState != swarm.TaskStateShutdown { + tasksNoShutdown[task.ServiceID]++ + } + + if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == swarm.TaskStateRunning { + running[task.ServiceID]++ + } + } + + info := map[string]ListInfo{} + for _, service := range services { + info[service.ID] = ListInfo{} + if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + if service.Spec.TaskTemplate.Placement != nil && service.Spec.TaskTemplate.Placement.MaxReplicas > 0 { + info[service.ID] = ListInfo{ + Mode: "replicated", + Replicas: fmt.Sprintf("%d/%d (max %d per node)", running[service.ID], *service.Spec.Mode.Replicated.Replicas, service.Spec.TaskTemplate.Placement.MaxReplicas), + } + } else { + info[service.ID] = ListInfo{ + Mode: "replicated", + Replicas: fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas), + } + } + } else if service.Spec.Mode.Global != nil { + info[service.ID] = ListInfo{ + Mode: "global", + Replicas: fmt.Sprintf("%d/%d", running[service.ID], tasksNoShutdown[service.ID]), + } + } + } + return info +} diff --git a/cli/cli/command/service/list_test.go 
b/cli/cli/command/service/list_test.go new file mode 100644 index 00000000..e52e7e03 --- /dev/null +++ b/cli/cli/command/service/list_test.go @@ -0,0 +1,28 @@ +package service + +import ( + "context" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestServiceListOrder(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + newService("a57dbe8", "service-1-foo"), + newService("a57dbdd", "service-10-foo"), + newService("aaaaaaa", "service-2-foo"), + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{.Name}}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "service-list-sort.golden") +} diff --git a/cli/cli/command/service/logs.go b/cli/cli/command/service/logs.go new file mode 100644 index 00000000..107c9d21 --- /dev/null +++ b/cli/cli/command/service/logs.go @@ -0,0 +1,349 @@ +package service + +import ( + "bytes" + "context" + "fmt" + "io" + "sort" + "strconv" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/service/logs" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type logsOptions struct { + noResolve bool + noTrunc bool + noTaskIDs bool + follow bool + since string + timestamps bool + tail string + details bool + raw bool + + target string +} + +func newLogsCommand(dockerCli command.Cli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] SERVICE|TASK", + Short: "Fetch the 
logs of a service or task", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.target = args[0] + return runLogs(dockerCli, &opts) + }, + Annotations: map[string]string{"version": "1.29"}, + } + + flags := cmd.Flags() + // options specific to service logs + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names in output") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.raw, "raw", false, "Do not neatly format logs") + flags.SetAnnotation("raw", "version", []string{"1.30"}) + flags.BoolVar(&opts.noTaskIDs, "no-task-ids", false, "Do not include task IDs in output") + // options identical to container logs + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.SetAnnotation("details", "version", []string{"1.30"}) + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli command.Cli, opts *logsOptions) error { + ctx := context.Background() + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + // get the details if we request it OR if we're not doing raw mode + // (we need them for the context to pretty print) + Details: opts.details || !opts.raw, + } + + cli := dockerCli.Client() + + var ( + maxLength = 1 + responseBody io.ReadCloser + tty bool + // logfunc is used to delay the call to logs so that we can do some + // processing before we actually get the logs + logfunc func(context.Context, string, types.ContainerLogsOptions) 
(io.ReadCloser, error) + ) + + service, _, err := cli.ServiceInspectWithRaw(ctx, opts.target, types.ServiceInspectOptions{}) + if err != nil { + // if it's any error other than service not found, it's Real + if !client.IsErrNotFound(err) { + return err + } + task, _, err := cli.TaskInspectWithRaw(ctx, opts.target) + if err != nil { + if client.IsErrNotFound(err) { + // if the task isn't found, rewrite the error to be clear + // that we looked for services AND tasks and found none + err = fmt.Errorf("no such task or service: %v", opts.target) + } + return err + } + + tty = task.Spec.ContainerSpec.TTY + maxLength = getMaxLength(task.Slot) + + // use the TaskLogs api function + logfunc = cli.TaskLogs + } else { + // use ServiceLogs api function + logfunc = cli.ServiceLogs + tty = service.Spec.TaskTemplate.ContainerSpec.TTY + if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + // if replicas are initialized, figure out if we need to pad them + replicas := *service.Spec.Mode.Replicated.Replicas + maxLength = getMaxLength(int(replicas)) + } + } + + // we can't prettify tty logs. tell the user that this is the case. + // this is why we assign the logs function to a variable and delay calling + // it. we want to check this before we make the call and checking twice in + // each branch is even sloppier than this CLI disaster already is + if tty && !opts.raw { + return errors.New("tty service logs only supported with --raw") + } + + // now get the logs + responseBody, err = logfunc(ctx, opts.target, options) + if err != nil { + return err + } + defer responseBody.Close() + + // tty logs get straight copied. they're not muxed with stdcopy + if tty { + _, err = io.Copy(dockerCli.Out(), responseBody) + return err + } + + // otherwise, logs are multiplexed. if we're doing pretty printing, also + // create a task formatter. 
+ var stdout, stderr io.Writer + stdout = dockerCli.Out() + stderr = dockerCli.Err() + if !opts.raw { + taskFormatter := newTaskFormatter(cli, opts, maxLength) + + stdout = &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: stdout} + stderr = &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: stderr} + } + + _, err = stdcopy.StdCopy(stdout, stderr, responseBody) + return err +} + +// getMaxLength gets the maximum length of the number in base 10 +func getMaxLength(i int) int { + return len(strconv.Itoa(i)) +} + +type taskFormatter struct { + client client.APIClient + opts *logsOptions + padding int + + r *idresolver.IDResolver + // cache saves a pre-cooked logContext formatted string based on a + // logcontext object, so we don't have to resolve names every time + cache map[logContext]string +} + +func newTaskFormatter(client client.APIClient, opts *logsOptions, padding int) *taskFormatter { + return &taskFormatter{ + client: client, + opts: opts, + padding: padding, + r: idresolver.New(client, opts.noResolve), + cache: make(map[logContext]string), + } +} + +func (f *taskFormatter) format(ctx context.Context, logCtx logContext) (string, error) { + if cached, ok := f.cache[logCtx]; ok { + return cached, nil + } + + nodeName, err := f.r.Resolve(ctx, swarm.Node{}, logCtx.nodeID) + if err != nil { + return "", err + } + + serviceName, err := f.r.Resolve(ctx, swarm.Service{}, logCtx.serviceID) + if err != nil { + return "", err + } + + task, _, err := f.client.TaskInspectWithRaw(ctx, logCtx.taskID) + if err != nil { + return "", err + } + + taskName := fmt.Sprintf("%s.%d", serviceName, task.Slot) + if !f.opts.noTaskIDs { + if f.opts.noTrunc { + taskName += fmt.Sprintf(".%s", task.ID) + } else { + taskName += fmt.Sprintf(".%s", stringid.TruncateID(task.ID)) + } + } + + paddingCount := f.padding - getMaxLength(task.Slot) + padding := "" + if paddingCount > 0 { + padding = strings.Repeat(" ", paddingCount) + } + formatted := taskName + "@" + nodeName + padding + 
f.cache[logCtx] = formatted + return formatted, nil +} + +type logWriter struct { + ctx context.Context + opts *logsOptions + f *taskFormatter + w io.Writer +} + +func (lw *logWriter) Write(buf []byte) (int, error) { + // this works but ONLY because stdcopy calls write a whole line at a time. + // if this ends up horribly broken or panics, check to see if stdcopy has + // reneged on that assumption. (@god forgive me) + // also this only works because the logs format is, like, barely parsable. + // if something changes in the logs format, this is gonna break + + // there should always be at least 2 parts: details and message. if there + // is no timestamp, details will be first (index 0) when we split on + // spaces. if there is a timestamp, details will be 2nd (`index 1) + detailsIndex := 0 + numParts := 2 + if lw.opts.timestamps { + detailsIndex++ + numParts++ + } + + // break up the log line into parts. + parts := bytes.SplitN(buf, []byte(" "), numParts) + if len(parts) != numParts { + return 0, errors.Errorf("invalid context in log message: %v", string(buf)) + } + // parse the details out + details, err := logs.ParseLogDetails(string(parts[detailsIndex])) + if err != nil { + return 0, err + } + // and then create a context from the details + // this removes the context-specific details from the details map, so we + // can more easily print the details later + logCtx, err := lw.parseContext(details) + if err != nil { + return 0, err + } + + output := []byte{} + // if we included timestamps, add them to the front + if lw.opts.timestamps { + output = append(output, parts[0]...) + output = append(output, ' ') + } + // add the context, nice and formatted + formatted, err := lw.f.format(lw.ctx, logCtx) + if err != nil { + return 0, err + } + output = append(output, []byte(formatted+" | ")...) 
+ // if the user asked for details, add them to be log message + if lw.opts.details { + // ugh i hate this it's basically a dupe of api/server/httputils/write_log_stream.go:stringAttrs() + // ok but we're gonna do it a bit different + + // there are optimizations that can be made here. for starters, i'd + // suggest caching the details keys. then, we can maybe draw maps and + // slices from a pool to avoid alloc overhead on them. idk if it's + // worth the time yet. + + // first we need a slice + d := make([]string, 0, len(details)) + // then let's add all the pairs + for k := range details { + d = append(d, k+"="+details[k]) + } + // then sort em + sort.Strings(d) + // then join and append + output = append(output, []byte(strings.Join(d, ","))...) + output = append(output, ' ') + } + + // add the log message itself, finally + output = append(output, parts[detailsIndex+1]...) + + _, err = lw.w.Write(output) + if err != nil { + return 0, err + } + + return len(buf), nil +} + +// parseContext returns a log context and REMOVES the context from the details map +func (lw *logWriter) parseContext(details map[string]string) (logContext, error) { + nodeID, ok := details["com.docker.swarm.node.id"] + if !ok { + return logContext{}, errors.Errorf("missing node id in details: %v", details) + } + delete(details, "com.docker.swarm.node.id") + + serviceID, ok := details["com.docker.swarm.service.id"] + if !ok { + return logContext{}, errors.Errorf("missing service id in details: %v", details) + } + delete(details, "com.docker.swarm.service.id") + + taskID, ok := details["com.docker.swarm.task.id"] + if !ok { + return logContext{}, errors.Errorf("missing task id in details: %s", details) + } + delete(details, "com.docker.swarm.task.id") + + return logContext{ + nodeID: nodeID, + serviceID: serviceID, + taskID: taskID, + }, nil +} + +type logContext struct { + nodeID string + serviceID string + taskID string +} diff --git a/cli/cli/command/service/opts.go 
b/cli/cli/command/service/opts.go new file mode 100644 index 00000000..e0beeba0 --- /dev/null +++ b/cli/cli/command/service/opts.go @@ -0,0 +1,949 @@ +package service + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/defaults" + gogotypes "github.com/gogo/protobuf/types" + "github.com/google/shlex" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +type int64Value interface { + Value() int64 +} + +// Uint64Opt represents a uint64. +type Uint64Opt struct { + value *uint64 +} + +// Set a new value on the option +func (i *Uint64Opt) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + i.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (i *Uint64Opt) Type() string { + return "uint" +} + +// String returns a string repr of this option +func (i *Uint64Opt) String() string { + if i.value != nil { + return fmt.Sprintf("%v", *i.value) + } + return "" +} + +// Value returns the uint64 +func (i *Uint64Opt) Value() *uint64 { + return i.value +} + +type floatValue float32 + +func (f *floatValue) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = floatValue(v) + return err +} + +func (f *floatValue) Type() string { + return "float" +} + +func (f *floatValue) String() string { + return strconv.FormatFloat(float64(*f), 'g', -1, 32) +} + +func (f *floatValue) Value() float32 { + return float32(*f) +} + +// placementPrefOpts holds a list of placement preferences. 
+type placementPrefOpts struct { + prefs []swarm.PlacementPreference + strings []string +} + +func (opts *placementPrefOpts) String() string { + if len(opts.strings) == 0 { + return "" + } + return fmt.Sprintf("%v", opts.strings) +} + +// Set validates the input value and adds it to the internal slices. +// Note: in the future strategies other than "spread", may be supported, +// as well as additional comma-separated options. +func (opts *placementPrefOpts) Set(value string) error { + fields := strings.Split(value, "=") + if len(fields) != 2 { + return errors.New(`placement preference must be of the format "="`) + } + if fields[0] != "spread" { + return errors.Errorf("unsupported placement preference %s (only spread is supported)", fields[0]) + } + + opts.prefs = append(opts.prefs, swarm.PlacementPreference{ + Spread: &swarm.SpreadOver{ + SpreadDescriptor: fields[1], + }, + }) + opts.strings = append(opts.strings, value) + return nil +} + +// Type returns a string name for this Option type +func (opts *placementPrefOpts) Type() string { + return "pref" +} + +// ShlexOpt is a flag Value which parses a string as a list of shell words +type ShlexOpt []string + +// Set the value +func (s *ShlexOpt) Set(value string) error { + valueSlice, err := shlex.Split(value) + *s = ShlexOpt(valueSlice) + return err +} + +// Type returns the tyep of the value +func (s *ShlexOpt) Type() string { + return "command" +} + +func (s *ShlexOpt) String() string { + if len(*s) == 0 { + return "" + } + return fmt.Sprint(*s) +} + +// Value returns the value as a string slice +func (s *ShlexOpt) Value() []string { + return []string(*s) +} + +type updateOptions struct { + parallelism uint64 + delay time.Duration + monitor time.Duration + onFailure string + maxFailureRatio floatValue + order string +} + +func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.UpdateConfig { + defaultFailureAction := 
strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaultUpdateConfig.FailureAction)]) + defaultMonitor, _ := gogotypes.DurationFromProto(defaultUpdateConfig.Monitor) + return &swarm.UpdateConfig{ + Parallelism: defaultUpdateConfig.Parallelism, + Delay: defaultUpdateConfig.Delay, + Monitor: defaultMonitor, + FailureAction: defaultFailureAction, + MaxFailureRatio: defaultUpdateConfig.MaxFailureRatio, + Order: defaultOrder(defaultUpdateConfig.Order), + } +} + +func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig { + if !anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) { + return nil + } + + updateConfig := updateConfigFromDefaults(defaults.Service.Update) + + if flags.Changed(flagUpdateParallelism) { + updateConfig.Parallelism = opts.parallelism + } + if flags.Changed(flagUpdateDelay) { + updateConfig.Delay = opts.delay + } + if flags.Changed(flagUpdateMonitor) { + updateConfig.Monitor = opts.monitor + } + if flags.Changed(flagUpdateFailureAction) { + updateConfig.FailureAction = opts.onFailure + } + if flags.Changed(flagUpdateMaxFailureRatio) { + updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value() + } + if flags.Changed(flagUpdateOrder) { + updateConfig.Order = opts.order + } + + return updateConfig +} + +func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig { + if !anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio) { + return nil + } + + updateConfig := updateConfigFromDefaults(defaults.Service.Rollback) + + if flags.Changed(flagRollbackParallelism) { + updateConfig.Parallelism = opts.parallelism + } + if flags.Changed(flagRollbackDelay) { + updateConfig.Delay = opts.delay + } + if flags.Changed(flagRollbackMonitor) { + updateConfig.Monitor = opts.monitor + } + if flags.Changed(flagRollbackFailureAction) { + 
updateConfig.FailureAction = opts.onFailure + } + if flags.Changed(flagRollbackMaxFailureRatio) { + updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value() + } + if flags.Changed(flagRollbackOrder) { + updateConfig.Order = opts.order + } + + return updateConfig +} + +type resourceOptions struct { + limitCPU opts.NanoCPUs + limitMemBytes opts.MemBytes + resCPU opts.NanoCPUs + resMemBytes opts.MemBytes + resGenericResources []string +} + +func (r *resourceOptions) ToResourceRequirements() (*swarm.ResourceRequirements, error) { + generic, err := ParseGenericResources(r.resGenericResources) + if err != nil { + return nil, err + } + + return &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: r.limitCPU.Value(), + MemoryBytes: r.limitMemBytes.Value(), + }, + Reservations: &swarm.Resources{ + NanoCPUs: r.resCPU.Value(), + MemoryBytes: r.resMemBytes.Value(), + GenericResources: generic, + }, + }, nil +} + +type restartPolicyOptions struct { + condition string + delay opts.DurationOpt + maxAttempts Uint64Opt + window opts.DurationOpt +} + +func defaultRestartPolicy() *swarm.RestartPolicy { + defaultMaxAttempts := defaults.Service.Task.Restart.MaxAttempts + rp := &swarm.RestartPolicy{ + MaxAttempts: &defaultMaxAttempts, + } + + if defaults.Service.Task.Restart.Delay != nil { + defaultRestartDelay, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) + rp.Delay = &defaultRestartDelay + } + if defaults.Service.Task.Restart.Window != nil { + defaultRestartWindow, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Window) + rp.Window = &defaultRestartWindow + } + rp.Condition = defaultRestartCondition() + + return rp +} + +func defaultRestartCondition() swarm.RestartPolicyCondition { + switch defaults.Service.Task.Restart.Condition { + case api.RestartOnNone: + return "none" + case api.RestartOnFailure: + return "on-failure" + case api.RestartOnAny: + return "any" + default: + return "" + } +} + +func defaultOrder(order 
api.UpdateConfig_UpdateOrder) string { + switch order { + case api.UpdateConfig_STOP_FIRST: + return "stop-first" + case api.UpdateConfig_START_FIRST: + return "start-first" + default: + return "" + } +} + +func (r *restartPolicyOptions) ToRestartPolicy(flags *pflag.FlagSet) *swarm.RestartPolicy { + if !anyChanged(flags, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow, flagRestartCondition) { + return nil + } + + restartPolicy := defaultRestartPolicy() + + if flags.Changed(flagRestartDelay) { + restartPolicy.Delay = r.delay.Value() + } + if flags.Changed(flagRestartCondition) { + restartPolicy.Condition = swarm.RestartPolicyCondition(r.condition) + } + if flags.Changed(flagRestartMaxAttempts) { + restartPolicy.MaxAttempts = r.maxAttempts.Value() + } + if flags.Changed(flagRestartWindow) { + restartPolicy.Window = r.window.Value() + } + + return restartPolicy +} + +type credentialSpecOpt struct { + value *swarm.CredentialSpec + source string +} + +func (c *credentialSpecOpt) Set(value string) error { + c.source = value + c.value = &swarm.CredentialSpec{} + switch { + case strings.HasPrefix(value, "config://"): + // NOTE(dperny): we allow the user to specify the value of + // CredentialSpec Config using the Name of the config, but the API + // requires the ID of the config. For simplicity, we will parse + // whatever value is provided into the "Config" field, but before + // making API calls, we may need to swap the Config Name for the ID. + // Therefore, this isn't the definitive location for the value of + // Config that is passed to the API. + c.value.Config = strings.TrimPrefix(value, "config://") + case strings.HasPrefix(value, "file://"): + c.value.File = strings.TrimPrefix(value, "file://") + case strings.HasPrefix(value, "registry://"): + c.value.Registry = strings.TrimPrefix(value, "registry://") + case value == "": + // if the value of the flag is an empty string, that means there is no + // CredentialSpec needed. 
This is useful for removing a CredentialSpec + // during a service update. + default: + return errors.New(`invalid credential spec: value must be prefixed with "config://", "file://", or "registry://"`) + } + + return nil +} + +func (c *credentialSpecOpt) Type() string { + return "credential-spec" +} + +func (c *credentialSpecOpt) String() string { + return c.source +} + +func (c *credentialSpecOpt) Value() *swarm.CredentialSpec { + return c.value +} + +func resolveNetworkID(ctx context.Context, apiClient client.NetworkAPIClient, networkIDOrName string) (string, error) { + nw, err := apiClient.NetworkInspect(ctx, networkIDOrName, types.NetworkInspectOptions{Scope: "swarm"}) + return nw.ID, err +} + +func convertNetworks(networks opts.NetworkOpt) []swarm.NetworkAttachmentConfig { + var netAttach []swarm.NetworkAttachmentConfig + for _, net := range networks.Value() { + netAttach = append(netAttach, swarm.NetworkAttachmentConfig{ + Target: net.Target, + Aliases: net.Aliases, + DriverOpts: net.DriverOpts, + }) + } + return netAttach +} + +type endpointOptions struct { + mode string + publishPorts opts.PortOpt +} + +func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { + return &swarm.EndpointSpec{ + Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), + Ports: e.publishPorts.Value(), + } +} + +type logDriverOptions struct { + name string + opts opts.ListOpts +} + +func newLogDriverOptions() logDriverOptions { + return logDriverOptions{opts: opts.NewListOpts(opts.ValidateEnv)} +} + +func (ldo *logDriverOptions) toLogDriver() *swarm.Driver { + if ldo.name == "" { + return nil + } + + // set the log driver only if specified. 
+ return &swarm.Driver{ + Name: ldo.name, + Options: opts.ConvertKVStringsToMap(ldo.opts.GetAll()), + } +} + +type healthCheckOptions struct { + cmd string + interval opts.PositiveDurationOpt + timeout opts.PositiveDurationOpt + retries int + startPeriod opts.PositiveDurationOpt + noHealthcheck bool +} + +func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) { + var healthConfig *container.HealthConfig + haveHealthSettings := opts.cmd != "" || + opts.interval.Value() != nil || + opts.timeout.Value() != nil || + opts.retries != 0 + if opts.noHealthcheck { + if haveHealthSettings { + return nil, errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + healthConfig = &container.HealthConfig{Test: []string{"NONE"}} + } else if haveHealthSettings { + var test []string + if opts.cmd != "" { + test = []string{"CMD-SHELL", opts.cmd} + } + var interval, timeout, startPeriod time.Duration + if ptr := opts.interval.Value(); ptr != nil { + interval = *ptr + } + if ptr := opts.timeout.Value(); ptr != nil { + timeout = *ptr + } + if ptr := opts.startPeriod.Value(); ptr != nil { + startPeriod = *ptr + } + healthConfig = &container.HealthConfig{ + Test: test, + Interval: interval, + Timeout: timeout, + Retries: opts.retries, + StartPeriod: startPeriod, + } + } + return healthConfig, nil +} + +// convertExtraHostsToSwarmHosts converts an array of extra hosts in cli +// : +// into a swarmkit host format: +// IP_address canonical_hostname [aliases...] 
+// This assumes input value (:) has already been validated +func convertExtraHostsToSwarmHosts(extraHosts []string) []string { + hosts := []string{} + for _, extraHost := range extraHosts { + parts := strings.SplitN(extraHost, ":", 2) + hosts = append(hosts, fmt.Sprintf("%s %s", parts[1], parts[0])) + } + return hosts +} + +type serviceOptions struct { + detach bool + quiet bool + + name string + labels opts.ListOpts + containerLabels opts.ListOpts + image string + entrypoint ShlexOpt + args []string + hostname string + env opts.ListOpts + envFile opts.ListOpts + workdir string + user string + groups opts.ListOpts + credentialSpec credentialSpecOpt + init bool + stopSignal string + tty bool + readOnly bool + mounts opts.MountOpt + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOption opts.ListOpts + hosts opts.ListOpts + sysctls opts.ListOpts + + resources resourceOptions + stopGrace opts.DurationOpt + + replicas Uint64Opt + mode string + + restartPolicy restartPolicyOptions + constraints opts.ListOpts + placementPrefs placementPrefOpts + maxReplicas uint64 + update updateOptions + rollback updateOptions + networks opts.NetworkOpt + endpoint endpointOptions + + registryAuth bool + noResolveImage bool + + logDriver logDriverOptions + + healthcheck healthCheckOptions + secrets opts.SecretOpt + configs opts.ConfigOpt + + isolation string +} + +func newServiceOptions() *serviceOptions { + return &serviceOptions{ + labels: opts.NewListOpts(opts.ValidateLabel), + constraints: opts.NewListOpts(nil), + containerLabels: opts.NewListOpts(opts.ValidateLabel), + env: opts.NewListOpts(opts.ValidateEnv), + envFile: opts.NewListOpts(nil), + groups: opts.NewListOpts(nil), + logDriver: newLogDriverOptions(), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOption: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + hosts: opts.NewListOpts(opts.ValidateExtraHost), + sysctls: opts.NewListOpts(nil), + } +} + +func (options *serviceOptions) 
ToServiceMode() (swarm.ServiceMode, error) { + serviceMode := swarm.ServiceMode{} + switch options.mode { + case "global": + if options.replicas.Value() != nil { + return serviceMode, errors.Errorf("replicas can only be used with replicated mode") + } + + if options.maxReplicas > 0 { + return serviceMode, errors.New("replicas-max-per-node can only be used with replicated mode") + } + + serviceMode.Global = &swarm.GlobalService{} + case "replicated": + serviceMode.Replicated = &swarm.ReplicatedService{ + Replicas: options.replicas.Value(), + } + default: + return serviceMode, errors.Errorf("Unknown mode: %s, only replicated and global supported", options.mode) + } + return serviceMode, nil +} + +func (options *serviceOptions) ToStopGracePeriod(flags *pflag.FlagSet) *time.Duration { + if flags.Changed(flagStopGracePeriod) { + return options.stopGrace.Value() + } + return nil +} + +func (options *serviceOptions) ToService(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet) (swarm.ServiceSpec, error) { + var service swarm.ServiceSpec + + envVariables, err := opts.ReadKVEnvStrings(options.envFile.GetAll(), options.env.GetAll()) + if err != nil { + return service, err + } + + currentEnv := make([]string, 0, len(envVariables)) + for _, env := range envVariables { // need to process each var, in order + k := strings.SplitN(env, "=", 2)[0] + for i, current := range currentEnv { // remove duplicates + if current == env { + continue // no update required, may hide this behind flag to preserve order of envVariables + } + if strings.HasPrefix(current, k+"=") { + currentEnv = append(currentEnv[:i], currentEnv[i+1:]...) 
+ } + } + currentEnv = append(currentEnv, env) + } + + healthConfig, err := options.healthcheck.toHealthConfig() + if err != nil { + return service, err + } + + serviceMode, err := options.ToServiceMode() + if err != nil { + return service, err + } + + networks := convertNetworks(options.networks) + for i, net := range networks { + nwID, err := resolveNetworkID(ctx, apiClient, net.Target) + if err != nil { + return service, err + } + networks[i].Target = nwID + } + sort.Slice(networks, func(i, j int) bool { + return networks[i].Target < networks[j].Target + }) + + resources, err := options.resources.ToResourceRequirements() + if err != nil { + return service, err + } + + service = swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: options.name, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: options.image, + Args: options.args, + Command: options.entrypoint.Value(), + Env: currentEnv, + Hostname: options.hostname, + Labels: opts.ConvertKVStringsToMap(options.containerLabels.GetAll()), + Dir: options.workdir, + User: options.user, + Groups: options.groups.GetAll(), + StopSignal: options.stopSignal, + TTY: options.tty, + ReadOnly: options.readOnly, + Mounts: options.mounts.Value(), + Init: &options.init, + DNSConfig: &swarm.DNSConfig{ + Nameservers: options.dns.GetAll(), + Search: options.dnsSearch.GetAll(), + Options: options.dnsOption.GetAll(), + }, + Hosts: convertExtraHostsToSwarmHosts(options.hosts.GetAll()), + StopGracePeriod: options.ToStopGracePeriod(flags), + Healthcheck: healthConfig, + Isolation: container.Isolation(options.isolation), + Sysctls: opts.ConvertKVStringsToMap(options.sysctls.GetAll()), + }, + Networks: networks, + Resources: resources, + RestartPolicy: options.restartPolicy.ToRestartPolicy(flags), + Placement: &swarm.Placement{ + Constraints: options.constraints.GetAll(), + Preferences: options.placementPrefs.prefs, + MaxReplicas: 
options.maxReplicas, + }, + LogDriver: options.logDriver.toLogDriver(), + }, + Mode: serviceMode, + UpdateConfig: options.update.updateConfig(flags), + RollbackConfig: options.rollback.rollbackConfig(flags), + EndpointSpec: options.endpoint.ToEndpointSpec(), + } + + if options.credentialSpec.String() != "" && options.credentialSpec.Value() != nil { + service.TaskTemplate.ContainerSpec.Privileges = &swarm.Privileges{ + CredentialSpec: options.credentialSpec.Value(), + } + } + + return service, nil +} + +type flagDefaults map[string]interface{} + +func (fd flagDefaults) getUint64(flagName string) uint64 { + if val, ok := fd[flagName].(uint64); ok { + return val + } + return 0 +} + +func (fd flagDefaults) getString(flagName string) string { + if val, ok := fd[flagName].(string); ok { + return val + } + return "" +} + +func buildServiceDefaultFlagMapping() flagDefaults { + defaultFlagValues := make(map[string]interface{}) + + defaultFlagValues[flagStopGracePeriod], _ = gogotypes.DurationFromProto(defaults.Service.Task.GetContainer().StopGracePeriod) + defaultFlagValues[flagRestartCondition] = `"` + defaultRestartCondition() + `"` + defaultFlagValues[flagRestartDelay], _ = gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) + + if defaults.Service.Task.Restart.MaxAttempts != 0 { + defaultFlagValues[flagRestartMaxAttempts] = defaults.Service.Task.Restart.MaxAttempts + } + + defaultRestartWindow, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Window) + if defaultRestartWindow != 0 { + defaultFlagValues[flagRestartWindow] = defaultRestartWindow + } + + defaultFlagValues[flagUpdateParallelism] = defaults.Service.Update.Parallelism + defaultFlagValues[flagUpdateDelay] = defaults.Service.Update.Delay + defaultFlagValues[flagUpdateMonitor], _ = gogotypes.DurationFromProto(defaults.Service.Update.Monitor) + defaultFlagValues[flagUpdateFailureAction] = `"` + 
strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaults.Service.Update.FailureAction)]) + `"` + defaultFlagValues[flagUpdateMaxFailureRatio] = defaults.Service.Update.MaxFailureRatio + defaultFlagValues[flagUpdateOrder] = `"` + defaultOrder(defaults.Service.Update.Order) + `"` + + defaultFlagValues[flagRollbackParallelism] = defaults.Service.Rollback.Parallelism + defaultFlagValues[flagRollbackDelay] = defaults.Service.Rollback.Delay + defaultFlagValues[flagRollbackMonitor], _ = gogotypes.DurationFromProto(defaults.Service.Rollback.Monitor) + defaultFlagValues[flagRollbackFailureAction] = `"` + strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaults.Service.Rollback.FailureAction)]) + `"` + defaultFlagValues[flagRollbackMaxFailureRatio] = defaults.Service.Rollback.MaxFailureRatio + defaultFlagValues[flagRollbackOrder] = `"` + defaultOrder(defaults.Service.Rollback.Order) + `"` + + defaultFlagValues[flagEndpointMode] = "vip" + + return defaultFlagValues +} + +func addDetachFlag(flags *pflag.FlagSet, detach *bool) { + flags.BoolVarP(detach, flagDetach, "d", false, "Exit immediately instead of waiting for the service to converge") + flags.SetAnnotation(flagDetach, "version", []string{"1.29"}) +} + +// addServiceFlags adds all flags that are common to both `create` and `update`. 
+// Any flags that are not common are added separately in the individual command +func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValues flagDefaults) { + flagDesc := func(flagName string, desc string) string { + if defaultValue, ok := defaultFlagValues[flagName]; ok { + return fmt.Sprintf("%s (default %v)", desc, defaultValue) + } + return desc + } + + addDetachFlag(flags, &opts.detach) + flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Suppress progress output") + + flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container") + flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: [:])") + flags.Var(&opts.credentialSpec, flagCredentialSpec, "Credential spec for managed service account (Windows only)") + flags.SetAnnotation(flagCredentialSpec, "version", []string{"1.29"}) + flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname") + flags.SetAnnotation(flagHostname, "version", []string{"1.25"}) + flags.Var(&opts.entrypoint, flagEntrypoint, "Overwrite the default ENTRYPOINT of the image") + + flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") + flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") + flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") + flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") + + flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)")) + flags.Var(&opts.replicas, flagReplicas, "Number of tasks") + flags.Uint64Var(&opts.maxReplicas, flagMaxReplicas, defaultFlagValues.getUint64(flagMaxReplicas), "Maximum number of tasks per node (default 0 = unlimited)") + flags.SetAnnotation(flagMaxReplicas, "version", []string{"1.40"}) + + flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met 
("none"|"on-failure"|"any")`)) + flags.Var(&opts.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)")) + flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, flagDesc(flagRestartMaxAttempts, "Maximum number of restarts before giving up")) + + flags.Var(&opts.restartPolicy.window, flagRestartWindow, flagDesc(flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)")) + + flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, defaultFlagValues.getUint64(flagUpdateParallelism), "Maximum number of tasks updated simultaneously (0 to update all at once)") + flags.DurationVar(&opts.update.delay, flagUpdateDelay, 0, flagDesc(flagUpdateDelay, "Delay between updates (ns|us|ms|s|m|h)")) + flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, 0, flagDesc(flagUpdateMonitor, "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)")) + flags.SetAnnotation(flagUpdateMonitor, "version", []string{"1.25"}) + flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "", flagDesc(flagUpdateFailureAction, `Action on update failure ("pause"|"continue"|"rollback")`)) + flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, flagDesc(flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update")) + flags.SetAnnotation(flagUpdateMaxFailureRatio, "version", []string{"1.25"}) + flags.StringVar(&opts.update.order, flagUpdateOrder, "", flagDesc(flagUpdateOrder, `Update order ("start-first"|"stop-first")`)) + flags.SetAnnotation(flagUpdateOrder, "version", []string{"1.29"}) + + flags.Uint64Var(&opts.rollback.parallelism, flagRollbackParallelism, defaultFlagValues.getUint64(flagRollbackParallelism), + "Maximum number of tasks rolled back simultaneously (0 to roll back all at once)") + flags.SetAnnotation(flagRollbackParallelism, "version", []string{"1.28"}) + flags.DurationVar(&opts.rollback.delay, flagRollbackDelay, 0, 
flagDesc(flagRollbackDelay, "Delay between task rollbacks (ns|us|ms|s|m|h)")) + flags.SetAnnotation(flagRollbackDelay, "version", []string{"1.28"}) + flags.DurationVar(&opts.rollback.monitor, flagRollbackMonitor, 0, flagDesc(flagRollbackMonitor, "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)")) + flags.SetAnnotation(flagRollbackMonitor, "version", []string{"1.28"}) + flags.StringVar(&opts.rollback.onFailure, flagRollbackFailureAction, "", flagDesc(flagRollbackFailureAction, `Action on rollback failure ("pause"|"continue")`)) + flags.SetAnnotation(flagRollbackFailureAction, "version", []string{"1.28"}) + flags.Var(&opts.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, flagDesc(flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback")) + flags.SetAnnotation(flagRollbackMaxFailureRatio, "version", []string{"1.28"}) + flags.StringVar(&opts.rollback.order, flagRollbackOrder, "", flagDesc(flagRollbackOrder, `Rollback order ("start-first"|"stop-first")`)) + flags.SetAnnotation(flagRollbackOrder, "version", []string{"1.29"}) + + flags.StringVar(&opts.endpoint.mode, flagEndpointMode, defaultFlagValues.getString(flagEndpointMode), "Endpoint mode (vip or dnsrr)") + + flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents") + flags.BoolVar(&opts.noResolveImage, flagNoResolveImage, false, "Do not query the registry to resolve image digest and supported platforms") + flags.SetAnnotation(flagNoResolveImage, "version", []string{"1.30"}) + + flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service") + flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options") + + flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health") + flags.SetAnnotation(flagHealthCmd, "version", []string{"1.25"}) + flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ms|s|m|h)") + 
flags.SetAnnotation(flagHealthInterval, "version", []string{"1.25"}) + flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ms|s|m|h)") + flags.SetAnnotation(flagHealthTimeout, "version", []string{"1.25"}) + flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy") + flags.SetAnnotation(flagHealthRetries, "version", []string{"1.25"}) + flags.Var(&opts.healthcheck.startPeriod, flagHealthStartPeriod, "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)") + flags.SetAnnotation(flagHealthStartPeriod, "version", []string{"1.29"}) + flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK") + flags.SetAnnotation(flagNoHealthcheck, "version", []string{"1.25"}) + + flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY") + flags.SetAnnotation(flagTTY, "version", []string{"1.25"}) + + flags.BoolVar(&opts.readOnly, flagReadOnly, false, "Mount the container's root filesystem as read only") + flags.SetAnnotation(flagReadOnly, "version", []string{"1.28"}) + + flags.StringVar(&opts.stopSignal, flagStopSignal, "", "Signal to stop the container") + flags.SetAnnotation(flagStopSignal, "version", []string{"1.28"}) + flags.StringVar(&opts.isolation, flagIsolation, "", "Service container isolation mode") + flags.SetAnnotation(flagIsolation, "version", []string{"1.35"}) +} + +const ( + flagCredentialSpec = "credential-spec" + flagPlacementPref = "placement-pref" + flagPlacementPrefAdd = "placement-pref-add" + flagPlacementPrefRemove = "placement-pref-rm" + flagConstraint = "constraint" + flagConstraintRemove = "constraint-rm" + flagConstraintAdd = "constraint-add" + flagContainerLabel = "container-label" + flagContainerLabelRemove = "container-label-rm" + flagContainerLabelAdd = "container-label-add" + flagDetach = "detach" + flagDNS = "dns" + flagDNSRemove = "dns-rm" + 
flagDNSAdd = "dns-add" + flagDNSOption = "dns-option" + flagDNSOptionRemove = "dns-option-rm" + flagDNSOptionAdd = "dns-option-add" + flagDNSSearch = "dns-search" + flagDNSSearchRemove = "dns-search-rm" + flagDNSSearchAdd = "dns-search-add" + flagEndpointMode = "endpoint-mode" + flagEntrypoint = "entrypoint" + flagEnv = "env" + flagEnvFile = "env-file" + flagEnvRemove = "env-rm" + flagEnvAdd = "env-add" + flagGenericResourcesRemove = "generic-resource-rm" + flagGenericResourcesAdd = "generic-resource-add" + flagGroup = "group" + flagGroupAdd = "group-add" + flagGroupRemove = "group-rm" + flagHost = "host" + flagHostAdd = "host-add" + flagHostRemove = "host-rm" + flagHostname = "hostname" + flagLabel = "label" + flagLabelRemove = "label-rm" + flagLabelAdd = "label-add" + flagLimitCPU = "limit-cpu" + flagLimitMemory = "limit-memory" + flagMaxReplicas = "replicas-max-per-node" + flagMode = "mode" + flagMount = "mount" + flagMountRemove = "mount-rm" + flagMountAdd = "mount-add" + flagName = "name" + flagNetwork = "network" + flagNetworkAdd = "network-add" + flagNetworkRemove = "network-rm" + flagPublish = "publish" + flagPublishRemove = "publish-rm" + flagPublishAdd = "publish-add" + flagQuiet = "quiet" + flagReadOnly = "read-only" + flagReplicas = "replicas" + flagReserveCPU = "reserve-cpu" + flagReserveMemory = "reserve-memory" + flagRestartCondition = "restart-condition" + flagRestartDelay = "restart-delay" + flagRestartMaxAttempts = "restart-max-attempts" + flagRestartWindow = "restart-window" + flagRollback = "rollback" + flagRollbackDelay = "rollback-delay" + flagRollbackFailureAction = "rollback-failure-action" + flagRollbackMaxFailureRatio = "rollback-max-failure-ratio" + flagRollbackMonitor = "rollback-monitor" + flagRollbackOrder = "rollback-order" + flagRollbackParallelism = "rollback-parallelism" + flagInit = "init" + flagSysCtl = "sysctl" + flagSysCtlAdd = "sysctl-add" + flagSysCtlRemove = "sysctl-rm" + flagStopGracePeriod = "stop-grace-period" + 
flagStopSignal = "stop-signal" + flagTTY = "tty" + flagUpdateDelay = "update-delay" + flagUpdateFailureAction = "update-failure-action" + flagUpdateMaxFailureRatio = "update-max-failure-ratio" + flagUpdateMonitor = "update-monitor" + flagUpdateOrder = "update-order" + flagUpdateParallelism = "update-parallelism" + flagUser = "user" + flagWorkdir = "workdir" + flagRegistryAuth = "with-registry-auth" + flagNoResolveImage = "no-resolve-image" + flagLogDriver = "log-driver" + flagLogOpt = "log-opt" + flagHealthCmd = "health-cmd" + flagHealthInterval = "health-interval" + flagHealthRetries = "health-retries" + flagHealthTimeout = "health-timeout" + flagHealthStartPeriod = "health-start-period" + flagNoHealthcheck = "no-healthcheck" + flagSecret = "secret" + flagSecretAdd = "secret-add" + flagSecretRemove = "secret-rm" + flagConfig = "config" + flagConfigAdd = "config-add" + flagConfigRemove = "config-rm" + flagIsolation = "isolation" +) + +func validateAPIVersion(c swarm.ServiceSpec, serverAPIVersion string) error { + for _, m := range c.TaskTemplate.ContainerSpec.Mounts { + if m.BindOptions != nil && m.BindOptions.NonRecursive && versions.LessThan(serverAPIVersion, "1.40") { + return errors.Errorf("bind-nonrecursive requires API v1.40 or later") + } + } + return nil +} diff --git a/cli/cli/command/service/opts_test.go b/cli/cli/command/service/opts_test.go new file mode 100644 index 00000000..6b9b83d1 --- /dev/null +++ b/cli/cli/command/service/opts_test.go @@ -0,0 +1,302 @@ +package service + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCredentialSpecOpt(t *testing.T) { + tests := []struct { + name string + in string + value swarm.CredentialSpec + expectedErr string + }{ + { + name: "empty", + in: "", + value: 
swarm.CredentialSpec{}, + }, + { + name: "no-prefix", + in: "noprefix", + value: swarm.CredentialSpec{}, + expectedErr: `invalid credential spec: value must be prefixed with "config://", "file://", or "registry://"`, + }, + { + name: "config", + in: "config://0bt9dmxjvjiqermk6xrop3ekq", + value: swarm.CredentialSpec{Config: "0bt9dmxjvjiqermk6xrop3ekq"}, + }, + { + name: "file", + in: "file://somefile.json", + value: swarm.CredentialSpec{File: "somefile.json"}, + }, + { + name: "registry", + in: "registry://testing", + value: swarm.CredentialSpec{Registry: "testing"}, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + var cs credentialSpecOpt + + err := cs.Set(tc.in) + + if tc.expectedErr != "" { + assert.Error(t, err, tc.expectedErr) + } else { + assert.NilError(t, err) + } + + assert.Equal(t, cs.String(), tc.in) + assert.DeepEqual(t, cs.Value(), &tc.value) + }) + } +} + +func TestMemBytesString(t *testing.T) { + var mem opts.MemBytes = 1048576 + assert.Check(t, is.Equal("1MiB", mem.String())) +} + +func TestMemBytesSetAndValue(t *testing.T) { + var mem opts.MemBytes + assert.NilError(t, mem.Set("5kb")) + assert.Check(t, is.Equal(int64(5120), mem.Value())) +} + +func TestNanoCPUsString(t *testing.T) { + var cpus opts.NanoCPUs = 6100000000 + assert.Check(t, is.Equal("6.100", cpus.String())) +} + +func TestNanoCPUsSetAndValue(t *testing.T) { + var cpus opts.NanoCPUs + assert.NilError(t, cpus.Set("0.35")) + assert.Check(t, is.Equal(int64(350000000), cpus.Value())) +} + +func TestUint64OptString(t *testing.T) { + value := uint64(2345678) + opt := Uint64Opt{value: &value} + assert.Check(t, is.Equal("2345678", opt.String())) + + opt = Uint64Opt{} + assert.Check(t, is.Equal("", opt.String())) +} + +func TestUint64OptSetAndValue(t *testing.T) { + var opt Uint64Opt + assert.NilError(t, opt.Set("14445")) + assert.Check(t, is.Equal(uint64(14445), *opt.Value())) +} + +func TestHealthCheckOptionsToHealthConfig(t *testing.T) { + dur := 
time.Second + opt := healthCheckOptions{ + cmd: "curl", + interval: opts.PositiveDurationOpt{DurationOpt: *opts.NewDurationOpt(&dur)}, + timeout: opts.PositiveDurationOpt{DurationOpt: *opts.NewDurationOpt(&dur)}, + startPeriod: opts.PositiveDurationOpt{DurationOpt: *opts.NewDurationOpt(&dur)}, + retries: 10, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(&container.HealthConfig{ + Test: []string{"CMD-SHELL", "curl"}, + Interval: time.Second, + Timeout: time.Second, + StartPeriod: time.Second, + Retries: 10, + }, config)) +} + +func TestHealthCheckOptionsToHealthConfigNoHealthcheck(t *testing.T) { + opt := healthCheckOptions{ + noHealthcheck: true, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(&container.HealthConfig{ + Test: []string{"NONE"}, + }, config)) +} + +func TestHealthCheckOptionsToHealthConfigConflict(t *testing.T) { + opt := healthCheckOptions{ + cmd: "curl", + noHealthcheck: true, + } + _, err := opt.toHealthConfig() + assert.Error(t, err, "--no-healthcheck conflicts with --health-* options") +} + +func TestResourceOptionsToResourceRequirements(t *testing.T) { + incorrectOptions := []resourceOptions{ + { + resGenericResources: []string{"foo=bar", "foo=1"}, + }, + { + resGenericResources: []string{"foo=bar", "foo=baz"}, + }, + { + resGenericResources: []string{"foo=bar"}, + }, + { + resGenericResources: []string{"foo=1", "foo=2"}, + }, + } + + for _, opt := range incorrectOptions { + _, err := opt.ToResourceRequirements() + assert.Check(t, is.ErrorContains(err, "")) + } + + correctOptions := []resourceOptions{ + { + resGenericResources: []string{"foo=1"}, + }, + { + resGenericResources: []string{"foo=1", "bar=2"}, + }, + } + + for _, opt := range correctOptions { + r, err := opt.ToResourceRequirements() + assert.NilError(t, err) + assert.Check(t, is.Len(r.Reservations.GenericResources, len(opt.resGenericResources))) + } + +} + +func 
TestToServiceNetwork(t *testing.T) { + nws := []types.NetworkResource{ + {Name: "aaa-network", ID: "id555"}, + {Name: "mmm-network", ID: "id999"}, + {Name: "zzz-network", ID: "id111"}, + } + + client := &fakeClient{ + networkInspectFunc: func(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + for _, network := range nws { + if network.ID == networkID || network.Name == networkID { + return network, nil + } + } + return types.NetworkResource{}, fmt.Errorf("network not found: %s", networkID) + }, + } + + nwo := opts.NetworkOpt{} + nwo.Set("zzz-network") + nwo.Set("mmm-network") + nwo.Set("aaa-network") + + o := newServiceOptions() + o.mode = "replicated" + o.networks = nwo + + ctx := context.Background() + flags := newCreateCommand(nil).Flags() + service, err := o.ToService(ctx, client, flags) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id111"}, {Target: "id555"}, {Target: "id999"}}, service.TaskTemplate.Networks)) +} + +func TestToServiceUpdateRollback(t *testing.T) { + expected := swarm.ServiceSpec{ + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: 23, + Delay: 34 * time.Second, + Monitor: 54321 * time.Nanosecond, + FailureAction: "pause", + MaxFailureRatio: 0.6, + Order: "stop-first", + }, + RollbackConfig: &swarm.UpdateConfig{ + Parallelism: 12, + Delay: 23 * time.Second, + Monitor: 12345 * time.Nanosecond, + FailureAction: "continue", + MaxFailureRatio: 0.5, + Order: "start-first", + }, + } + + // Note: in test-situation, the flags are only used to detect if an option + // was set; the actual value itself is read from the serviceOptions below. 
+ flags := newCreateCommand(nil).Flags() + flags.Set("update-parallelism", "23") + flags.Set("update-delay", "34s") + flags.Set("update-monitor", "54321ns") + flags.Set("update-failure-action", "pause") + flags.Set("update-max-failure-ratio", "0.6") + flags.Set("update-order", "stop-first") + + flags.Set("rollback-parallelism", "12") + flags.Set("rollback-delay", "23s") + flags.Set("rollback-monitor", "12345ns") + flags.Set("rollback-failure-action", "continue") + flags.Set("rollback-max-failure-ratio", "0.5") + flags.Set("rollback-order", "start-first") + + o := newServiceOptions() + o.mode = "replicated" + o.update = updateOptions{ + parallelism: 23, + delay: 34 * time.Second, + monitor: 54321 * time.Nanosecond, + onFailure: "pause", + maxFailureRatio: 0.6, + order: "stop-first", + } + o.rollback = updateOptions{ + parallelism: 12, + delay: 23 * time.Second, + monitor: 12345 * time.Nanosecond, + onFailure: "continue", + maxFailureRatio: 0.5, + order: "start-first", + } + + service, err := o.ToService(context.Background(), &fakeClient{}, flags) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(service.UpdateConfig, expected.UpdateConfig)) + assert.Check(t, is.DeepEqual(service.RollbackConfig, expected.RollbackConfig)) +} + +func TestToServiceMaxReplicasGlobalModeConflict(t *testing.T) { + opt := serviceOptions{ + mode: "global", + maxReplicas: 1, + } + _, err := opt.ToServiceMode() + assert.Error(t, err, "replicas-max-per-node can only be used with replicated mode") +} + +func TestToServiceSysCtls(t *testing.T) { + o := newServiceOptions() + o.mode = "replicated" + o.sysctls.Set("net.ipv4.ip_forward=1") + o.sysctls.Set("kernel.shmmax=123456") + + expected := map[string]string{"net.ipv4.ip_forward": "1", "kernel.shmmax": "123456"} + flags := newCreateCommand(nil).Flags() + service, err := o.ToService(context.Background(), &fakeClient{}, flags) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(service.TaskTemplate.ContainerSpec.Sysctls, expected)) +} 
diff --git a/cli/cli/command/service/parse.go b/cli/cli/command/service/parse.go new file mode 100644 index 00000000..25677d38 --- /dev/null +++ b/cli/cli/command/service/parse.go @@ -0,0 +1,158 @@ +package service + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/pkg/errors" +) + +// ParseSecrets retrieves the secrets with the requested names and fills +// secret IDs into the secret references. +func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) { + if len(requestedSecrets) == 0 { + return []*swarmtypes.SecretReference{}, nil + } + + secretRefs := make(map[string]*swarmtypes.SecretReference) + ctx := context.Background() + + for _, secret := range requestedSecrets { + if _, exists := secretRefs[secret.File.Name]; exists { + return nil, errors.Errorf("duplicate secret target for %s not allowed", secret.SecretName) + } + secretRef := new(swarmtypes.SecretReference) + *secretRef = *secret + secretRefs[secret.File.Name] = secretRef + } + + args := filters.NewArgs() + for _, s := range secretRefs { + args.Add("name", s.SecretName) + } + + secrets, err := client.SecretList(ctx, types.SecretListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundSecrets := make(map[string]string) + for _, secret := range secrets { + foundSecrets[secret.Spec.Annotations.Name] = secret.ID + } + + addedSecrets := []*swarmtypes.SecretReference{} + + for _, ref := range secretRefs { + id, ok := foundSecrets[ref.SecretName] + if !ok { + return nil, errors.Errorf("secret not found: %s", ref.SecretName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.SecretID = id + addedSecrets = append(addedSecrets, ref) + } + + return addedSecrets, nil +} + +// 
ParseConfigs retrieves the configs from the requested names and converts +// them to config references to use with the spec +func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) { + if len(requestedConfigs) == 0 { + return []*swarmtypes.ConfigReference{}, nil + } + + // the configRefs map has two purposes: it prevents duplication of config + // target filenames, and it it used to get all configs so we can resolve + // their IDs. unfortunately, there are other targets for ConfigReferences, + // besides just a File; specifically, the Runtime target, which is used for + // CredentialSpecs. Therefore, we need to have a list of ConfigReferences + // that are not File targets as well. at this time of writing, the only use + // for Runtime targets is CredentialSpecs. However, to future-proof this + // functionality, we should handle the case where multiple Runtime targets + // are in use for the same Config, and we should deduplicate + // such ConfigReferences, as no matter how many times the Config is used, + // it is only needed to be referenced once. + configRefs := make(map[string]*swarmtypes.ConfigReference) + runtimeRefs := make(map[string]*swarmtypes.ConfigReference) + ctx := context.Background() + + for _, config := range requestedConfigs { + // copy the config, so we don't mutate the args + configRef := new(swarmtypes.ConfigReference) + *configRef = *config + + if config.Runtime != nil { + // by assigning to a map based on ConfigName, if the same Config + // is required as a Runtime target for multiple purposes, we only + // include it once in the final set of configs. 
+ runtimeRefs[config.ConfigName] = config + // continue, so we skip the logic below for handling file-type + // configs + continue + } + + if _, exists := configRefs[config.File.Name]; exists { + return nil, errors.Errorf("duplicate config target for %s not allowed", config.ConfigName) + } + + configRefs[config.File.Name] = configRef + } + + args := filters.NewArgs() + for _, s := range configRefs { + args.Add("name", s.ConfigName) + } + for _, s := range runtimeRefs { + args.Add("name", s.ConfigName) + } + + configs, err := client.ConfigList(ctx, types.ConfigListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundConfigs := make(map[string]string) + for _, config := range configs { + foundConfigs[config.Spec.Annotations.Name] = config.ID + } + + addedConfigs := []*swarmtypes.ConfigReference{} + + for _, ref := range configRefs { + id, ok := foundConfigs[ref.ConfigName] + if !ok { + return nil, errors.Errorf("config not found: %s", ref.ConfigName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.ConfigID = id + addedConfigs = append(addedConfigs, ref) + } + + // unfortunately, because the key of configRefs and runtimeRefs is different + // values that may collide, we can't just do some fancy trickery to + // concat maps, we need to do two separate loops + for _, ref := range runtimeRefs { + id, ok := foundConfigs[ref.ConfigName] + if !ok { + return nil, errors.Errorf("config not found: %s", ref.ConfigName) + } + + ref.ConfigID = id + addedConfigs = append(addedConfigs, ref) + } + + return addedConfigs, nil +} diff --git a/cli/cli/command/service/progress/progress.go b/cli/cli/command/service/progress/progress.go new file mode 100644 index 00000000..4b9cdd73 --- /dev/null +++ b/cli/cli/command/service/progress/progress.go @@ -0,0 +1,504 @@ +package progress + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + "time" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" +) + +var ( + numberedStates = map[swarm.TaskState]int64{ + swarm.TaskStateNew: 1, + swarm.TaskStateAllocated: 2, + swarm.TaskStatePending: 3, + swarm.TaskStateAssigned: 4, + swarm.TaskStateAccepted: 5, + swarm.TaskStatePreparing: 6, + swarm.TaskStateReady: 7, + swarm.TaskStateStarting: 8, + swarm.TaskStateRunning: 9, + + // The following states are not actually shown in progress + // output, but are used internally for ordering. + swarm.TaskStateComplete: 10, + swarm.TaskStateShutdown: 11, + swarm.TaskStateFailed: 12, + swarm.TaskStateRejected: 13, + } + + longestState int +) + +const ( + maxProgress = 9 + maxProgressBars = 20 +) + +type progressUpdater interface { + update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) +} + +func init() { + for state := range numberedStates { + if !terminalState(state) && len(state) > longestState { + longestState = len(state) + } + } +} + +func terminalState(state swarm.TaskState) bool { + return numberedStates[state] > numberedStates[swarm.TaskStateRunning] +} + +func stateToProgress(state swarm.TaskState, rollback bool) int64 { + if !rollback { + return numberedStates[state] + } + return numberedStates[swarm.TaskStateRunning] - numberedStates[state] +} + +// ServiceProgress outputs progress information for convergence of a service. 
+// nolint: gocyclo +func ServiceProgress(ctx context.Context, client client.APIClient, serviceID string, progressWriter io.WriteCloser) error { + defer progressWriter.Close() + + progressOut := streamformatter.NewJSONProgressOutput(progressWriter, false) + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, os.Interrupt) + defer signal.Stop(sigint) + + taskFilter := filters.NewArgs() + taskFilter.Add("service", serviceID) + taskFilter.Add("_up-to-date", "true") + + getUpToDateTasks := func() ([]swarm.Task, error) { + return client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + } + + var ( + updater progressUpdater + converged bool + convergedAt time.Time + monitor = 5 * time.Second + rollback bool + ) + + for { + service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) + if err != nil { + return err + } + + if service.Spec.UpdateConfig != nil && service.Spec.UpdateConfig.Monitor != 0 { + monitor = service.Spec.UpdateConfig.Monitor + } + + if updater == nil { + updater, err = initializeUpdater(service, progressOut) + if err != nil { + return err + } + } + + if service.UpdateStatus != nil { + switch service.UpdateStatus.State { + case swarm.UpdateStateUpdating: + rollback = false + case swarm.UpdateStateCompleted: + if !converged { + return nil + } + case swarm.UpdateStatePaused: + return fmt.Errorf("service update paused: %s", service.UpdateStatus.Message) + case swarm.UpdateStateRollbackStarted: + if !rollback && service.UpdateStatus.Message != "" { + progressOut.WriteProgress(progress.Progress{ + ID: "rollback", + Action: service.UpdateStatus.Message, + }) + } + rollback = true + case swarm.UpdateStateRollbackPaused: + return fmt.Errorf("service rollback paused: %s", service.UpdateStatus.Message) + case swarm.UpdateStateRollbackCompleted: + if !converged { + return fmt.Errorf("service rolled back: %s", service.UpdateStatus.Message) + } + } + } + if converged && time.Since(convergedAt) >= monitor { + 
progressOut.WriteProgress(progress.Progress{ + ID: "verify", + Action: "Service converged", + }) + + return nil + } + + tasks, err := getUpToDateTasks() + if err != nil { + return err + } + + activeNodes, err := getActiveNodes(ctx, client) + if err != nil { + return err + } + + converged, err = updater.update(service, tasks, activeNodes, rollback) + if err != nil { + return err + } + if converged { + if convergedAt.IsZero() { + convergedAt = time.Now() + } + wait := monitor - time.Since(convergedAt) + if wait >= 0 { + progressOut.WriteProgress(progress.Progress{ + // Ideally this would have no ID, but + // the progress rendering code behaves + // poorly on an "action" with no ID. It + // returns the cursor to the beginning + // of the line, so the first character + // may be difficult to read. Then the + // output is overwritten by the shell + // prompt when the command finishes. + ID: "verify", + Action: fmt.Sprintf("Waiting %d seconds to verify that tasks are stable...", wait/time.Second+1), + }) + } + } else { + if !convergedAt.IsZero() { + progressOut.WriteProgress(progress.Progress{ + ID: "verify", + Action: "Detected task failure", + }) + } + convergedAt = time.Time{} + } + + select { + case <-time.After(200 * time.Millisecond): + case <-sigint: + if !converged { + progress.Message(progressOut, "", "Operation continuing in background.") + progress.Messagef(progressOut, "", "Use `docker service ps %s` to check progress.", serviceID) + } + return nil + } + } +} + +func getActiveNodes(ctx context.Context, client client.APIClient) (map[string]struct{}, error) { + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return nil, err + } + + activeNodes := make(map[string]struct{}) + for _, n := range nodes { + if n.Status.State != swarm.NodeStateDown { + activeNodes[n.ID] = struct{}{} + } + } + return activeNodes, nil +} + +func initializeUpdater(service swarm.Service, progressOut progress.Output) (progressUpdater, error) { + if 
service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + return &replicatedProgressUpdater{ + progressOut: progressOut, + }, nil + } + if service.Spec.Mode.Global != nil { + return &globalProgressUpdater{ + progressOut: progressOut, + }, nil + } + return nil, errors.New("unrecognized service mode") +} + +func writeOverallProgress(progressOut progress.Output, numerator, denominator int, rollback bool) { + if rollback { + progressOut.WriteProgress(progress.Progress{ + ID: "overall progress", + Action: fmt.Sprintf("rolling back update: %d out of %d tasks", numerator, denominator), + }) + return + } + progressOut.WriteProgress(progress.Progress{ + ID: "overall progress", + Action: fmt.Sprintf("%d out of %d tasks", numerator, denominator), + }) +} + +func truncError(errMsg string) string { + // Remove newlines from the error, which corrupt the output. + errMsg = strings.Replace(errMsg, "\n", " ", -1) + + // Limit the length to 75 characters, so that even on narrow terminals + // this will not overflow to the next line. 
+ if len(errMsg) > 75 { + errMsg = errMsg[:74] + "…" + } + return errMsg +} + +type replicatedProgressUpdater struct { + progressOut progress.Output + + // used for mapping slots to a contiguous space + // this also causes progress bars to appear in order + slotMap map[int]int + + initialized bool + done bool +} + +func (u *replicatedProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) { + if service.Spec.Mode.Replicated == nil || service.Spec.Mode.Replicated.Replicas == nil { + return false, errors.New("no replica count") + } + replicas := *service.Spec.Mode.Replicated.Replicas + + if !u.initialized { + u.slotMap = make(map[int]int) + + // Draw progress bars in order + writeOverallProgress(u.progressOut, 0, int(replicas), rollback) + + if replicas <= maxProgressBars { + for i := uint64(1); i <= replicas; i++ { + progress.Update(u.progressOut, fmt.Sprintf("%d/%d", i, replicas), " ") + } + } + u.initialized = true + } + + tasksBySlot := u.tasksBySlot(tasks, activeNodes) + + // If we had reached a converged state, check if we are still converged. 
+ if u.done { + for _, task := range tasksBySlot { + if task.Status.State != swarm.TaskStateRunning { + u.done = false + break + } + } + } + + running := uint64(0) + + for _, task := range tasksBySlot { + mappedSlot := u.slotMap[task.Slot] + if mappedSlot == 0 { + mappedSlot = len(u.slotMap) + 1 + u.slotMap[task.Slot] = mappedSlot + } + + if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning { + running++ + } + + u.writeTaskProgress(task, mappedSlot, replicas, rollback) + } + + if !u.done { + writeOverallProgress(u.progressOut, int(running), int(replicas), rollback) + + if running == replicas { + u.done = true + } + } + + return running == replicas, nil +} + +func (u *replicatedProgressUpdater) tasksBySlot(tasks []swarm.Task, activeNodes map[string]struct{}) map[int]swarm.Task { + // If there are multiple tasks with the same slot number, favor the one + // with the *lowest* desired state. This can happen in restart + // scenarios. + tasksBySlot := make(map[int]swarm.Task) + for _, task := range tasks { + if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 { + continue + } + if existingTask, ok := tasksBySlot[task.Slot]; ok { + if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] { + continue + } + // If the desired states match, observed state breaks + // ties. This can happen with the "start first" service + // update mode. 
+ if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] && + numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] { + continue + } + } + if task.NodeID != "" { + if _, nodeActive := activeNodes[task.NodeID]; !nodeActive { + continue + } + } + tasksBySlot[task.Slot] = task + } + + return tasksBySlot +} + +func (u *replicatedProgressUpdater) writeTaskProgress(task swarm.Task, mappedSlot int, replicas uint64, rollback bool) { + if u.done || replicas > maxProgressBars || uint64(mappedSlot) > replicas { + return + } + + if task.Status.Err != "" { + u.progressOut.WriteProgress(progress.Progress{ + ID: fmt.Sprintf("%d/%d", mappedSlot, replicas), + Action: truncError(task.Status.Err), + }) + return + } + + if !terminalState(task.DesiredState) && !terminalState(task.Status.State) { + u.progressOut.WriteProgress(progress.Progress{ + ID: fmt.Sprintf("%d/%d", mappedSlot, replicas), + Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State), + Current: stateToProgress(task.Status.State, rollback), + Total: maxProgress, + HideCounts: true, + }) + } +} + +type globalProgressUpdater struct { + progressOut progress.Output + + initialized bool + done bool +} + +func (u *globalProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) { + tasksByNode := u.tasksByNode(tasks) + + // We don't have perfect knowledge of how many nodes meet the + // constraints for this service. But the orchestrator creates tasks + // for all eligible nodes at the same time, so we should see all those + // nodes represented among the up-to-date tasks. + nodeCount := len(tasksByNode) + + if !u.initialized { + if nodeCount == 0 { + // Two possibilities: either the orchestrator hasn't created + // the tasks yet, or the service doesn't meet constraints for + // any node. Either way, we wait. 
+ u.progressOut.WriteProgress(progress.Progress{ + ID: "overall progress", + Action: "waiting for new tasks", + }) + return false, nil + } + + writeOverallProgress(u.progressOut, 0, nodeCount, rollback) + u.initialized = true + } + + // If we had reached a converged state, check if we are still converged. + if u.done { + for _, task := range tasksByNode { + if task.Status.State != swarm.TaskStateRunning { + u.done = false + break + } + } + } + + running := 0 + + for _, task := range tasksByNode { + if _, nodeActive := activeNodes[task.NodeID]; nodeActive { + if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning { + running++ + } + + u.writeTaskProgress(task, nodeCount, rollback) + } + } + + if !u.done { + writeOverallProgress(u.progressOut, running, nodeCount, rollback) + + if running == nodeCount { + u.done = true + } + } + + return running == nodeCount, nil +} + +func (u *globalProgressUpdater) tasksByNode(tasks []swarm.Task) map[string]swarm.Task { + // If there are multiple tasks with the same node ID, favor the one + // with the *lowest* desired state. This can happen in restart + // scenarios. + tasksByNode := make(map[string]swarm.Task) + for _, task := range tasks { + if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 { + continue + } + if existingTask, ok := tasksByNode[task.NodeID]; ok { + if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] { + continue + } + + // If the desired states match, observed state breaks + // ties. This can happen with the "start first" service + // update mode. 
+ if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] && + numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] { + continue + } + + } + tasksByNode[task.NodeID] = task + } + + return tasksByNode +} + +func (u *globalProgressUpdater) writeTaskProgress(task swarm.Task, nodeCount int, rollback bool) { + if u.done || nodeCount > maxProgressBars { + return + } + + if task.Status.Err != "" { + u.progressOut.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(task.NodeID), + Action: truncError(task.Status.Err), + }) + return + } + + if !terminalState(task.DesiredState) && !terminalState(task.Status.State) { + u.progressOut.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(task.NodeID), + Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State), + Current: stateToProgress(task.Status.State, rollback), + Total: maxProgress, + HideCounts: true, + }) + } +} diff --git a/cli/cli/command/service/progress/progress_test.go b/cli/cli/command/service/progress/progress_test.go new file mode 100644 index 00000000..2a386d64 --- /dev/null +++ b/cli/cli/command/service/progress/progress_test.go @@ -0,0 +1,375 @@ +package progress + +import ( + "fmt" + "strconv" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/progress" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +type mockProgress struct { + p []progress.Progress +} + +func (mp *mockProgress) WriteProgress(p progress.Progress) error { + mp.p = append(mp.p, p) + return nil +} + +func (mp *mockProgress) clear() { + mp.p = nil +} + +type updaterTester struct { + t *testing.T + updater progressUpdater + p *mockProgress + service swarm.Service + activeNodes map[string]struct{} + rollback bool +} + +func (u updaterTester) testUpdater(tasks []swarm.Task, expectedConvergence bool, expectedProgress []progress.Progress) { + u.p.clear() + + converged, err := u.updater.update(u.service, tasks, u.activeNodes, 
u.rollback) + assert.Check(u.t, err) + assert.Check(u.t, is.Equal(expectedConvergence, converged)) + assert.Check(u.t, is.DeepEqual(expectedProgress, u.p.p)) +} + +func TestReplicatedProgressUpdaterOneReplica(t *testing.T) { + replicas := uint64(1) + + service := swarm.Service{ + Spec: swarm.ServiceSpec{ + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &replicas, + }, + }, + }, + } + + p := &mockProgress{} + updaterTester := updaterTester{ + t: t, + updater: &replicatedProgressUpdater{ + progressOut: p, + }, + p: p, + activeNodes: map[string]struct{}{"a": {}, "b": {}}, + service: service, + } + + tasks := []swarm.Task{} + + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "0 out of 1 tasks"}, + {ID: "1/1", Action: " "}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // Task with DesiredState beyond Running is ignored + tasks = append(tasks, + swarm.Task{ID: "1", + NodeID: "a", + DesiredState: swarm.TaskStateShutdown, + Status: swarm.TaskStatus{State: swarm.TaskStateNew}, + }) + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // Task with valid DesiredState and State updates progress bar + tasks[0].DesiredState = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "1/1", Action: "new ", Current: 1, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // If the task exposes an error, we should show that instead of the + // progress bar. 
+ tasks[0].Status.Err = "something is wrong" + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "1/1", Action: "something is wrong"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // When the task reaches running, update should return true + tasks[0].Status.Err = "" + tasks[0].Status.State = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, true, + []progress.Progress{ + {ID: "1/1", Action: "running ", Current: 9, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "1 out of 1 tasks"}, + }) + + // If the task fails, update should return false again + tasks[0].Status.Err = "task failed" + tasks[0].Status.State = swarm.TaskStateFailed + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "1/1", Action: "task failed"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // If the task is restarted, progress output should be shown for the + // replacement task, not the old task. + tasks[0].DesiredState = swarm.TaskStateShutdown + tasks = append(tasks, + swarm.Task{ID: "2", + NodeID: "b", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStateRunning}, + }) + updaterTester.testUpdater(tasks, true, + []progress.Progress{ + {ID: "1/1", Action: "running ", Current: 9, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "1 out of 1 tasks"}, + }) + + // Add a new task while the current one is still running, to simulate + // "start-then-stop" updates. 
+ tasks = append(tasks, + swarm.Task{ID: "3", + NodeID: "b", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStatePreparing}, + }) + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "1/1", Action: "preparing", Current: 6, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) +} + +func TestReplicatedProgressUpdaterManyReplicas(t *testing.T) { + replicas := uint64(50) + + service := swarm.Service{ + Spec: swarm.ServiceSpec{ + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &replicas, + }, + }, + }, + } + + p := &mockProgress{} + updaterTester := updaterTester{ + t: t, + updater: &replicatedProgressUpdater{ + progressOut: p, + }, + p: p, + activeNodes: map[string]struct{}{"a": {}, "b": {}}, + service: service, + } + + tasks := []swarm.Task{} + + // No per-task progress bars because there are too many replicas + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", replicas)}, + {ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", replicas)}, + }) + + for i := 0; i != int(replicas); i++ { + tasks = append(tasks, + swarm.Task{ + ID: strconv.Itoa(i), + Slot: i + 1, + NodeID: "a", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStateNew}, + }) + + if i%2 == 1 { + tasks[i].NodeID = "b" + } + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i, replicas)}, + }) + + tasks[i].Status.State = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, uint64(i) == replicas-1, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i+1, replicas)}, + }) + } +} + +func TestGlobalProgressUpdaterOneNode(t *testing.T) { + service := swarm.Service{ + Spec: swarm.ServiceSpec{ + Mode: swarm.ServiceMode{ + Global: 
&swarm.GlobalService{}, + }, + }, + } + + p := &mockProgress{} + updaterTester := updaterTester{ + t: t, + updater: &globalProgressUpdater{ + progressOut: p, + }, + p: p, + activeNodes: map[string]struct{}{"a": {}, "b": {}}, + service: service, + } + + tasks := []swarm.Task{} + + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "waiting for new tasks"}, + }) + + // Task with DesiredState beyond Running is ignored + tasks = append(tasks, + swarm.Task{ID: "1", + NodeID: "a", + DesiredState: swarm.TaskStateShutdown, + Status: swarm.TaskStatus{State: swarm.TaskStateNew}, + }) + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "0 out of 1 tasks"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // Task with valid DesiredState and State updates progress bar + tasks[0].DesiredState = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "a", Action: "new ", Current: 1, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // If the task exposes an error, we should show that instead of the + // progress bar. 
+ tasks[0].Status.Err = "something is wrong" + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "a", Action: "something is wrong"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // When the task reaches running, update should return true + tasks[0].Status.Err = "" + tasks[0].Status.State = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, true, + []progress.Progress{ + {ID: "a", Action: "running ", Current: 9, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "1 out of 1 tasks"}, + }) + + // If the task fails, update should return false again + tasks[0].Status.Err = "task failed" + tasks[0].Status.State = swarm.TaskStateFailed + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "a", Action: "task failed"}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) + + // If the task is restarted, progress output should be shown for the + // replacement task, not the old task. + tasks[0].DesiredState = swarm.TaskStateShutdown + tasks = append(tasks, + swarm.Task{ID: "2", + NodeID: "a", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStateRunning}, + }) + updaterTester.testUpdater(tasks, true, + []progress.Progress{ + {ID: "a", Action: "running ", Current: 9, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "1 out of 1 tasks"}, + }) + + // Add a new task while the current one is still running, to simulate + // "start-then-stop" updates. 
+ tasks = append(tasks, + swarm.Task{ID: "3", + NodeID: "a", + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStatePreparing}, + }) + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "a", Action: "preparing", Current: 6, Total: 9, HideCounts: true}, + {ID: "overall progress", Action: "0 out of 1 tasks"}, + }) +} + +func TestGlobalProgressUpdaterManyNodes(t *testing.T) { + nodes := 50 + + service := swarm.Service{ + Spec: swarm.ServiceSpec{ + Mode: swarm.ServiceMode{ + Global: &swarm.GlobalService{}, + }, + }, + } + + p := &mockProgress{} + updaterTester := updaterTester{ + t: t, + updater: &globalProgressUpdater{ + progressOut: p, + }, + p: p, + activeNodes: map[string]struct{}{}, + service: service, + } + + for i := 0; i != nodes; i++ { + updaterTester.activeNodes[strconv.Itoa(i)] = struct{}{} + } + + tasks := []swarm.Task{} + + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: "waiting for new tasks"}, + }) + + for i := 0; i != nodes; i++ { + tasks = append(tasks, + swarm.Task{ + ID: "task" + strconv.Itoa(i), + NodeID: strconv.Itoa(i), + DesiredState: swarm.TaskStateRunning, + Status: swarm.TaskStatus{State: swarm.TaskStateNew}, + }) + } + + updaterTester.testUpdater(tasks, false, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", nodes)}, + {ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", nodes)}, + }) + + for i := 0; i != nodes; i++ { + tasks[i].Status.State = swarm.TaskStateRunning + updaterTester.testUpdater(tasks, i == nodes-1, + []progress.Progress{ + {ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i+1, nodes)}, + }) + } +} diff --git a/cli/cli/command/service/ps.go b/cli/cli/command/service/ps.go new file mode 100644 index 00000000..d1304047 --- /dev/null +++ b/cli/cli/command/service/ps.go @@ -0,0 +1,155 @@ +package service + +import ( + "context" + "strings" + + 
"github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/cli/command/node" + "github.com/docker/cli/cli/command/task" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type psOptions struct { + services []string + quiet bool + noResolve bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newPsCommand(dockerCli command.Cli) *cobra.Command { + options := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] SERVICE [SERVICE...]", + Short: "List the tasks of one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.services = args + return runPS(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display task IDs") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&options.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.StringVar(&options.format, "format", "", "Pretty-print tasks using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPS(dockerCli command.Cli, options psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + filter, notfound, err := createFilter(ctx, client, options) + if err != nil { + return err + } + if err := updateNodeFilter(ctx, client, filter); err != nil { + return err + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + format = task.DefaultFormat(dockerCli.ConfigFile(), options.quiet) + } + if options.quiet { + options.noTrunc = true + } + 
if err := task.Print(ctx, dockerCli, tasks, idresolver.New(client, options.noResolve), !options.noTrunc, options.quiet, format); err != nil { + return err + } + if len(notfound) != 0 { + return errors.New(strings.Join(notfound, "\n")) + } + return nil +} + +func createFilter(ctx context.Context, client client.APIClient, options psOptions) (filters.Args, []string, error) { + filter := options.filter.Value() + + serviceIDFilter := filters.NewArgs() + serviceNameFilter := filters.NewArgs() + for _, service := range options.services { + serviceIDFilter.Add("id", service) + serviceNameFilter.Add("name", service) + } + serviceByIDList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceIDFilter}) + if err != nil { + return filter, nil, err + } + serviceByNameList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceNameFilter}) + if err != nil { + return filter, nil, err + } + + var notfound []string + serviceCount := 0 +loop: + // Match services by 1. Full ID, 2. Full name, 3. ID prefix. 
An error is returned if the ID-prefix match is ambiguous + for _, service := range options.services { + for _, s := range serviceByIDList { + if s.ID == service { + filter.Add("service", s.ID) + serviceCount++ + continue loop + } + } + for _, s := range serviceByNameList { + if s.Spec.Annotations.Name == service { + filter.Add("service", s.ID) + serviceCount++ + continue loop + } + } + found := false + for _, s := range serviceByIDList { + if strings.HasPrefix(s.ID, service) { + if found { + return filter, nil, errors.New("multiple services found with provided prefix: " + service) + } + filter.Add("service", s.ID) + serviceCount++ + found = true + } + } + if !found { + notfound = append(notfound, "no such service: "+service) + } + } + if serviceCount == 0 { + return filter, nil, errors.New(strings.Join(notfound, "\n")) + } + return filter, notfound, err +} + +func updateNodeFilter(ctx context.Context, client client.APIClient, filter filters.Args) error { + if filter.Contains("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + nodeReference, err := node.Reference(ctx, client, nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", nodeReference) + } + } + return nil +} diff --git a/cli/cli/command/service/ps_test.go b/cli/cli/command/service/ps_test.go new file mode 100644 index 00000000..6459cfcc --- /dev/null +++ b/cli/cli/command/service/ps_test.go @@ -0,0 +1,135 @@ +package service + +import ( + "context" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCreateFilter(t *testing.T) { + client := &fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + 
return []swarm.Service{ + {ID: "idmatch"}, + {ID: "idprefixmatch"}, + newService("cccccccc", "namematch"), + newService("01010101", "notfoundprefix"), + }, nil + }, + } + + filter := opts.NewFilterOpt() + assert.NilError(t, filter.Set("node=somenode")) + options := psOptions{ + services: []string{"idmatch", "idprefix", "namematch", "notfound"}, + filter: filter, + } + + actual, notfound, err := createFilter(context.Background(), client, options) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(notfound, []string{"no such service: notfound"})) + + expected := filters.NewArgs( + filters.Arg("service", "idmatch"), + filters.Arg("service", "idprefixmatch"), + filters.Arg("service", "cccccccc"), + filters.Arg("node", "somenode"), + ) + assert.DeepEqual(t, expected, actual, cmpFilters) +} + +func TestCreateFilterWithAmbiguousIDPrefixError(t *testing.T) { + client := &fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + {ID: "aaaone"}, + {ID: "aaatwo"}, + }, nil + }, + } + options := psOptions{ + services: []string{"aaa"}, + filter: opts.NewFilterOpt(), + } + _, _, err := createFilter(context.Background(), client, options) + assert.Error(t, err, "multiple services found with provided prefix: aaa") +} + +func TestCreateFilterNoneFound(t *testing.T) { + client := &fakeClient{} + options := psOptions{ + services: []string{"foo", "notfound"}, + filter: opts.NewFilterOpt(), + } + _, _, err := createFilter(context.Background(), client, options) + assert.Error(t, err, "no such service: foo\nno such service: notfound") +} + +func TestRunPSWarnsOnNotFound(t *testing.T) { + client := &fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + {ID: "foo"}, + }, nil + }, + } + + cli := test.NewFakeCli(client) + options := psOptions{ + services: []string{"foo", "bar"}, + filter: opts.NewFilterOpt(), 
+ format: "{{.ID}}", + } + err := runPS(cli, options) + assert.Error(t, err, "no such service: bar") +} + +func TestRunPSQuiet(t *testing.T) { + client := &fakeClient{ + serviceListFunc: func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{{ID: "foo"}}, nil + }, + taskListFunc: func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{{ID: "sxabyp0obqokwekpun4rjo0b3"}}, nil + }, + } + + cli := test.NewFakeCli(client) + err := runPS(cli, psOptions{services: []string{"foo"}, quiet: true, filter: opts.NewFilterOpt()}) + assert.NilError(t, err) + assert.Check(t, is.Equal("sxabyp0obqokwekpun4rjo0b3\n", cli.OutBuffer().String())) +} + +func TestUpdateNodeFilter(t *testing.T) { + selfNodeID := "foofoo" + filter := filters.NewArgs( + filters.Arg("node", "one"), + filters.Arg("node", "two"), + filters.Arg("node", "self"), + ) + + client := &fakeClient{ + infoFunc: func(_ context.Context) (types.Info, error) { + return types.Info{Swarm: swarm.Info{NodeID: selfNodeID}}, nil + }, + } + + updateNodeFilter(context.Background(), client, filter) + + expected := filters.NewArgs( + filters.Arg("node", "one"), + filters.Arg("node", "two"), + filters.Arg("node", selfNodeID), + ) + assert.DeepEqual(t, expected, filter, cmpFilters) +} + +var cmpFilters = cmp.AllowUnexported(filters.Args{}) diff --git a/cli/cli/command/service/remove.go b/cli/cli/command/service/remove.go new file mode 100644 index 00000000..ee810b03 --- /dev/null +++ b/cli/cli/command/service/remove.go @@ -0,0 +1,48 @@ +package service + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + + cmd := &cobra.Command{ + Use: "rm SERVICE [SERVICE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more services", + Args: 
cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } + cmd.Flags() + + return cmd +} + +func runRemove(dockerCli command.Cli, sids []string) error { + client := dockerCli.Client() + + ctx := context.Background() + + var errs []string + for _, sid := range sids { + err := client.ServiceRemove(ctx, sid) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", sid) + } + if len(errs) > 0 { + return errors.Errorf(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/service/rollback.go b/cli/cli/command/service/rollback.go new file mode 100644 index 00000000..2196815c --- /dev/null +++ b/cli/cli/command/service/rollback.go @@ -0,0 +1,64 @@ +package service + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/spf13/cobra" +) + +func newRollbackCommand(dockerCli command.Cli) *cobra.Command { + options := newServiceOptions() + + cmd := &cobra.Command{ + Use: "rollback [OPTIONS] SERVICE", + Short: "Revert changes to a service's configuration", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRollback(dockerCli, options, args[0]) + }, + Annotations: map[string]string{"version": "1.31"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, flagQuiet, "q", false, "Suppress progress output") + addDetachFlag(flags, &options.detach) + + return cmd +} + +func runRollback(dockerCli command.Cli, options *serviceOptions, serviceID string) error { + apiClient := dockerCli.Client() + ctx := context.Background() + + service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) + if err != nil { + return err + } + + spec := &service.Spec + updateOpts := types.ServiceUpdateOptions{ + Rollback: "previous", + } + 
+ response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) + + if options.detach || versions.LessThan(apiClient.ClientVersion(), "1.29") { + return nil + } + + return waitOnService(ctx, dockerCli, serviceID, options.quiet) +} diff --git a/cli/cli/command/service/rollback_test.go b/cli/cli/command/service/rollback_test.go new file mode 100644 index 00000000..e61d1c20 --- /dev/null +++ b/cli/cli/command/service/rollback_test.go @@ -0,0 +1,104 @@ +package service + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestRollback(t *testing.T) { + testCases := []struct { + name string + args []string + serviceUpdateFunc func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + expectedDockerCliErr string + }{ + { + name: "rollback-service", + args: []string{"service-id"}, + }, + { + name: "rollback-service-with-warnings", + args: []string{"service-id"}, + serviceUpdateFunc: func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + response := types.ServiceUpdateResponse{} + + response.Warnings = []string{ + "- warning 1", + "- warning 2", + } + + return response, nil + }, + expectedDockerCliErr: "- warning 1\n- warning 2", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + serviceUpdateFunc: tc.serviceUpdateFunc, + }) + cmd := newRollbackCommand(cli) + cmd.SetArgs(tc.args) + cmd.Flags().Set("quiet", 
"true") + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(strings.TrimSpace(cli.ErrBuffer().String()), tc.expectedDockerCliErr)) + } +} + +func TestRollbackWithErrors(t *testing.T) { + testCases := []struct { + name string + args []string + serviceInspectWithRawFunc func(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + serviceUpdateFunc func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"service-id-1", "service-id-2"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "service-does-not-exists", + args: []string{"service-id"}, + serviceInspectWithRawFunc: func(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return swarm.Service{}, []byte{}, fmt.Errorf("no such services: %s", serviceID) + }, + expectedError: "no such services: service-id", + }, + { + name: "service-update-failed", + args: []string{"service-id"}, + serviceUpdateFunc: func(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + return types.ServiceUpdateResponse{}, fmt.Errorf("no such services: %s", serviceID) + }, + expectedError: "no such services: service-id", + }, + } + + for _, tc := range testCases { + cmd := newRollbackCommand( + test.NewFakeCli(&fakeClient{ + serviceInspectWithRawFunc: tc.serviceInspectWithRawFunc, + serviceUpdateFunc: tc.serviceUpdateFunc, + })) + cmd.SetArgs(tc.args) + cmd.Flags().Set("quiet", "true") + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} diff --git 
a/cli/cli/command/service/scale.go b/cli/cli/command/service/scale.go new file mode 100644 index 00000000..5b656a7f --- /dev/null +++ b/cli/cli/command/service/scale.go @@ -0,0 +1,122 @@ +package service + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type scaleOptions struct { + detach bool +} + +func newScaleCommand(dockerCli command.Cli) *cobra.Command { + options := &scaleOptions{} + + cmd := &cobra.Command{ + Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", + Short: "Scale one or multiple replicated services", + Args: scaleArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runScale(dockerCli, options, args) + }, + } + + flags := cmd.Flags() + addDetachFlag(flags, &options.detach) + return cmd +} + +func scaleArgs(cmd *cobra.Command, args []string) error { + if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { + return err + } + for _, arg := range args { + if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 { + return errors.Errorf( + "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", + arg, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } + } + return nil +} + +func runScale(dockerCli command.Cli, options *scaleOptions, args []string) error { + var errs []string + var serviceIDs []string + ctx := context.Background() + + for _, arg := range args { + parts := strings.SplitN(arg, "=", 2) + serviceID, scaleStr := parts[0], parts[1] + + // validate input arg scale number + scale, err := strconv.ParseUint(scaleStr, 10, 64) + if err != nil { + errs = append(errs, fmt.Sprintf("%s: invalid replicas value %s: %v", serviceID, scaleStr, err)) + continue + } + + if err := runServiceScale(ctx, dockerCli, serviceID, scale); err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", serviceID, 
err)) + } else { + serviceIDs = append(serviceIDs, serviceID) + } + + } + + if len(serviceIDs) > 0 { + if !options.detach && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.29") { + for _, serviceID := range serviceIDs { + if err := waitOnService(ctx, dockerCli, serviceID, false); err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", serviceID, err)) + } + } + } + } + + if len(errs) == 0 { + return nil + } + return errors.Errorf(strings.Join(errs, "\n")) +} + +func runServiceScale(ctx context.Context, dockerCli command.Cli, serviceID string, scale uint64) error { + client := dockerCli.Client() + + service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) + if err != nil { + return err + } + + serviceMode := &service.Spec.Mode + if serviceMode.Replicated == nil { + return errors.Errorf("scale can only be used with replicated mode") + } + + serviceMode.Replicated.Replicas = &scale + + response, err := client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s scaled to %d\n", serviceID, scale) + return nil +} diff --git a/cli/cli/command/service/testdata/service-context-write-raw.golden b/cli/cli/command/service/testdata/service-context-write-raw.golden new file mode 100644 index 00000000..d62b9a24 --- /dev/null +++ b/cli/cli/command/service/testdata/service-context-write-raw.golden @@ -0,0 +1,14 @@ +id: id_baz +name: baz +mode: global +replicas: 2/4 +image: +ports: *:80->8080/tcp + +id: id_bar +name: bar +mode: replicated +replicas: 2/4 +image: +ports: *:80->8080/tcp + diff --git a/cli/cli/command/service/testdata/service-list-sort.golden b/cli/cli/command/service/testdata/service-list-sort.golden new file mode 100644 index 00000000..3b0cb214 --- /dev/null +++ 
b/cli/cli/command/service/testdata/service-list-sort.golden @@ -0,0 +1,3 @@ +service-1-foo +service-2-foo +service-10-foo diff --git a/cli/cli/command/service/trust.go b/cli/cli/command/service/trust.go new file mode 100644 index 00000000..b7453ccb --- /dev/null +++ b/cli/cli/command/service/trust.go @@ -0,0 +1,87 @@ +package service + +import ( + "context" + "encoding/hex" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary/tuf/data" +) + +func resolveServiceImageDigestContentTrust(dockerCli command.Cli, service *swarm.ServiceSpec) error { + if !dockerCli.ContentTrustEnabled() { + // When not using content trust, digest resolution happens later when + // contacting the registry to retrieve image information. 
+ return nil + } + + ref, err := reference.ParseAnyReference(service.TaskTemplate.ContainerSpec.Image) + if err != nil { + return errors.Wrapf(err, "invalid reference %s", service.TaskTemplate.ContainerSpec.Image) + } + + // If reference does not have digest (is not canonical nor image id) + if _, ok := ref.(reference.Digested); !ok { + namedRef, ok := ref.(reference.Named) + if !ok { + return errors.New("failed to resolve image digest using content trust: reference is not named") + } + namedRef = reference.TagNameOnly(namedRef) + taggedRef, ok := namedRef.(reference.NamedTagged) + if !ok { + return errors.New("failed to resolve image digest using content trust: reference is not tagged") + } + + resolvedImage, err := trustedResolveDigest(context.Background(), dockerCli, taggedRef) + if err != nil { + return errors.Wrap(err, "failed to resolve image digest using content trust") + } + resolvedFamiliar := reference.FamiliarString(resolvedImage) + logrus.Debugf("resolved image tag to %s using content trust", resolvedFamiliar) + service.TaskTemplate.ContainerSpec.Image = resolvedFamiliar + } + + return nil +} + +func trustedResolveDigest(ctx context.Context, cli command.Cli, ref reference.NamedTagged) (reference.Canonical, error) { + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return nil, err + } + + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli.In(), cli.Out(), command.UserAgent(), repoInfo, &authConfig, "pull") + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to trust repository") + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.Name.Name(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && 
t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", reference.FamiliarString(ref))) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + h, ok := t.Hashes["sha256"] + if !ok { + return nil, errors.New("no valid hash, expecting sha256") + } + + dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h)) + + // Allow returning canonical reference with tag + return reference.WithDigest(ref, dgst) +} diff --git a/cli/cli/command/service/update.go b/cli/cli/command/service/update.go new file mode 100644 index 00000000..c95dcf50 --- /dev/null +++ b/cli/cli/command/service/update.go @@ -0,0 +1,1344 @@ +package service + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/swarmkit/api/defaults" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli command.Cli) *cobra.Command { + options := newServiceOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] SERVICE", + Short: "Update a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), options, args[0]) + }, + } + + flags := cmd.Flags() + flags.String("image", "", "Service image tag") + flags.Var(&ShlexOpt{}, "args", "Service command args") + flags.Bool(flagRollback, false, "Rollback to previous specification") + flags.SetAnnotation(flagRollback, "version", []string{"1.25"}) + flags.Bool("force", false, "Force update even if no changes require it") + flags.SetAnnotation("force", 
"version", []string{"1.25"}) + addServiceFlags(flags, options, nil) + + flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") + flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container") + flags.SetAnnotation(flagGroupRemove, "version", []string{"1.25"}) + flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") + flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") + flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") + // flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port") + flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port") + flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") + flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server") + flags.SetAnnotation(flagDNSRemove, "version", []string{"1.25"}) + flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option") + flags.SetAnnotation(flagDNSOptionRemove, "version", []string{"1.25"}) + flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain") + flags.SetAnnotation(flagDNSSearchRemove, "version", []string{"1.25"}) + flags.Var(newListOptsVar(), flagHostRemove, "Remove a custom host-to-IP mapping (host:ip)") + flags.SetAnnotation(flagHostRemove, "version", []string{"1.25"}) + flags.Var(&options.labels, flagLabelAdd, "Add or update a service label") + flags.Var(&options.containerLabels, flagContainerLabelAdd, "Add or update a container label") + flags.Var(&options.env, flagEnvAdd, "Add or update an environment variable") + flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret") + flags.SetAnnotation(flagSecretRemove, "version", []string{"1.25"}) + flags.Var(&options.secrets, flagSecretAdd, "Add or update a secret on a service") + 
flags.SetAnnotation(flagSecretAdd, "version", []string{"1.25"}) + + flags.Var(newListOptsVar(), flagConfigRemove, "Remove a configuration file") + flags.SetAnnotation(flagConfigRemove, "version", []string{"1.30"}) + flags.Var(&options.configs, flagConfigAdd, "Add or update a config file on a service") + flags.SetAnnotation(flagConfigAdd, "version", []string{"1.30"}) + + flags.Var(&options.mounts, flagMountAdd, "Add or update a mount on a service") + flags.Var(&options.constraints, flagConstraintAdd, "Add or update a placement constraint") + flags.Var(&options.placementPrefs, flagPlacementPrefAdd, "Add a placement preference") + flags.SetAnnotation(flagPlacementPrefAdd, "version", []string{"1.28"}) + flags.Var(&placementPrefOpts{}, flagPlacementPrefRemove, "Remove a placement preference") + flags.SetAnnotation(flagPlacementPrefRemove, "version", []string{"1.28"}) + flags.Var(&options.networks, flagNetworkAdd, "Add a network") + flags.SetAnnotation(flagNetworkAdd, "version", []string{"1.29"}) + flags.Var(newListOptsVar(), flagNetworkRemove, "Remove a network") + flags.SetAnnotation(flagNetworkRemove, "version", []string{"1.29"}) + flags.Var(&options.endpoint.publishPorts, flagPublishAdd, "Add or update a published port") + flags.Var(&options.groups, flagGroupAdd, "Add an additional supplementary user group to the container") + flags.SetAnnotation(flagGroupAdd, "version", []string{"1.25"}) + flags.Var(&options.dns, flagDNSAdd, "Add or update a custom DNS server") + flags.SetAnnotation(flagDNSAdd, "version", []string{"1.25"}) + flags.Var(&options.dnsOption, flagDNSOptionAdd, "Add or update a DNS option") + flags.SetAnnotation(flagDNSOptionAdd, "version", []string{"1.25"}) + flags.Var(&options.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain") + flags.SetAnnotation(flagDNSSearchAdd, "version", []string{"1.25"}) + flags.Var(&options.hosts, flagHostAdd, "Add a custom host-to-IP mapping (host:ip)") + flags.SetAnnotation(flagHostAdd, "version", 
[]string{"1.25"}) + flags.BoolVar(&options.init, flagInit, false, "Use an init inside each service container to forward signals and reap processes") + flags.SetAnnotation(flagInit, "version", []string{"1.37"}) + flags.Var(&options.sysctls, flagSysCtlAdd, "Add or update a Sysctl option") + flags.SetAnnotation(flagSysCtlAdd, "version", []string{"1.40"}) + flags.Var(newListOptsVar(), flagSysCtlRemove, "Remove a Sysctl option") + flags.SetAnnotation(flagSysCtlRemove, "version", []string{"1.40"}) + + // Add needs parsing, Remove only needs the key + flags.Var(newListOptsVar(), flagGenericResourcesRemove, "Remove a Generic resource") + flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) + flags.Var(newListOptsVarWithValidator(ValidateSingleGenericResource), flagGenericResourcesAdd, "Add a Generic resource") + flags.SetAnnotation(flagHostAdd, "version", []string{"1.32"}) + + return cmd +} + +func newListOptsVar() *opts.ListOpts { + return opts.NewListOptsRef(&[]string{}, nil) +} + +func newListOptsVarWithValidator(validator opts.ValidatorFctType) *opts.ListOpts { + return opts.NewListOptsRef(&[]string{}, validator) +} + +// nolint: gocyclo +func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, options *serviceOptions, serviceID string) error { + apiClient := dockerCli.Client() + ctx := context.Background() + + service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) + if err != nil { + return err + } + + rollback, err := flags.GetBool(flagRollback) + if err != nil { + return err + } + + // There are two ways to do user-requested rollback. The old way is + // client-side, but with a sufficiently recent daemon we prefer + // server-side, because it will honor the rollback parameters. + var ( + clientSideRollback bool + serverSideRollback bool + ) + + spec := &service.Spec + if rollback { + // Rollback can't be combined with other flags. 
+ otherFlagsPassed := false + flags.VisitAll(func(f *pflag.Flag) { + if f.Name == flagRollback || f.Name == flagDetach || f.Name == flagQuiet { + return + } + if flags.Changed(f.Name) { + otherFlagsPassed = true + } + }) + if otherFlagsPassed { + return errors.New("other flags may not be combined with --rollback") + } + + if versions.LessThan(apiClient.ClientVersion(), "1.28") { + clientSideRollback = true + spec = service.PreviousSpec + if spec == nil { + return errors.Errorf("service does not have a previous specification to roll back to") + } + } else { + serverSideRollback = true + } + } + + updateOpts := types.ServiceUpdateOptions{} + if serverSideRollback { + updateOpts.Rollback = "previous" + } + + err = updateService(ctx, apiClient, flags, spec) + if err != nil { + return err + } + + if flags.Changed("image") { + if err := resolveServiceImageDigestContentTrust(dockerCli, spec); err != nil { + return err + } + if !options.noResolveImage && versions.GreaterThanOrEqualTo(apiClient.ClientVersion(), "1.30") { + updateOpts.QueryRegistry = true + } + } + + updatedSecrets, err := getUpdatedSecrets(apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets) + if err != nil { + return err + } + + spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets + + updatedConfigs, err := getUpdatedConfigs(apiClient, flags, spec.TaskTemplate.ContainerSpec) + if err != nil { + return err + } + + spec.TaskTemplate.ContainerSpec.Configs = updatedConfigs + + // set the credential spec value after get the updated configs, because we + // might need the updated configs to set the correct value of the + // CredentialSpec. 
+ updateCredSpecConfig(flags, spec.TaskTemplate.ContainerSpec) + + // only send auth if flag was set + sendAuth, err := flags.GetBool(flagRegistryAuth) + if err != nil { + return err + } + if sendAuth { + // Retrieve encoded auth token from the image reference + // This would be the old image if it didn't change in this update + image := spec.TaskTemplate.ContainerSpec.Image + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + updateOpts.EncodedRegistryAuth = encodedAuth + } else if clientSideRollback { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec + } else { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec + } + + response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) + + if options.detach || versions.LessThan(apiClient.ClientVersion(), "1.29") { + return nil + } + + return waitOnService(ctx, dockerCli, serviceID, options.quiet) +} + +// nolint: gocyclo +func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { + updateBoolPtr := func(flag string, field **bool) { + if flags.Changed(flag) { + b, _ := flags.GetBool(flag) + *field = &b + } + } + updateString := func(flag string, field *string) { + if flags.Changed(flag) { + *field, _ = flags.GetString(flag) + } + } + + updateInt64Value := func(flag string, field *int64) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(int64Value).Value() + } + } + + updateFloatValue := func(flag string, field *float32) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(*floatValue).Value() + } + } + + updateDuration := func(flag string, field *time.Duration) { + if flags.Changed(flag) { + *field, _ = flags.GetDuration(flag) 
+ } + } + + updateDurationOpt := func(flag string, field **time.Duration) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*opts.DurationOpt).Value() + *field = &val + } + } + + updateUint64 := func(flag string, field *uint64) { + if flags.Changed(flag) { + *field, _ = flags.GetUint64(flag) + } + } + + updateUint64Opt := func(flag string, field **uint64) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() + *field = &val + } + } + + updateIsolation := func(flag string, field *container.Isolation) error { + if flags.Changed(flag) { + val, _ := flags.GetString(flag) + *field = container.Isolation(val) + } + return nil + } + + cspec := spec.TaskTemplate.ContainerSpec + task := &spec.TaskTemplate + + taskResources := func() *swarm.ResourceRequirements { + if task.Resources == nil { + task.Resources = &swarm.ResourceRequirements{} + } + if task.Resources.Limits == nil { + task.Resources.Limits = &swarm.Resources{} + } + if task.Resources.Reservations == nil { + task.Resources.Reservations = &swarm.Resources{} + } + return task.Resources + } + + updateLabels(flags, &spec.Labels) + updateContainerLabels(flags, &cspec.Labels) + updateString("image", &cspec.Image) + updateStringToSlice(flags, "args", &cspec.Args) + updateStringToSlice(flags, flagEntrypoint, &cspec.Command) + updateEnvironment(flags, &cspec.Env) + updateString(flagWorkdir, &cspec.Dir) + updateString(flagUser, &cspec.User) + updateString(flagHostname, &cspec.Hostname) + updateBoolPtr(flagInit, &cspec.Init) + if err := updateIsolation(flagIsolation, &cspec.Isolation); err != nil { + return err + } + if err := updateMounts(flags, &cspec.Mounts); err != nil { + return err + } + + updateSysCtls(flags, &task.ContainerSpec.Sysctls) + + if anyChanged(flags, flagLimitCPU, flagLimitMemory) { + taskResources().Limits = spec.TaskTemplate.Resources.Limits + updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) + updateInt64Value(flagLimitMemory, 
&task.Resources.Limits.MemoryBytes) + } + + if anyChanged(flags, flagReserveCPU, flagReserveMemory) { + taskResources().Reservations = spec.TaskTemplate.Resources.Reservations + updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) + updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) + } + + if err := addGenericResources(flags, task); err != nil { + return err + } + + if err := removeGenericResources(flags, task); err != nil { + return err + } + + updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) + + if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { + if task.RestartPolicy == nil { + task.RestartPolicy = defaultRestartPolicy() + } + if flags.Changed(flagRestartCondition) { + value, _ := flags.GetString(flagRestartCondition) + task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) + } + updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) + updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) + updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) + } + + if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { + if task.Placement == nil { + task.Placement = &swarm.Placement{} + } + updatePlacementConstraints(flags, task.Placement) + } + + if anyChanged(flags, flagPlacementPrefAdd, flagPlacementPrefRemove) { + if task.Placement == nil { + task.Placement = &swarm.Placement{} + } + updatePlacementPreferences(flags, task.Placement) + } + + if anyChanged(flags, flagNetworkAdd, flagNetworkRemove) { + if err := updateNetworks(ctx, apiClient, flags, spec); err != nil { + return err + } + } + + if err := updateReplicas(flags, &spec.Mode); err != nil { + return err + } + + if anyChanged(flags, flagMaxReplicas) { + updateUint64(flagMaxReplicas, &task.Placement.MaxReplicas) + } + + if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, 
flagUpdateMaxFailureRatio, flagUpdateOrder) { + if spec.UpdateConfig == nil { + spec.UpdateConfig = updateConfigFromDefaults(defaults.Service.Update) + } + updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) + updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) + updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor) + updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) + updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio) + updateString(flagUpdateOrder, &spec.UpdateConfig.Order) + } + + if anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) { + if spec.RollbackConfig == nil { + spec.RollbackConfig = updateConfigFromDefaults(defaults.Service.Rollback) + } + updateUint64(flagRollbackParallelism, &spec.RollbackConfig.Parallelism) + updateDuration(flagRollbackDelay, &spec.RollbackConfig.Delay) + updateDuration(flagRollbackMonitor, &spec.RollbackConfig.Monitor) + updateString(flagRollbackFailureAction, &spec.RollbackConfig.FailureAction) + updateFloatValue(flagRollbackMaxFailureRatio, &spec.RollbackConfig.MaxFailureRatio) + updateString(flagRollbackOrder, &spec.RollbackConfig.Order) + } + + if flags.Changed(flagEndpointMode) { + value, _ := flags.GetString(flagEndpointMode) + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + spec.EndpointSpec.Mode = swarm.ResolutionMode(value) + } + + if anyChanged(flags, flagGroupAdd, flagGroupRemove) { + if err := updateGroups(flags, &cspec.Groups); err != nil { + return err + } + } + + if anyChanged(flags, flagPublishAdd, flagPublishRemove) { + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { + return err + } + } + + if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, 
flagDNSSearchRemove) { + if cspec.DNSConfig == nil { + cspec.DNSConfig = &swarm.DNSConfig{} + } + if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil { + return err + } + } + + if anyChanged(flags, flagHostAdd, flagHostRemove) { + if err := updateHosts(flags, &cspec.Hosts); err != nil { + return err + } + } + + if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { + return err + } + + force, err := flags.GetBool("force") + if err != nil { + return err + } + + if force { + spec.TaskTemplate.ForceUpdate++ + } + + if err := updateHealthcheck(flags, cspec); err != nil { + return err + } + + if flags.Changed(flagTTY) { + tty, err := flags.GetBool(flagTTY) + if err != nil { + return err + } + cspec.TTY = tty + } + + if flags.Changed(flagReadOnly) { + readOnly, err := flags.GetBool(flagReadOnly) + if err != nil { + return err + } + cspec.ReadOnly = readOnly + } + + updateString(flagStopSignal, &cspec.StopSignal) + + return nil +} + +func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) { + if !flags.Changed(flag) { + return + } + + *field = flags.Lookup(flag).Value.(*ShlexOpt).Value() +} + +func anyChanged(flags *pflag.FlagSet, fields ...string) bool { + for _, flag := range fields { + if flags.Changed(flag) { + return true + } + } + return false +} + +func addGenericResources(flags *pflag.FlagSet, spec *swarm.TaskSpec) error { + if !flags.Changed(flagGenericResourcesAdd) { + return nil + } + + if spec.Resources == nil { + spec.Resources = &swarm.ResourceRequirements{} + } + + if spec.Resources.Reservations == nil { + spec.Resources.Reservations = &swarm.Resources{} + } + + values := flags.Lookup(flagGenericResourcesAdd).Value.(*opts.ListOpts).GetAll() + generic, err := ParseGenericResources(values) + if err != nil { + return err + } + + m, err := buildGenericResourceMap(spec.Resources.Reservations.GenericResources) + if err != nil { + return err + } + + for _, toAddRes := range generic { + 
m[toAddRes.DiscreteResourceSpec.Kind] = toAddRes + } + + spec.Resources.Reservations.GenericResources = buildGenericResourceList(m) + + return nil +} + +func removeGenericResources(flags *pflag.FlagSet, spec *swarm.TaskSpec) error { + // Can only be Discrete Resources + if !flags.Changed(flagGenericResourcesRemove) { + return nil + } + + if spec.Resources == nil { + spec.Resources = &swarm.ResourceRequirements{} + } + + if spec.Resources.Reservations == nil { + spec.Resources.Reservations = &swarm.Resources{} + } + + values := flags.Lookup(flagGenericResourcesRemove).Value.(*opts.ListOpts).GetAll() + + m, err := buildGenericResourceMap(spec.Resources.Reservations.GenericResources) + if err != nil { + return err + } + + for _, toRemoveRes := range values { + if _, ok := m[toRemoveRes]; !ok { + return fmt.Errorf("could not find generic-resource `%s` to remove it", toRemoveRes) + } + + delete(m, toRemoveRes) + } + + spec.Resources.Reservations.GenericResources = buildGenericResourceList(m) + return nil +} + +func updatePlacementConstraints(flags *pflag.FlagSet, placement *swarm.Placement) { + if flags.Changed(flagConstraintAdd) { + values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll() + placement.Constraints = append(placement.Constraints, values...) + } + toRemove := buildToRemoveSet(flags, flagConstraintRemove) + + newConstraints := []string{} + for _, constraint := range placement.Constraints { + if _, exists := toRemove[constraint]; !exists { + newConstraints = append(newConstraints, constraint) + } + } + // Sort so that result is predictable. 
+ sort.Strings(newConstraints) + + placement.Constraints = newConstraints +} + +func updatePlacementPreferences(flags *pflag.FlagSet, placement *swarm.Placement) { + var newPrefs []swarm.PlacementPreference + + if flags.Changed(flagPlacementPrefRemove) { + for _, existing := range placement.Preferences { + removed := false + for _, removal := range flags.Lookup(flagPlacementPrefRemove).Value.(*placementPrefOpts).prefs { + if removal.Spread != nil && existing.Spread != nil && removal.Spread.SpreadDescriptor == existing.Spread.SpreadDescriptor { + removed = true + break + } + } + if !removed { + newPrefs = append(newPrefs, existing) + } + } + } else { + newPrefs = placement.Preferences + } + + if flags.Changed(flagPlacementPrefAdd) { + newPrefs = append(newPrefs, + flags.Lookup(flagPlacementPrefAdd).Value.(*placementPrefOpts).prefs...) + } + + placement.Preferences = newPrefs +} + +func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagContainerLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range opts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagContainerLabelRemove) { + toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range opts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagLabelRemove) { + toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } 
+} + +func updateSysCtls(flags *pflag.FlagSet, field *map[string]string) { + if *field != nil && flags.Changed(flagSysCtlRemove) { + values := flags.Lookup(flagSysCtlRemove).Value.(*opts.ListOpts).GetAll() + for key := range opts.ConvertKVStringsToMap(values) { + delete(*field, key) + } + } + if flags.Changed(flagSysCtlAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagSysCtlAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range opts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } +} + +func updateEnvironment(flags *pflag.FlagSet, field *[]string) { + if flags.Changed(flagEnvAdd) { + envSet := map[string]string{} + for _, v := range *field { + envSet[envKey(v)] = v + } + + value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) + for _, v := range value.GetAll() { + envSet[envKey(v)] = v + } + + *field = []string{} + for _, v := range envSet { + *field = append(*field, v) + } + } + + toRemove := buildToRemoveSet(flags, flagEnvRemove) + *field = removeItems(*field, toRemove, envKey) +} + +func getUpdatedSecrets(apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) { + newSecrets := []*swarm.SecretReference{} + + toRemove := buildToRemoveSet(flags, flagSecretRemove) + for _, secret := range secrets { + if _, exists := toRemove[secret.SecretName]; !exists { + newSecrets = append(newSecrets, secret) + } + } + + if flags.Changed(flagSecretAdd) { + values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value() + + addSecrets, err := ParseSecrets(apiClient, values) + if err != nil { + return nil, err + } + newSecrets = append(newSecrets, addSecrets...) 
+ } + + return newSecrets, nil +} + +func getUpdatedConfigs(apiClient client.ConfigAPIClient, flags *pflag.FlagSet, spec *swarm.ContainerSpec) ([]*swarm.ConfigReference, error) { + var ( + // credSpecConfigName stores the name of the config specified by the + // credential-spec flag. if a Runtime target Config with this name is + // already in the containerSpec, then this value will be set to + // emptystring in the removeConfigs stage. otherwise, a ConfigReference + // will be created to pass to ParseConfigs to get the ConfigID. + credSpecConfigName string + // credSpecConfigID stores the ID of the credential spec config if that + // config is being carried over from the old set of references + credSpecConfigID string + ) + + if flags.Changed(flagCredentialSpec) { + credSpec := flags.Lookup(flagCredentialSpec).Value.(*credentialSpecOpt).Value() + credSpecConfigName = credSpec.Config + } else { + // if the credential spec flag has not changed, then check if there + // already is a credentialSpec. if there is one, and it's for a Config, + // then it's from the old object, and its value is the config ID. we + // need this so we don't remove the config if the credential spec is + // not being updated. + if spec.Privileges != nil && spec.Privileges.CredentialSpec != nil { + if config := spec.Privileges.CredentialSpec.Config; config != "" { + credSpecConfigID = config + } + } + } + + newConfigs := removeConfigs(flags, spec, credSpecConfigName, credSpecConfigID) + + // resolveConfigs is a slice of any new configs that need to have the ID + // resolved + resolveConfigs := []*swarm.ConfigReference{} + + if flags.Changed(flagConfigAdd) { + resolveConfigs = append(resolveConfigs, flags.Lookup(flagConfigAdd).Value.(*opts.ConfigOpt).Value()...) + } + + // if credSpecConfigNameis non-empty at this point, it means its a new + // config, and we need to resolve its ID accordingly. 
+ if credSpecConfigName != "" { + resolveConfigs = append(resolveConfigs, &swarm.ConfigReference{ + ConfigName: credSpecConfigName, + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }) + } + + if len(resolveConfigs) > 0 { + addConfigs, err := ParseConfigs(apiClient, resolveConfigs) + if err != nil { + return nil, err + } + newConfigs = append(newConfigs, addConfigs...) + } + + return newConfigs, nil +} + +// removeConfigs figures out which configs in the existing spec should be kept +// after the update. +func removeConfigs(flags *pflag.FlagSet, spec *swarm.ContainerSpec, credSpecName, credSpecID string) []*swarm.ConfigReference { + keepConfigs := []*swarm.ConfigReference{} + + toRemove := buildToRemoveSet(flags, flagConfigRemove) + // all configs in spec.Configs should have both a Name and ID, because + // they come from an already-accepted spec. + for _, config := range spec.Configs { + // if the config is a Runtime target, make sure it's still in use right + // now, the only use for Runtime target is credential specs. if, in + // the future, more uses are added, then this check will need to be + // made more intelligent. + if config.Runtime != nil { + // if we're carrying over a credential spec explicitly (because the + // user passed --credential-spec with the same config name) then we + // should match on credSpecName. if we're carrying over a + // credential spec implicitly (because the user did not pass any + // --credential-spec flag) then we should match on credSpecID. in + // either case, we're keeping the config that already exists. + if config.ConfigName == credSpecName || config.ConfigID == credSpecID { + keepConfigs = append(keepConfigs, config) + } + // continue the loop, to skip the part where we check if the config + // is in toRemove. 
+ continue + } + + if _, exists := toRemove[config.ConfigName]; !exists { + keepConfigs = append(keepConfigs, config) + } + } + + return keepConfigs +} + +func envKey(value string) string { + kv := strings.SplitN(value, "=", 2) + return kv[0] +} + +func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { + var empty struct{} + toRemove := make(map[string]struct{}) + + if !flags.Changed(flag) { + return toRemove + } + + toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() + for _, key := range toRemoveSlice { + toRemove[key] = empty + } + return toRemove +} + +func removeItems( + seq []string, + toRemove map[string]struct{}, + keyFunc func(string) string, +) []string { + newSeq := []string{} + for _, item := range seq { + if _, exists := toRemove[keyFunc(item)]; !exists { + newSeq = append(newSeq, item) + } + } + return newSeq +} + +func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error { + mountsByTarget := map[string]mounttypes.Mount{} + + if flags.Changed(flagMountAdd) { + values := flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value() + for _, mount := range values { + if _, ok := mountsByTarget[mount.Target]; ok { + return errors.Errorf("duplicate mount target") + } + mountsByTarget[mount.Target] = mount + } + } + + // Add old list of mount points minus updated one. 
+ for _, mount := range *mounts { + if _, ok := mountsByTarget[mount.Target]; !ok { + mountsByTarget[mount.Target] = mount + } + } + + newMounts := []mounttypes.Mount{} + + toRemove := buildToRemoveSet(flags, flagMountRemove) + + for _, mount := range mountsByTarget { + if _, exists := toRemove[mount.Target]; !exists { + newMounts = append(newMounts, mount) + } + } + sort.Slice(newMounts, func(i, j int) bool { + a, b := newMounts[i], newMounts[j] + + if a.Source == b.Source { + return a.Target < b.Target + } + + return a.Source < b.Source + }) + *mounts = newMounts + return nil +} + +func updateGroups(flags *pflag.FlagSet, groups *[]string) error { + if flags.Changed(flagGroupAdd) { + values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll() + *groups = append(*groups, values...) + } + toRemove := buildToRemoveSet(flags, flagGroupRemove) + + newGroups := []string{} + for _, group := range *groups { + if _, exists := toRemove[group]; !exists { + newGroups = append(newGroups, group) + } + } + // Sort so that result is predictable. + sort.Strings(newGroups) + + *groups = newGroups + return nil +} + +func removeDuplicates(entries []string) []string { + hit := map[string]bool{} + newEntries := []string{} + for _, v := range entries { + if !hit[v] { + newEntries = append(newEntries, v) + hit[v] = true + } + } + return newEntries +} + +func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error { + newConfig := &swarm.DNSConfig{} + + nameservers := (*config).Nameservers + if flags.Changed(flagDNSAdd) { + values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll() + nameservers = append(nameservers, values...) + } + nameservers = removeDuplicates(nameservers) + toRemove := buildToRemoveSet(flags, flagDNSRemove) + for _, nameserver := range nameservers { + if _, exists := toRemove[nameserver]; !exists { + newConfig.Nameservers = append(newConfig.Nameservers, nameserver) + + } + } + // Sort so that result is predictable. 
+ sort.Strings(newConfig.Nameservers) + + search := (*config).Search + if flags.Changed(flagDNSSearchAdd) { + values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll() + search = append(search, values...) + } + search = removeDuplicates(search) + toRemove = buildToRemoveSet(flags, flagDNSSearchRemove) + for _, entry := range search { + if _, exists := toRemove[entry]; !exists { + newConfig.Search = append(newConfig.Search, entry) + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Search) + + options := (*config).Options + if flags.Changed(flagDNSOptionAdd) { + values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll() + options = append(options, values...) + } + options = removeDuplicates(options) + toRemove = buildToRemoveSet(flags, flagDNSOptionRemove) + for _, option := range options { + if _, exists := toRemove[option]; !exists { + newConfig.Options = append(newConfig.Options, option) + } + } + // Sort so that result is predictable. 
+ sort.Strings(newConfig.Options) + + *config = newConfig + return nil +} + +func portConfigToString(portConfig *swarm.PortConfig) string { + protocol := portConfig.Protocol + mode := portConfig.PublishMode + return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode) +} + +func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error { + // The key of the map is `port/protocol`, e.g., `80/tcp` + portSet := map[string]swarm.PortConfig{} + + // Build the current list of portConfig + for _, entry := range *portConfig { + if _, ok := portSet[portConfigToString(&entry)]; !ok { + portSet[portConfigToString(&entry)] = entry + } + } + + newPorts := []swarm.PortConfig{} + + // Clean current ports + toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value() +portLoop: + for _, port := range portSet { + for _, pConfig := range toRemove { + if equalProtocol(port.Protocol, pConfig.Protocol) && + port.TargetPort == pConfig.TargetPort && + equalPublishMode(port.PublishMode, pConfig.PublishMode) { + continue portLoop + } + } + + newPorts = append(newPorts, port) + } + + // Check to see if there are any conflict in flags. 
+ if flags.Changed(flagPublishAdd) { + ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value() + + for _, port := range ports { + if _, ok := portSet[portConfigToString(&port)]; ok { + continue + } + //portSet[portConfigToString(&port)] = port + newPorts = append(newPorts, port) + } + } + + // Sort the PortConfig to avoid unnecessary updates + sort.Slice(newPorts, func(i, j int) bool { + // We convert PortConfig into `port/protocol`, e.g., `80/tcp` + // In updatePorts we already filter out with map so there is duplicate entries + return portConfigToString(&newPorts[i]) < portConfigToString(&newPorts[j]) + }) + *portConfig = newPorts + return nil +} + +func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool { + return prot1 == prot2 || + (prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) || + (prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP) +} + +func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool { + return mode1 == mode2 || + (mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) || + (mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress) +} + +func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { + if !flags.Changed(flagReplicas) { + return nil + } + + if serviceMode == nil || serviceMode.Replicated == nil { + return errors.Errorf("replicas can only be used with replicated mode") + } + serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() + return nil +} + +type hostMapping struct { + IPAddr string + Host string +} + +// updateHosts performs a diff between existing host entries, entries to be +// removed, and entries to be added. Host entries preserve the order in which they +// were added, as the specification mentions that in case multiple entries for a +// host exist, the first entry should be used (by default). 
+// +// Note that, even though unsupported by the the CLI, the service specs format +// allow entries with both a _canonical_ hostname, and one or more aliases +// in an entry (IP-address canonical_hostname [alias ...]) +// +// Entries can be removed by either a specific `:` mapping, +// or by `` alone: +// +// - If both IP-address and host-name is provided, the hostname is removed only +// from entries that match the given IP-address. +// - If only a host-name is provided, the hostname is removed from any entry it +// is part of (either as canonical host-name, or as alias). +// - If, after removing the host-name from an entry, no host-names remain in +// the entry, the entry itself is removed. +// +// For example, the list of host-entries before processing could look like this: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host1 host2 host4", +// "127.0.0.1 host1 host4", +// "127.0.0.3 host1", +// "127.0.0.1 host1", +// } +// +// Removing `host1` removes every occurrence: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host2 host4", +// "127.0.0.1 host4", +// } +// +// Removing `host1:127.0.0.1` on the other hand, only remove the host if the +// IP-address matches: +// +// hosts = &[]string{ +// "127.0.0.2 host3 host1 host2 host4", +// "127.0.0.1 host4", +// "127.0.0.3 host1", +// } +func updateHosts(flags *pflag.FlagSet, hosts *[]string) error { + var toRemove []hostMapping + if flags.Changed(flagHostRemove) { + extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() + for _, entry := range extraHostsToRemove { + v := strings.SplitN(entry, ":", 2) + if len(v) > 1 { + toRemove = append(toRemove, hostMapping{IPAddr: v[1], Host: v[0]}) + } else { + toRemove = append(toRemove, hostMapping{Host: v[0]}) + } + } + } + + var newHosts []string + for _, entry := range *hosts { + // Since this is in SwarmKit format, we need to find the key, which is canonical_hostname of: + // IP_address canonical_hostname [aliases...] 
+ parts := strings.Fields(entry) + if len(parts) == 0 { + continue + } + ip := parts[0] + hostNames := parts[1:] + for _, rm := range toRemove { + if rm.IPAddr != "" && rm.IPAddr != ip { + continue + } + for i, h := range hostNames { + if h == rm.Host { + hostNames = append(hostNames[:i], hostNames[i+1:]...) + } + } + } + if len(hostNames) > 0 { + newHosts = append(newHosts, fmt.Sprintf("%s %s", ip, strings.Join(hostNames, " "))) + } + } + + // Append new hosts (in SwarmKit format) + if flags.Changed(flagHostAdd) { + values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) + newHosts = append(newHosts, values...) + } + *hosts = removeDuplicates(newHosts) + return nil +} + +// updateLogDriver updates the log driver only if the log driver flag is set. +// All options will be replaced with those provided on the command line. +func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { + if !flags.Changed(flagLogDriver) { + return nil + } + + name, err := flags.GetString(flagLogDriver) + if err != nil { + return err + } + + if name == "" { + return nil + } + + taskTemplate.LogDriver = &swarm.Driver{ + Name: name, + Options: opts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), + } + + return nil +} + +func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error { + if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) { + return nil + } + if containerSpec.Healthcheck == nil { + containerSpec.Healthcheck = &container.HealthConfig{} + } + noHealthcheck, err := flags.GetBool(flagNoHealthcheck) + if err != nil { + return err + } + if noHealthcheck { + if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) { + containerSpec.Healthcheck = &container.HealthConfig{ + Test: []string{"NONE"}, + } + return nil + } + 
return errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" { + containerSpec.Healthcheck.Test = nil + } + if flags.Changed(flagHealthInterval) { + val := *flags.Lookup(flagHealthInterval).Value.(*opts.PositiveDurationOpt).Value() + containerSpec.Healthcheck.Interval = val + } + if flags.Changed(flagHealthTimeout) { + val := *flags.Lookup(flagHealthTimeout).Value.(*opts.PositiveDurationOpt).Value() + containerSpec.Healthcheck.Timeout = val + } + if flags.Changed(flagHealthStartPeriod) { + val := *flags.Lookup(flagHealthStartPeriod).Value.(*opts.PositiveDurationOpt).Value() + containerSpec.Healthcheck.StartPeriod = val + } + if flags.Changed(flagHealthRetries) { + containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries) + } + if flags.Changed(flagHealthCmd) { + cmd, _ := flags.GetString(flagHealthCmd) + if cmd != "" { + containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd} + } else { + containerSpec.Healthcheck.Test = nil + } + } + return nil +} + +func updateNetworks(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { + // spec.TaskTemplate.Networks takes precedence over the deprecated + // spec.Networks field. If spec.Network is in use, we'll migrate those + // values to spec.TaskTemplate.Networks. 
+ specNetworks := spec.TaskTemplate.Networks + if len(specNetworks) == 0 { + specNetworks = spec.Networks + } + spec.Networks = nil + + toRemove := buildToRemoveSet(flags, flagNetworkRemove) + idsToRemove := make(map[string]struct{}) + for networkIDOrName := range toRemove { + network, err := apiClient.NetworkInspect(ctx, networkIDOrName, types.NetworkInspectOptions{Scope: "swarm"}) + if err != nil { + return err + } + idsToRemove[network.ID] = struct{}{} + } + + existingNetworks := make(map[string]struct{}) + var newNetworks []swarm.NetworkAttachmentConfig + for _, network := range specNetworks { + if _, exists := idsToRemove[network.Target]; exists { + continue + } + + newNetworks = append(newNetworks, network) + existingNetworks[network.Target] = struct{}{} + } + + if flags.Changed(flagNetworkAdd) { + values := flags.Lookup(flagNetworkAdd).Value.(*opts.NetworkOpt) + networks := convertNetworks(*values) + for _, network := range networks { + nwID, err := resolveNetworkID(ctx, apiClient, network.Target) + if err != nil { + return err + } + if _, exists := existingNetworks[nwID]; exists { + return errors.Errorf("service is already attached to network %s", network.Target) + } + network.Target = nwID + newNetworks = append(newNetworks, network) + existingNetworks[network.Target] = struct{}{} + } + } + + sort.Slice(newNetworks, func(i, j int) bool { + return newNetworks[i].Target < newNetworks[j].Target + }) + + spec.TaskTemplate.Networks = newNetworks + return nil +} + +// updateCredSpecConfig updates the value of the credential spec Config field +// to the config ID if the credential spec has changed. it mutates the passed +// spec. 
it does not handle the case where the credential spec specifies a +// config that does not exist -- that case is handled as part of +// getUpdatedConfigs +func updateCredSpecConfig(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) { + if flags.Changed(flagCredentialSpec) { + credSpecOpt := flags.Lookup(flagCredentialSpec) + // if the flag has changed, and the value is empty string, then we + // should remove any credential spec that might be present + if credSpecOpt.Value.String() == "" { + if containerSpec.Privileges != nil { + containerSpec.Privileges.CredentialSpec = nil + } + return + } + + // otherwise, set the credential spec to be the parsed value + credSpec := credSpecOpt.Value.(*credentialSpecOpt).Value() + + // if this is a Config credential spec, we we still need to replace the + // value of credSpec.Config with the config ID instead of Name. + if credSpec.Config != "" { + for _, config := range containerSpec.Configs { + // if the config name matches, then set the config ID. we do + // not need to worry about if this is a Runtime target or not. + // even if it is not a Runtime target, getUpdatedConfigs + // ensures that a Runtime target for this config exists, and + // the Name is unique so the ID is correct no matter the + // target. 
+ if config.ConfigName == credSpec.Config { + credSpec.Config = config.ConfigID + break + } + } + } + + if containerSpec.Privileges == nil { + containerSpec.Privileges = &swarm.Privileges{} + } + + containerSpec.Privileges.CredentialSpec = credSpec + } +} diff --git a/cli/cli/command/service/update_test.go b/cli/cli/command/service/update_test.go new file mode 100644 index 00000000..6eeea55a --- /dev/null +++ b/cli/cli/command/service/update_test.go @@ -0,0 +1,1250 @@ +package service + +import ( + "context" + "fmt" + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestUpdateServiceArgs(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("args", "the \"new args\"") + + spec := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + cspec := spec.TaskTemplate.ContainerSpec + cspec.Args = []string{"old", "args"} + + updateService(nil, nil, flags, spec) + assert.Check(t, is.DeepEqual([]string{"the", "new args"}, cspec.Args)) +} + +func TestUpdateLabels(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-add", "toadd=newlabel") + flags.Set("label-rm", "toremove") + + labels := map[string]string{ + "toremove": "thelabeltoremove", + "tokeep": "value", + } + + updateLabels(flags, &labels) + assert.Check(t, is.Len(labels, 2)) + assert.Check(t, is.Equal("value", labels["tokeep"])) + assert.Check(t, is.Equal("newlabel", labels["toadd"])) +} + +func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-rm", "dne") + + labels := map[string]string{"foo": "theoldlabel"} + updateLabels(flags, &labels) + assert.Check(t, is.Len(labels, 1)) +} + +func TestUpdatePlacementConstraints(t 
*testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("constraint-add", "node=toadd") + flags.Set("constraint-rm", "node!=toremove") + + placement := &swarm.Placement{ + Constraints: []string{"node!=toremove", "container=tokeep"}, + } + + updatePlacementConstraints(flags, placement) + assert.Assert(t, is.Len(placement.Constraints, 2)) + assert.Check(t, is.Equal("container=tokeep", placement.Constraints[0])) + assert.Check(t, is.Equal("node=toadd", placement.Constraints[1])) +} + +func TestUpdatePlacementPrefs(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("placement-pref-add", "spread=node.labels.dc") + flags.Set("placement-pref-rm", "spread=node.labels.rack") + + placement := &swarm.Placement{ + Preferences: []swarm.PlacementPreference{ + { + Spread: &swarm.SpreadOver{ + SpreadDescriptor: "node.labels.rack", + }, + }, + { + Spread: &swarm.SpreadOver{ + SpreadDescriptor: "node.labels.row", + }, + }, + }, + } + + updatePlacementPreferences(flags, placement) + assert.Assert(t, is.Len(placement.Preferences, 2)) + assert.Check(t, is.Equal("node.labels.row", placement.Preferences[0].Spread.SpreadDescriptor)) + assert.Check(t, is.Equal("node.labels.dc", placement.Preferences[1].Spread.SpreadDescriptor)) +} + +func TestUpdateEnvironment(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "toadd=newenv") + flags.Set("env-rm", "toremove") + + envs := []string{"toremove=theenvtoremove", "tokeep=value"} + + updateEnvironment(flags, &envs) + assert.Assert(t, is.Len(envs, 2)) + // Order has been removed in updateEnvironment (map) + sort.Strings(envs) + assert.Check(t, is.Equal("toadd=newenv", envs[0])) + assert.Check(t, is.Equal("tokeep=value", envs[1])) +} + +func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "foo=newenv") + flags.Set("env-add", "foo=dupe") + flags.Set("env-rm", "foo") + + envs := []string{"foo=value"} + + 
updateEnvironment(flags, &envs) + assert.Check(t, is.Len(envs, 0)) +} + +func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) { + // Test case for #25404 + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "A=b") + + envs := []string{"A=c"} + + updateEnvironment(flags, &envs) + assert.Assert(t, is.Len(envs, 1)) + assert.Check(t, is.Equal("A=b", envs[0])) +} + +func TestUpdateGroups(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("group-add", "wheel") + flags.Set("group-add", "docker") + flags.Set("group-rm", "root") + flags.Set("group-add", "foo") + flags.Set("group-rm", "docker") + + groups := []string{"bar", "root"} + + updateGroups(flags, &groups) + assert.Assert(t, is.Len(groups, 3)) + assert.Check(t, is.Equal("bar", groups[0])) + assert.Check(t, is.Equal("foo", groups[1])) + assert.Check(t, is.Equal("wheel", groups[2])) +} + +func TestUpdateDNSConfig(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + + // IPv4, with duplicates + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "2.2.2.2") + flags.Set("dns-rm", "3.3.3.3") + flags.Set("dns-rm", "2.2.2.2") + // IPv6 + flags.Set("dns-add", "2001:db8:abc8::1") + // Invalid dns record + assert.ErrorContains(t, flags.Set("dns-add", "x.y.z.w"), "x.y.z.w is not an ip address") + + // domains with duplicates + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.org") + flags.Set("dns-search-rm", "example.org") + // Invalid dns search domain + assert.ErrorContains(t, flags.Set("dns-search-add", "example$com"), "example$com is not a valid domain") + + flags.Set("dns-option-add", "ndots:9") + flags.Set("dns-option-rm", "timeout:3") + + config := &swarm.DNSConfig{ + Nameservers: []string{"3.3.3.3", "5.5.5.5"}, + Search: []string{"localdomain"}, + Options: []string{"timeout:3"}, + } + + updateDNSConfig(flags, &config) + + assert.Assert(t, is.Len(config.Nameservers, 
3)) + assert.Check(t, is.Equal("1.1.1.1", config.Nameservers[0])) + assert.Check(t, is.Equal("2001:db8:abc8::1", config.Nameservers[1])) + assert.Check(t, is.Equal("5.5.5.5", config.Nameservers[2])) + + assert.Assert(t, is.Len(config.Search, 2)) + assert.Check(t, is.Equal("example.com", config.Search[0])) + assert.Check(t, is.Equal("localdomain", config.Search[1])) + + assert.Assert(t, is.Len(config.Options, 1)) + assert.Check(t, is.Equal(config.Options[0], "ndots:9")) +} + +func TestUpdateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol2,target=/toadd") + flags.Set("mount-rm", "/toremove") + + mounts := []mounttypes.Mount{ + {Target: "/toremove", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/tokeep", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Assert(t, is.Len(mounts, 2)) + assert.Check(t, is.Equal("/toadd", mounts[0].Target)) + assert.Check(t, is.Equal("/tokeep", mounts[1].Target)) +} + +func TestUpdateMountsWithDuplicateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol4,target=/toadd") + + mounts := []mounttypes.Mount{ + {Target: "/tokeep1", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/toadd", Source: "vol2", Type: mounttypes.TypeBind}, + {Target: "/tokeep2", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Assert(t, is.Len(mounts, 3)) + assert.Check(t, is.Equal("/tokeep1", mounts[0].Target)) + assert.Check(t, is.Equal("/tokeep2", mounts[1].Target)) + assert.Check(t, is.Equal("/toadd", mounts[2].Target)) +} + +func TestUpdatePorts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "1000:1000") + flags.Set("publish-rm", "333/udp") + + portConfigs := []swarm.PortConfig{ + {TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP}, + {TargetPort: 555}, + } + + err := updatePorts(flags, &portConfigs) + 
assert.NilError(t, err) + assert.Assert(t, is.Len(portConfigs, 2)) + // Do a sort to have the order (might have changed by map) + targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)} + sort.Ints(targetPorts) + assert.Check(t, is.Equal(555, targetPorts[0])) + assert.Check(t, is.Equal(1000, targetPorts[1])) +} + +func TestUpdatePortsDuplicate(t *testing.T) { + // Test case for #25375 + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "80:80") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 80, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.NilError(t, err) + assert.Assert(t, is.Len(portConfigs, 1)) + assert.Check(t, is.Equal(uint32(80), portConfigs[0].TargetPort)) +} + +func TestUpdateHealthcheckTable(t *testing.T) { + type test struct { + flags [][2]string + initial *container.HealthConfig + expected *container.HealthConfig + err string + } + testCases := []test{ + { + flags: [][2]string{{"no-healthcheck", "true"}}, + initial: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"NONE"}}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + }, + { + flags: [][2]string{{"health-interval", "1m"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: 
[]string{"CMD", "cmd1"}, Interval: time.Minute}, + }, + { + flags: [][2]string{{"health-cmd", ""}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "0"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + }, + { + flags: [][2]string{{"health-start-period", "1m"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, StartPeriod: time.Minute}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-interval", "10m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-timeout", "1m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + } + for i, c := range testCases { + flags := newUpdateCommand(nil).Flags() + for _, flag := range c.flags { + flags.Set(flag[0], flag[1]) + } + cspec := &swarm.ContainerSpec{ + Healthcheck: c.initial, + } + err := updateHealthcheck(flags, cspec) + if c.err != "" { + assert.Error(t, err, c.err) + } else { + assert.NilError(t, err) + if !reflect.DeepEqual(cspec.Healthcheck, c.expected) { + t.Errorf("incorrect result for test %d, expected health config:\n\t%#v\ngot:\n\t%#v", i, c.expected, cspec.Healthcheck) + } + } + } +} + +func TestUpdateHosts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "example.net:2.2.2.2") + flags.Set("host-add", "ipv6.net:2001:db8:abc8::1") + // remove with ipv6 should work + flags.Set("host-rm", "example.net:2001:db8:abc8::1") + // just hostname should work as well + flags.Set("host-rm", "example.net") + // bad format error + 
assert.ErrorContains(t, flags.Set("host-add", "$example.com$"), `bad format for add-host: "$example.com$"`) + + hosts := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2001:db8:abc8::1 example.net"} + expected := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2.2.2.2 example.net", "2001:db8:abc8::1 ipv6.net"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, hosts)) +} + +func TestUpdateHostsPreservesOrder(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "foobar:127.0.0.2") + flags.Set("host-add", "foobar:127.0.0.1") + flags.Set("host-add", "foobar:127.0.0.3") + + hosts := []string{} + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"127.0.0.2 foobar", "127.0.0.1 foobar", "127.0.0.3 foobar"}, hosts)) +} + +func TestUpdateHostsReplaceEntry(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "foobar:127.0.0.4") + flags.Set("host-rm", "foobar:127.0.0.2") + + hosts := []string{"127.0.0.2 foobar", "127.0.0.1 foobar", "127.0.0.3 foobar"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"127.0.0.1 foobar", "127.0.0.3 foobar", "127.0.0.4 foobar"}, hosts)) +} + +func TestUpdateHostsRemoveHost(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-rm", "host1") + + hosts := []string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host1 host4", "127.0.0.3 host1"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + + // Removing host `host1` should remove the entry from each line it appears in. + // If there are no other hosts in the entry, the entry itself should be removed. 
+ assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host3 host2 host4", "127.0.0.1 host4"}, hosts)) +} + +func TestUpdateHostsRemoveHostIP(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-rm", "host1:127.0.0.1") + + hosts := []string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host1 host4", "127.0.0.3 host1", "127.0.0.1 host1"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + + // Removing host `host1` should remove the entry from each line it appears in, + // but only if the IP-address matches. If there are no other hosts in the entry, + // the entry itself should be removed. + assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host4", "127.0.0.3 host1"}, hosts)) +} + +func TestUpdateHostsRemoveAll(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "host-three:127.0.0.4") + flags.Set("host-add", "host-one:127.0.0.5") + flags.Set("host-rm", "host-one") + + hosts := []string{"127.0.0.1 host-one", "127.0.0.2 host-two", "127.0.0.3 host-one"} + + err := updateHosts(flags, &hosts) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host-two", "127.0.0.4 host-three", "127.0.0.5 host-one"}, hosts)) +} + +func TestUpdatePortsRmWithProtocol(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "8081:81") + flags.Set("publish-add", "8082:82") + flags.Set("publish-rm", "80") + flags.Set("publish-rm", "81/tcp") + flags.Set("publish-rm", "82/udp") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.NilError(t, err) + assert.Assert(t, is.Len(portConfigs, 2)) + assert.Check(t, is.Equal(uint32(81), portConfigs[0].TargetPort)) + assert.Check(t, is.Equal(uint32(82), portConfigs[1].TargetPort)) +} + +type secretAPIClientMock struct { + 
listResult []swarm.Secret +} + +func (s secretAPIClientMock) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + return s.listResult, nil +} +func (s secretAPIClientMock) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + return types.SecretCreateResponse{}, nil +} +func (s secretAPIClientMock) SecretRemove(ctx context.Context, id string) error { + return nil +} +func (s secretAPIClientMock) SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) { + return swarm.Secret{}, []byte{}, nil +} +func (s secretAPIClientMock) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + return nil +} + +// TestUpdateSecretUpdateInPlace tests the ability to update the "target" of an secret with "docker service update" +// by combining "--secret-rm" and "--secret-add" for the same secret. +func TestUpdateSecretUpdateInPlace(t *testing.T) { + apiClient := secretAPIClientMock{ + listResult: []swarm.Secret{ + { + ID: "tn9qiblgnuuut11eufquw5dev", + Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "foo"}}, + }, + }, + } + + flags := newUpdateCommand(nil).Flags() + flags.Set("secret-add", "source=foo,target=foo2") + flags.Set("secret-rm", "foo") + + secrets := []*swarm.SecretReference{ + { + File: &swarm.SecretReferenceFileTarget{ + Name: "foo", + UID: "0", + GID: "0", + Mode: 292, + }, + SecretID: "tn9qiblgnuuut11eufquw5dev", + SecretName: "foo", + }, + } + + updatedSecrets, err := getUpdatedSecrets(apiClient, flags, secrets) + + assert.NilError(t, err) + assert.Assert(t, is.Len(updatedSecrets, 1)) + assert.Check(t, is.Equal("tn9qiblgnuuut11eufquw5dev", updatedSecrets[0].SecretID)) + assert.Check(t, is.Equal("foo", updatedSecrets[0].SecretName)) + assert.Check(t, is.Equal("foo2", updatedSecrets[0].File.Name)) +} + +func TestUpdateReadOnly(t *testing.T) { + spec := &swarm.ServiceSpec{ + TaskTemplate: 
swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + cspec := spec.TaskTemplate.ContainerSpec + + // Update with --read-only=true, changed to true + flags := newUpdateCommand(nil).Flags() + flags.Set("read-only", "true") + updateService(nil, nil, flags, spec) + assert.Check(t, cspec.ReadOnly) + + // Update without --read-only, no change + flags = newUpdateCommand(nil).Flags() + updateService(nil, nil, flags, spec) + assert.Check(t, cspec.ReadOnly) + + // Update with --read-only=false, changed to false + flags = newUpdateCommand(nil).Flags() + flags.Set("read-only", "false") + updateService(nil, nil, flags, spec) + assert.Check(t, !cspec.ReadOnly) +} + +func TestUpdateInit(t *testing.T) { + spec := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + cspec := spec.TaskTemplate.ContainerSpec + + // Update with --init=true + flags := newUpdateCommand(nil).Flags() + flags.Set("init", "true") + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal(true, *cspec.Init)) + + // Update without --init, no change + flags = newUpdateCommand(nil).Flags() + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal(true, *cspec.Init)) + + // Update with --init=false + flags = newUpdateCommand(nil).Flags() + flags.Set("init", "false") + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal(false, *cspec.Init)) +} + +func TestUpdateStopSignal(t *testing.T) { + spec := &swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + cspec := spec.TaskTemplate.ContainerSpec + + // Update with --stop-signal=SIGUSR1 + flags := newUpdateCommand(nil).Flags() + flags.Set("stop-signal", "SIGUSR1") + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal("SIGUSR1", cspec.StopSignal)) + + // Update without --stop-signal, no change + flags = newUpdateCommand(nil).Flags() + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal("SIGUSR1", 
cspec.StopSignal)) + + // Update with --stop-signal=SIGWINCH + flags = newUpdateCommand(nil).Flags() + flags.Set("stop-signal", "SIGWINCH") + updateService(nil, nil, flags, spec) + assert.Check(t, is.Equal("SIGWINCH", cspec.StopSignal)) +} + +func TestUpdateIsolationValid(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + err := flags.Set("isolation", "process") + assert.NilError(t, err) + spec := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + assert.Check(t, is.Equal(container.IsolationProcess, spec.TaskTemplate.ContainerSpec.Isolation)) +} + +// TestUpdateLimitsReservations tests that limits and reservations are updated, +// and that values are not updated are not reset to their default value +func TestUpdateLimitsReservations(t *testing.T) { + spec := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + + // test that updating works if the service did not previously + // have limits set (https://github.com/moby/moby/issues/38363) + flags := newUpdateCommand(nil).Flags() + err := flags.Set(flagLimitCPU, "2") + assert.NilError(t, err) + err = flags.Set(flagLimitMemory, "200M") + assert.NilError(t, err) + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + + spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + + // test that updating works if the service did not previously + // have reservations set (https://github.com/moby/moby/issues/38363) + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagReserveCPU, "2") + assert.NilError(t, err) + err = flags.Set(flagReserveMemory, "200M") + assert.NilError(t, err) + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + + spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + 
ContainerSpec: &swarm.ContainerSpec{}, + Resources: &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: 1000000000, + MemoryBytes: 104857600, + }, + Reservations: &swarm.Resources{ + NanoCPUs: 1000000000, + MemoryBytes: 104857600, + }, + }, + }, + } + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagLimitCPU, "2") + assert.NilError(t, err) + err = flags.Set(flagReserveCPU, "2") + assert.NilError(t, err) + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600))) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagLimitMemory, "200M") + assert.NilError(t, err) + err = flags.Set(flagReserveMemory, "200M") + assert.NilError(t, err) + err = updateService(context.Background(), nil, flags, &spec) + assert.NilError(t, err) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000))) + assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200))) +} + +func TestUpdateIsolationInvalid(t *testing.T) { + // validation depends on daemon os / version so validation should be done on the daemon side + flags := newUpdateCommand(nil).Flags() + err := flags.Set("isolation", "test") + assert.NilError(t, err) + spec := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + }, + } + err = updateService(context.Background(), nil, flags, &spec) + 
assert.NilError(t, err) + assert.Check(t, is.Equal(container.Isolation("test"), spec.TaskTemplate.ContainerSpec.Isolation)) +} + +func TestAddGenericResources(t *testing.T) { + task := &swarm.TaskSpec{} + flags := newUpdateCommand(nil).Flags() + + assert.Check(t, addGenericResources(flags, task)) + + flags.Set(flagGenericResourcesAdd, "foo=1") + assert.Check(t, addGenericResources(flags, task)) + assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 1)) + + // Checks that foo isn't added a 2nd time + flags = newUpdateCommand(nil).Flags() + flags.Set(flagGenericResourcesAdd, "bar=1") + assert.Check(t, addGenericResources(flags, task)) + assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 2)) +} + +func TestRemoveGenericResources(t *testing.T) { + task := &swarm.TaskSpec{} + flags := newUpdateCommand(nil).Flags() + + assert.Check(t, removeGenericResources(flags, task)) + + flags.Set(flagGenericResourcesRemove, "foo") + assert.Check(t, is.ErrorContains(removeGenericResources(flags, task), "")) + + flags = newUpdateCommand(nil).Flags() + flags.Set(flagGenericResourcesAdd, "foo=1") + addGenericResources(flags, task) + flags = newUpdateCommand(nil).Flags() + flags.Set(flagGenericResourcesAdd, "bar=1") + addGenericResources(flags, task) + + flags = newUpdateCommand(nil).Flags() + flags.Set(flagGenericResourcesRemove, "foo") + assert.Check(t, removeGenericResources(flags, task)) + assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 1)) +} + +func TestUpdateNetworks(t *testing.T) { + ctx := context.Background() + nws := []types.NetworkResource{ + {Name: "aaa-network", ID: "id555"}, + {Name: "mmm-network", ID: "id999"}, + {Name: "zzz-network", ID: "id111"}, + } + + client := &fakeClient{ + networkInspectFunc: func(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + for _, network := range nws { + if network.ID == networkID || network.Name == networkID { + return 
network, nil + } + } + return types.NetworkResource{}, fmt.Errorf("network not found: %s", networkID) + }, + } + + svc := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + Networks: []swarm.NetworkAttachmentConfig{ + {Target: "id999"}, + }, + }, + } + + flags := newUpdateCommand(nil).Flags() + err := flags.Set(flagNetworkAdd, "aaa-network") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks)) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagNetworkAdd, "aaa-network") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.Error(t, err, "service is already attached to network aaa-network") + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks)) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagNetworkAdd, "id555") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.Error(t, err, "service is already attached to network id555") + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks)) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagNetworkRemove, "id999") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}}, svc.TaskTemplate.Networks)) + + flags = newUpdateCommand(nil).Flags() + err = flags.Set(flagNetworkAdd, "mmm-network") + assert.NilError(t, err) + err = flags.Set(flagNetworkRemove, "aaa-network") + assert.NilError(t, err) + err = updateService(ctx, client, flags, &svc) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id999"}}, 
svc.TaskTemplate.Networks)) +} + +func TestUpdateMaxReplicas(t *testing.T) { + ctx := context.Background() + + svc := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{}, + Placement: &swarm.Placement{ + MaxReplicas: 1, + }, + }, + } + + flags := newUpdateCommand(nil).Flags() + flags.Set(flagMaxReplicas, "2") + err := updateService(ctx, nil, flags, &svc) + assert.NilError(t, err) + + assert.DeepEqual(t, svc.TaskTemplate.Placement, &swarm.Placement{MaxReplicas: uint64(2)}) +} + +func TestUpdateSysCtls(t *testing.T) { + ctx := context.Background() + + tests := []struct { + name string + spec map[string]string + add []string + rm []string + expected map[string]string + }{ + { + name: "from scratch", + add: []string{"sysctl.zet=value-99", "sysctl.alpha=value-1"}, + expected: map[string]string{"sysctl.zet": "value-99", "sysctl.alpha": "value-1"}, + }, + { + name: "append new", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + add: []string{"new.sysctl=newvalue"}, + expected: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2", "new.sysctl": "newvalue"}, + }, + { + name: "append duplicate is a no-op", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + add: []string{"sysctl.one=value-1"}, + expected: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + }, + { + name: "remove and append existing is a no-op", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + add: []string{"sysctl.one=value-1"}, + rm: []string{"sysctl.one=value-1"}, + expected: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + }, + { + name: "remove and append new should append", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + add: []string{"new.sysctl=newvalue"}, + rm: []string{"new.sysctl=newvalue"}, + expected: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2", "new.sysctl": 
"newvalue"}, + }, + { + name: "update existing", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + add: []string{"sysctl.one=newvalue"}, + expected: map[string]string{"sysctl.one": "newvalue", "sysctl.two": "value-2"}, + }, + { + name: "update existing twice", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + add: []string{"sysctl.one=newvalue", "sysctl.one=evennewervalue"}, + expected: map[string]string{"sysctl.one": "evennewervalue", "sysctl.two": "value-2"}, + }, + { + name: "remove all", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + rm: []string{"sysctl.one=value-1", "sysctl.two=value-2"}, + expected: map[string]string{}, + }, + { + name: "remove by key", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + rm: []string{"sysctl.one"}, + expected: map[string]string{"sysctl.two": "value-2"}, + }, + { + name: "remove by key and different value", + spec: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"}, + rm: []string{"sysctl.one=anyvalueyoulike"}, + expected: map[string]string{"sysctl.two": "value-2"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + svc := swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{Sysctls: tc.spec}, + }, + } + flags := newUpdateCommand(nil).Flags() + for _, v := range tc.add { + assert.NilError(t, flags.Set(flagSysCtlAdd, v)) + } + for _, v := range tc.rm { + assert.NilError(t, flags.Set(flagSysCtlRemove, v)) + } + err := updateService(ctx, &fakeClient{}, flags, &svc) + assert.NilError(t, err) + if !assert.Check(t, is.DeepEqual(svc.TaskTemplate.ContainerSpec.Sysctls, tc.expected)) { + t.Logf("expected: %v", tc.expected) + t.Logf("actual: %v", svc.TaskTemplate.ContainerSpec.Sysctls) + } + }) + } +} + +func TestUpdateGetUpdatedConfigs(t *testing.T) { + // cannedConfigs is a set of configs that we'll use over and over in the + // tests. 
it's a map of Name to Config + cannedConfigs := map[string]*swarm.Config{ + "bar": { + ID: "barID", + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "bar", + }, + }, + }, + "cred": { + ID: "credID", + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "cred", + }, + }, + }, + "newCred": { + ID: "newCredID", + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "newCred", + }, + }, + }, + } + // cannedConfigRefs is the same thing, but with config references instead + // instead of ID, however, it just maps an arbitrary string value. this is + // so we could have multiple config refs using the same config + cannedConfigRefs := map[string]*swarm.ConfigReference{ + "fooRef": { + ConfigID: "fooID", + ConfigName: "foo", + File: &swarm.ConfigReferenceFileTarget{ + Name: "foo", + UID: "0", + GID: "0", + Mode: 0444, + }, + }, + "barRef": { + ConfigID: "barID", + ConfigName: "bar", + File: &swarm.ConfigReferenceFileTarget{ + Name: "bar", + UID: "0", + GID: "0", + Mode: 0444, + }, + }, + "bazRef": { + ConfigID: "bazID", + ConfigName: "baz", + File: &swarm.ConfigReferenceFileTarget{ + Name: "baz", + UID: "0", + GID: "0", + Mode: 0444, + }, + }, + "credRef": { + ConfigID: "credID", + ConfigName: "cred", + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }, + "newCredRef": { + ConfigID: "newCredID", + ConfigName: "newCred", + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }, + } + + type flagVal [2]string + type test struct { + // the name of the subtest + name string + // flags are the flags we'll be setting + flags []flagVal + // oldConfigs are the configs that would already be on the service + // it is a slice of strings corresponding to the the key of + // cannedConfigRefs + oldConfigs []string + // oldCredSpec is the credentialSpec being carried over from the old + // object + oldCredSpec *swarm.CredentialSpec + // lookupConfigs are the configs we're expecting to be listed. 
it is a + // slice of strings corresponding to the key of cannedConfigs + lookupConfigs []string + // expected is the configs we should get as a result. it is a slice of + // strings corresponding to the key in cannedConfigRefs + expected []string + } + + testCases := []test{ + { + name: "no configs added or removed", + oldConfigs: []string{"fooRef"}, + expected: []string{"fooRef"}, + }, { + name: "add a config", + flags: []flagVal{{"config-add", "bar"}}, + oldConfigs: []string{"fooRef"}, + lookupConfigs: []string{"bar"}, + expected: []string{"fooRef", "barRef"}, + }, { + name: "remove a config", + flags: []flagVal{{"config-rm", "bar"}}, + oldConfigs: []string{"fooRef", "barRef"}, + expected: []string{"fooRef"}, + }, { + name: "include an old credential spec", + oldConfigs: []string{"credRef"}, + oldCredSpec: &swarm.CredentialSpec{Config: "credID"}, + expected: []string{"credRef"}, + }, { + name: "add a credential spec", + oldConfigs: []string{"fooRef"}, + flags: []flagVal{{"credential-spec", "config://cred"}}, + lookupConfigs: []string{"cred"}, + expected: []string{"fooRef", "credRef"}, + }, { + name: "change a credential spec", + oldConfigs: []string{"fooRef", "credRef"}, + oldCredSpec: &swarm.CredentialSpec{Config: "credID"}, + flags: []flagVal{{"credential-spec", "config://newCred"}}, + lookupConfigs: []string{"newCred"}, + expected: []string{"fooRef", "newCredRef"}, + }, { + name: "credential spec no longer config", + oldConfigs: []string{"fooRef", "credRef"}, + oldCredSpec: &swarm.CredentialSpec{Config: "credID"}, + flags: []flagVal{{"credential-spec", "file://someFile"}}, + lookupConfigs: []string{}, + expected: []string{"fooRef"}, + }, { + name: "credential spec becomes config", + oldConfigs: []string{"fooRef"}, + oldCredSpec: &swarm.CredentialSpec{File: "someFile"}, + flags: []flagVal{{"credential-spec", "config://cred"}}, + lookupConfigs: []string{"cred"}, + expected: []string{"fooRef", "credRef"}, + }, { + name: "remove credential spec", + oldConfigs: 
[]string{"fooRef", "credRef"}, + oldCredSpec: &swarm.CredentialSpec{Config: "credID"}, + flags: []flagVal{{"credential-spec", ""}}, + lookupConfigs: []string{}, + expected: []string{"fooRef"}, + }, { + name: "just frick my stuff up", + // a more complicated test. add barRef, remove bazRef, keep fooRef, + // change credentialSpec from credRef to newCredRef + oldConfigs: []string{"fooRef", "bazRef", "credRef"}, + oldCredSpec: &swarm.CredentialSpec{Config: "cred"}, + flags: []flagVal{ + {"config-add", "bar"}, + {"config-rm", "baz"}, + {"credential-spec", "config://newCred"}, + }, + lookupConfigs: []string{"bar", "newCred"}, + expected: []string{"fooRef", "barRef", "newCredRef"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + for _, f := range tc.flags { + flags.Set(f[0], f[1]) + } + + // fakeConfigAPIClientList is actually defined in create_test.go, + // but we'll use it here as well + var fakeClient fakeConfigAPIClientList = func(_ context.Context, opts types.ConfigListOptions) ([]swarm.Config, error) { + names := opts.Filters.Get("name") + assert.Equal(t, len(names), len(tc.lookupConfigs)) + + configs := []swarm.Config{} + for _, lookup := range tc.lookupConfigs { + assert.Assert(t, is.Contains(names, lookup)) + cfg, ok := cannedConfigs[lookup] + assert.Assert(t, ok) + configs = append(configs, *cfg) + } + return configs, nil + } + + // build the actual set of old configs and the container spec + oldConfigs := []*swarm.ConfigReference{} + for _, config := range tc.oldConfigs { + cfg, ok := cannedConfigRefs[config] + assert.Assert(t, ok) + oldConfigs = append(oldConfigs, cfg) + } + + containerSpec := &swarm.ContainerSpec{ + Configs: oldConfigs, + Privileges: &swarm.Privileges{ + CredentialSpec: tc.oldCredSpec, + }, + } + + finalConfigs, err := getUpdatedConfigs(fakeClient, flags, containerSpec) + assert.NilError(t, err) + + // ensure that the finalConfigs consists of all of the expected + 
// configs + assert.Equal(t, len(finalConfigs), len(tc.expected), + "%v final configs, %v expected", + len(finalConfigs), len(tc.expected), + ) + for _, expected := range tc.expected { + assert.Assert(t, is.Contains(finalConfigs, cannedConfigRefs[expected])) + } + }) + } +} + +func TestUpdateCredSpec(t *testing.T) { + type testCase struct { + // name is the name of the subtest + name string + // flagVal is the value we're setting flagCredentialSpec to + flagVal string + // spec is the existing serviceSpec with its configs + spec *swarm.ContainerSpec + // expected is the expected value of the credential spec after the + // function. it may be nil + expected *swarm.CredentialSpec + } + + testCases := []testCase{ + { + name: "add file credential spec", + flagVal: "file://somefile", + spec: &swarm.ContainerSpec{}, + expected: &swarm.CredentialSpec{File: "somefile"}, + }, { + name: "remove a file credential spec", + flagVal: "", + spec: &swarm.ContainerSpec{ + Privileges: &swarm.Privileges{ + CredentialSpec: &swarm.CredentialSpec{ + File: "someFile", + }, + }, + }, + expected: nil, + }, { + name: "remove when no CredentialSpec exists", + flagVal: "", + spec: &swarm.ContainerSpec{}, + expected: nil, + }, { + name: "add a config credenital spec", + flagVal: "config://someConfigName", + spec: &swarm.ContainerSpec{ + Configs: []*swarm.ConfigReference{ + { + ConfigName: "someConfigName", + ConfigID: "someConfigID", + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }, + }, + }, + expected: &swarm.CredentialSpec{ + Config: "someConfigID", + }, + }, { + name: "remove a config credential spec", + flagVal: "", + spec: &swarm.ContainerSpec{ + Privileges: &swarm.Privileges{ + CredentialSpec: &swarm.CredentialSpec{ + Config: "someConfigID", + }, + }, + }, + expected: nil, + }, { + name: "update a config credential spec", + flagVal: "config://someConfigName", + spec: &swarm.ContainerSpec{ + Configs: []*swarm.ConfigReference{ + { + ConfigName: "someConfigName", + ConfigID: 
"someConfigID", + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }, + }, + Privileges: &swarm.Privileges{ + CredentialSpec: &swarm.CredentialSpec{ + Config: "someDifferentConfigID", + }, + }, + }, + expected: &swarm.CredentialSpec{ + Config: "someConfigID", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set(flagCredentialSpec, tc.flagVal) + + updateCredSpecConfig(flags, tc.spec) + // handle the case where tc.spec.Privileges is nil + if tc.expected == nil { + assert.Assert(t, tc.spec.Privileges == nil || tc.spec.Privileges.CredentialSpec == nil) + return + } + + assert.Assert(t, tc.spec.Privileges != nil) + assert.DeepEqual(t, tc.spec.Privileges.CredentialSpec, tc.expected) + }) + } +} diff --git a/cli/cli/command/stack/client_test.go b/cli/cli/command/stack/client_test.go new file mode 100644 index 00000000..a4f95cf3 --- /dev/null +++ b/cli/cli/command/stack/client_test.go @@ -0,0 +1,250 @@ +package stack + +import ( + "context" + "strings" + + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + + version string + + services []string + networks []string + secrets []string + configs []string + + removedServices []string + removedNetworks []string + removedSecrets []string + removedConfigs []string + + serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) + networkListFunc func(options types.NetworkListOptions) ([]types.NetworkResource, error) + secretListFunc func(options types.SecretListOptions) ([]swarm.Secret, error) + configListFunc func(options types.ConfigListOptions) ([]swarm.Config, error) + nodeListFunc func(options types.NodeListOptions) ([]swarm.Node, error) + taskListFunc func(options 
types.TaskListOptions) ([]swarm.Task, error) + nodeInspectWithRaw func(ref string) (swarm.Node, []byte, error) + + serviceUpdateFunc func(serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + + serviceRemoveFunc func(serviceID string) error + networkRemoveFunc func(networkID string) error + secretRemoveFunc func(secretID string) error + configRemoveFunc func(configID string) error +} + +func (cli *fakeClient) ServerVersion(ctx context.Context) (types.Version, error) { + return types.Version{ + Version: "docker-dev", + APIVersion: api.DefaultVersion, + }, nil +} + +func (cli *fakeClient) ClientVersion() string { + return cli.version +} + +func (cli *fakeClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + if cli.serviceListFunc != nil { + return cli.serviceListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + servicesList := []swarm.Service{} + for _, name := range cli.services { + if belongToNamespace(name, namespace) { + servicesList = append(servicesList, serviceFromName(name)) + } + } + return servicesList, nil +} + +func (cli *fakeClient) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + if cli.networkListFunc != nil { + return cli.networkListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + networksList := []types.NetworkResource{} + for _, name := range cli.networks { + if belongToNamespace(name, namespace) { + networksList = append(networksList, networkFromName(name)) + } + } + return networksList, nil +} + +func (cli *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if cli.secretListFunc != nil { + return cli.secretListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + secretsList := []swarm.Secret{} + for _, name := range cli.secrets { + 
if belongToNamespace(name, namespace) { + secretsList = append(secretsList, secretFromName(name)) + } + } + return secretsList, nil +} + +func (cli *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if cli.configListFunc != nil { + return cli.configListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + configsList := []swarm.Config{} + for _, name := range cli.configs { + if belongToNamespace(name, namespace) { + configsList = append(configsList, configFromName(name)) + } + } + return configsList, nil +} + +func (cli *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + if cli.taskListFunc != nil { + return cli.taskListFunc(options) + } + return []swarm.Task{}, nil +} + +func (cli *fakeClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + if cli.nodeListFunc != nil { + return cli.nodeListFunc(options) + } + return []swarm.Node{}, nil +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectWithRaw != nil { + return cli.nodeInspectWithRaw(ref) + } + return swarm.Node{}, nil, nil +} + +func (cli *fakeClient) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + if cli.serviceUpdateFunc != nil { + return cli.serviceUpdateFunc(serviceID, version, service, options) + } + + return types.ServiceUpdateResponse{}, nil +} + +func (cli *fakeClient) ServiceRemove(ctx context.Context, serviceID string) error { + if cli.serviceRemoveFunc != nil { + return cli.serviceRemoveFunc(serviceID) + } + + cli.removedServices = append(cli.removedServices, serviceID) + return nil +} + +func (cli *fakeClient) NetworkRemove(ctx context.Context, networkID string) error { + if cli.networkRemoveFunc != nil { + return 
cli.networkRemoveFunc(networkID) + } + + cli.removedNetworks = append(cli.removedNetworks, networkID) + return nil +} + +func (cli *fakeClient) SecretRemove(ctx context.Context, secretID string) error { + if cli.secretRemoveFunc != nil { + return cli.secretRemoveFunc(secretID) + } + + cli.removedSecrets = append(cli.removedSecrets, secretID) + return nil +} + +func (cli *fakeClient) ConfigRemove(ctx context.Context, configID string) error { + if cli.configRemoveFunc != nil { + return cli.configRemoveFunc(configID) + } + + cli.removedConfigs = append(cli.removedConfigs, configID) + return nil +} + +func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return swarm.Service{ + ID: serviceID, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: serviceID, + }, + }, + }, []byte{}, nil +} + +func serviceFromName(name string) swarm.Service { + return swarm.Service{ + ID: "ID-" + name, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func networkFromName(name string) types.NetworkResource { + return types.NetworkResource{ + ID: "ID-" + name, + Name: name, + } +} + +func secretFromName(name string) swarm.Secret { + return swarm.Secret{ + ID: "ID-" + name, + Spec: swarm.SecretSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func configFromName(name string) swarm.Config { + return swarm.Config{ + ID: "ID-" + name, + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func namespaceFromFilters(filters filters.Args) string { + label := filters.Get("label")[0] + return strings.TrimPrefix(label, convert.LabelNamespace+"=") +} + +func belongToNamespace(id, namespace string) bool { + return strings.HasPrefix(id, namespace+"_") +} + +func objectName(namespace, name string) string { + return namespace + "_" + name +} + +func objectID(name string) string { + return "ID-" + name +} + 
+func buildObjectIDs(objectNames []string) []string { + IDs := make([]string, len(objectNames)) + for i, name := range objectNames { + IDs[i] = objectID(name) + } + return IDs +} diff --git a/cli/cli/command/stack/cmd.go b/cli/cli/command/stack/cmd.go new file mode 100644 index 00000000..080732f5 --- /dev/null +++ b/cli/cli/command/stack/cmd.go @@ -0,0 +1,132 @@ +package stack + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +var errUnsupportedAllOrchestrator = fmt.Errorf(`no orchestrator specified: use either "kubernetes" or "swarm"`) + +type commonOptions struct { + orchestrator command.Orchestrator +} + +func (o *commonOptions) Orchestrator() command.Orchestrator { + if o == nil { + return command.OrchestratorSwarm + } + return o.orchestrator +} + +// NewStackCommand returns a cobra command for `stack` subcommands +func NewStackCommand(dockerCli command.Cli) *cobra.Command { + var opts commonOptions + cmd := &cobra.Command{ + Use: "stack [OPTIONS]", + Short: "Manage Docker stacks", + Args: cli.NoArgs, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + orchestrator, err := getOrchestrator(dockerCli, cmd) + if err != nil { + return err + } + opts.orchestrator = orchestrator + hideOrchestrationFlags(cmd, orchestrator) + return checkSupportedFlag(cmd, orchestrator) + }, + + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.25", + }, + } + defaultHelpFunc := cmd.HelpFunc() + cmd.SetHelpFunc(func(c *cobra.Command, args []string) { + if err := cmd.Root().PersistentPreRunE(c, args); err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return + } + if err := cmd.PersistentPreRunE(c, args); err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return + } + hideOrchestrationFlags(c, opts.orchestrator) + defaultHelpFunc(c, args) + }) + cmd.AddCommand( + newDeployCommand(dockerCli, &opts), + 
newListCommand(dockerCli, &opts), + newPsCommand(dockerCli, &opts), + newRemoveCommand(dockerCli, &opts), + newServicesCommand(dockerCli, &opts), + ) + flags := cmd.PersistentFlags() + flags.String("kubeconfig", "", "Kubernetes config file") + flags.SetAnnotation("kubeconfig", "kubernetes", nil) + flags.String("orchestrator", "", "Orchestrator to use (swarm|kubernetes|all)") + return cmd +} + +// NewTopLevelDeployCommand returns a command for `docker deploy` +func NewTopLevelDeployCommand(dockerCli command.Cli) *cobra.Command { + cmd := newDeployCommand(dockerCli, nil) + // Remove the aliases at the top level + cmd.Aliases = []string{} + cmd.Annotations = map[string]string{ + "experimental": "", + "version": "1.25", + } + return cmd +} + +func getOrchestrator(dockerCli command.Cli, cmd *cobra.Command) (command.Orchestrator, error) { + var orchestratorFlag string + if o, err := cmd.Flags().GetString("orchestrator"); err == nil { + orchestratorFlag = o + } + return dockerCli.StackOrchestrator(orchestratorFlag) +} + +func hideOrchestrationFlags(cmd *cobra.Command, orchestrator command.Orchestrator) { + cmd.Flags().VisitAll(func(f *pflag.Flag) { + if _, ok := f.Annotations["kubernetes"]; ok && !orchestrator.HasKubernetes() { + f.Hidden = true + } + if _, ok := f.Annotations["swarm"]; ok && !orchestrator.HasSwarm() { + f.Hidden = true + } + }) + for _, subcmd := range cmd.Commands() { + hideOrchestrationFlags(subcmd, orchestrator) + } +} + +func checkSupportedFlag(cmd *cobra.Command, orchestrator command.Orchestrator) error { + errs := []string{} + cmd.Flags().VisitAll(func(f *pflag.Flag) { + if !f.Changed { + return + } + if _, ok := f.Annotations["kubernetes"]; ok && !orchestrator.HasKubernetes() { + errs = append(errs, fmt.Sprintf(`"--%s" is only supported on a Docker cli with kubernetes features enabled`, f.Name)) + } + if _, ok := f.Annotations["swarm"]; ok && !orchestrator.HasSwarm() { + errs = append(errs, fmt.Sprintf(`"--%s" is only supported on a Docker cli 
with swarm features enabled`, f.Name)) + } + }) + for _, subcmd := range cmd.Commands() { + if err := checkSupportedFlag(subcmd, orchestrator); err != nil { + errs = append(errs, err.Error()) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/cli/cli/command/stack/common.go b/cli/cli/command/stack/common.go new file mode 100644 index 00000000..2a89cfad --- /dev/null +++ b/cli/cli/command/stack/common.go @@ -0,0 +1,50 @@ +package stack + +import ( + "fmt" + "strings" + "unicode" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/spf13/pflag" +) + +// validateStackName checks if the provided string is a valid stack name (namespace). +// It currently only does a rudimentary check if the string is empty, or consists +// of only whitespace and quoting characters. +func validateStackName(namespace string) error { + v := strings.TrimFunc(namespace, quotesOrWhitespace) + if v == "" { + return fmt.Errorf("invalid stack name: %q", namespace) + } + return nil +} + +func validateStackNames(namespaces []string) error { + for _, ns := range namespaces { + if err := validateStackName(ns); err != nil { + return err + } + } + return nil +} + +func quotesOrWhitespace(r rune) bool { + return unicode.IsSpace(r) || r == '"' || r == '\'' +} + +func runOrchestratedCommand(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, swarmCmd func() error, kubernetesCmd func(*kubernetes.KubeCli) error) error { + switch { + case commonOrchestrator.HasAll(): + return errUnsupportedAllOrchestrator + case commonOrchestrator.HasKubernetes(): + kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(flags, commonOrchestrator)) + if err != nil { + return err + } + return kubernetesCmd(kli) + default: + return swarmCmd() + } +} diff --git a/cli/cli/command/stack/deploy.go b/cli/cli/command/stack/deploy.go new file mode 100644 index 00000000..80075727 
--- /dev/null +++ b/cli/cli/command/stack/deploy.go @@ -0,0 +1,81 @@ +package stack + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/loader" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newDeployCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + var opts options.Deploy + + cmd := &cobra.Command{ + Use: "deploy [OPTIONS] STACK", + Aliases: []string{"up"}, + Short: "Deploy a new stack or update an existing stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + + commonOrchestrator := command.OrchestratorSwarm // default for top-level deploy command + if common != nil { + commonOrchestrator = common.orchestrator + } + + switch { + case opts.Bundlefile == "" && len(opts.Composefiles) == 0: + return errors.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") + case opts.Bundlefile != "" && len(opts.Composefiles) != 0: + return errors.Errorf("You cannot specify both a bundle file and a Compose file.") + case opts.Bundlefile != "": + if commonOrchestrator != command.OrchestratorSwarm { + return errors.Errorf("bundle files are not supported on another orchestrator than swarm.") + } + return swarm.DeployBundle(context.Background(), dockerCli, opts) + } + + config, err := loader.LoadComposefile(dockerCli, opts) + if err != nil { + return err + } + return RunDeploy(dockerCli, cmd.Flags(), config, common.Orchestrator(), opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.Bundlefile, "bundle-file", "", "Path to 
a Distributed Application Bundle file") + flags.SetAnnotation("bundle-file", "experimental", nil) + flags.SetAnnotation("bundle-file", "swarm", nil) + flags.StringSliceVarP(&opts.Composefiles, "compose-file", "c", []string{}, `Path to a Compose file, or "-" to read from stdin`) + flags.SetAnnotation("compose-file", "version", []string{"1.25"}) + flags.BoolVar(&opts.SendRegistryAuth, "with-registry-auth", false, "Send registry authentication details to Swarm agents") + flags.SetAnnotation("with-registry-auth", "swarm", nil) + flags.BoolVar(&opts.Prune, "prune", false, "Prune services that are no longer referenced") + flags.SetAnnotation("prune", "version", []string{"1.27"}) + flags.SetAnnotation("prune", "swarm", nil) + flags.StringVar(&opts.ResolveImage, "resolve-image", swarm.ResolveImageAlways, + `Query the registry to resolve image digest and supported platforms ("`+swarm.ResolveImageAlways+`"|"`+swarm.ResolveImageChanged+`"|"`+swarm.ResolveImageNever+`")`) + flags.SetAnnotation("resolve-image", "version", []string{"1.30"}) + flags.SetAnnotation("resolve-image", "swarm", nil) + kubernetes.AddNamespaceFlag(flags) + return cmd +} + +// RunDeploy performs a stack deploy against the specified orchestrator +func RunDeploy(dockerCli command.Cli, flags *pflag.FlagSet, config *composetypes.Config, commonOrchestrator command.Orchestrator, opts options.Deploy) error { + return runOrchestratedCommand(dockerCli, flags, commonOrchestrator, + func() error { return swarm.RunDeploy(dockerCli, opts, config) }, + func(kli *kubernetes.KubeCli) error { return kubernetes.RunDeploy(kli, opts, config) }) +} diff --git a/cli/cli/command/stack/deploy_test.go b/cli/cli/command/stack/deploy_test.go new file mode 100644 index 00000000..89dbc6e1 --- /dev/null +++ b/cli/cli/command/stack/deploy_test.go @@ -0,0 +1,17 @@ +package stack + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" +) + +func TestDeployWithEmptyName(t *testing.T) { + cmd 
:= newDeployCommand(test.NewFakeCli(&fakeClient{}), nil) + cmd.SetArgs([]string{"' '"}) + cmd.SetOutput(ioutil.Discard) + + assert.ErrorContains(t, cmd.Execute(), `invalid stack name: "' '"`) +} diff --git a/cli/cli/command/stack/formatter/formatter.go b/cli/cli/command/stack/formatter/formatter.go new file mode 100644 index 00000000..7f1cb82d --- /dev/null +++ b/cli/cli/command/stack/formatter/formatter.go @@ -0,0 +1,88 @@ +package formatter + +import ( + "strconv" + + "github.com/docker/cli/cli/command/formatter" +) + +const ( + // KubernetesStackTableFormat is the default Kubernetes stack format + KubernetesStackTableFormat = "table {{.Name}}\t{{.Services}}\t{{.Orchestrator}}\t{{.Namespace}}" + // SwarmStackTableFormat is the default Swarm stack format + SwarmStackTableFormat = "table {{.Name}}\t{{.Services}}\t{{.Orchestrator}}" + + stackServicesHeader = "SERVICES" + stackOrchestrastorHeader = "ORCHESTRATOR" + stackNamespaceHeader = "NAMESPACE" + + // TableFormatKey is an alias for formatter.TableFormatKey + TableFormatKey = formatter.TableFormatKey +) + +// Context is an alias for formatter.Context +type Context = formatter.Context + +// Format is an alias for formatter.Format +type Format = formatter.Format + +// Stack contains deployed stack information. 
+type Stack struct { + // Name is the name of the stack + Name string + // Services is the number of the services + Services int + // Orchestrator is the platform where the stack is deployed + Orchestrator string + // Namespace is the Kubernetes namespace assigned to the stack + Namespace string +} + +// StackWrite writes formatted stacks using the Context +func StackWrite(ctx formatter.Context, stacks []*Stack) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, stack := range stacks { + if err := format(&stackContext{s: stack}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newStackContext(), render) +} + +type stackContext struct { + formatter.HeaderContext + s *Stack +} + +func newStackContext() *stackContext { + stackCtx := stackContext{} + stackCtx.Header = formatter.SubHeaderContext{ + "Name": formatter.NameHeader, + "Services": stackServicesHeader, + "Orchestrator": stackOrchestrastorHeader, + "Namespace": stackNamespaceHeader, + } + return &stackCtx +} + +func (s *stackContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(s) +} + +func (s *stackContext) Name() string { + return s.s.Name +} + +func (s *stackContext) Services() string { + return strconv.Itoa(s.s.Services) +} + +func (s *stackContext) Orchestrator() string { + return s.s.Orchestrator +} + +func (s *stackContext) Namespace() string { + return s.s.Namespace +} diff --git a/cli/cli/command/stack/formatter/formatter_test.go b/cli/cli/command/stack/formatter/formatter_test.go new file mode 100644 index 00000000..0f2550ea --- /dev/null +++ b/cli/cli/command/stack/formatter/formatter_test.go @@ -0,0 +1,74 @@ +package formatter + +import ( + "bytes" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestStackContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + // Errors + { + 
formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + formatter.Context{Format: formatter.Format(SwarmStackTableFormat)}, + `NAME SERVICES ORCHESTRATOR +baz 2 orchestrator1 +bar 1 orchestrator2 +`, + }, + // Kubernetes table format adds Namespace column + { + formatter.Context{Format: formatter.Format(KubernetesStackTableFormat)}, + `NAME SERVICES ORCHESTRATOR NAMESPACE +baz 2 orchestrator1 namespace1 +bar 1 orchestrator2 namespace2 +`, + }, + { + formatter.Context{Format: formatter.Format("table {{.Name}}")}, + `NAME +baz +bar +`, + }, + // Custom Format + { + formatter.Context{Format: formatter.Format("{{.Name}}")}, + `baz +bar +`, + }, + } + + stacks := []*Stack{ + {Name: "baz", Services: 2, Orchestrator: "orchestrator1", Namespace: "namespace1"}, + {Name: "bar", Services: 1, Orchestrator: "orchestrator2", Namespace: "namespace2"}, + } + for _, testcase := range cases { + out := bytes.NewBufferString("") + testcase.context.Output = out + err := StackWrite(testcase.context, stacks) + if err != nil { + assert.Check(t, is.ErrorContains(err, testcase.expected)) + } else { + assert.Check(t, is.Equal(out.String(), testcase.expected)) + } + } +} diff --git a/cli/cli/command/stack/kubernetes/cli.go b/cli/cli/command/stack/kubernetes/cli.go new file mode 100644 index 00000000..8f5baf65 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/cli.go @@ -0,0 +1,144 @@ +package kubernetes + +import ( + "fmt" + "net" + "net/url" + "os" + + "github.com/docker/cli/cli/command" + kubecontext "github.com/docker/cli/cli/context/kubernetes" + kubernetes "github.com/docker/compose-on-kubernetes/api" + cliv1beta1 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta1" + "github.com/pkg/errors" + flag 
"github.com/spf13/pflag" + kubeclient "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// KubeCli holds kubernetes specifics (client, namespace) with the command.Cli +type KubeCli struct { + command.Cli + kubeConfig *restclient.Config + kubeNamespace string + clientSet *kubeclient.Clientset +} + +// Options contains resolved parameters to initialize kubernetes clients +type Options struct { + Namespace string + Config string + Orchestrator command.Orchestrator +} + +// NewOptions returns an Options initialized with command line flags +func NewOptions(flags *flag.FlagSet, orchestrator command.Orchestrator) Options { + opts := Options{ + Orchestrator: orchestrator, + } + if namespace, err := flags.GetString("namespace"); err == nil { + opts.Namespace = namespace + } + if kubeConfig, err := flags.GetString("kubeconfig"); err == nil { + opts.Config = kubeConfig + } + return opts +} + +// AddNamespaceFlag adds the namespace flag to the given flag set +func AddNamespaceFlag(flags *flag.FlagSet) { + flags.String("namespace", "", "Kubernetes namespace to use") + flags.SetAnnotation("namespace", "kubernetes", nil) +} + +// WrapCli wraps command.Cli with kubernetes specifics +func WrapCli(dockerCli command.Cli, opts Options) (*KubeCli, error) { + cli := &KubeCli{ + Cli: dockerCli, + } + var ( + clientConfig clientcmd.ClientConfig + err error + ) + if dockerCli.CurrentContext() == "" { + clientConfig = kubernetes.NewKubernetesConfig(opts.Config) + } else { + clientConfig, err = kubecontext.ConfigFromContext(dockerCli.CurrentContext(), dockerCli.ContextStore()) + } + if err != nil { + return nil, err + } + + cli.kubeNamespace = opts.Namespace + if opts.Namespace == "" { + configNamespace, _, err := clientConfig.Namespace() + switch { + case os.IsNotExist(err), os.IsPermission(err): + return nil, errors.Wrap(err, "unable to load configuration file") + case err != nil: + return nil, err + } + cli.kubeNamespace = 
configNamespace + } + + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + cli.kubeConfig = config + + clientSet, err := kubeclient.NewForConfig(config) + if err != nil { + return nil, err + } + cli.clientSet = clientSet + + if opts.Orchestrator.HasAll() { + if err := cli.checkHostsMatch(); err != nil { + return nil, err + } + } + return cli, nil +} + +func (c *KubeCli) composeClient() (*Factory, error) { + return NewFactory(c.kubeNamespace, c.kubeConfig, c.clientSet, c.ClientInfo().HasExperimental) +} + +func (c *KubeCli) checkHostsMatch() error { + daemonEndpoint, err := url.Parse(c.Client().DaemonHost()) + if err != nil { + return err + } + kubeEndpoint, err := url.Parse(c.kubeConfig.Host) + if err != nil { + return err + } + if daemonEndpoint.Hostname() == kubeEndpoint.Hostname() { + return nil + } + // The daemon can be local in Docker for Desktop, e.g. "npipe", "unix", ... + if daemonEndpoint.Scheme != "tcp" { + ips, err := net.LookupIP(kubeEndpoint.Hostname()) + if err != nil { + return err + } + for _, ip := range ips { + if ip.IsLoopback() { + return nil + } + } + } + fmt.Fprintf(c.Err(), "WARNING: Swarm and Kubernetes hosts do not match (docker host=%s, kubernetes host=%s).\n"+ + " Update $DOCKER_HOST (or pass -H), or use 'kubectl config use-context' to match.\n", daemonEndpoint.Hostname(), kubeEndpoint.Hostname()) + return nil +} + +func (c *KubeCli) stacksv1beta1() (cliv1beta1.StackInterface, error) { + raw, err := newStackV1Beta1(c.kubeConfig, c.kubeNamespace) + if err != nil { + return nil, err + } + return raw.stacks, nil +} diff --git a/cli/cli/command/stack/kubernetes/client.go b/cli/cli/command/stack/kubernetes/client.go new file mode 100644 index 00000000..fd516bb5 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/client.go @@ -0,0 +1,107 @@ +package kubernetes + +import ( + "github.com/docker/cli/kubernetes" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeclient 
"k8s.io/client-go/kubernetes" + appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + typesappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" +) + +// Factory is the kubernetes client factory +type Factory struct { + namespace string + config *restclient.Config + coreClientSet corev1.CoreV1Interface + appsClientSet appsv1beta2.AppsV1beta2Interface + clientSet *kubeclient.Clientset + experimental bool +} + +// NewFactory creates a kubernetes client factory +func NewFactory(namespace string, config *restclient.Config, clientSet *kubeclient.Clientset, experimental bool) (*Factory, error) { + coreClientSet, err := corev1.NewForConfig(config) + if err != nil { + return nil, err + } + + appsClientSet, err := appsv1beta2.NewForConfig(config) + if err != nil { + return nil, err + } + + return &Factory{ + namespace: namespace, + config: config, + coreClientSet: coreClientSet, + appsClientSet: appsClientSet, + clientSet: clientSet, + experimental: experimental, + }, nil +} + +// ConfigMaps returns a client for kubernetes's config maps +func (s *Factory) ConfigMaps() corev1.ConfigMapInterface { + return s.coreClientSet.ConfigMaps(s.namespace) +} + +// Secrets returns a client for kubernetes's secrets +func (s *Factory) Secrets() corev1.SecretInterface { + return s.coreClientSet.Secrets(s.namespace) +} + +// Services returns a client for kubernetes's secrets +func (s *Factory) Services() corev1.ServiceInterface { + return s.coreClientSet.Services(s.namespace) +} + +// Pods returns a client for kubernetes's pods +func (s *Factory) Pods() corev1.PodInterface { + return s.coreClientSet.Pods(s.namespace) +} + +// Nodes returns a client for kubernetes's nodes +func (s *Factory) Nodes() corev1.NodeInterface { + return s.coreClientSet.Nodes() +} + +// ReplicationControllers returns a client for kubernetes replication controllers +func (s *Factory) ReplicationControllers() 
corev1.ReplicationControllerInterface { + return s.coreClientSet.ReplicationControllers(s.namespace) +} + +// ReplicaSets returns a client for kubernetes replace sets +func (s *Factory) ReplicaSets() typesappsv1beta2.ReplicaSetInterface { + return s.appsClientSet.ReplicaSets(s.namespace) +} + +// DaemonSets returns a client for kubernetes daemon sets +func (s *Factory) DaemonSets() typesappsv1beta2.DaemonSetInterface { + return s.appsClientSet.DaemonSets(s.namespace) +} + +// Stacks returns a client for Docker's Stack on Kubernetes +func (s *Factory) Stacks(allNamespaces bool) (StackClient, error) { + version, err := kubernetes.GetStackAPIVersion(s.clientSet.Discovery(), s.experimental) + if err != nil { + return nil, err + } + namespace := s.namespace + if allNamespaces { + namespace = metav1.NamespaceAll + } + + switch version { + case kubernetes.StackAPIV1Beta1: + return newStackV1Beta1(s.config, namespace) + case kubernetes.StackAPIV1Beta2: + return newStackV1Beta2(s.config, namespace) + case kubernetes.StackAPIV1Alpha3: + return newStackV1Alpha3(s.config, namespace) + default: + return nil, errors.Errorf("unsupported stack API version: %q", version) + } +} diff --git a/cli/cli/command/stack/kubernetes/conversion.go b/cli/cli/command/stack/kubernetes/conversion.go new file mode 100644 index 00000000..e1fdec71 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/conversion.go @@ -0,0 +1,240 @@ +package kubernetes + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/docker/cli/cli/command/service" + "github.com/docker/compose-on-kubernetes/api/labels" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + appsv1beta2 "k8s.io/api/apps/v1beta2" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Pod conversion +func podToTask(pod apiv1.Pod) swarm.Task { + var startTime time.Time + if pod.Status.StartTime != nil { + startTime = 
(*pod.Status.StartTime).Time + } + task := swarm.Task{ + ID: string(pod.UID), + NodeID: pod.Spec.NodeName, + Spec: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: getContainerImage(pod.Spec.Containers), + }, + }, + DesiredState: podPhaseToState(pod.Status.Phase), + Status: swarm.TaskStatus{ + State: podPhaseToState(pod.Status.Phase), + Timestamp: startTime, + PortStatus: swarm.PortStatus{ + Ports: getPorts(pod.Spec.Containers), + }, + }, + } + + return task +} + +func podPhaseToState(phase apiv1.PodPhase) swarm.TaskState { + switch phase { + case apiv1.PodPending: + return swarm.TaskStatePending + case apiv1.PodRunning: + return swarm.TaskStateRunning + case apiv1.PodSucceeded: + return swarm.TaskStateComplete + case apiv1.PodFailed: + return swarm.TaskStateFailed + default: + return swarm.TaskState("unknown") + } +} + +func toSwarmProtocol(protocol apiv1.Protocol) swarm.PortConfigProtocol { + switch protocol { + case apiv1.ProtocolTCP: + return swarm.PortConfigProtocolTCP + case apiv1.ProtocolUDP: + return swarm.PortConfigProtocolUDP + } + return swarm.PortConfigProtocol("unknown") +} + +func fetchPods(stackName string, pods corev1.PodInterface, f filters.Args) ([]apiv1.Pod, error) { + services := f.Get("service") + // for existing script compatibility, support either or _ format + stackNamePrefix := stackName + "_" + for _, s := range services { + if strings.HasPrefix(s, stackNamePrefix) { + services = append(services, strings.TrimPrefix(s, stackNamePrefix)) + } + } + listOpts := metav1.ListOptions{LabelSelector: labels.SelectorForStack(stackName, services...)} + var result []apiv1.Pod + podsList, err := pods.List(listOpts) + if err != nil { + return nil, err + } + nodes := f.Get("node") + for _, pod := range podsList.Items { + if filterPod(pod, nodes) && + // name filter is done client side for matching partials + f.FuzzyMatch("name", stackNamePrefix+pod.Name) { + + result = append(result, pod) + } + } + return result, nil +} + +func 
filterPod(pod apiv1.Pod, nodes []string) bool { + if len(nodes) == 0 { + return true + } + for _, name := range nodes { + if pod.Spec.NodeName == name { + return true + } + } + return false +} + +func getContainerImage(containers []apiv1.Container) string { + if len(containers) == 0 { + return "" + } + return containers[0].Image +} + +func getPorts(containers []apiv1.Container) []swarm.PortConfig { + if len(containers) == 0 || len(containers[0].Ports) == 0 { + return nil + } + ports := make([]swarm.PortConfig, len(containers[0].Ports)) + for i, port := range containers[0].Ports { + ports[i] = swarm.PortConfig{ + PublishedPort: uint32(port.HostPort), + TargetPort: uint32(port.ContainerPort), + Protocol: toSwarmProtocol(port.Protocol), + } + } + return ports +} + +type tasksBySlot []swarm.Task + +func (t tasksBySlot) Len() int { + return len(t) +} + +func (t tasksBySlot) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +func (t tasksBySlot) Less(i, j int) bool { + // Sort by slot. + if t[i].Slot != t[j].Slot { + return t[i].Slot < t[j].Slot + } + + // If same slot, sort by most recent. 
+ return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) +} + +const ( + publishedServiceSuffix = "-published" + publishedOnRandomPortSuffix = "-random-ports" +) + +func convertToServices(replicas *appsv1beta2.ReplicaSetList, daemons *appsv1beta2.DaemonSetList, services *apiv1.ServiceList) ([]swarm.Service, map[string]service.ListInfo, error) { + result := make([]swarm.Service, len(replicas.Items)) + infos := make(map[string]service.ListInfo, len(replicas.Items)+len(daemons.Items)) + for i, r := range replicas.Items { + s, err := convertToService(r.Labels[labels.ForServiceName], services, r.Spec.Template.Spec.Containers) + if err != nil { + return nil, nil, err + } + result[i] = *s + infos[s.ID] = service.ListInfo{ + Mode: "replicated", + Replicas: fmt.Sprintf("%d/%d", r.Status.AvailableReplicas, r.Status.Replicas), + } + } + for _, d := range daemons.Items { + s, err := convertToService(d.Labels[labels.ForServiceName], services, d.Spec.Template.Spec.Containers) + if err != nil { + return nil, nil, err + } + result = append(result, *s) + infos[s.ID] = service.ListInfo{ + Mode: "global", + Replicas: fmt.Sprintf("%d/%d", d.Status.NumberReady, d.Status.DesiredNumberScheduled), + } + } + sort.Slice(result, func(i, j int) bool { + return result[i].ID < result[j].ID + }) + return result, infos, nil +} + +func convertToService(serviceName string, services *apiv1.ServiceList, containers []apiv1.Container) (*swarm.Service, error) { + serviceHeadless, err := findService(services, serviceName) + if err != nil { + return nil, err + } + stack, ok := serviceHeadless.Labels[labels.ForStackName] + if ok { + stack += "_" + } + uid := string(serviceHeadless.UID) + s := &swarm.Service{ + ID: uid, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: stack + serviceHeadless.Name, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: getContainerImage(containers), + }, + }, + }, + } + if serviceNodePort, err := findService(services, 
serviceName+publishedOnRandomPortSuffix); err == nil && serviceNodePort.Spec.Type == apiv1.ServiceTypeNodePort { + s.Endpoint = serviceEndpoint(serviceNodePort, swarm.PortConfigPublishModeHost) + } + if serviceLoadBalancer, err := findService(services, serviceName+publishedServiceSuffix); err == nil && serviceLoadBalancer.Spec.Type == apiv1.ServiceTypeLoadBalancer { + s.Endpoint = serviceEndpoint(serviceLoadBalancer, swarm.PortConfigPublishModeIngress) + } + return s, nil +} + +func findService(services *apiv1.ServiceList, name string) (apiv1.Service, error) { + for _, s := range services.Items { + if s.Name == name { + return s, nil + } + } + return apiv1.Service{}, fmt.Errorf("could not find service '%s'", name) +} + +func serviceEndpoint(service apiv1.Service, publishMode swarm.PortConfigPublishMode) swarm.Endpoint { + configs := make([]swarm.PortConfig, len(service.Spec.Ports)) + for i, p := range service.Spec.Ports { + configs[i] = swarm.PortConfig{ + PublishMode: publishMode, + PublishedPort: uint32(p.Port), + TargetPort: uint32(p.TargetPort.IntValue()), + Protocol: toSwarmProtocol(p.Protocol), + } + } + return swarm.Endpoint{Ports: configs} +} diff --git a/cli/cli/command/stack/kubernetes/conversion_test.go b/cli/cli/command/stack/kubernetes/conversion_test.go new file mode 100644 index 00000000..3a5bd962 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/conversion_test.go @@ -0,0 +1,192 @@ +package kubernetes + +import ( + "testing" + + "github.com/docker/cli/cli/command/service" + "github.com/docker/compose-on-kubernetes/api/labels" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + appsv1beta2 "k8s.io/api/apps/v1beta2" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachineryTypes "k8s.io/apimachinery/pkg/types" + apimachineryUtil "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestReplicasConversionNeedsAService(t *testing.T) { + replicas := appsv1beta2.ReplicaSetList{ + Items: 
[]appsv1beta2.ReplicaSet{makeReplicaSet("unknown", 0, 0)}, + } + services := apiv1.ServiceList{} + _, _, err := convertToServices(&replicas, &appsv1beta2.DaemonSetList{}, &services) + assert.ErrorContains(t, err, "could not find service") +} + +func TestKubernetesServiceToSwarmServiceConversion(t *testing.T) { + testCases := []struct { + replicas *appsv1beta2.ReplicaSetList + services *apiv1.ServiceList + expectedServices []swarm.Service + expectedListInfo map[string]service.ListInfo + }{ + // Match replicas with headless stack services + { + &appsv1beta2.ReplicaSetList{ + Items: []appsv1beta2.ReplicaSet{ + makeReplicaSet("service1", 2, 5), + makeReplicaSet("service2", 3, 3), + }, + }, + &apiv1.ServiceList{ + Items: []apiv1.Service{ + makeKubeService("service1", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil), + makeKubeService("service2", "stack", "uid2", apiv1.ServiceTypeClusterIP, nil), + makeKubeService("service3", "other-stack", "uid2", apiv1.ServiceTypeClusterIP, nil), + }, + }, + []swarm.Service{ + makeSwarmService("stack_service1", "uid1", nil), + makeSwarmService("stack_service2", "uid2", nil), + }, + map[string]service.ListInfo{ + "uid1": {Mode: "replicated", Replicas: "2/5"}, + "uid2": {Mode: "replicated", Replicas: "3/3"}, + }, + }, + // Headless service and LoadBalancer Service are tied to the same Swarm service + { + &appsv1beta2.ReplicaSetList{ + Items: []appsv1beta2.ReplicaSet{ + makeReplicaSet("service", 1, 1), + }, + }, + &apiv1.ServiceList{ + Items: []apiv1.Service{ + makeKubeService("service", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil), + makeKubeService("service-published", "stack", "uid2", apiv1.ServiceTypeLoadBalancer, []apiv1.ServicePort{ + { + Port: 80, + TargetPort: apimachineryUtil.FromInt(80), + Protocol: apiv1.ProtocolTCP, + }, + }), + }, + }, + []swarm.Service{ + makeSwarmService("stack_service", "uid1", []swarm.PortConfig{ + { + PublishMode: swarm.PortConfigPublishModeIngress, + PublishedPort: 80, + TargetPort: 80, + 
Protocol: swarm.PortConfigProtocolTCP, + }, + }), + }, + map[string]service.ListInfo{ + "uid1": {Mode: "replicated", Replicas: "1/1"}, + }, + }, + // Headless service and NodePort Service are tied to the same Swarm service + + { + &appsv1beta2.ReplicaSetList{ + Items: []appsv1beta2.ReplicaSet{ + makeReplicaSet("service", 1, 1), + }, + }, + &apiv1.ServiceList{ + Items: []apiv1.Service{ + makeKubeService("service", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil), + makeKubeService("service-random-ports", "stack", "uid2", apiv1.ServiceTypeNodePort, []apiv1.ServicePort{ + { + Port: 35666, + TargetPort: apimachineryUtil.FromInt(80), + Protocol: apiv1.ProtocolTCP, + }, + }), + }, + }, + []swarm.Service{ + makeSwarmService("stack_service", "uid1", []swarm.PortConfig{ + { + PublishMode: swarm.PortConfigPublishModeHost, + PublishedPort: 35666, + TargetPort: 80, + Protocol: swarm.PortConfigProtocolTCP, + }, + }), + }, + map[string]service.ListInfo{ + "uid1": {Mode: "replicated", Replicas: "1/1"}, + }, + }, + } + + for _, tc := range testCases { + swarmServices, listInfo, err := convertToServices(tc.replicas, &appsv1beta2.DaemonSetList{}, tc.services) + assert.NilError(t, err) + assert.DeepEqual(t, tc.expectedServices, swarmServices) + assert.DeepEqual(t, tc.expectedListInfo, listInfo) + } +} + +func makeReplicaSet(service string, available, replicas int32) appsv1beta2.ReplicaSet { + return appsv1beta2.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + labels.ForServiceName: service, + }, + }, + Spec: appsv1beta2.ReplicaSetSpec{ + Template: apiv1.PodTemplateSpec{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Image: "image", + }, + }, + }, + }, + }, + Status: appsv1beta2.ReplicaSetStatus{ + AvailableReplicas: available, + Replicas: replicas, + }, + } +} + +func makeKubeService(service, stack, uid string, serviceType apiv1.ServiceType, ports []apiv1.ServicePort) apiv1.Service { + return apiv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + 
Labels: map[string]string{ + labels.ForStackName: stack, + }, + Name: service, + UID: apimachineryTypes.UID(uid), + }, + Spec: apiv1.ServiceSpec{ + Type: serviceType, + Ports: ports, + }, + } +} + +func makeSwarmService(service, id string, ports []swarm.PortConfig) swarm.Service { + return swarm.Service{ + ID: id, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: service, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "image", + }, + }, + }, + Endpoint: swarm.Endpoint{ + Ports: ports, + }, + } +} diff --git a/cli/cli/command/stack/kubernetes/convert.go b/cli/cli/command/stack/kubernetes/convert.go new file mode 100644 index 00000000..e42705cb --- /dev/null +++ b/cli/cli/command/stack/kubernetes/convert.go @@ -0,0 +1,567 @@ +package kubernetes + +import ( + "io" + "io/ioutil" + "regexp" + "strconv" + "strings" + + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/cli/compose/schema" + composeTypes "github.com/docker/cli/cli/compose/types" + composetypes "github.com/docker/cli/cli/compose/types" + latest "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3" + "github.com/docker/compose-on-kubernetes/api/compose/v1beta1" + "github.com/docker/compose-on-kubernetes/api/compose/v1beta2" + "github.com/docker/go-connections/nat" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // kubernatesExtraField is an extra field on ServiceConfigs containing kubernetes-specific extensions to compose format + kubernatesExtraField = "x-kubernetes" +) + +// NewStackConverter returns a converter from types.Config (compose) to the specified +// stack version or error out if the version is not supported or existent. 
+func NewStackConverter(version string) (StackConverter, error) { + switch version { + case "v1beta1": + return stackV1Beta1Converter{}, nil + case "v1beta2": + return stackV1Beta2Converter{}, nil + case "v1alpha3": + return stackV1Alpha3Converter{}, nil + default: + return nil, errors.Errorf("stack version %s unsupported", version) + } +} + +// StackConverter converts a compose types.Config to a Stack +type StackConverter interface { + FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) +} + +type stackV1Beta1Converter struct{} + +func (s stackV1Beta1Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) { + cfg.Version = v1beta1.MaxComposeVersion + st, err := fromCompose(stderr, name, cfg, v1beta1Capabilities) + if err != nil { + return Stack{}, err + } + res, err := yaml.Marshal(cfg) + if err != nil { + return Stack{}, err + } + // reload the result to check that it produced a valid 3.5 compose file + resparsedConfig, err := loader.ParseYAML(res) + if err != nil { + return Stack{}, err + } + if err = schema.Validate(resparsedConfig, v1beta1.MaxComposeVersion); err != nil { + return Stack{}, errors.Wrapf(err, "the compose yaml file is invalid with v%s", v1beta1.MaxComposeVersion) + } + + st.ComposeFile = string(res) + return st, nil +} + +type stackV1Beta2Converter struct{} + +func (s stackV1Beta2Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) { + return fromCompose(stderr, name, cfg, v1beta2Capabilities) +} + +type stackV1Alpha3Converter struct{} + +func (s stackV1Alpha3Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) { + return fromCompose(stderr, name, cfg, v1alpha3Capabilities) +} + +func fromCompose(stderr io.Writer, name string, cfg *composetypes.Config, capabilities composeCapabilities) (Stack, error) { + spec, err := fromComposeConfig(stderr, cfg, capabilities) + if err != nil { + return 
Stack{}, err + } + return Stack{ + Name: name, + Spec: spec, + }, nil +} + +func loadStackData(composefile string) (*composetypes.Config, error) { + parsed, err := loader.ParseYAML([]byte(composefile)) + if err != nil { + return nil, err + } + return loader.Load(composetypes.ConfigDetails{ + ConfigFiles: []composetypes.ConfigFile{ + { + Config: parsed, + }, + }, + }) +} + +// Conversions from internal stack to different stack compose component versions. +func stackFromV1beta1(in *v1beta1.Stack) (Stack, error) { + cfg, err := loadStackData(in.Spec.ComposeFile) + if err != nil { + return Stack{}, err + } + spec, err := fromComposeConfig(ioutil.Discard, cfg, v1beta1Capabilities) + if err != nil { + return Stack{}, err + } + return Stack{ + Name: in.ObjectMeta.Name, + Namespace: in.ObjectMeta.Namespace, + ComposeFile: in.Spec.ComposeFile, + Spec: spec, + }, nil +} + +func stackToV1beta1(s Stack) *v1beta1.Stack { + return &v1beta1.Stack{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name, + }, + Spec: v1beta1.StackSpec{ + ComposeFile: s.ComposeFile, + }, + } +} + +func stackFromV1beta2(in *v1beta2.Stack) (Stack, error) { + var spec *latest.StackSpec + if in.Spec != nil { + spec = &latest.StackSpec{} + if err := latest.Convert_v1beta2_StackSpec_To_v1alpha3_StackSpec(in.Spec, spec, nil); err != nil { + return Stack{}, err + } + } + return Stack{ + Name: in.ObjectMeta.Name, + Namespace: in.ObjectMeta.Namespace, + Spec: spec, + }, nil +} + +func stackToV1beta2(s Stack) (*v1beta2.Stack, error) { + var spec *v1beta2.StackSpec + if s.Spec != nil { + spec = &v1beta2.StackSpec{} + if err := latest.Convert_v1alpha3_StackSpec_To_v1beta2_StackSpec(s.Spec, spec, nil); err != nil { + return nil, err + } + } + return &v1beta2.Stack{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name, + }, + Spec: spec, + }, nil +} + +func stackFromV1alpha3(in *latest.Stack) Stack { + return Stack{ + Name: in.ObjectMeta.Name, + Namespace: in.ObjectMeta.Namespace, + Spec: in.Spec, + } +} + +func 
stackToV1alpha3(s Stack) *latest.Stack { + return &latest.Stack{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name, + }, + Spec: s.Spec, + } +} + +func fromComposeConfig(stderr io.Writer, c *composeTypes.Config, capabilities composeCapabilities) (*latest.StackSpec, error) { + if c == nil { + return nil, nil + } + warnUnsupportedFeatures(stderr, c) + serviceConfigs := make([]latest.ServiceConfig, len(c.Services)) + for i, s := range c.Services { + svc, err := fromComposeServiceConfig(s, capabilities) + if err != nil { + return nil, err + } + serviceConfigs[i] = svc + } + return &latest.StackSpec{ + Services: serviceConfigs, + Secrets: fromComposeSecrets(c.Secrets), + Configs: fromComposeConfigs(c.Configs), + }, nil +} + +func fromComposeSecrets(s map[string]composeTypes.SecretConfig) map[string]latest.SecretConfig { + if s == nil { + return nil + } + m := map[string]latest.SecretConfig{} + for key, value := range s { + m[key] = latest.SecretConfig{ + Name: value.Name, + File: value.File, + External: latest.External{ + Name: value.External.Name, + External: value.External.External, + }, + Labels: value.Labels, + } + } + return m +} + +func fromComposeConfigs(s map[string]composeTypes.ConfigObjConfig) map[string]latest.ConfigObjConfig { + if s == nil { + return nil + } + m := map[string]latest.ConfigObjConfig{} + for key, value := range s { + m[key] = latest.ConfigObjConfig{ + Name: value.Name, + File: value.File, + External: latest.External{ + Name: value.External.Name, + External: value.External.External, + }, + Labels: value.Labels, + } + } + return m +} + +func fromComposeServiceConfig(s composeTypes.ServiceConfig, capabilities composeCapabilities) (latest.ServiceConfig, error) { + var ( + userID *int64 + err error + ) + if s.User != "" { + numerical, err := strconv.Atoi(s.User) + if err == nil { + unixUserID := int64(numerical) + userID = &unixUserID + } + } + kubeExtra, err := resolveServiceExtra(s) + if err != nil { + return latest.ServiceConfig{}, err + } + if 
kubeExtra.PullSecret != "" && !capabilities.hasPullSecrets { + return latest.ServiceConfig{}, errors.Errorf(`stack API version %s does not support pull secrets (field "x-kubernetes.pull_secret"), please use version v1alpha3 or higher`, capabilities.apiVersion) + } + if kubeExtra.PullPolicy != "" && !capabilities.hasPullPolicies { + return latest.ServiceConfig{}, errors.Errorf(`stack API version %s does not support pull policies (field "x-kubernetes.pull_policy"), please use version v1alpha3 or higher`, capabilities.apiVersion) + } + + internalPorts, err := setupIntraStackNetworking(s, kubeExtra, capabilities) + if err != nil { + return latest.ServiceConfig{}, err + } + + return latest.ServiceConfig{ + Name: s.Name, + CapAdd: s.CapAdd, + CapDrop: s.CapDrop, + Command: s.Command, + Configs: fromComposeServiceConfigs(s.Configs), + Deploy: latest.DeployConfig{ + Mode: s.Deploy.Mode, + Replicas: s.Deploy.Replicas, + Labels: s.Deploy.Labels, + UpdateConfig: fromComposeUpdateConfig(s.Deploy.UpdateConfig), + Resources: fromComposeResources(s.Deploy.Resources), + RestartPolicy: fromComposeRestartPolicy(s.Deploy.RestartPolicy), + Placement: fromComposePlacement(s.Deploy.Placement), + }, + Entrypoint: s.Entrypoint, + Environment: s.Environment, + ExtraHosts: s.ExtraHosts, + Hostname: s.Hostname, + HealthCheck: fromComposeHealthcheck(s.HealthCheck), + Image: s.Image, + Ipc: s.Ipc, + Labels: s.Labels, + Pid: s.Pid, + Ports: fromComposePorts(s.Ports), + Privileged: s.Privileged, + ReadOnly: s.ReadOnly, + Secrets: fromComposeServiceSecrets(s.Secrets), + StdinOpen: s.StdinOpen, + StopGracePeriod: composetypes.ConvertDurationPtr(s.StopGracePeriod), + Tmpfs: s.Tmpfs, + Tty: s.Tty, + User: userID, + Volumes: fromComposeServiceVolumeConfig(s.Volumes), + WorkingDir: s.WorkingDir, + PullSecret: kubeExtra.PullSecret, + PullPolicy: kubeExtra.PullPolicy, + InternalServiceType: kubeExtra.InternalServiceType, + InternalPorts: internalPorts, + }, nil +} + +func setupIntraStackNetworking(s 
composeTypes.ServiceConfig, kubeExtra kubernetesExtra, capabilities composeCapabilities) ([]latest.InternalPort, error) { + if kubeExtra.InternalServiceType != latest.InternalServiceTypeAuto && !capabilities.hasIntraStackLoadBalancing { + return nil, + errors.Errorf(`stack API version %s does not support intra-stack load balancing (field "x-kubernetes.internal_service_type"), please use version v1alpha3 or higher`, + capabilities.apiVersion) + } + if !capabilities.hasIntraStackLoadBalancing { + return nil, nil + } + if err := validateInternalServiceType(kubeExtra.InternalServiceType); err != nil { + return nil, err + } + internalPorts, err := toInternalPorts(s.Expose) + if err != nil { + return nil, err + } + return internalPorts, nil +} + +func validateInternalServiceType(internalServiceType latest.InternalServiceType) error { + switch internalServiceType { + case latest.InternalServiceTypeAuto, latest.InternalServiceTypeClusterIP, latest.InternalServiceTypeHeadless: + default: + return errors.Errorf(`invalid value %q for field "x-kubernetes.internal_service_type", valid values are %q or %q`, internalServiceType, + latest.InternalServiceTypeClusterIP, + latest.InternalServiceTypeHeadless) + } + return nil +} + +func toInternalPorts(expose []string) ([]latest.InternalPort, error) { + var internalPorts []latest.InternalPort + for _, sourcePort := range expose { + proto, port := nat.SplitProtoPort(sourcePort) + start, end, err := nat.ParsePortRange(port) + if err != nil { + return nil, errors.Errorf("invalid format for expose: %q, error: %s", sourcePort, err) + } + for i := start; i <= end; i++ { + k8sProto := v1.Protocol(strings.ToUpper(proto)) + switch k8sProto { + case v1.ProtocolSCTP, v1.ProtocolTCP, v1.ProtocolUDP: + default: + return nil, errors.Errorf("invalid protocol for expose: %q, supported values are %q, %q and %q", sourcePort, v1.ProtocolSCTP, v1.ProtocolTCP, v1.ProtocolUDP) + } + internalPorts = append(internalPorts, latest.InternalPort{ + Port: 
int32(i), + Protocol: k8sProto, + }) + } + } + return internalPorts, nil +} + +func resolveServiceExtra(s composeTypes.ServiceConfig) (kubernetesExtra, error) { + if iface, ok := s.Extras[kubernatesExtraField]; ok { + var result kubernetesExtra + if err := mapstructure.Decode(iface, &result); err != nil { + return kubernetesExtra{}, err + } + return result, nil + } + return kubernetesExtra{}, nil +} + +func fromComposePorts(ports []composeTypes.ServicePortConfig) []latest.ServicePortConfig { + if ports == nil { + return nil + } + p := make([]latest.ServicePortConfig, len(ports)) + for i, port := range ports { + p[i] = latest.ServicePortConfig{ + Mode: port.Mode, + Target: port.Target, + Published: port.Published, + Protocol: port.Protocol, + } + } + return p +} + +func fromComposeServiceSecrets(secrets []composeTypes.ServiceSecretConfig) []latest.ServiceSecretConfig { + if secrets == nil { + return nil + } + c := make([]latest.ServiceSecretConfig, len(secrets)) + for i, secret := range secrets { + c[i] = latest.ServiceSecretConfig{ + Source: secret.Source, + Target: secret.Target, + UID: secret.UID, + Mode: secret.Mode, + } + } + return c +} + +func fromComposeServiceConfigs(configs []composeTypes.ServiceConfigObjConfig) []latest.ServiceConfigObjConfig { + if configs == nil { + return nil + } + c := make([]latest.ServiceConfigObjConfig, len(configs)) + for i, config := range configs { + c[i] = latest.ServiceConfigObjConfig{ + Source: config.Source, + Target: config.Target, + UID: config.UID, + Mode: config.Mode, + } + } + return c +} + +func fromComposeHealthcheck(h *composeTypes.HealthCheckConfig) *latest.HealthCheckConfig { + if h == nil { + return nil + } + return &latest.HealthCheckConfig{ + Test: h.Test, + Timeout: composetypes.ConvertDurationPtr(h.Timeout), + Interval: composetypes.ConvertDurationPtr(h.Interval), + Retries: h.Retries, + } +} + +func fromComposePlacement(p composeTypes.Placement) latest.Placement { + return latest.Placement{ + Constraints: 
fromComposeConstraints(p.Constraints), + } +} + +var constraintEquals = regexp.MustCompile(`([\w\.]*)\W*(==|!=)\W*([\w\.]*)`) + +const ( + swarmOs = "node.platform.os" + swarmArch = "node.platform.arch" + swarmHostname = "node.hostname" + swarmLabelPrefix = "node.labels." +) + +func fromComposeConstraints(s []string) *latest.Constraints { + if len(s) == 0 { + return nil + } + constraints := &latest.Constraints{} + for _, constraint := range s { + matches := constraintEquals.FindStringSubmatch(constraint) + if len(matches) == 4 { + key := matches[1] + operator := matches[2] + value := matches[3] + constraint := &latest.Constraint{ + Operator: operator, + Value: value, + } + switch { + case key == swarmOs: + constraints.OperatingSystem = constraint + case key == swarmArch: + constraints.Architecture = constraint + case key == swarmHostname: + constraints.Hostname = constraint + case strings.HasPrefix(key, swarmLabelPrefix): + if constraints.MatchLabels == nil { + constraints.MatchLabels = map[string]latest.Constraint{} + } + constraints.MatchLabels[strings.TrimPrefix(key, swarmLabelPrefix)] = *constraint + } + } + } + return constraints +} + +func fromComposeResources(r composeTypes.Resources) latest.Resources { + return latest.Resources{ + Limits: fromComposeResourcesResource(r.Limits), + Reservations: fromComposeResourcesResource(r.Reservations), + } +} + +func fromComposeResourcesResource(r *composeTypes.Resource) *latest.Resource { + if r == nil { + return nil + } + return &latest.Resource{ + MemoryBytes: int64(r.MemoryBytes), + NanoCPUs: r.NanoCPUs, + } +} + +func fromComposeUpdateConfig(u *composeTypes.UpdateConfig) *latest.UpdateConfig { + if u == nil { + return nil + } + return &latest.UpdateConfig{ + Parallelism: u.Parallelism, + } +} + +func fromComposeRestartPolicy(r *composeTypes.RestartPolicy) *latest.RestartPolicy { + if r == nil { + return nil + } + return &latest.RestartPolicy{ + Condition: r.Condition, + } +} + +func fromComposeServiceVolumeConfig(vs 
[]composeTypes.ServiceVolumeConfig) []latest.ServiceVolumeConfig { + if vs == nil { + return nil + } + volumes := []latest.ServiceVolumeConfig{} + for _, v := range vs { + volumes = append(volumes, latest.ServiceVolumeConfig{ + Type: v.Type, + Source: v.Source, + Target: v.Target, + ReadOnly: v.ReadOnly, + }) + } + return volumes +} + +var ( + v1beta1Capabilities = composeCapabilities{ + apiVersion: "v1beta1", + } + v1beta2Capabilities = composeCapabilities{ + apiVersion: "v1beta2", + } + v1alpha3Capabilities = composeCapabilities{ + apiVersion: "v1alpha3", + hasPullSecrets: true, + hasPullPolicies: true, + hasIntraStackLoadBalancing: true, + } +) + +type composeCapabilities struct { + apiVersion string + hasPullSecrets bool + hasPullPolicies bool + hasIntraStackLoadBalancing bool +} + +type kubernetesExtra struct { + PullSecret string `mapstructure:"pull_secret"` + PullPolicy string `mapstructure:"pull_policy"` + InternalServiceType latest.InternalServiceType `mapstructure:"internal_service_type"` +} diff --git a/cli/cli/command/stack/kubernetes/convert_test.go b/cli/cli/command/stack/kubernetes/convert_test.go new file mode 100644 index 00000000..1cdf4f27 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/convert_test.go @@ -0,0 +1,346 @@ +package kubernetes + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/cli/cli/compose/loader" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3" + "github.com/docker/compose-on-kubernetes/api/compose/v1beta1" + "github.com/docker/compose-on-kubernetes/api/compose/v1beta2" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestNewStackConverter(t *testing.T) { + _, err := NewStackConverter("v1alpha1") + assert.Check(t, is.ErrorContains(err, "stack version v1alpha1 unsupported")) + + _, err = NewStackConverter("v1beta1") + 
assert.NilError(t, err) + _, err = NewStackConverter("v1beta2") + assert.NilError(t, err) + _, err = NewStackConverter("v1alpha3") + assert.NilError(t, err) +} + +func TestConvertFromToV1beta1(t *testing.T) { + composefile := `version: "3.3" +services: + test: + image: nginx +secrets: + test: + file: testdata/secret +configs: + test: + file: testdata/config +` + stackv1beta1 := &v1beta1.Stack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: v1beta1.StackSpec{ + ComposeFile: composefile, + }, + } + + result, err := stackFromV1beta1(stackv1beta1) + assert.NilError(t, err) + expected := Stack{ + Name: "test", + ComposeFile: composefile, + Spec: &v1alpha3.StackSpec{ + Services: []v1alpha3.ServiceConfig{ + { + Name: "test", + Image: "nginx", + Environment: make(map[string]*string), + }, + }, + Secrets: map[string]v1alpha3.SecretConfig{ + "test": {File: filepath.FromSlash("testdata/secret")}, + }, + Configs: map[string]v1alpha3.ConfigObjConfig{ + "test": {File: filepath.FromSlash("testdata/config")}, + }, + }, + } + assert.DeepEqual(t, expected, result) + assert.DeepEqual(t, stackv1beta1, stackToV1beta1(result)) +} + +func TestConvertFromToV1beta2(t *testing.T) { + stackv1beta2 := &v1beta2.Stack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: &v1beta2.StackSpec{ + Services: []v1beta2.ServiceConfig{ + { + Name: "test", + Image: "nginx", + Environment: make(map[string]*string), + }, + }, + Secrets: map[string]v1beta2.SecretConfig{ + "test": {File: filepath.FromSlash("testdata/secret")}, + }, + Configs: map[string]v1beta2.ConfigObjConfig{ + "test": {File: filepath.FromSlash("testdata/config")}, + }, + }, + } + expected := Stack{ + Name: "test", + Spec: &v1alpha3.StackSpec{ + Services: []v1alpha3.ServiceConfig{ + { + Name: "test", + Image: "nginx", + Environment: make(map[string]*string), + }, + }, + Secrets: map[string]v1alpha3.SecretConfig{ + "test": {File: filepath.FromSlash("testdata/secret")}, + }, + Configs: 
map[string]v1alpha3.ConfigObjConfig{ + "test": {File: filepath.FromSlash("testdata/config")}, + }, + }, + } + result, err := stackFromV1beta2(stackv1beta2) + assert.NilError(t, err) + assert.DeepEqual(t, expected, result) + gotBack, err := stackToV1beta2(result) + assert.NilError(t, err) + assert.DeepEqual(t, stackv1beta2, gotBack) +} + +func TestConvertFromToV1alpha3(t *testing.T) { + stackv1alpha3 := &v1alpha3.Stack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: &v1alpha3.StackSpec{ + Services: []v1alpha3.ServiceConfig{ + { + Name: "test", + Image: "nginx", + Environment: make(map[string]*string), + }, + }, + Secrets: map[string]v1alpha3.SecretConfig{ + "test": {File: filepath.FromSlash("testdata/secret")}, + }, + Configs: map[string]v1alpha3.ConfigObjConfig{ + "test": {File: filepath.FromSlash("testdata/config")}, + }, + }, + } + expected := Stack{ + Name: "test", + Spec: &v1alpha3.StackSpec{ + Services: []v1alpha3.ServiceConfig{ + { + Name: "test", + Image: "nginx", + Environment: make(map[string]*string), + }, + }, + Secrets: map[string]v1alpha3.SecretConfig{ + "test": {File: filepath.FromSlash("testdata/secret")}, + }, + Configs: map[string]v1alpha3.ConfigObjConfig{ + "test": {File: filepath.FromSlash("testdata/config")}, + }, + }, + } + result := stackFromV1alpha3(stackv1alpha3) + assert.DeepEqual(t, expected, result) + gotBack := stackToV1alpha3(result) + assert.DeepEqual(t, stackv1alpha3, gotBack) +} + +func loadTestStackWith(t *testing.T, with string) *composetypes.Config { + t.Helper() + filePath := fmt.Sprintf("testdata/compose-with-%s.yml", with) + data, err := ioutil.ReadFile(filePath) + assert.NilError(t, err) + yamlData, err := loader.ParseYAML(data) + assert.NilError(t, err) + cfg, err := loader.Load(composetypes.ConfigDetails{ + ConfigFiles: []composetypes.ConfigFile{ + {Config: yamlData, Filename: filePath}, + }, + }) + assert.NilError(t, err) + return cfg +} + +func TestHandlePullSecret(t *testing.T) { + testData := 
loadTestStackWith(t, "pull-secret") + cases := []struct { + version string + err string + }{ + {version: "v1beta1", err: `stack API version v1beta1 does not support pull secrets (field "x-kubernetes.pull_secret"), please use version v1alpha3 or higher`}, + {version: "v1beta2", err: `stack API version v1beta2 does not support pull secrets (field "x-kubernetes.pull_secret"), please use version v1alpha3 or higher`}, + {version: "v1alpha3"}, + } + + for _, c := range cases { + t.Run(c.version, func(t *testing.T) { + conv, err := NewStackConverter(c.version) + assert.NilError(t, err) + s, err := conv.FromCompose(ioutil.Discard, "test", testData) + if c.err != "" { + assert.Error(t, err, c.err) + + } else { + assert.NilError(t, err) + assert.Equal(t, s.Spec.Services[0].PullSecret, "some-secret") + } + }) + } +} + +func TestHandlePullPolicy(t *testing.T) { + testData := loadTestStackWith(t, "pull-policy") + cases := []struct { + version string + err string + }{ + {version: "v1beta1", err: `stack API version v1beta1 does not support pull policies (field "x-kubernetes.pull_policy"), please use version v1alpha3 or higher`}, + {version: "v1beta2", err: `stack API version v1beta2 does not support pull policies (field "x-kubernetes.pull_policy"), please use version v1alpha3 or higher`}, + {version: "v1alpha3"}, + } + + for _, c := range cases { + t.Run(c.version, func(t *testing.T) { + conv, err := NewStackConverter(c.version) + assert.NilError(t, err) + s, err := conv.FromCompose(ioutil.Discard, "test", testData) + if c.err != "" { + assert.Error(t, err, c.err) + + } else { + assert.NilError(t, err) + assert.Equal(t, s.Spec.Services[0].PullPolicy, "Never") + } + }) + } +} + +func TestHandleInternalServiceType(t *testing.T) { + cases := []struct { + name string + value string + caps composeCapabilities + err string + expected v1alpha3.InternalServiceType + }{ + { + name: "v1beta1", + value: "ClusterIP", + caps: v1beta1Capabilities, + err: `stack API version v1beta1 does not 
support intra-stack load balancing (field "x-kubernetes.internal_service_type"), please use version v1alpha3 or higher`, + }, + { + name: "v1beta2", + value: "ClusterIP", + caps: v1beta2Capabilities, + err: `stack API version v1beta2 does not support intra-stack load balancing (field "x-kubernetes.internal_service_type"), please use version v1alpha3 or higher`, + }, + { + name: "v1alpha3", + value: "ClusterIP", + caps: v1alpha3Capabilities, + expected: v1alpha3.InternalServiceTypeClusterIP, + }, + { + name: "v1alpha3-invalid", + value: "invalid", + caps: v1alpha3Capabilities, + err: `invalid value "invalid" for field "x-kubernetes.internal_service_type", valid values are "ClusterIP" or "Headless"`, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + res, err := fromComposeServiceConfig(composetypes.ServiceConfig{ + Name: "test", + Image: "test", + Extras: map[string]interface{}{ + "x-kubernetes": map[string]interface{}{ + "internal_service_type": c.value, + }, + }, + }, c.caps) + if c.err == "" { + assert.NilError(t, err) + assert.Equal(t, res.InternalServiceType, c.expected) + } else { + assert.ErrorContains(t, err, c.err) + } + }) + } +} + +func TestIgnoreExpose(t *testing.T) { + testData := loadTestStackWith(t, "expose") + for _, version := range []string{"v1beta1", "v1beta2"} { + conv, err := NewStackConverter(version) + assert.NilError(t, err) + s, err := conv.FromCompose(ioutil.Discard, "test", testData) + assert.NilError(t, err) + assert.Equal(t, len(s.Spec.Services[0].InternalPorts), 0) + } +} + +func TestParseExpose(t *testing.T) { + testData := loadTestStackWith(t, "expose") + conv, err := NewStackConverter("v1alpha3") + assert.NilError(t, err) + s, err := conv.FromCompose(ioutil.Discard, "test", testData) + assert.NilError(t, err) + expected := []v1alpha3.InternalPort{ + { + Port: 1, + Protocol: v1.ProtocolTCP, + }, + { + Port: 2, + Protocol: v1.ProtocolTCP, + }, + { + Port: 3, + Protocol: v1.ProtocolTCP, + }, + { + Port: 4, + 
Protocol: v1.ProtocolTCP, + }, + { + Port: 5, + Protocol: v1.ProtocolUDP, + }, + { + Port: 6, + Protocol: v1.ProtocolUDP, + }, + { + Port: 7, + Protocol: v1.ProtocolUDP, + }, + { + Port: 8, + Protocol: v1.ProtocolUDP, + }, + } + assert.DeepEqual(t, s.Spec.Services[0].InternalPorts, expected) +} diff --git a/cli/cli/command/stack/kubernetes/deploy.go b/cli/cli/command/stack/kubernetes/deploy.go new file mode 100644 index 00000000..84fdc638 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/deploy.go @@ -0,0 +1,171 @@ +package kubernetes + +import ( + "fmt" + "io" + + "github.com/docker/cli/cli/command/stack/options" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/cli/cli/streams" + "github.com/morikuni/aec" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// RunDeploy is the kubernetes implementation of docker stack deploy +func RunDeploy(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Config) error { + cmdOut := dockerCli.Out() + + // Initialize clients + composeClient, err := dockerCli.composeClient() + if err != nil { + return err + } + stacks, err := composeClient.Stacks(false) + if err != nil { + return err + } + + stack, err := stacks.FromCompose(dockerCli.Err(), opts.Namespace, cfg) + if err != nil { + return err + } + + configMaps := composeClient.ConfigMaps() + secrets := composeClient.Secrets() + services := composeClient.Services() + + if err := stacks.IsColliding(services, stack); err != nil { + return err + } + + if err := createResources(stack, stacks, configMaps, secrets); err != nil { + return err + } + + fmt.Fprintln(cmdOut, "Waiting for the stack to be stable and running...") + v1beta1Cli, err := dockerCli.stacksv1beta1() + if err != nil { + return err + } + + pods := composeClient.Pods() + watcher := &deployWatcher{ + stacks: v1beta1Cli, + pods: pods, + } + statusUpdates := make(chan serviceStatus) + displayDone := make(chan struct{}) + go func() { + defer close(displayDone) + display := 
newStatusDisplay(dockerCli.Out()) + for status := range statusUpdates { + display.OnStatus(status) + } + }() + + err = watcher.Watch(stack.Name, stack.getServices(), statusUpdates) + close(statusUpdates) + <-displayDone + if err != nil { + return err + } + fmt.Fprintf(cmdOut, "\nStack %s is stable and running\n\n", stack.Name) + return nil + +} + +func createResources(stack Stack, stacks StackClient, configMaps corev1.ConfigMapInterface, secrets corev1.SecretInterface) error { + var childResources []childResource + + cr, err := stack.createFileBasedConfigMaps(configMaps) + childResources = append(childResources, cr...) // make sure we collect childresources already created in case of failure + if err != nil { + deleteChildResources(childResources) + return err + } + + cr, err = stack.createFileBasedSecrets(secrets) + childResources = append(childResources, cr...) // make sure we collect childresources already created in case of failure + if err != nil { + deleteChildResources(childResources) + return err + } + + return stacks.CreateOrUpdate(stack, childResources) +} + +type statusDisplay interface { + OnStatus(serviceStatus) +} +type metaServiceState string + +const ( + metaServiceStateReady = metaServiceState("Ready") + metaServiceStatePending = metaServiceState("Pending") + metaServiceStateFailed = metaServiceState("Failed") +) + +func metaStateFromStatus(status serviceStatus) metaServiceState { + switch { + case status.podsReady > 0: + return metaServiceStateReady + case status.podsPending > 0: + return metaServiceStatePending + default: + return metaServiceStateFailed + } +} + +type forwardOnlyStatusDisplay struct { + o *streams.Out + states map[string]metaServiceState +} + +func (d *forwardOnlyStatusDisplay) OnStatus(status serviceStatus) { + state := metaStateFromStatus(status) + if d.states[status.name] != state { + d.states[status.name] = state + fmt.Fprintf(d.o, "%s: %s\n", status.name, state) + } +} + +type interactiveStatusDisplay struct { + o 
*streams.Out + statuses []serviceStatus +} + +func (d *interactiveStatusDisplay) OnStatus(status serviceStatus) { + b := aec.EmptyBuilder + for ix := 0; ix < len(d.statuses); ix++ { + b = b.Up(1).EraseLine(aec.EraseModes.All) + } + b = b.Column(0) + fmt.Fprint(d.o, b.ANSI) + updated := false + for ix, s := range d.statuses { + if s.name == status.name { + d.statuses[ix] = status + s = status + updated = true + } + displayInteractiveServiceStatus(s, d.o) + } + if !updated { + d.statuses = append(d.statuses, status) + displayInteractiveServiceStatus(status, d.o) + } +} + +func displayInteractiveServiceStatus(status serviceStatus, o io.Writer) { + state := metaStateFromStatus(status) + totalFailed := status.podsFailed + status.podsSucceeded + status.podsUnknown + fmt.Fprintf(o, "%[1]s: %[2]s\t\t[pod status: %[3]d/%[6]d ready, %[4]d/%[6]d pending, %[5]d/%[6]d failed]\n", status.name, state, + status.podsReady, status.podsPending, totalFailed, status.podsTotal) +} + +func newStatusDisplay(o *streams.Out) statusDisplay { + if !o.IsTerminal() { + return &forwardOnlyStatusDisplay{o: o, states: map[string]metaServiceState{}} + } + return &interactiveStatusDisplay{o: o} +} diff --git a/cli/cli/command/stack/kubernetes/deploy_test.go b/cli/cli/command/stack/kubernetes/deploy_test.go new file mode 100644 index 00000000..85d1a5ff --- /dev/null +++ b/cli/cli/command/stack/kubernetes/deploy_test.go @@ -0,0 +1,299 @@ +package kubernetes + +import ( + "errors" + "testing" + + composev1alpha3 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1alpha3" + composev1beta1 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta1" + composev1beta2 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta2" + "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3" + "github.com/docker/compose-on-kubernetes/api/compose/v1beta1" + "github.com/docker/compose-on-kubernetes/api/compose/v1beta2" + 
"gotest.tools/assert" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/fake" +) + +func testStack() Stack { + return Stack{ + Name: "test", + Namespace: "test", + ComposeFile: `version: "3.3" +services: + test: + image: nginx +secrets: + test: + file: testdata/secret +configs: + test: + file: testdata/config +`, + Spec: &v1alpha3.StackSpec{ + Configs: map[string]v1alpha3.ConfigObjConfig{ + "test": {Name: "test", File: "testdata/config"}, + }, + Secrets: map[string]v1alpha3.SecretConfig{ + "test": {Name: "test", File: "testdata/secret"}, + }, + }, + } +} + +func TestCreateChildResourcesV1Beta1(t *testing.T) { + k8sclientSet := fake.NewSimpleClientset() + stack := testStack() + configs := k8sclientSet.CoreV1().ConfigMaps("test") + secrets := k8sclientSet.CoreV1().Secrets("test") + assert.NilError(t, createResources( + stack, + &stackV1Beta1{stacks: &fakeV1beta1Client{}}, + configs, + secrets)) + c, err := configs.Get("test", metav1.GetOptions{}) + assert.NilError(t, err) + checkOwnerReferences(t, c.ObjectMeta, "test", v1beta1.SchemeGroupVersion.String()) + s, err := secrets.Get("test", metav1.GetOptions{}) + assert.NilError(t, err) + checkOwnerReferences(t, s.ObjectMeta, "test", v1beta1.SchemeGroupVersion.String()) +} + +func checkOwnerReferences(t *testing.T, objMeta metav1.ObjectMeta, stackName, stackVersion string) { + t.Helper() + assert.Equal(t, len(objMeta.OwnerReferences), 1) + assert.Equal(t, objMeta.OwnerReferences[0].Name, stackName) + assert.Equal(t, objMeta.OwnerReferences[0].Kind, "Stack") + assert.Equal(t, objMeta.OwnerReferences[0].APIVersion, stackVersion) +} + +func TestCreateChildResourcesV1Beta2(t *testing.T) { + k8sclientSet := fake.NewSimpleClientset() + stack := testStack() + configs := k8sclientSet.CoreV1().ConfigMaps("test") + secrets := k8sclientSet.CoreV1().Secrets("test") + 
assert.NilError(t, createResources( + stack, + &stackV1Beta2{stacks: &fakeV1beta2Client{}}, + configs, + secrets)) + c, err := configs.Get("test", metav1.GetOptions{}) + assert.NilError(t, err) + checkOwnerReferences(t, c.ObjectMeta, "test", v1beta2.SchemeGroupVersion.String()) + s, err := secrets.Get("test", metav1.GetOptions{}) + assert.NilError(t, err) + checkOwnerReferences(t, s.ObjectMeta, "test", v1beta2.SchemeGroupVersion.String()) +} + +func TestCreateChildResourcesV1Alpha3(t *testing.T) { + k8sclientSet := fake.NewSimpleClientset() + stack := testStack() + configs := k8sclientSet.CoreV1().ConfigMaps("test") + secrets := k8sclientSet.CoreV1().Secrets("test") + assert.NilError(t, createResources( + stack, + &stackV1Alpha3{stacks: &fakeV1alpha3Client{}}, + configs, + secrets)) + c, err := configs.Get("test", metav1.GetOptions{}) + assert.NilError(t, err) + checkOwnerReferences(t, c.ObjectMeta, "test", v1alpha3.SchemeGroupVersion.String()) + s, err := secrets.Get("test", metav1.GetOptions{}) + assert.NilError(t, err) + checkOwnerReferences(t, s.ObjectMeta, "test", v1alpha3.SchemeGroupVersion.String()) +} + +func TestCreateChildResourcesWithStackCreationErrorV1Beta1(t *testing.T) { + k8sclientSet := fake.NewSimpleClientset() + stack := testStack() + configs := k8sclientSet.CoreV1().ConfigMaps("test") + secrets := k8sclientSet.CoreV1().Secrets("test") + err := createResources( + stack, + &stackV1Beta1{stacks: &fakeV1beta1Client{errorOnCreate: true}}, + configs, + secrets) + assert.Error(t, err, "some error") + _, err = configs.Get("test", metav1.GetOptions{}) + assert.Check(t, kerrors.IsNotFound(err)) + _, err = secrets.Get("test", metav1.GetOptions{}) + assert.Check(t, kerrors.IsNotFound(err)) +} + +func TestCreateChildResourcesWithStackCreationErrorV1Beta2(t *testing.T) { + k8sclientSet := fake.NewSimpleClientset() + stack := testStack() + configs := k8sclientSet.CoreV1().ConfigMaps("test") + secrets := k8sclientSet.CoreV1().Secrets("test") + err := 
createResources( + stack, + &stackV1Beta2{stacks: &fakeV1beta2Client{errorOnCreate: true}}, + configs, + secrets) + assert.Error(t, err, "some error") + _, err = configs.Get("test", metav1.GetOptions{}) + assert.Check(t, kerrors.IsNotFound(err)) + _, err = secrets.Get("test", metav1.GetOptions{}) + assert.Check(t, kerrors.IsNotFound(err)) +} + +func TestCreateChildResourcesWithStackCreationErrorV1Alpha3(t *testing.T) { + k8sclientSet := fake.NewSimpleClientset() + stack := testStack() + configs := k8sclientSet.CoreV1().ConfigMaps("test") + secrets := k8sclientSet.CoreV1().Secrets("test") + err := createResources( + stack, + &stackV1Alpha3{stacks: &fakeV1alpha3Client{errorOnCreate: true}}, + configs, + secrets) + assert.Error(t, err, "some error") + _, err = configs.Get("test", metav1.GetOptions{}) + assert.Check(t, kerrors.IsNotFound(err)) + _, err = secrets.Get("test", metav1.GetOptions{}) + assert.Check(t, kerrors.IsNotFound(err)) +} + +type fakeV1beta1Client struct { + errorOnCreate bool +} + +func (c *fakeV1beta1Client) Create(s *v1beta1.Stack) (*v1beta1.Stack, error) { + if c.errorOnCreate { + return nil, errors.New("some error") + } + return s, nil +} + +func (c *fakeV1beta1Client) Update(*v1beta1.Stack) (*v1beta1.Stack, error) { + return nil, nil +} + +func (c *fakeV1beta1Client) UpdateStatus(*v1beta1.Stack) (*v1beta1.Stack, error) { + return nil, nil +} + +func (c *fakeV1beta1Client) Delete(name string, options *metav1.DeleteOptions) error { + return nil +} + +func (c *fakeV1beta1Client) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + return nil +} + +func (c *fakeV1beta1Client) Get(name string, options metav1.GetOptions) (*v1beta1.Stack, error) { + return nil, kerrors.NewNotFound(v1beta1.SchemeGroupVersion.WithResource("stacks").GroupResource(), name) +} + +func (c *fakeV1beta1Client) List(opts metav1.ListOptions) (*v1beta1.StackList, error) { + return nil, nil +} + +func (c *fakeV1beta1Client) Watch(opts 
metav1.ListOptions) (watch.Interface, error) { + return nil, nil +} + +func (c *fakeV1beta1Client) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta1.Stack, error) { + return nil, nil +} + +func (c *fakeV1beta1Client) WithSkipValidation() composev1beta1.StackInterface { + return c +} + +type fakeV1beta2Client struct { + errorOnCreate bool +} + +func (c *fakeV1beta2Client) Create(s *v1beta2.Stack) (*v1beta2.Stack, error) { + if c.errorOnCreate { + return nil, errors.New("some error") + } + return s, nil +} + +func (c *fakeV1beta2Client) Update(*v1beta2.Stack) (*v1beta2.Stack, error) { + return nil, nil +} + +func (c *fakeV1beta2Client) UpdateStatus(*v1beta2.Stack) (*v1beta2.Stack, error) { + return nil, nil +} + +func (c *fakeV1beta2Client) Delete(name string, options *metav1.DeleteOptions) error { + return nil +} + +func (c *fakeV1beta2Client) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + return nil +} + +func (c *fakeV1beta2Client) Get(name string, options metav1.GetOptions) (*v1beta2.Stack, error) { + return nil, kerrors.NewNotFound(v1beta1.SchemeGroupVersion.WithResource("stacks").GroupResource(), name) +} + +func (c *fakeV1beta2Client) List(opts metav1.ListOptions) (*v1beta2.StackList, error) { + return nil, nil +} + +func (c *fakeV1beta2Client) Watch(opts metav1.ListOptions) (watch.Interface, error) { + return nil, nil +} + +func (c *fakeV1beta2Client) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta2.Stack, error) { + return nil, nil +} + +func (c *fakeV1beta2Client) WithSkipValidation() composev1beta2.StackInterface { + return c +} + +type fakeV1alpha3Client struct { + errorOnCreate bool +} + +func (c *fakeV1alpha3Client) Create(s *v1alpha3.Stack) (*v1alpha3.Stack, error) { + if c.errorOnCreate { + return nil, errors.New("some error") + } + return s, nil +} + +func (c *fakeV1alpha3Client) Update(*v1alpha3.Stack) (*v1alpha3.Stack, error) { + 
return nil, nil +} + +func (c *fakeV1alpha3Client) UpdateStatus(*v1alpha3.Stack) (*v1alpha3.Stack, error) { + return nil, nil +} + +func (c *fakeV1alpha3Client) Delete(name string, options *metav1.DeleteOptions) error { + return nil +} + +func (c *fakeV1alpha3Client) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + return nil +} + +func (c *fakeV1alpha3Client) Get(name string, options metav1.GetOptions) (*v1alpha3.Stack, error) { + return nil, kerrors.NewNotFound(v1beta1.SchemeGroupVersion.WithResource("stacks").GroupResource(), name) +} + +func (c *fakeV1alpha3Client) List(opts metav1.ListOptions) (*v1alpha3.StackList, error) { + return nil, nil +} + +func (c *fakeV1alpha3Client) Watch(opts metav1.ListOptions) (watch.Interface, error) { + return nil, nil +} + +func (c *fakeV1alpha3Client) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1alpha3.Stack, error) { + return nil, nil +} + +func (c *fakeV1alpha3Client) WithSkipValidation() composev1alpha3.StackInterface { + return c +} diff --git a/cli/cli/command/stack/kubernetes/list.go b/cli/cli/command/stack/kubernetes/list.go new file mode 100644 index 00000000..9706f4be --- /dev/null +++ b/cli/cli/command/stack/kubernetes/list.go @@ -0,0 +1,136 @@ +package kubernetes + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/formatter" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/config/configfile" + "github.com/pkg/errors" + core_v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetStacks lists the kubernetes stacks +func GetStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) { + if opts.AllNamespaces || len(opts.Namespaces) == 0 { + if isAllNamespacesDisabled(kubeCli.ConfigFile().Kubernetes) { + 
opts.AllNamespaces = true + } + return getStacksWithAllNamespaces(kubeCli, opts) + } + return getStacksWithNamespaces(kubeCli, opts, removeDuplicates(opts.Namespaces)) +} + +func isAllNamespacesDisabled(kubeCliConfig *configfile.KubernetesConfig) bool { + return kubeCliConfig == nil || kubeCliConfig != nil && kubeCliConfig.AllNamespaces != "disabled" +} + +func getStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) { + composeClient, err := kubeCli.composeClient() + if err != nil { + return nil, err + } + stackSvc, err := composeClient.Stacks(opts.AllNamespaces) + if err != nil { + return nil, err + } + stacks, err := stackSvc.List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + var formattedStacks []*formatter.Stack + for _, stack := range stacks { + formattedStacks = append(formattedStacks, &formatter.Stack{ + Name: stack.Name, + Services: len(stack.getServices()), + Orchestrator: "Kubernetes", + Namespace: stack.Namespace, + }) + } + return formattedStacks, nil +} + +func getStacksWithAllNamespaces(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) { + stacks, err := getStacks(kubeCli, opts) + if !apierrs.IsForbidden(err) { + return stacks, err + } + namespaces, err2 := getUserVisibleNamespaces(*kubeCli) + if err2 != nil { + return nil, errors.Wrap(err2, "failed to query user visible namespaces") + } + if namespaces == nil { + // UCP API not present, fall back to Kubernetes error + return nil, err + } + opts.AllNamespaces = false + return getStacksWithNamespaces(kubeCli, opts, namespaces) +} + +func getUserVisibleNamespaces(dockerCli command.Cli) ([]string, error) { + host := dockerCli.Client().DaemonHost() + endpoint, err := url.Parse(host) + if err != nil { + return nil, err + } + endpoint.Scheme = "https" + endpoint.Path = "/kubernetesNamespaces" + resp, err := dockerCli.Client().HTTPClient().Get(endpoint.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := 
ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrapf(err, "received %d status and unable to read response", resp.StatusCode) + } + switch resp.StatusCode { + case http.StatusOK: + nms := &core_v1.NamespaceList{} + if err := json.Unmarshal(body, nms); err != nil { + return nil, errors.Wrapf(err, "unmarshal failed: %s", string(body)) + } + namespaces := make([]string, len(nms.Items)) + for i, namespace := range nms.Items { + namespaces[i] = namespace.Name + } + return namespaces, nil + case http.StatusNotFound: + // UCP API not present + return nil, nil + default: + return nil, fmt.Errorf("received %d status while retrieving namespaces: %s", resp.StatusCode, string(body)) + } +} + +func getStacksWithNamespaces(kubeCli *KubeCli, opts options.List, namespaces []string) ([]*formatter.Stack, error) { + stacks := []*formatter.Stack{} + for _, namespace := range namespaces { + kubeCli.kubeNamespace = namespace + ss, err := getStacks(kubeCli, opts) + if err != nil { + return nil, err + } + stacks = append(stacks, ss...) 
+ } + return stacks, nil +} + +func removeDuplicates(namespaces []string) []string { + found := make(map[string]bool) + results := namespaces[:0] + for _, n := range namespaces { + if !found[n] { + results = append(results, n) + found[n] = true + } + } + return results +} diff --git a/cli/cli/command/stack/kubernetes/ps.go b/cli/cli/command/stack/kubernetes/ps.go new file mode 100644 index 00000000..c8a0600d --- /dev/null +++ b/cli/cli/command/stack/kubernetes/ps.go @@ -0,0 +1,112 @@ +package kubernetes + +import ( + "fmt" + "sort" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/formatter" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/task" + "github.com/docker/docker/api/types/swarm" + apiv1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +var supportedPSFilters = map[string]bool{ + "name": true, + "service": true, + "node": true, +} + +// RunPS is the kubernetes implementation of docker stack ps +func RunPS(dockerCli *KubeCli, options options.PS) error { + filters := options.Filter.Value() + if err := filters.Validate(supportedPSFilters); err != nil { + return err + } + client, err := dockerCli.composeClient() + if err != nil { + return err + } + stacks, err := client.Stacks(false) + if err != nil { + return err + } + stackName := options.Namespace + _, err = stacks.Get(stackName) + if apierrs.IsNotFound(err) { + return fmt.Errorf("nothing found in stack: %s", stackName) + } + if err != nil { + return err + } + pods, err := fetchPods(stackName, client.Pods(), filters) + if err != nil { + return err + } + if len(pods) == 0 { + return fmt.Errorf("nothing found in stack: %s", stackName) + } + return printTasks(dockerCli, options, stackName, client, pods) +} + +func printTasks(dockerCli command.Cli, options options.PS, namespace string, client corev1.NodesGetter, 
pods []apiv1.Pod) error { + format := options.Format + if format == "" { + format = task.DefaultFormat(dockerCli.ConfigFile(), options.Quiet) + } + + tasks := make([]swarm.Task, len(pods)) + for i, pod := range pods { + tasks[i] = podToTask(pod) + } + sort.Stable(tasksBySlot(tasks)) + + names := map[string]string{} + nodes := map[string]string{} + + n, err := listNodes(client, options.NoResolve) + if err != nil { + return err + } + for i, task := range tasks { + nodeValue, err := resolveNode(pods[i].Spec.NodeName, n, options.NoResolve) + if err != nil { + return err + } + names[task.ID] = fmt.Sprintf("%s_%s", namespace, pods[i].Name) + nodes[task.ID] = nodeValue + } + + tasksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: task.NewTaskFormat(format, options.Quiet), + Trunc: !options.NoTrunc, + } + + return task.FormatWrite(tasksCtx, tasks, names, nodes) +} + +func resolveNode(name string, nodes *apiv1.NodeList, noResolve bool) (string, error) { + // Here we have a name and we need to resolve its identifier. To mimic swarm behavior + // we need to resolve to the id when noResolve is set, otherwise we return the name. 
+ if noResolve { + for _, node := range nodes.Items { + if node.Name == name { + return string(node.UID), nil + } + } + return "", fmt.Errorf("could not find node '%s'", name) + } + return name, nil +} + +func listNodes(client corev1.NodesGetter, noResolve bool) (*apiv1.NodeList, error) { + if noResolve { + return client.Nodes().List(metav1.ListOptions{}) + } + return nil, nil +} diff --git a/cli/cli/command/stack/kubernetes/remove.go b/cli/cli/command/stack/kubernetes/remove.go new file mode 100644 index 00000000..311c7597 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/remove.go @@ -0,0 +1,27 @@ +package kubernetes + +import ( + "fmt" + + "github.com/docker/cli/cli/command/stack/options" + "github.com/pkg/errors" +) + +// RunRemove is the kubernetes implementation of docker stack remove +func RunRemove(dockerCli *KubeCli, opts options.Remove) error { + composeClient, err := dockerCli.composeClient() + if err != nil { + return err + } + stacks, err := composeClient.Stacks(false) + if err != nil { + return err + } + for _, stack := range opts.Namespaces { + fmt.Fprintf(dockerCli.Out(), "Removing stack: %s\n", stack) + if err := stacks.Delete(stack); err != nil { + return errors.Wrapf(err, "Failed to remove stack %s", stack) + } + } + return nil +} diff --git a/cli/cli/command/stack/kubernetes/services.go b/cli/cli/command/stack/kubernetes/services.go new file mode 100644 index 00000000..8f91b7e2 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/services.go @@ -0,0 +1,159 @@ +package kubernetes + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli/command/service" + "github.com/docker/cli/cli/command/stack/formatter" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/compose-on-kubernetes/api/labels" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + appsv1beta2 "k8s.io/api/apps/v1beta2" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var supportedServicesFilters = map[string]bool{ + "mode": true, + "name": true, + "label": true, +} + +func generateSelector(labels map[string][]string) []string { + var result []string + for k, v := range labels { + for _, val := range v { + result = append(result, fmt.Sprintf("%s=%s", k, val)) + } + if len(v) == 0 { + result = append(result, k) + } + } + return result +} + +func parseLabelFilters(rawFilters []string) map[string][]string { + labels := map[string][]string{} + for _, rawLabel := range rawFilters { + v := strings.SplitN(rawLabel, "=", 2) + key := v[0] + if len(v) > 1 { + labels[key] = append(labels[key], v[1]) + } else if _, ok := labels[key]; !ok { + labels[key] = []string{} + } + } + return labels +} + +func generateLabelSelector(f filters.Args, stackName string) string { + selectors := append(generateSelector(parseLabelFilters(f.Get("label"))), labels.SelectorForStack(stackName)) + return strings.Join(selectors, ",") +} + +func getResourcesForServiceList(dockerCli *KubeCli, filters filters.Args, labelSelector string) (*appsv1beta2.ReplicaSetList, *appsv1beta2.DaemonSetList, *corev1.ServiceList, error) { + client, err := dockerCli.composeClient() + if err != nil { + return nil, nil, nil, err + } + modes := filters.Get("mode") + replicas := &appsv1beta2.ReplicaSetList{} + if len(modes) == 0 || filters.ExactMatch("mode", "replicated") { + if replicas, err = client.ReplicaSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil { + return nil, nil, nil, err + } + } + daemons := &appsv1beta2.DaemonSetList{} + if len(modes) == 0 || filters.ExactMatch("mode", "global") { + if daemons, err = client.DaemonSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil { + return nil, nil, nil, err + } + } + services, err := client.Services().List(metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return nil, nil, nil, err + } + return replicas, daemons, services, nil 
+} + +// RunServices is the kubernetes implementation of docker stack services +func RunServices(dockerCli *KubeCli, opts options.Services) error { + filters := opts.Filter.Value() + if err := filters.Validate(supportedServicesFilters); err != nil { + return err + } + client, err := dockerCli.composeClient() + if err != nil { + return nil + } + stacks, err := client.Stacks(false) + if err != nil { + return nil + } + stackName := opts.Namespace + _, err = stacks.Get(stackName) + if apierrs.IsNotFound(err) { + return fmt.Errorf("nothing found in stack: %s", stackName) + } + if err != nil { + return err + } + + labelSelector := generateLabelSelector(filters, stackName) + replicasList, daemonsList, servicesList, err := getResourcesForServiceList(dockerCli, filters, labelSelector) + if err != nil { + return err + } + + // Convert Replicas sets and kubernetes services to swarm services and formatter information + services, info, err := convertToServices(replicasList, daemonsList, servicesList) + if err != nil { + return err + } + services = filterServicesByName(services, filters.Get("name"), stackName) + + if opts.Quiet { + info = map[string]service.ListInfo{} + } + + format := opts.Format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !opts.Quiet { + format = dockerCli.ConfigFile().ServicesFormat + } else { + format = formatter.TableFormatKey + } + } + + servicesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: service.NewListFormat(format, opts.Quiet), + } + return service.ListFormatWrite(servicesCtx, services, info) +} + +func filterServicesByName(services []swarm.Service, names []string, stackName string) []swarm.Service { + if len(names) == 0 { + return services + } + prefix := stackName + "_" + // Accepts unprefixed service name (for compatibility with existing swarm scripts where service names are prefixed by stack names) + for i, n := range names { + if !strings.HasPrefix(n, prefix) { + names[i] = stackName + "_" + n + 
} + } + // Filter services + result := []swarm.Service{} + for _, s := range services { + for _, n := range names { + if strings.HasPrefix(s.Spec.Name, n) { + result = append(result, s) + } + } + } + return result +} diff --git a/cli/cli/command/stack/kubernetes/services_test.go b/cli/cli/command/stack/kubernetes/services_test.go new file mode 100644 index 00000000..5603eeda --- /dev/null +++ b/cli/cli/command/stack/kubernetes/services_test.go @@ -0,0 +1,138 @@ +package kubernetes + +import ( + "testing" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + "gotest.tools/assert/cmp" +) + +func TestServiceFiltersLabelSelectorGen(t *testing.T) { + cases := []struct { + name string + stackName string + filters filters.Args + expectedSelectorParts []string + }{ + { + name: "no-filter", + stackName: "test", + filters: filters.NewArgs(), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + }, + }, + { + name: "label present filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label-is-present"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label-is-present", + }, + }, + { + name: "single value label filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1=test", + }, + }, + { + name: "multi value label filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + filters.KeyValuePair{Key: "label", Value: "label1=test2"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1=test", + "label1=test2", + }, + }, + { + name: "2 different labels filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + 
filters.KeyValuePair{Key: "label", Value: "label2=test2"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1=test", + "label2=test2", + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + result := generateLabelSelector(c.filters, c.stackName) + for _, toFind := range c.expectedSelectorParts { + assert.Assert(t, cmp.Contains(result, toFind)) + } + }) + } +} +func TestServiceFiltersServiceByName(t *testing.T) { + cases := []struct { + name string + filters []string + services []swarm.Service + expectedServices []swarm.Service + }{ + { + name: "no filter", + filters: []string{}, + services: makeServices("s1", "s2"), + expectedServices: makeServices("s1", "s2"), + }, + { + name: "single-name filter", + filters: []string{"s1"}, + services: makeServices("s1", "s2"), + expectedServices: makeServices("s1"), + }, + { + name: "filter by prefix", + filters: []string{"prefix"}, + services: makeServices("prefix-s1", "prefix-s2", "s2"), + expectedServices: makeServices("prefix-s1", "prefix-s2"), + }, + { + name: "multi-name filter", + filters: []string{"s1", "s2"}, + services: makeServices("s1", "s2", "s3"), + expectedServices: makeServices("s1", "s2"), + }, + { + name: "stack name prefix is valid", + filters: []string{"stack_s1"}, + services: makeServices("s1", "s11", "s2"), + expectedServices: makeServices("s1", "s11"), + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + result := filterServicesByName(c.services, c.filters, "stack") + assert.DeepEqual(t, c.expectedServices, result) + }) + } +} + +func makeServices(names ...string) []swarm.Service { + result := make([]swarm.Service, len(names)) + for i, n := range names { + result[i] = swarm.Service{Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "stack_" + n}}} + } + return result +} diff --git a/cli/cli/command/stack/kubernetes/stack.go b/cli/cli/command/stack/kubernetes/stack.go new file mode 100644 index 
00000000..e368d718 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/stack.go @@ -0,0 +1,161 @@ +package kubernetes + +import ( + "io/ioutil" + "path/filepath" + "sort" + + latest "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3" + "github.com/docker/compose-on-kubernetes/api/labels" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Stack is the main type used by stack commands so they remain independent from kubernetes compose component version. +type Stack struct { + Name string + Namespace string + ComposeFile string + Spec *latest.StackSpec +} + +type childResource interface { + setOwner(metav1.OwnerReference) error + delete() // does not report error, as if a deletion failed, we want to continue deleting other child resources +} + +func deleteChildResources(childResources []childResource) { + for _, cr := range childResources { + cr.delete() + } +} + +func setChildResourcesOwner(childResources []childResource, owner metav1.OwnerReference) error { + for _, cr := range childResources { + if err := cr.setOwner(owner); err != nil { + return err + } + } + return nil +} + +// getServices returns all the stack service names, sorted lexicographically +func (s *Stack) getServices() []string { + services := make([]string, len(s.Spec.Services)) + for i, service := range s.Spec.Services { + services[i] = service.Name + } + sort.Strings(services) + return services +} + +// createFileBasedConfigMaps creates a Kubernetes ConfigMap for each Compose global file-based config. 
+func (s *Stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface) ([]childResource, error) { + var resources []childResource + for name, config := range s.Spec.Configs { + if config.File == "" { + continue + } + + fileName := filepath.Base(config.File) + content, err := ioutil.ReadFile(config.File) + if err != nil { + return resources, err + } + + configMap, err := configMaps.Create(toConfigMap(s.Name, name, fileName, content)) + if err != nil { + return resources, err + } + resources = append(resources, &configMapChildResource{client: configMaps, configMap: configMap}) + } + return resources, nil +} + +type configMapChildResource struct { + client corev1.ConfigMapInterface + configMap *apiv1.ConfigMap +} + +func (r *configMapChildResource) setOwner(ref metav1.OwnerReference) error { + r.configMap.OwnerReferences = append(r.configMap.OwnerReferences, ref) + _, err := r.client.Update(r.configMap) + return err +} + +func (r *configMapChildResource) delete() { + r.client.Delete(r.configMap.Name, nil) +} + +// toConfigMap converts a Compose Config to a Kube ConfigMap. +func toConfigMap(stackName, name, key string, content []byte) *apiv1.ConfigMap { + return &apiv1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + labels.ForStackName: stackName, + }, + }, + Data: map[string]string{ + key: string(content), + }, + } +} + +// createFileBasedSecrets creates a Kubernetes Secret for each Compose global file-based secret. 
+func (s *Stack) createFileBasedSecrets(secrets corev1.SecretInterface) ([]childResource, error) { + var resources []childResource + for name, secret := range s.Spec.Secrets { + if secret.File == "" { + continue + } + + fileName := filepath.Base(secret.File) + content, err := ioutil.ReadFile(secret.File) + if err != nil { + return resources, err + } + + secret, err := secrets.Create(toSecret(s.Name, name, fileName, content)) + if err != nil { + return resources, err + } + resources = append(resources, &secretChildResource{client: secrets, secret: secret}) + } + return resources, nil +} + +type secretChildResource struct { + client corev1.SecretInterface + secret *apiv1.Secret +} + +func (r *secretChildResource) setOwner(ref metav1.OwnerReference) error { + r.secret.OwnerReferences = append(r.secret.OwnerReferences, ref) + _, err := r.client.Update(r.secret) + return err +} + +func (r *secretChildResource) delete() { + r.client.Delete(r.secret.Name, nil) +} + +// toSecret converts a Compose Secret to a Kube Secret. 
+func toSecret(stackName, name, key string, content []byte) *apiv1.Secret { + return &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + labels.ForStackName: stackName, + }, + }, + Data: map[string][]byte{ + key: content, + }, + } +} diff --git a/cli/cli/command/stack/kubernetes/stackclient.go b/cli/cli/command/stack/kubernetes/stackclient.go new file mode 100644 index 00000000..5ce4480e --- /dev/null +++ b/cli/cli/command/stack/kubernetes/stackclient.go @@ -0,0 +1,274 @@ +package kubernetes + +import ( + "fmt" + + composev1alpha3 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1alpha3" + composev1beta1 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta1" + composev1beta2 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta2" + "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3" + "github.com/docker/compose-on-kubernetes/api/compose/v1beta1" + "github.com/docker/compose-on-kubernetes/api/compose/v1beta2" + "github.com/docker/compose-on-kubernetes/api/labels" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" +) + +// StackClient talks to a kubernetes compose component. +type StackClient interface { + StackConverter + CreateOrUpdate(s Stack, childResources []childResource) error + Delete(name string) error + Get(name string) (Stack, error) + List(opts metav1.ListOptions) ([]Stack, error) + IsColliding(servicesClient corev1.ServiceInterface, s Stack) error +} + +// stackV1Beta1 implements stackClient interface and talks to compose component v1beta1. 
+type stackV1Beta1 struct { + stackV1Beta1Converter + stacks composev1beta1.StackInterface +} + +func newStackV1Beta1(config *rest.Config, namespace string) (*stackV1Beta1, error) { + client, err := composev1beta1.NewForConfig(config) + if err != nil { + return nil, err + } + return &stackV1Beta1{stacks: client.Stacks(namespace)}, nil +} + +func (s *stackV1Beta1) CreateOrUpdate(internalStack Stack, childResources []childResource) error { + // If it already exists, update the stack + var ( + stack *v1beta1.Stack + err error + ) + if stack, err = s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil { + stack.Spec.ComposeFile = internalStack.ComposeFile + stack, err = s.stacks.Update(stack) + } else { + // Or create it + stack, err = s.stacks.Create(stackToV1beta1(internalStack)) + } + if err != nil { + deleteChildResources(childResources) + return err + } + blockOwnerDeletion := true + isController := true + return setChildResourcesOwner(childResources, metav1.OwnerReference{ + APIVersion: v1beta1.SchemeGroupVersion.String(), + Kind: "Stack", + Name: stack.Name, + UID: stack.UID, + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + }) +} + +func (s *stackV1Beta1) Delete(name string) error { + return s.stacks.Delete(name, &metav1.DeleteOptions{}) +} + +func (s *stackV1Beta1) Get(name string) (Stack, error) { + stackBeta1, err := s.stacks.Get(name, metav1.GetOptions{}) + if err != nil { + return Stack{}, err + } + return stackFromV1beta1(stackBeta1) +} + +func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]Stack, error) { + list, err := s.stacks.List(opts) + if err != nil { + return nil, err + } + stacks := make([]Stack, len(list.Items)) + for i := range list.Items { + stack, err := stackFromV1beta1(&list.Items[i]) + if err != nil { + return nil, err + } + stacks[i] = stack + } + return stacks, nil +} + +// IsColliding verifies that services defined in the stack collides with already deployed services +func (s *stackV1Beta1) 
IsColliding(servicesClient corev1.ServiceInterface, st Stack) error { + for _, srv := range st.getServices() { + if err := verify(servicesClient, st.Name, srv); err != nil { + return err + } + } + return nil +} + +// verify checks whether the service is already present in kubernetes. +// If we find the service by name but it doesn't have our label or it has a different value +// than the stack name for the label, we fail (i.e. it will collide) +func verify(services corev1.ServiceInterface, stackName string, service string) error { + svc, err := services.Get(service, metav1.GetOptions{}) + if err == nil { + if key, ok := svc.ObjectMeta.Labels[labels.ForStackName]; ok { + if key != stackName { + return fmt.Errorf("service %s already present in stack named %s", service, key) + } + return nil + } + return fmt.Errorf("service %s already present in the cluster", service) + } + return nil +} + +// stackV1Beta2 implements stackClient interface and talks to compose component v1beta2. +type stackV1Beta2 struct { + stackV1Beta2Converter + stacks composev1beta2.StackInterface +} + +func newStackV1Beta2(config *rest.Config, namespace string) (*stackV1Beta2, error) { + client, err := composev1beta2.NewForConfig(config) + if err != nil { + return nil, err + } + return &stackV1Beta2{stacks: client.Stacks(namespace)}, nil +} + +func (s *stackV1Beta2) CreateOrUpdate(internalStack Stack, childResources []childResource) error { + var ( + stack *v1beta2.Stack + err error + ) + resolved, err := stackToV1beta2(internalStack) + if err != nil { + deleteChildResources(childResources) + return err + } + if stack, err = s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil { + stack.Spec = resolved.Spec + stack, err = s.stacks.Update(stack) + } else { + // Or create it + stack, err = s.stacks.Create(resolved) + } + if err != nil { + deleteChildResources(childResources) + return err + } + blockOwnerDeletion := true + isController := true + return 
setChildResourcesOwner(childResources, metav1.OwnerReference{ + APIVersion: v1beta2.SchemeGroupVersion.String(), + Kind: "Stack", + Name: stack.Name, + UID: stack.UID, + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + }) +} + +func (s *stackV1Beta2) Delete(name string) error { + return s.stacks.Delete(name, &metav1.DeleteOptions{}) +} + +func (s *stackV1Beta2) Get(name string) (Stack, error) { + stackBeta2, err := s.stacks.Get(name, metav1.GetOptions{}) + if err != nil { + return Stack{}, err + } + return stackFromV1beta2(stackBeta2) +} + +func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]Stack, error) { + list, err := s.stacks.List(opts) + if err != nil { + return nil, err + } + stacks := make([]Stack, len(list.Items)) + for i := range list.Items { + if stacks[i], err = stackFromV1beta2(&list.Items[i]); err != nil { + return nil, err + } + } + return stacks, nil +} + +// IsColliding is handle server side with the compose api v1beta2, so nothing to do here +func (s *stackV1Beta2) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error { + return nil +} + +// stackV1Beta2 implements stackClient interface and talks to compose component v1beta2. 
+type stackV1Alpha3 struct { + stackV1Alpha3Converter + stacks composev1alpha3.StackInterface +} + +func newStackV1Alpha3(config *rest.Config, namespace string) (*stackV1Alpha3, error) { + client, err := composev1alpha3.NewForConfig(config) + if err != nil { + return nil, err + } + return &stackV1Alpha3{stacks: client.Stacks(namespace)}, nil +} + +func (s *stackV1Alpha3) CreateOrUpdate(internalStack Stack, childResources []childResource) error { + var ( + stack *v1alpha3.Stack + err error + ) + resolved := stackToV1alpha3(internalStack) + if stack, err = s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil { + stack.Spec = resolved.Spec + stack, err = s.stacks.Update(stack) + } else { + // Or create it + stack, err = s.stacks.Create(resolved) + } + if err != nil { + deleteChildResources(childResources) + return err + } + blockOwnerDeletion := true + isController := true + return setChildResourcesOwner(childResources, metav1.OwnerReference{ + APIVersion: v1alpha3.SchemeGroupVersion.String(), + Kind: "Stack", + Name: stack.Name, + UID: stack.UID, + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + }) +} + +func (s *stackV1Alpha3) Delete(name string) error { + return s.stacks.Delete(name, &metav1.DeleteOptions{}) +} + +func (s *stackV1Alpha3) Get(name string) (Stack, error) { + stackAlpha3, err := s.stacks.Get(name, metav1.GetOptions{}) + if err != nil { + return Stack{}, err + } + return stackFromV1alpha3(stackAlpha3), nil +} + +func (s *stackV1Alpha3) List(opts metav1.ListOptions) ([]Stack, error) { + list, err := s.stacks.List(opts) + if err != nil { + return nil, err + } + stacks := make([]Stack, len(list.Items)) + for i := range list.Items { + stacks[i] = stackFromV1alpha3(&list.Items[i]) + } + return stacks, nil +} + +// IsColliding is handle server side with the compose api v1beta2, so nothing to do here +func (s *stackV1Alpha3) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error { + return nil +} diff --git 
a/cli/cli/command/stack/kubernetes/stackclient_test.go b/cli/cli/command/stack/kubernetes/stackclient_test.go new file mode 100644 index 00000000..507a5898 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/stackclient_test.go @@ -0,0 +1,60 @@ +package kubernetes + +import ( + "io/ioutil" + "testing" + + composetypes "github.com/docker/cli/cli/compose/types" + "gotest.tools/assert" +) + +func TestFromCompose(t *testing.T) { + stackClient := &stackV1Beta1{} + s, err := stackClient.FromCompose(ioutil.Discard, "foo", &composetypes.Config{ + Version: "3.1", + Filename: "banana", + Services: []composetypes.ServiceConfig{ + { + Name: "foo", + Image: "foo", + }, + { + Name: "bar", + Image: "bar", + }, + }, + }) + assert.NilError(t, err) + assert.Equal(t, "foo", s.Name) + assert.Equal(t, string(`version: "3.5" +services: + bar: + image: bar + foo: + image: foo +`), s.ComposeFile) +} + +func TestFromComposeUnsupportedVersion(t *testing.T) { + stackClient := &stackV1Beta1{} + _, err := stackClient.FromCompose(ioutil.Discard, "foo", &composetypes.Config{ + Version: "3.6", + Filename: "banana", + Services: []composetypes.ServiceConfig{ + { + Name: "foo", + Image: "foo", + Volumes: []composetypes.ServiceVolumeConfig{ + { + Type: "tmpfs", + Target: "/app", + Tmpfs: &composetypes.ServiceVolumeTmpfs{ + Size: 10000, + }, + }, + }, + }, + }, + }) + assert.ErrorContains(t, err, "the compose yaml file is invalid with v3.5: services.foo.volumes.0 Additional property tmpfs is not allowed") +} diff --git a/cli/cli/command/stack/kubernetes/testdata/compose-with-expose.yml b/cli/cli/command/stack/kubernetes/testdata/compose-with-expose.yml new file mode 100644 index 00000000..4d0b6f7e --- /dev/null +++ b/cli/cli/command/stack/kubernetes/testdata/compose-with-expose.yml @@ -0,0 +1,9 @@ +version: "3.7" +services: + test: + image: "some-image" + expose: + - "1" # default protocol, single port + - "2-4" # default protocol, port range + - "5/udp" # specific protocol, single port + - "6-8/udp" 
# specific protocol, port range \ No newline at end of file diff --git a/cli/cli/command/stack/kubernetes/testdata/compose-with-pull-policy.yml b/cli/cli/command/stack/kubernetes/testdata/compose-with-pull-policy.yml new file mode 100644 index 00000000..471988f5 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/testdata/compose-with-pull-policy.yml @@ -0,0 +1,6 @@ +version: "3.7" +services: + test: + image: "some-image" + x-kubernetes: + pull_policy: "Never" \ No newline at end of file diff --git a/cli/cli/command/stack/kubernetes/testdata/compose-with-pull-secret.yml b/cli/cli/command/stack/kubernetes/testdata/compose-with-pull-secret.yml new file mode 100644 index 00000000..8c265378 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/testdata/compose-with-pull-secret.yml @@ -0,0 +1,6 @@ +version: "3.7" +services: + test: + image: "some-private-image" + x-kubernetes: + pull_secret: "some-secret" \ No newline at end of file diff --git a/cli/cli/command/stack/kubernetes/testdata/config b/cli/cli/command/stack/kubernetes/testdata/config new file mode 100644 index 00000000..6ce433f2 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/testdata/config @@ -0,0 +1 @@ +this is a config \ No newline at end of file diff --git a/cli/cli/command/stack/kubernetes/testdata/secret b/cli/cli/command/stack/kubernetes/testdata/secret new file mode 100644 index 00000000..51ed8039 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/testdata/secret @@ -0,0 +1 @@ +this is a secret \ No newline at end of file diff --git a/cli/cli/command/stack/kubernetes/testdata/warnings.golden b/cli/cli/command/stack/kubernetes/testdata/warnings.golden new file mode 100644 index 00000000..1ee14b0f --- /dev/null +++ b/cli/cli/command/stack/kubernetes/testdata/warnings.golden @@ -0,0 +1,31 @@ +top-level network "global" is ignored +service "front": network "private" is ignored +service "front": update_config.delay is not supported +service "front": update_config.failure_action is not supported 
+service "front": update_config.monitor is not supported +service "front": update_config.max_failure_ratio is not supported +service "front": restart_policy.delay is ignored +service "front": restart_policy.max_attempts is ignored +service "front": restart_policy.window is ignored +service "front": container_name is deprecated +service "front": expose is deprecated +service "front": build is ignored +service "front": cgroup_parent is ignored +service "front": devices are ignored +service "front": domainname is ignored +service "front": external_links are ignored +service "front": links are ignored +service "front": mac_address is ignored +service "front": network_mode is ignored +service "front": restart is ignored +service "front": security_opt are ignored +service "front": ulimits are ignored +service "front": depends_on are ignored +service "front": credential_spec is ignored +service "front": dns are ignored +service "front": dns_search are ignored +service "front": env_file are ignored +service "front": stop_signal is ignored +service "front": logging is ignored +service "front": volume.propagation is ignored +service "front": volume.nocopy is ignored diff --git a/cli/cli/command/stack/kubernetes/warnings.go b/cli/cli/command/stack/kubernetes/warnings.go new file mode 100644 index 00000000..eb4598db --- /dev/null +++ b/cli/cli/command/stack/kubernetes/warnings.go @@ -0,0 +1,145 @@ +package kubernetes + +import ( + "fmt" + "io" + + composetypes "github.com/docker/cli/cli/compose/types" +) + +func warnUnsupportedFeatures(stderr io.Writer, cfg *composetypes.Config) { + warnForGlobalNetworks(stderr, cfg) + for _, s := range cfg.Services { + warnForServiceNetworks(stderr, s) + warnForUnsupportedDeploymentStrategy(stderr, s) + warnForUnsupportedRestartPolicy(stderr, s) + warnForDeprecatedProperties(stderr, s) + warnForUnsupportedProperties(stderr, s) + } +} + +func warnForGlobalNetworks(stderr io.Writer, config *composetypes.Config) { + for network := range 
config.Networks { + fmt.Fprintf(stderr, "top-level network %q is ignored\n", network) + } +} + +func warnServicef(stderr io.Writer, service, format string, args ...interface{}) { + fmt.Fprintf(stderr, "service \"%s\": %s\n", service, fmt.Sprintf(format, args...)) +} + +func warnForServiceNetworks(stderr io.Writer, s composetypes.ServiceConfig) { + for network := range s.Networks { + warnServicef(stderr, s.Name, "network %q is ignored", network) + } +} + +func warnForDeprecatedProperties(stderr io.Writer, s composetypes.ServiceConfig) { + if s.ContainerName != "" { + warnServicef(stderr, s.Name, "container_name is deprecated") + } + if len(s.Expose) > 0 { + warnServicef(stderr, s.Name, "expose is deprecated") + } +} + +func warnForUnsupportedDeploymentStrategy(stderr io.Writer, s composetypes.ServiceConfig) { + config := s.Deploy.UpdateConfig + if config == nil { + return + } + if config.Delay != 0 { + warnServicef(stderr, s.Name, "update_config.delay is not supported") + } + if config.FailureAction != "" { + warnServicef(stderr, s.Name, "update_config.failure_action is not supported") + } + if config.Monitor != 0 { + warnServicef(stderr, s.Name, "update_config.monitor is not supported") + } + if config.MaxFailureRatio != 0 { + warnServicef(stderr, s.Name, "update_config.max_failure_ratio is not supported") + } +} + +func warnForUnsupportedRestartPolicy(stderr io.Writer, s composetypes.ServiceConfig) { + policy := s.Deploy.RestartPolicy + if policy == nil { + return + } + + if policy.Delay != nil { + warnServicef(stderr, s.Name, "restart_policy.delay is ignored") + } + if policy.MaxAttempts != nil { + warnServicef(stderr, s.Name, "restart_policy.max_attempts is ignored") + } + if policy.Window != nil { + warnServicef(stderr, s.Name, "restart_policy.window is ignored") + } +} + +func warnForUnsupportedProperties(stderr io.Writer, s composetypes.ServiceConfig) { // nolint: gocyclo + if build := s.Build; build.Context != "" || build.Dockerfile != "" || len(build.Args) 
> 0 || len(build.Labels) > 0 || len(build.CacheFrom) > 0 || build.Network != "" || build.Target != "" { + warnServicef(stderr, s.Name, "build is ignored") + } + if s.CgroupParent != "" { + warnServicef(stderr, s.Name, "cgroup_parent is ignored") + } + if len(s.Devices) > 0 { + warnServicef(stderr, s.Name, "devices are ignored") + } + if s.DomainName != "" { + warnServicef(stderr, s.Name, "domainname is ignored") + } + if len(s.ExternalLinks) > 0 { + warnServicef(stderr, s.Name, "external_links are ignored") + } + if len(s.Links) > 0 { + warnServicef(stderr, s.Name, "links are ignored") + } + if s.MacAddress != "" { + warnServicef(stderr, s.Name, "mac_address is ignored") + } + if s.NetworkMode != "" { + warnServicef(stderr, s.Name, "network_mode is ignored") + } + if s.Restart != "" { + warnServicef(stderr, s.Name, "restart is ignored") + } + if len(s.SecurityOpt) > 0 { + warnServicef(stderr, s.Name, "security_opt are ignored") + } + if len(s.Ulimits) > 0 { + warnServicef(stderr, s.Name, "ulimits are ignored") + } + if len(s.DependsOn) > 0 { + warnServicef(stderr, s.Name, "depends_on are ignored") + } + if s.CredentialSpec.File != "" { + warnServicef(stderr, s.Name, "credential_spec is ignored") + } + if len(s.DNS) > 0 { + warnServicef(stderr, s.Name, "dns are ignored") + } + if len(s.DNSSearch) > 0 { + warnServicef(stderr, s.Name, "dns_search are ignored") + } + if len(s.EnvFile) > 0 { + warnServicef(stderr, s.Name, "env_file are ignored") + } + if s.StopSignal != "" { + warnServicef(stderr, s.Name, "stop_signal is ignored") + } + if s.Logging != nil { + warnServicef(stderr, s.Name, "logging is ignored") + } + for _, m := range s.Volumes { + if m.Volume != nil && m.Volume.NoCopy { + warnServicef(stderr, s.Name, "volume.nocopy is ignored") + } + if m.Bind != nil && m.Bind.Propagation != "" { + warnServicef(stderr, s.Name, "volume.propagation is ignored") + } + } +} diff --git a/cli/cli/command/stack/kubernetes/warnings_test.go 
b/cli/cli/command/stack/kubernetes/warnings_test.go new file mode 100644 index 00000000..111fa7bd --- /dev/null +++ b/cli/cli/command/stack/kubernetes/warnings_test.go @@ -0,0 +1,78 @@ +package kubernetes + +import ( + "bytes" + "testing" + "time" + + composetypes "github.com/docker/cli/cli/compose/types" + "gotest.tools/golden" +) + +func TestWarnings(t *testing.T) { + duration := composetypes.Duration(5 * time.Second) + attempts := uint64(3) + config := &composetypes.Config{ + Version: "3.4", + Services: []composetypes.ServiceConfig{ + { + Name: "front", + Build: composetypes.BuildConfig{ + Context: "ignored", + }, + ContainerName: "ignored", + CgroupParent: "ignored", + CredentialSpec: composetypes.CredentialSpecConfig{File: "ignored"}, + DependsOn: []string{"ignored"}, + Deploy: composetypes.DeployConfig{ + UpdateConfig: &composetypes.UpdateConfig{ + Delay: composetypes.Duration(5 * time.Second), + FailureAction: "rollback", + Monitor: composetypes.Duration(10 * time.Second), + MaxFailureRatio: 0.5, + }, + RestartPolicy: &composetypes.RestartPolicy{ + Delay: &duration, + MaxAttempts: &attempts, + Window: &duration, + }, + }, + Devices: []string{"ignored"}, + DNSSearch: []string{"ignored"}, + DNS: []string{"ignored"}, + DomainName: "ignored", + EnvFile: []string{"ignored"}, + Expose: []string{"80"}, + ExternalLinks: []string{"ignored"}, + Image: "dockerdemos/front", + Links: []string{"ignored"}, + Logging: &composetypes.LoggingConfig{Driver: "syslog"}, + MacAddress: "ignored", + Networks: map[string]*composetypes.ServiceNetworkConfig{"private": {}}, + NetworkMode: "ignored", + Restart: "ignored", + SecurityOpt: []string{"ignored"}, + StopSignal: "ignored", + Ulimits: map[string]*composetypes.UlimitsConfig{"nproc": {Hard: 65535}}, + User: "ignored", + Volumes: []composetypes.ServiceVolumeConfig{ + { + Type: "bind", + Bind: &composetypes.ServiceVolumeBind{Propagation: "ignored"}, + }, + { + Type: "volume", + Volume: &composetypes.ServiceVolumeVolume{NoCopy: true}, 
+ }, + }, + }, + }, + Networks: map[string]composetypes.NetworkConfig{ + "global": {}, + }, + } + var buf bytes.Buffer + warnUnsupportedFeatures(&buf, config) + warnings := buf.String() + golden.Assert(t, warnings, "warnings.golden") +} diff --git a/cli/cli/command/stack/kubernetes/watcher.go b/cli/cli/command/stack/kubernetes/watcher.go new file mode 100644 index 00000000..5d801c37 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/watcher.go @@ -0,0 +1,254 @@ +package kubernetes + +import ( + "context" + "sync" + "time" + + apiv1beta1 "github.com/docker/compose-on-kubernetes/api/compose/v1beta1" + "github.com/docker/compose-on-kubernetes/api/labels" + "github.com/pkg/errors" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + runtimeutil "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + podutils "k8s.io/kubernetes/pkg/api/v1/pod" +) + +type stackListWatch interface { + List(opts metav1.ListOptions) (*apiv1beta1.StackList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) +} + +type podListWatch interface { + List(opts metav1.ListOptions) (*apiv1.PodList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) +} + +// deployWatcher watches a stack deployment +type deployWatcher struct { + pods podListWatch + stacks stackListWatch +} + +// Watch watches a stack deployment and returns once the stack has settled, reporting per-service state on statusUpdates +func (w *deployWatcher) Watch(name string, serviceNames []string, statusUpdates chan serviceStatus) error { + errC := make(chan error, 1) + defer close(errC) + + handlers := runtimeutil.ErrorHandlers + + // informer errors are reported using global error handlers + runtimeutil.ErrorHandlers = append(handlers, func(err error) { + errC <- err + }) + defer func() { + runtimeutil.ErrorHandlers = handlers + }() + + ctx, cancel :=
context.WithCancel(context.Background()) + wg := sync.WaitGroup{} + defer func() { + cancel() + wg.Wait() + }() + wg.Add(2) + go func() { + defer wg.Done() + w.watchStackStatus(ctx, name, errC) + }() + go func() { + defer wg.Done() + w.waitForPods(ctx, name, serviceNames, errC, statusUpdates) + }() + + return <-errC +} + +type stackWatcher struct { + resultChan chan error + stackName string +} + +var _ cache.ResourceEventHandler = &stackWatcher{} + +func (sw *stackWatcher) OnAdd(obj interface{}) { + stack, ok := obj.(*apiv1beta1.Stack) + switch { + case !ok: + sw.resultChan <- errors.Errorf("stack %s has incorrect type", sw.stackName) + case stack.Status.Phase == apiv1beta1.StackFailure: + sw.resultChan <- errors.Errorf("stack %s failed with status %s: %s", sw.stackName, stack.Status.Phase, stack.Status.Message) + } +} + +func (sw *stackWatcher) OnUpdate(oldObj, newObj interface{}) { + sw.OnAdd(newObj) +} + +func (sw *stackWatcher) OnDelete(obj interface{}) { +} + +func (w *deployWatcher) watchStackStatus(ctx context.Context, stackname string, e chan error) { + informer := newStackInformer(w.stacks, stackname) + sw := &stackWatcher{ + resultChan: e, + } + informer.AddEventHandler(sw) + informer.Run(ctx.Done()) +} + +type serviceStatus struct { + name string + podsPending int + podsRunning int + podsSucceeded int + podsFailed int + podsUnknown int + podsReady int + podsTotal int +} + +type podWatcher struct { + stackName string + services map[string]serviceStatus + resultChan chan error + starts map[string]int32 + indexer cache.Indexer + statusUpdates chan serviceStatus +} + +var _ cache.ResourceEventHandler = &podWatcher{} + +func (pw *podWatcher) handlePod(obj interface{}) { + pod, ok := obj.(*apiv1.Pod) + if !ok { + pw.resultChan <- errors.Errorf("Pod has incorrect type in stack %s", pw.stackName) + return + } + serviceName := pod.Labels[labels.ForServiceName] + pw.updateServiceStatus(serviceName) + if pw.allReady() { + select { + case pw.resultChan <- nil: + 
default: + // result has already been reported, just don't block + } + } +} + +func (pw *podWatcher) updateServiceStatus(serviceName string) { + pods, _ := pw.indexer.ByIndex("byservice", serviceName) + status := serviceStatus{name: serviceName} + for _, obj := range pods { + if pod, ok := obj.(*apiv1.Pod); ok { + switch pod.Status.Phase { + case apiv1.PodPending: + status.podsPending++ + case apiv1.PodRunning: + status.podsRunning++ + case apiv1.PodSucceeded: + status.podsSucceeded++ + case apiv1.PodFailed: + status.podsFailed++ + case apiv1.PodUnknown: + status.podsUnknown++ + } + if podutils.IsPodReady(pod) { + status.podsReady++ + } + } + } + status.podsTotal = len(pods) + oldStatus := pw.services[serviceName] + if oldStatus != status { + pw.statusUpdates <- status + } + pw.services[serviceName] = status +} + +func (pw *podWatcher) allReady() bool { + for _, status := range pw.services { + if status.podsReady == 0 { + return false + } + } + return true +} + +func (pw *podWatcher) OnAdd(obj interface{}) { + pw.handlePod(obj) +} + +func (pw *podWatcher) OnUpdate(oldObj, newObj interface{}) { + pw.handlePod(newObj) +} + +func (pw *podWatcher) OnDelete(obj interface{}) { + pw.handlePod(obj) +} + +func (w *deployWatcher) waitForPods(ctx context.Context, stackName string, serviceNames []string, e chan error, statusUpdates chan serviceStatus) { + informer := newPodInformer(w.pods, stackName, cache.Indexers{ + "byservice": func(obj interface{}) ([]string, error) { + pod, ok := obj.(*apiv1.Pod) + if !ok { + return nil, errors.Errorf("Pod has incorrect type in stack %s", stackName) + } + return []string{pod.Labels[labels.ForServiceName]}, nil + }}) + services := map[string]serviceStatus{} + for _, name := range serviceNames { + services[name] = serviceStatus{name: name} + } + pw := &podWatcher{ + stackName: stackName, + services: services, + resultChan: e, + starts: map[string]int32{}, + indexer: informer.GetIndexer(), + statusUpdates: statusUpdates, + } + 
informer.AddEventHandler(pw) + informer.Run(ctx.Done()) +} + +func newPodInformer(podsClient podListWatch, stackName string, indexers cache.Indexers) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + return podsClient.List(options) + }, + + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.LabelSelector = labels.SelectorForStack(stackName) + return podsClient.Watch(options) + }, + }, + &apiv1.Pod{}, + time.Second*5, + indexers, + ) +} + +func newStackInformer(stacksClient stackListWatch, stackName string) cache.SharedInformer { + return cache.NewSharedInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fields.OneTermEqualSelector("metadata.name", stackName).String() + return stacksClient.List(options) + }, + + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fields.OneTermEqualSelector("metadata.name", stackName).String() + return stacksClient.Watch(options) + }, + }, + &apiv1beta1.Stack{}, + time.Second*5, + ) +} diff --git a/cli/cli/command/stack/kubernetes/watcher_test.go b/cli/cli/command/stack/kubernetes/watcher_test.go new file mode 100644 index 00000000..0972a914 --- /dev/null +++ b/cli/cli/command/stack/kubernetes/watcher_test.go @@ -0,0 +1,218 @@ +package kubernetes + +import ( + "testing" + + apiv1beta1 "github.com/docker/compose-on-kubernetes/api/compose/v1beta1" + composelabels "github.com/docker/compose-on-kubernetes/api/labels" + "gotest.tools/assert" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + k8stesting 
"k8s.io/client-go/testing" +) + +var podsResource = apiv1.SchemeGroupVersion.WithResource("pods") +var podKind = apiv1.SchemeGroupVersion.WithKind("Pod") +var stacksResource = apiv1beta1.SchemeGroupVersion.WithResource("stacks") +var stackKind = apiv1beta1.SchemeGroupVersion.WithKind("Stack") + +type testPodAndStackRepository struct { + fake *k8stesting.Fake +} + +func (r *testPodAndStackRepository) stackListWatchForNamespace(ns string) *testStackListWatch { + return &testStackListWatch{fake: r.fake, ns: ns} +} +func (r *testPodAndStackRepository) podListWatchForNamespace(ns string) *testPodListWatch { + return &testPodListWatch{fake: r.fake, ns: ns} +} + +func newTestPodAndStackRepository(initialPods []apiv1.Pod, initialStacks []apiv1beta1.Stack, podWatchHandler, stackWatchHandler k8stesting.WatchReactionFunc) *testPodAndStackRepository { + var scheme = runtime.NewScheme() + var codecs = serializer.NewCodecFactory(scheme) + metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + apiv1.AddToScheme(scheme) + apiv1beta1.AddToScheme(scheme) + + o := k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range initialPods { + if err := o.Add(&obj); err != nil { + panic(err) + } + } + for _, obj := range initialStacks { + if err := o.Add(&obj); err != nil { + panic(err) + } + } + fakePtr := &k8stesting.Fake{} + fakePtr.AddReactor("*", "*", k8stesting.ObjectReaction(o)) + if podWatchHandler != nil { + fakePtr.AddWatchReactor(podsResource.Resource, podWatchHandler) + } + if stackWatchHandler != nil { + fakePtr.AddWatchReactor(stacksResource.Resource, stackWatchHandler) + } + fakePtr.AddWatchReactor("*", k8stesting.DefaultWatchReactor(watch.NewFake(), nil)) + return &testPodAndStackRepository{fake: fakePtr} +} + +type testStackListWatch struct { + fake *k8stesting.Fake + ns string +} + +func (s *testStackListWatch) List(opts metav1.ListOptions) (*apiv1beta1.StackList, error) { + obj, err := 
s.fake.Invokes(k8stesting.NewListAction(stacksResource, stackKind, s.ns, opts), &apiv1beta1.StackList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := k8stesting.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apiv1beta1.StackList{} + for _, item := range obj.(*apiv1beta1.StackList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} +func (s *testStackListWatch) Watch(opts metav1.ListOptions) (watch.Interface, error) { + return s.fake.InvokesWatch(k8stesting.NewWatchAction(stacksResource, s.ns, opts)) +} + +type testPodListWatch struct { + fake *k8stesting.Fake + ns string +} + +func (p *testPodListWatch) List(opts metav1.ListOptions) (*apiv1.PodList, error) { + obj, err := p.fake.Invokes(k8stesting.NewListAction(podsResource, podKind, p.ns, opts), &apiv1.PodList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := k8stesting.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &apiv1.PodList{} + for _, item := range obj.(*apiv1.PodList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err + +} +func (p *testPodListWatch) Watch(opts metav1.ListOptions) (watch.Interface, error) { + return p.fake.InvokesWatch(k8stesting.NewWatchAction(podsResource, p.ns, opts)) +} + +func TestDeployWatchOk(t *testing.T) { + stack := apiv1beta1.Stack{ + ObjectMeta: metav1.ObjectMeta{Name: "test-stack", Namespace: "test-ns"}, + } + + serviceNames := []string{"svc1", "svc2"} + testRepo := newTestPodAndStackRepository(nil, []apiv1beta1.Stack{stack}, func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) { + res := watch.NewFake() + go func() { + pod1 := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test1", + Namespace: "test-ns", + Labels: composelabels.ForService("test-stack", "svc1"), + }, + Status: 
apiv1.PodStatus{ + Phase: apiv1.PodRunning, + Conditions: []apiv1.PodCondition{ + { + Type: apiv1.PodReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + } + pod2 := &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test2", + Namespace: "test-ns", + Labels: composelabels.ForService("test-stack", "svc2"), + }, + Status: apiv1.PodStatus{ + Phase: apiv1.PodRunning, + Conditions: []apiv1.PodCondition{ + { + Type: apiv1.PodReady, + Status: apiv1.ConditionTrue, + }, + }, + }, + } + res.Add(pod1) + res.Add(pod2) + }() + + return true, res, nil + }, nil) + + testee := &deployWatcher{ + stacks: testRepo.stackListWatchForNamespace("test-ns"), + pods: testRepo.podListWatchForNamespace("test-ns"), + } + + statusUpdates := make(chan serviceStatus) + go func() { + for range statusUpdates { + } + }() + defer close(statusUpdates) + err := testee.Watch(stack.Name, serviceNames, statusUpdates) + assert.NilError(t, err) +} + +func TestDeployReconcileFailure(t *testing.T) { + stack := apiv1beta1.Stack{ + ObjectMeta: metav1.ObjectMeta{Name: "test-stack", Namespace: "test-ns"}, + } + + serviceNames := []string{"svc1", "svc2"} + testRepo := newTestPodAndStackRepository(nil, []apiv1beta1.Stack{stack}, nil, func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) { + res := watch.NewFake() + go func() { + sfailed := stack + sfailed.Status = apiv1beta1.StackStatus{ + Phase: apiv1beta1.StackFailure, + Message: "test error", + } + res.Modify(&sfailed) + }() + + return true, res, nil + }) + + testee := &deployWatcher{ + stacks: testRepo.stackListWatchForNamespace("test-ns"), + pods: testRepo.podListWatchForNamespace("test-ns"), + } + + statusUpdates := make(chan serviceStatus) + go func() { + for range statusUpdates { + } + }() + defer close(statusUpdates) + err := testee.Watch(stack.Name, serviceNames, statusUpdates) + assert.ErrorContains(t, err, "Failure: test error") +} diff --git a/cli/cli/command/stack/list.go b/cli/cli/command/stack/list.go new file mode 
100644 index 00000000..697cbcce --- /dev/null +++ b/cli/cli/command/stack/list.go @@ -0,0 +1,80 @@ +package stack + +import ( + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/formatter" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +func newListCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.List{} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List stacks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return RunList(cmd, dockerCli, opts, common.orchestrator) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.Format, "format", "", "Pretty-print stacks using a Go template") + flags.StringSliceVar(&opts.Namespaces, "namespace", []string{}, "Kubernetes namespaces to use") + flags.SetAnnotation("namespace", "kubernetes", nil) + flags.BoolVarP(&opts.AllNamespaces, "all-namespaces", "", false, "List stacks from all Kubernetes namespaces") + flags.SetAnnotation("all-namespaces", "kubernetes", nil) + return cmd +} + +// RunList performs a stack list against the specified orchestrator +func RunList(cmd *cobra.Command, dockerCli command.Cli, opts options.List, orchestrator command.Orchestrator) error { + stacks := []*formatter.Stack{} + if orchestrator.HasSwarm() { + ss, err := swarm.GetStacks(dockerCli) + if err != nil { + return err + } + stacks = append(stacks, ss...) + } + if orchestrator.HasKubernetes() { + kubeCli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), orchestrator)) + if err != nil { + return err + } + ss, err := kubernetes.GetStacks(kubeCli, opts) + if err != nil { + return err + } + stacks = append(stacks, ss...) 
+ } + return format(dockerCli, opts, orchestrator, stacks) +} + +func format(dockerCli command.Cli, opts options.List, orchestrator command.Orchestrator, stacks []*formatter.Stack) error { + format := opts.Format + if format == "" || format == formatter.TableFormatKey { + format = formatter.SwarmStackTableFormat + if orchestrator.HasKubernetes() { + format = formatter.KubernetesStackTableFormat + } + } + stackCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.Format(format), + } + sort.Slice(stacks, func(i, j int) bool { + return sortorder.NaturalLess(stacks[i].Name, stacks[j].Name) || + !sortorder.NaturalLess(stacks[j].Name, stacks[i].Name) && + sortorder.NaturalLess(stacks[j].Namespace, stacks[i].Namespace) + }) + return formatter.StackWrite(stackCtx, stacks) +} diff --git a/cli/cli/command/stack/list_test.go b/cli/cli/command/stack/list_test.go new file mode 100644 index 00000000..5fdc04be --- /dev/null +++ b/cli/cli/command/stack/list_test.go @@ -0,0 +1,130 @@ +package stack + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/internal/test" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/golden" +) + +var ( + orchestrator = commonOptions{orchestrator: command.OrchestratorSwarm} +) + +func TestListErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) + expectedError string + }{ + { + args: []string{"foo"}, + expectedError: "accepts no argument", + }, + { + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template parsing error", + }, + { + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{}, errors.Errorf("error getting services") + }, + expectedError: "error getting services", + }, + { + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service()}, nil + }, + expectedError: "cannot get label", + }, + } + + for _, tc := range testCases { + cmd := newListCommand(test.NewFakeCli(&fakeClient{ + serviceListFunc: tc.serviceListFunc, + }), &orchestrator) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestStackList(t *testing.T) { + testCases := []struct { + doc string + serviceNames []string + flags map[string]string + golden string + }{ + { + doc: "WithFormat", + serviceNames: []string{"service-name-foo"}, + flags: map[string]string{ + "format": "{{ .Name }}", + }, + golden: "stack-list-with-format.golden", + }, + { + doc: "WithoutFormat", + serviceNames: []string{"service-name-foo"}, + golden: "stack-list-without-format.golden", + }, + { + doc: "Sort", + serviceNames: []string{ + "service-name-foo", + "service-name-bar", + }, + golden: 
"stack-list-sort.golden", + }, + { + doc: "SortNatural", + serviceNames: []string{ + "service-name-1-foo", + "service-name-10-foo", + "service-name-2-foo", + }, + golden: "stack-list-sort-natural.golden", + }, + } + + for _, tc := range testCases { + t.Run(tc.doc, func(t *testing.T) { + var services []swarm.Service + for _, name := range tc.serviceNames { + services = append(services, + *Service( + ServiceLabels(map[string]string{ + "com.docker.stack.namespace": name, + }), + ), + ) + } + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return services, nil + }, + }) + cmd := newListCommand(cli, &orchestrator) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), tc.golden) + }) + } +} diff --git a/cli/cli/command/stack/loader/loader.go b/cli/cli/command/stack/loader/loader.go new file mode 100644 index 00000000..b4790945 --- /dev/null +++ b/cli/cli/command/stack/loader/loader.go @@ -0,0 +1,152 @@ +package loader + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/cli/compose/schema" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/pkg/errors" +) + +// LoadComposefile parse the composefile specified in the cli and returns its Config and version. 
+func LoadComposefile(dockerCli command.Cli, opts options.Deploy) (*composetypes.Config, error) { + configDetails, err := getConfigDetails(opts.Composefiles, dockerCli.In()) + if err != nil { + return nil, err + } + + dicts := getDictsFrom(configDetails.ConfigFiles) + config, err := loader.Load(configDetails) + if err != nil { + if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { + return nil, errors.Errorf("Compose file contains unsupported options:\n\n%s\n", + propertyWarnings(fpe.Properties)) + } + + return nil, err + } + + unsupportedProperties := loader.GetUnsupportedProperties(dicts...) + if len(unsupportedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n", + strings.Join(unsupportedProperties, ", ")) + } + + deprecatedProperties := loader.GetDeprecatedProperties(dicts...) + if len(deprecatedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n", + propertyWarnings(deprecatedProperties)) + } + return config, nil +} + +func getDictsFrom(configFiles []composetypes.ConfigFile) []map[string]interface{} { + dicts := []map[string]interface{}{} + + for _, configFile := range configFiles { + dicts = append(dicts, configFile.Config) + } + + return dicts +} + +func propertyWarnings(properties map[string]string) string { + var msgs []string + for name, description := range properties { + msgs = append(msgs, fmt.Sprintf("%s: %s", name, description)) + } + sort.Strings(msgs) + return strings.Join(msgs, "\n\n") +} + +func getConfigDetails(composefiles []string, stdin io.Reader) (composetypes.ConfigDetails, error) { + var details composetypes.ConfigDetails + + if len(composefiles) == 0 { + return details, errors.New("no composefile(s)") + } + + if composefiles[0] == "-" && len(composefiles) == 1 { + workingDir, err := os.Getwd() + if err != nil { + return details, err + } + details.WorkingDir = workingDir + } else { + absPath, err := filepath.Abs(composefiles[0]) + if err != nil { + return 
details, err + } + details.WorkingDir = filepath.Dir(absPath) + } + + var err error + details.ConfigFiles, err = loadConfigFiles(composefiles, stdin) + if err != nil { + return details, err + } + // Take the first file version (2 files can't have different version) + details.Version = schema.Version(details.ConfigFiles[0].Config) + details.Environment, err = buildEnvironment(os.Environ()) + return details, err +} + +func buildEnvironment(env []string) (map[string]string, error) { + result := make(map[string]string, len(env)) + for _, s := range env { + // if value is empty, s is like "K=", not "K". + if !strings.Contains(s, "=") { + return result, errors.Errorf("unexpected environment %q", s) + } + kv := strings.SplitN(s, "=", 2) + result[kv[0]] = kv[1] + } + return result, nil +} + +func loadConfigFiles(filenames []string, stdin io.Reader) ([]composetypes.ConfigFile, error) { + var configFiles []composetypes.ConfigFile + + for _, filename := range filenames { + configFile, err := loadConfigFile(filename, stdin) + if err != nil { + return configFiles, err + } + configFiles = append(configFiles, *configFile) + } + + return configFiles, nil +} + +func loadConfigFile(filename string, stdin io.Reader) (*composetypes.ConfigFile, error) { + var bytes []byte + var err error + + if filename == "-" { + bytes, err = ioutil.ReadAll(stdin) + } else { + bytes, err = ioutil.ReadFile(filename) + } + if err != nil { + return nil, err + } + + config, err := loader.ParseYAML(bytes) + if err != nil { + return nil, err + } + + return &composetypes.ConfigFile{ + Filename: filename, + Config: config, + }, nil +} diff --git a/cli/cli/command/stack/loader/loader_test.go b/cli/cli/command/stack/loader/loader_test.go new file mode 100644 index 00000000..de524cc5 --- /dev/null +++ b/cli/cli/command/stack/loader/loader_test.go @@ -0,0 +1,47 @@ +package loader + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + 
"gotest.tools/fs" +) + +func TestGetConfigDetails(t *testing.T) { + content := ` +version: "3.0" +services: + foo: + image: alpine:3.5 +` + file := fs.NewFile(t, "test-get-config-details", fs.WithContent(content)) + defer file.Remove() + + details, err := getConfigDetails([]string{file.Path()}, nil) + assert.NilError(t, err) + assert.Check(t, is.Equal(filepath.Dir(file.Path()), details.WorkingDir)) + assert.Assert(t, is.Len(details.ConfigFiles, 1)) + assert.Check(t, is.Equal("3.0", details.ConfigFiles[0].Config["version"])) + assert.Check(t, is.Len(details.Environment, len(os.Environ()))) +} + +func TestGetConfigDetailsStdin(t *testing.T) { + content := ` +version: "3.0" +services: + foo: + image: alpine:3.5 +` + details, err := getConfigDetails([]string{"-"}, strings.NewReader(content)) + assert.NilError(t, err) + cwd, err := os.Getwd() + assert.NilError(t, err) + assert.Check(t, is.Equal(cwd, details.WorkingDir)) + assert.Assert(t, is.Len(details.ConfigFiles, 1)) + assert.Check(t, is.Equal("3.0", details.ConfigFiles[0].Config["version"])) + assert.Check(t, is.Len(details.Environment, len(os.Environ()))) +} diff --git a/cli/cli/command/stack/options/opts.go b/cli/cli/command/stack/options/opts.go new file mode 100644 index 00000000..afcecd99 --- /dev/null +++ b/cli/cli/command/stack/options/opts.go @@ -0,0 +1,43 @@ +package options + +import "github.com/docker/cli/opts" + +// Deploy holds docker stack deploy options +type Deploy struct { + Bundlefile string + Composefiles []string + Namespace string + ResolveImage string + SendRegistryAuth bool + Prune bool +} + +// List holds docker stack ls options +type List struct { + Format string + AllNamespaces bool + Namespaces []string +} + +// PS holds docker stack ps options +type PS struct { + Filter opts.FilterOpt + NoTrunc bool + Namespace string + NoResolve bool + Quiet bool + Format string +} + +// Remove holds docker stack remove options +type Remove struct { + Namespaces []string +} + +// Services holds docker 
stack services options +type Services struct { + Quiet bool + Format string + Filter opts.FilterOpt + Namespace string +} diff --git a/cli/cli/command/stack/ps.go b/cli/cli/command/stack/ps.go new file mode 100644 index 00000000..ae692d4e --- /dev/null +++ b/cli/cli/command/stack/ps.go @@ -0,0 +1,44 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + cliopts "github.com/docker/cli/opts" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newPsCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.PS{Filter: cliopts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] STACK", + Short: "List the tasks in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + return RunPs(dockerCli, cmd.Flags(), common.Orchestrator(), opts) + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.NoTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.NoResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided") + flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display task IDs") + flags.StringVar(&opts.Format, "format", "", "Pretty-print tasks using a Go template") + kubernetes.AddNamespaceFlag(flags) + return cmd +} + +// RunPs performs a stack ps against the specified orchestrator +func RunPs(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, opts options.PS) error { + return runOrchestratedCommand(dockerCli, flags, commonOrchestrator, + func() error { return swarm.RunPS(dockerCli, opts) }, + func(kli *kubernetes.KubeCli) error { return 
kubernetes.RunPS(kli, opts) }) +} diff --git a/cli/cli/command/stack/ps_test.go b/cli/cli/command/stack/ps_test.go new file mode 100644 index 00000000..a3e68656 --- /dev/null +++ b/cli/cli/command/stack/ps_test.go @@ -0,0 +1,184 @@ +package stack + +import ( + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestStackPsErrors(t *testing.T) { + testCases := []struct { + args []string + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + expectedError string + }{ + + { + args: []string{}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"foo", "bar"}, + expectedError: "requires exactly 1 argument", + }, + { + args: []string{"foo"}, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return nil, errors.Errorf("error getting tasks") + }, + expectedError: "error getting tasks", + }, + } + + for _, tc := range testCases { + cmd := newPsCommand(test.NewFakeCli(&fakeClient{ + taskListFunc: tc.taskListFunc, + }), &orchestrator) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestStackPs(t *testing.T) { + testCases := []struct { + doc string + taskListFunc func(types.TaskListOptions) ([]swarm.Task, error) + nodeInspectWithRaw func(string) (swarm.Node, []byte, error) + config configfile.ConfigFile + args []string + flags map[string]string + expectedErr string + golden string + }{ + { + doc: "WithEmptyName", + args: []string{"' '"}, + expectedErr: `invalid stack name: "' '"`, + }, + { + doc: "WithEmptyStack", + taskListFunc: func(options 
types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{}, nil + }, + args: []string{"foo"}, + expectedErr: "nothing found in stack: foo", + }, + { + doc: "WithQuietOption", + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task(TaskID("id-foo"))}, nil + }, + args: []string{"foo"}, + flags: map[string]string{ + "quiet": "true", + }, + golden: "stack-ps-with-quiet-option.golden", + }, + { + doc: "WithNoTruncOption", + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task(TaskID("xn4cypcov06f2w8gsbaf2lst3"))}, nil + }, + args: []string{"foo"}, + flags: map[string]string{ + "no-trunc": "true", + "format": "{{ .ID }}", + }, + golden: "stack-ps-with-no-trunc-option.golden", + }, + { + doc: "WithNoResolveOption", + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task( + TaskNodeID("id-node-foo"), + )}, nil + }, + nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-name-bar")), nil, nil + }, + args: []string{"foo"}, + flags: map[string]string{ + "no-resolve": "true", + "format": "{{ .Node }}", + }, + golden: "stack-ps-with-no-resolve-option.golden", + }, + { + doc: "WithFormat", + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task(TaskServiceID("service-id-foo"))}, nil + }, + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{ .Name }}", + }, + golden: "stack-ps-with-format.golden", + }, + { + doc: "WithConfigFormat", + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return []swarm.Task{*Task(TaskServiceID("service-id-foo"))}, nil + }, + config: configfile.ConfigFile{ + TasksFormat: "{{ .Name }}", + }, + args: []string{"foo"}, + golden: "stack-ps-with-config-format.golden", + }, + { + doc: "WithoutFormat", + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) 
{ + return []swarm.Task{*Task( + TaskID("id-foo"), + TaskServiceID("service-id-foo"), + TaskNodeID("id-node"), + WithTaskSpec(TaskImage("myimage:mytag")), + TaskDesiredState(swarm.TaskStateReady), + WithStatus(TaskState(swarm.TaskStateFailed), Timestamp(time.Now().Add(-2*time.Hour))), + )}, nil + }, + nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-name-bar")), nil, nil + }, + args: []string{"foo"}, + golden: "stack-ps-without-format.golden", + }, + } + + for _, tc := range testCases { + t.Run(tc.doc, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + taskListFunc: tc.taskListFunc, + nodeInspectWithRaw: tc.nodeInspectWithRaw, + }) + cli.SetConfigFile(&tc.config) + + cmd := newPsCommand(cli, &orchestrator) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + + if tc.expectedErr != "" { + assert.Error(t, cmd.Execute(), tc.expectedErr) + assert.Check(t, is.Equal("", cli.OutBuffer().String())) + return + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), tc.golden) + }) + } +} diff --git a/cli/cli/command/stack/remove.go b/cli/cli/command/stack/remove.go new file mode 100644 index 00000000..a8dceff2 --- /dev/null +++ b/cli/cli/command/stack/remove.go @@ -0,0 +1,39 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newRemoveCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + var opts options.Remove + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] STACK [STACK...]", + Aliases: []string{"remove", "down"}, + Short: "Remove one or more stacks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) 
error { + opts.Namespaces = args + if err := validateStackNames(opts.Namespaces); err != nil { + return err + } + return RunRemove(dockerCli, cmd.Flags(), common.Orchestrator(), opts) + }, + } + flags := cmd.Flags() + kubernetes.AddNamespaceFlag(flags) + return cmd +} + +// RunRemove performs a stack remove against the specified orchestrator +func RunRemove(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, opts options.Remove) error { + return runOrchestratedCommand(dockerCli, flags, commonOrchestrator, + func() error { return swarm.RunRemove(dockerCli, opts) }, + func(kli *kubernetes.KubeCli) error { return kubernetes.RunRemove(kli, opts) }) +} diff --git a/cli/cli/command/stack/remove_test.go b/cli/cli/command/stack/remove_test.go new file mode 100644 index 00000000..c7032d84 --- /dev/null +++ b/cli/cli/command/stack/remove_test.go @@ -0,0 +1,166 @@ +package stack + +import ( + "errors" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func fakeClientForRemoveStackTest(version string) *fakeClient { + allServices := []string{ + objectName("foo", "service1"), + objectName("foo", "service2"), + objectName("bar", "service1"), + objectName("bar", "service2"), + } + allNetworks := []string{ + objectName("foo", "network1"), + objectName("bar", "network1"), + } + allSecrets := []string{ + objectName("foo", "secret1"), + objectName("foo", "secret2"), + objectName("bar", "secret1"), + } + allConfigs := []string{ + objectName("foo", "config1"), + objectName("foo", "config2"), + objectName("bar", "config1"), + } + return &fakeClient{ + version: version, + services: allServices, + networks: allNetworks, + secrets: allSecrets, + configs: allConfigs, + } +} + +func TestRemoveWithEmptyName(t *testing.T) { + cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{}), &orchestrator) + cmd.SetArgs([]string{"good", "' '", "alsogood"}) + 
cmd.SetOutput(ioutil.Discard) + + assert.ErrorContains(t, cmd.Execute(), `invalid stack name: "' '"`) +} + +func TestRemoveStackVersion124DoesNotRemoveConfigsOrSecrets(t *testing.T) { + client := fakeClientForRemoveStackTest("1.24") + cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.services), client.removedServices)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.networks), client.removedNetworks)) + assert.Check(t, is.Len(client.removedSecrets, 0)) + assert.Check(t, is.Len(client.removedConfigs, 0)) +} + +func TestRemoveStackVersion125DoesNotRemoveConfigs(t *testing.T) { + client := fakeClientForRemoveStackTest("1.25") + cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.services), client.removedServices)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.networks), client.removedNetworks)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.secrets), client.removedSecrets)) + assert.Check(t, is.Len(client.removedConfigs, 0)) +} + +func TestRemoveStackVersion130RemovesEverything(t *testing.T) { + client := fakeClientForRemoveStackTest("1.30") + cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.services), client.removedServices)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.networks), client.removedNetworks)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.secrets), client.removedSecrets)) + assert.Check(t, is.DeepEqual(buildObjectIDs(client.configs), client.removedConfigs)) +} + +func TestRemoveStackSkipEmpty(t *testing.T) { + allServices := []string{objectName("bar", "service1"), objectName("bar", "service2")} + 
allServiceIDs := buildObjectIDs(allServices) + + allNetworks := []string{objectName("bar", "network1")} + allNetworkIDs := buildObjectIDs(allNetworks) + + allSecrets := []string{objectName("bar", "secret1")} + allSecretIDs := buildObjectIDs(allSecrets) + + allConfigs := []string{objectName("bar", "config1")} + allConfigIDs := buildObjectIDs(allConfigs) + + fakeClient := &fakeClient{ + version: "1.30", + services: allServices, + networks: allNetworks, + secrets: allSecrets, + configs: allConfigs, + } + fakeCli := test.NewFakeCli(fakeClient) + cmd := newRemoveCommand(fakeCli, &orchestrator) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.NilError(t, cmd.Execute()) + expectedList := []string{"Removing service bar_service1", + "Removing service bar_service2", + "Removing secret bar_secret1", + "Removing config bar_config1", + "Removing network bar_network1\n", + } + assert.Check(t, is.Equal(strings.Join(expectedList, "\n"), fakeCli.OutBuffer().String())) + assert.Check(t, is.Contains(fakeCli.ErrBuffer().String(), "Nothing found in stack: foo\n")) + assert.Check(t, is.DeepEqual(allServiceIDs, fakeClient.removedServices)) + assert.Check(t, is.DeepEqual(allNetworkIDs, fakeClient.removedNetworks)) + assert.Check(t, is.DeepEqual(allSecretIDs, fakeClient.removedSecrets)) + assert.Check(t, is.DeepEqual(allConfigIDs, fakeClient.removedConfigs)) +} + +func TestRemoveContinueAfterError(t *testing.T) { + allServices := []string{objectName("foo", "service1"), objectName("bar", "service1")} + allServiceIDs := buildObjectIDs(allServices) + + allNetworks := []string{objectName("foo", "network1"), objectName("bar", "network1")} + allNetworkIDs := buildObjectIDs(allNetworks) + + allSecrets := []string{objectName("foo", "secret1"), objectName("bar", "secret1")} + allSecretIDs := buildObjectIDs(allSecrets) + + allConfigs := []string{objectName("foo", "config1"), objectName("bar", "config1")} + allConfigIDs := buildObjectIDs(allConfigs) + + removedServices := []string{} + cli := 
&fakeClient{ + version: "1.30", + services: allServices, + networks: allNetworks, + secrets: allSecrets, + configs: allConfigs, + + serviceRemoveFunc: func(serviceID string) error { + removedServices = append(removedServices, serviceID) + + if strings.Contains(serviceID, "foo") { + return errors.New("") + } + return nil + }, + } + cmd := newRemoveCommand(test.NewFakeCli(cli), &orchestrator) + cmd.SetOutput(ioutil.Discard) + cmd.SetArgs([]string{"foo", "bar"}) + + assert.Error(t, cmd.Execute(), "Failed to remove some resources from stack: foo") + assert.Check(t, is.DeepEqual(allServiceIDs, removedServices)) + assert.Check(t, is.DeepEqual(allNetworkIDs, cli.removedNetworks)) + assert.Check(t, is.DeepEqual(allSecretIDs, cli.removedSecrets)) + assert.Check(t, is.DeepEqual(allConfigIDs, cli.removedConfigs)) +} diff --git a/cli/cli/command/stack/services.go b/cli/cli/command/stack/services.go new file mode 100644 index 00000000..d875fa73 --- /dev/null +++ b/cli/cli/command/stack/services.go @@ -0,0 +1,42 @@ +package stack + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/kubernetes" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/stack/swarm" + cliopts "github.com/docker/cli/opts" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newServicesCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command { + opts := options.Services{Filter: cliopts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "services [OPTIONS] STACK", + Short: "List the services in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.Namespace = args[0] + if err := validateStackName(opts.Namespace); err != nil { + return err + } + return RunServices(dockerCli, cmd.Flags(), common.Orchestrator(), opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display IDs") + 
flags.StringVar(&opts.Format, "format", "", "Pretty-print services using a Go template") + flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided") + kubernetes.AddNamespaceFlag(flags) + return cmd +} + +// RunServices performs a stack services against the specified orchestrator +func RunServices(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, opts options.Services) error { + return runOrchestratedCommand(dockerCli, flags, commonOrchestrator, + func() error { return swarm.RunServices(dockerCli, opts) }, + func(kli *kubernetes.KubeCli) error { return kubernetes.RunServices(kli, opts) }) +} diff --git a/cli/cli/command/stack/services_test.go b/cli/cli/command/stack/services_test.go new file mode 100644 index 00000000..64a58b99 --- /dev/null +++ b/cli/cli/command/stack/services_test.go @@ -0,0 +1,170 @@ +package stack + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestStackServicesErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) + nodeListFunc func(options types.NodeListOptions) ([]swarm.Node, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + expectedError string + }{ + { + args: []string{"foo"}, + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return nil, errors.Errorf("error getting services") + }, + expectedError: "error getting services", + }, + { + args: []string{"foo"}, + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service()}, nil + }, + nodeListFunc: func(options types.NodeListOptions) ([]swarm.Node, error) { + return nil, errors.Errorf("error getting nodes") + }, + expectedError: "error getting nodes", + }, + { + args: []string{"foo"}, + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service()}, nil + }, + taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) { + return nil, errors.Errorf("error getting tasks") + }, + expectedError: "error getting tasks", + }, + { + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service()}, nil + }, + expectedError: "Template parsing error", + }, + } + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: tc.serviceListFunc, + nodeListFunc: tc.nodeListFunc, + taskListFunc: tc.taskListFunc, + }) + cmd := 
newServicesCommand(cli, &orchestrator) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestRunServicesWithEmptyName(t *testing.T) { + cmd := newServicesCommand(test.NewFakeCli(&fakeClient{}), &orchestrator) + cmd.SetArgs([]string{"' '"}) + cmd.SetOutput(ioutil.Discard) + + assert.ErrorContains(t, cmd.Execute(), `invalid stack name: "' '"`) +} + +func TestStackServicesEmptyServiceList(t *testing.T) { + fakeCli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{}, nil + }, + }) + cmd := newServicesCommand(fakeCli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("", fakeCli.OutBuffer().String())) + assert.Check(t, is.Equal("Nothing found in stack: foo\n", fakeCli.ErrBuffer().String())) +} + +func TestStackServicesWithQuietOption(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service(ServiceID("id-foo"))}, nil + }, + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.Flags().Set("quiet", "true") + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-services-with-quiet-option.golden") +} + +func TestStackServicesWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + *Service(ServiceName("service-name-foo")), + }, nil + }, + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + cmd.Flags().Set("format", "{{ .Name }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-services-with-format.golden") +} 
+ +func TestStackServicesWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + *Service(ServiceName("service-name-foo")), + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + ServicesFormat: "{{ .Name }}", + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-services-with-config-format.golden") +} + +func TestStackServicesWithoutFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{*Service( + ServiceName("name-foo"), + ServiceID("id-foo"), + ReplicatedService(2), + ServiceImage("busybox:latest"), + ServicePort(swarm.PortConfig{ + PublishMode: swarm.PortConfigPublishModeIngress, + PublishedPort: 0, + TargetPort: 3232, + Protocol: swarm.PortConfigProtocolTCP, + }), + )}, nil + }, + }) + cmd := newServicesCommand(cli, &orchestrator) + cmd.SetArgs([]string{"foo"}) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "stack-services-without-format.golden") +} diff --git a/cli/cli/command/stack/swarm/client_test.go b/cli/cli/command/stack/swarm/client_test.go new file mode 100644 index 00000000..7f9375e9 --- /dev/null +++ b/cli/cli/command/stack/swarm/client_test.go @@ -0,0 +1,239 @@ +package swarm + +import ( + "context" + "strings" + + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + + version string + + services []string + networks []string + secrets []string + configs []string + + removedServices []string + removedNetworks 
[]string + removedSecrets []string + removedConfigs []string + + serviceListFunc func(options types.ServiceListOptions) ([]swarm.Service, error) + networkListFunc func(options types.NetworkListOptions) ([]types.NetworkResource, error) + secretListFunc func(options types.SecretListOptions) ([]swarm.Secret, error) + configListFunc func(options types.ConfigListOptions) ([]swarm.Config, error) + nodeListFunc func(options types.NodeListOptions) ([]swarm.Node, error) + taskListFunc func(options types.TaskListOptions) ([]swarm.Task, error) + nodeInspectWithRaw func(ref string) (swarm.Node, []byte, error) + + serviceUpdateFunc func(serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + + serviceRemoveFunc func(serviceID string) error + networkRemoveFunc func(networkID string) error + secretRemoveFunc func(secretID string) error + configRemoveFunc func(configID string) error +} + +func (cli *fakeClient) ServerVersion(ctx context.Context) (types.Version, error) { + return types.Version{ + Version: "docker-dev", + APIVersion: api.DefaultVersion, + }, nil +} + +func (cli *fakeClient) ClientVersion() string { + return cli.version +} + +func (cli *fakeClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + if cli.serviceListFunc != nil { + return cli.serviceListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + servicesList := []swarm.Service{} + for _, name := range cli.services { + if belongToNamespace(name, namespace) { + servicesList = append(servicesList, serviceFromName(name)) + } + } + return servicesList, nil +} + +func (cli *fakeClient) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + if cli.networkListFunc != nil { + return cli.networkListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + networksList := []types.NetworkResource{} + 
for _, name := range cli.networks { + if belongToNamespace(name, namespace) { + networksList = append(networksList, networkFromName(name)) + } + } + return networksList, nil +} + +func (cli *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if cli.secretListFunc != nil { + return cli.secretListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + secretsList := []swarm.Secret{} + for _, name := range cli.secrets { + if belongToNamespace(name, namespace) { + secretsList = append(secretsList, secretFromName(name)) + } + } + return secretsList, nil +} + +func (cli *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if cli.configListFunc != nil { + return cli.configListFunc(options) + } + + namespace := namespaceFromFilters(options.Filters) + configsList := []swarm.Config{} + for _, name := range cli.configs { + if belongToNamespace(name, namespace) { + configsList = append(configsList, configFromName(name)) + } + } + return configsList, nil +} + +func (cli *fakeClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + if cli.taskListFunc != nil { + return cli.taskListFunc(options) + } + return []swarm.Task{}, nil +} + +func (cli *fakeClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + if cli.nodeListFunc != nil { + return cli.nodeListFunc(options) + } + return []swarm.Node{}, nil +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectWithRaw != nil { + return cli.nodeInspectWithRaw(ref) + } + return swarm.Node{}, nil, nil +} + +func (cli *fakeClient) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + if cli.serviceUpdateFunc != nil { + return 
cli.serviceUpdateFunc(serviceID, version, service, options) + } + + return types.ServiceUpdateResponse{}, nil +} + +func (cli *fakeClient) ServiceRemove(ctx context.Context, serviceID string) error { + if cli.serviceRemoveFunc != nil { + return cli.serviceRemoveFunc(serviceID) + } + + cli.removedServices = append(cli.removedServices, serviceID) + return nil +} + +func (cli *fakeClient) NetworkRemove(ctx context.Context, networkID string) error { + if cli.networkRemoveFunc != nil { + return cli.networkRemoveFunc(networkID) + } + + cli.removedNetworks = append(cli.removedNetworks, networkID) + return nil +} + +func (cli *fakeClient) SecretRemove(ctx context.Context, secretID string) error { + if cli.secretRemoveFunc != nil { + return cli.secretRemoveFunc(secretID) + } + + cli.removedSecrets = append(cli.removedSecrets, secretID) + return nil +} + +func (cli *fakeClient) ConfigRemove(ctx context.Context, configID string) error { + if cli.configRemoveFunc != nil { + return cli.configRemoveFunc(configID) + } + + cli.removedConfigs = append(cli.removedConfigs, configID) + return nil +} + +func serviceFromName(name string) swarm.Service { + return swarm.Service{ + ID: "ID-" + name, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func networkFromName(name string) types.NetworkResource { + return types.NetworkResource{ + ID: "ID-" + name, + Name: name, + } +} + +func secretFromName(name string) swarm.Secret { + return swarm.Secret{ + ID: "ID-" + name, + Spec: swarm.SecretSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func configFromName(name string) swarm.Config { + return swarm.Config{ + ID: "ID-" + name, + Spec: swarm.ConfigSpec{ + Annotations: swarm.Annotations{Name: name}, + }, + } +} + +func namespaceFromFilters(filters filters.Args) string { + label := filters.Get("label")[0] + return strings.TrimPrefix(label, convert.LabelNamespace+"=") +} + +func belongToNamespace(id, namespace string) bool { + return 
strings.HasPrefix(id, namespace+"_") +} + +func objectName(namespace, name string) string { + return namespace + "_" + name +} + +func objectID(name string) string { + return "ID-" + name +} + +func buildObjectIDs(objectNames []string) []string { + IDs := make([]string, len(objectNames)) + for i, name := range objectNames { + IDs[i] = objectID(name) + } + return IDs +} diff --git a/cli/cli/command/stack/swarm/common.go b/cli/cli/command/stack/swarm/common.go new file mode 100644 index 00000000..b4193df3 --- /dev/null +++ b/cli/cli/command/stack/swarm/common.go @@ -0,0 +1,50 @@ +package swarm + +import ( + "context" + + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +func getStackFilter(namespace string) filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getStackServiceFilter(namespace string) filters.Args { + return getStackFilter(namespace) +} + +func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args { + filter := opt.Value() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getAllStacksFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace) + return filter +} + +func getStackServices(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Service, error) { + return apiclient.ServiceList(ctx, types.ServiceListOptions{Filters: getStackServiceFilter(namespace)}) +} + +func getStackNetworks(ctx context.Context, apiclient client.APIClient, namespace string) ([]types.NetworkResource, error) { + return apiclient.NetworkList(ctx, types.NetworkListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackSecrets(ctx context.Context, apiclient client.APIClient, 
namespace string) ([]swarm.Secret, error) { + return apiclient.SecretList(ctx, types.SecretListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackConfigs(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Config, error) { + return apiclient.ConfigList(ctx, types.ConfigListOptions{Filters: getStackFilter(namespace)}) +} diff --git a/cli/cli/command/stack/swarm/deploy.go b/cli/cli/command/stack/swarm/deploy.go new file mode 100644 index 00000000..d11c328f --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy.go @@ -0,0 +1,80 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/convert" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// Resolve image constants +const ( + defaultNetworkDriver = "overlay" + ResolveImageAlways = "always" + ResolveImageChanged = "changed" + ResolveImageNever = "never" +) + +// RunDeploy is the swarm implementation of docker stack deploy +func RunDeploy(dockerCli command.Cli, opts options.Deploy, cfg *composetypes.Config) error { + ctx := context.Background() + + if err := validateResolveImageFlag(dockerCli, &opts); err != nil { + return err + } + + return deployCompose(ctx, dockerCli, opts, cfg) +} + +// validateResolveImageFlag validates the opts.resolveImage command line option +// and also turns image resolution off if the version is older than 1.30 +func validateResolveImageFlag(dockerCli command.Cli, opts *options.Deploy) error { + if opts.ResolveImage != ResolveImageAlways && opts.ResolveImage != ResolveImageChanged && opts.ResolveImage != ResolveImageNever { + return errors.Errorf("Invalid option %s for flag --resolve-image", opts.ResolveImage) + } + // client side image resolution should not be done when the supported + // server version 
is older than 1.30 + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.30") { + opts.ResolveImage = ResolveImageNever + } + return nil +} + +// checkDaemonIsSwarmManager does an Info API call to verify that the daemon is +// a swarm manager. This is necessary because we must create networks before we +// create services, but the API call for creating a network does not return a +// proper status code when it can't create a network in the "global" scope. +func checkDaemonIsSwarmManager(ctx context.Context, dockerCli command.Cli) error { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if !info.Swarm.ControlAvailable { + return errors.New("this node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again") + } + return nil +} + +// pruneServices removes services that are no longer referenced in the source +func pruneServices(ctx context.Context, dockerCli command.Cli, namespace convert.Namespace, services map[string]struct{}) { + client := dockerCli.Client() + + oldServices, err := getStackServices(ctx, client, namespace.Name()) + if err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to list services: %s\n", err) + } + + pruneServices := []swarm.Service{} + for _, service := range oldServices { + if _, exists := services[namespace.Descope(service.Spec.Name)]; !exists { + pruneServices = append(pruneServices, service) + } + } + removeServices(ctx, dockerCli, pruneServices) +} diff --git a/cli/cli/command/stack/swarm/deploy_bundlefile.go b/cli/cli/command/stack/swarm/deploy_bundlefile.go new file mode 100644 index 00000000..8db6f66b --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_bundlefile.go @@ -0,0 +1,124 @@ +package swarm + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/bundlefile" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/convert" + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" +) + +// DeployBundle deploy a bundlefile (dab) on a swarm. +func DeployBundle(ctx context.Context, dockerCli command.Cli, opts options.Deploy) error { + bundle, err := loadBundlefile(dockerCli.Err(), opts.Namespace, opts.Bundlefile) + if err != nil { + return err + } + + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.Namespace) + + if opts.Prune { + services := map[string]struct{}{} + for service := range bundle.Services { + services[service] = struct{}{} + } + pruneServices(ctx, dockerCli, namespace, services) + } + + networks := make(map[string]types.NetworkCreate) + for _, service := range bundle.Services { + for _, networkName := range service.Networks { + networks[namespace.Scope(networkName)] = types.NetworkCreate{ + Labels: convert.AddStackLabel(namespace, nil), + } + } + } + + services := make(map[string]swarm.ServiceSpec) + for internalName, service := range bundle.Services { + name := namespace.Scope(internalName) + + var ports []swarm.PortConfig + for _, portSpec := range service.Ports { + ports = append(ports, swarm.PortConfig{ + Protocol: swarm.PortConfigProtocol(portSpec.Protocol), + TargetPort: portSpec.Port, + }) + } + + nets := []swarm.NetworkAttachmentConfig{} + for _, networkName := range service.Networks { + nets = append(nets, swarm.NetworkAttachmentConfig{ + Target: namespace.Scope(networkName), + Aliases: []string{internalName}, + }) + } + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: convert.AddStackLabel(namespace, service.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: service.Image, + Command: service.Command, + Args: service.Args, + Env: service.Env, + // Service Labels will not be copied to Containers + // automatically during the deployment so we apply + // 
it here. + Labels: convert.AddStackLabel(namespace, nil), + }, + }, + EndpointSpec: &swarm.EndpointSpec{ + Ports: ports, + }, + Networks: nets, + } + + services[internalName] = serviceSpec + } + + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.SendRegistryAuth, opts.ResolveImage) +} + +func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { + defaultPath := fmt.Sprintf("%s.dab", namespace) + + if path == "" { + path = defaultPath + } + if _, err := os.Stat(path); err != nil { + return nil, errors.Errorf( + "Bundle %s not found. Specify the path with --file", + path) + } + + fmt.Fprintf(stderr, "Loading bundle from %s\n", path) + reader, err := os.Open(path) + if err != nil { + return nil, err + } + defer reader.Close() + + bundle, err := bundlefile.LoadFile(reader) + if err != nil { + return nil, errors.Errorf("Error reading %s: %v\n", path, err) + } + return bundle, err +} diff --git a/cli/cli/command/stack/swarm/deploy_bundlefile_test.go b/cli/cli/command/stack/swarm/deploy_bundlefile_test.go new file mode 100644 index 00000000..485271cb --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_bundlefile_test.go @@ -0,0 +1,50 @@ +package swarm + +import ( + "bytes" + "path/filepath" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestLoadBundlefileErrors(t *testing.T) { + testCases := []struct { + namespace string + path string + expectedError string + }{ + { + namespace: "namespace_foo", + expectedError: "Bundle namespace_foo.dab not found", + }, + { + namespace: "namespace_foo", + path: "invalid_path", + expectedError: "Bundle invalid_path not found", + }, + // FIXME: this test never working, testdata file is missing from repo + //{ + // namespace: "namespace_foo", + // path: string(golden.Get(t, "bundlefile_with_invalid_syntax")), + // expectedError: "Error reading", + //}, 
+ } + + for _, tc := range testCases { + _, err := loadBundlefile(&bytes.Buffer{}, tc.namespace, tc.path) + assert.ErrorContains(t, err, tc.expectedError) + } +} + +func TestLoadBundlefile(t *testing.T) { + buf := new(bytes.Buffer) + + namespace := "" + path := filepath.Join("testdata", "bundlefile_with_two_services.dab") + bundleFile, err := loadBundlefile(buf, namespace, path) + + assert.NilError(t, err) + assert.Check(t, is.Equal(len(bundleFile.Services), 2)) +} diff --git a/cli/cli/command/stack/swarm/deploy_composefile.go b/cli/cli/command/stack/swarm/deploy_composefile.go new file mode 100644 index 00000000..e4574fd9 --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_composefile.go @@ -0,0 +1,281 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/convert" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + apiclient "github.com/docker/docker/client" + dockerclient "github.com/docker/docker/client" + "github.com/pkg/errors" +) + +func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Deploy, config *composetypes.Config) error { + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.Namespace) + + if opts.Prune { + services := map[string]struct{}{} + for _, service := range config.Services { + services[service.Name] = struct{}{} + } + pruneServices(ctx, dockerCli, namespace, services) + } + + serviceNetworks := getServicesDeclaredNetworks(config.Services) + networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks) + if err := validateExternalNetworks(ctx, dockerCli.Client(), externalNetworks); err != nil { + return err + } + if err := createNetworks(ctx, dockerCli, 
namespace, networks); err != nil { + return err + } + + secrets, err := convert.Secrets(namespace, config.Secrets) + if err != nil { + return err + } + if err := createSecrets(ctx, dockerCli, secrets); err != nil { + return err + } + + configs, err := convert.Configs(namespace, config.Configs) + if err != nil { + return err + } + if err := createConfigs(ctx, dockerCli, configs); err != nil { + return err + } + + services, err := convert.Services(namespace, config, dockerCli.Client()) + if err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.SendRegistryAuth, opts.ResolveImage) +} + +func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} { + serviceNetworks := map[string]struct{}{} + for _, serviceConfig := range serviceConfigs { + if len(serviceConfig.Networks) == 0 { + serviceNetworks["default"] = struct{}{} + continue + } + for network := range serviceConfig.Networks { + serviceNetworks[network] = struct{}{} + } + } + return serviceNetworks +} + +func validateExternalNetworks( + ctx context.Context, + client dockerclient.NetworkAPIClient, + externalNetworks []string, +) error { + for _, networkName := range externalNetworks { + if !container.NetworkMode(networkName).IsUserDefined() { + // Networks that are not user defined always exist on all nodes as + // local-scoped networks, so there's no need to inspect them. + continue + } + network, err := client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}) + switch { + case dockerclient.IsErrNotFound(err): + return errors.Errorf("network %q is declared as external, but could not be found. 
You need to create a swarm-scoped network before the stack is deployed", networkName) + case err != nil: + return err + case network.Scope != "swarm": + return errors.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of \"swarm\"", networkName, network.Scope) + } + } + return nil +} + +func createSecrets( + ctx context.Context, + dockerCli command.Cli, + secrets []swarm.SecretSpec, +) error { + client := dockerCli.Client() + + for _, secretSpec := range secrets { + secret, _, err := client.SecretInspectWithRaw(ctx, secretSpec.Name) + switch { + case err == nil: + // secret already exists, then we update that + if err := client.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec); err != nil { + return errors.Wrapf(err, "failed to update secret %s", secretSpec.Name) + } + case apiclient.IsErrNotFound(err): + // secret does not exist, then we create a new one. + fmt.Fprintf(dockerCli.Out(), "Creating secret %s\n", secretSpec.Name) + if _, err := client.SecretCreate(ctx, secretSpec); err != nil { + return errors.Wrapf(err, "failed to create secret %s", secretSpec.Name) + } + default: + return err + } + } + return nil +} + +func createConfigs( + ctx context.Context, + dockerCli command.Cli, + configs []swarm.ConfigSpec, +) error { + client := dockerCli.Client() + + for _, configSpec := range configs { + config, _, err := client.ConfigInspectWithRaw(ctx, configSpec.Name) + switch { + case err == nil: + // config already exists, then we update that + if err := client.ConfigUpdate(ctx, config.ID, config.Meta.Version, configSpec); err != nil { + return errors.Wrapf(err, "failed to update config %s", configSpec.Name) + } + case apiclient.IsErrNotFound(err): + // config does not exist, then we create a new one. 
+ fmt.Fprintf(dockerCli.Out(), "Creating config %s\n", configSpec.Name) + if _, err := client.ConfigCreate(ctx, configSpec); err != nil { + return errors.Wrapf(err, "failed to create config %s", configSpec.Name) + } + default: + return err + } + } + return nil +} + +func createNetworks( + ctx context.Context, + dockerCli command.Cli, + namespace convert.Namespace, + networks map[string]types.NetworkCreate, +) error { + client := dockerCli.Client() + + existingNetworks, err := getStackNetworks(ctx, client, namespace.Name()) + if err != nil { + return err + } + + existingNetworkMap := make(map[string]types.NetworkResource) + for _, network := range existingNetworks { + existingNetworkMap[network.Name] = network + } + + for name, createOpts := range networks { + if _, exists := existingNetworkMap[name]; exists { + continue + } + + if createOpts.Driver == "" { + createOpts.Driver = defaultNetworkDriver + } + + fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) + if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { + return errors.Wrapf(err, "failed to create network %s", name) + } + } + return nil +} + +func deployServices( + ctx context.Context, + dockerCli command.Cli, + services map[string]swarm.ServiceSpec, + namespace convert.Namespace, + sendAuth bool, + resolveImage string, +) error { + apiClient := dockerCli.Client() + out := dockerCli.Out() + + existingServices, err := getStackServices(ctx, apiClient, namespace.Name()) + if err != nil { + return err + } + + existingServiceMap := make(map[string]swarm.Service) + for _, service := range existingServices { + existingServiceMap[service.Spec.Name] = service + } + + for internalName, serviceSpec := range services { + name := namespace.Scope(internalName) + + encodedAuth := "" + image := serviceSpec.TaskTemplate.ContainerSpec.Image + if sendAuth { + // Retrieve encoded auth token from the image reference + encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err 
!= nil { + return err + } + } + + if service, exists := existingServiceMap[name]; exists { + fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) + + updateOpts := types.ServiceUpdateOptions{EncodedRegistryAuth: encodedAuth} + + switch { + case resolveImage == ResolveImageAlways || (resolveImage == ResolveImageChanged && image != service.Spec.Labels[convert.LabelImage]): + // image should be updated by the server using QueryRegistry + updateOpts.QueryRegistry = true + case image == service.Spec.Labels[convert.LabelImage]: + // image has not changed; update the serviceSpec with the + // existing information that was set by QueryRegistry on the + // previous deploy. Otherwise this will trigger an incorrect + // service update. + serviceSpec.TaskTemplate.ContainerSpec.Image = service.Spec.TaskTemplate.ContainerSpec.Image + } + + // Stack deploy does not have a `--force` option. Preserve existing ForceUpdate + // value so that tasks are not re-deployed if not updated. + // TODO move this to API client? 
+ serviceSpec.TaskTemplate.ForceUpdate = service.Spec.TaskTemplate.ForceUpdate + + response, err := apiClient.ServiceUpdate( + ctx, + service.ID, + service.Version, + serviceSpec, + updateOpts, + ) + if err != nil { + return errors.Wrapf(err, "failed to update service %s", name) + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + } else { + fmt.Fprintf(out, "Creating service %s\n", name) + + createOpts := types.ServiceCreateOptions{EncodedRegistryAuth: encodedAuth} + + // query registry if flag disabling it was not set + if resolveImage == ResolveImageAlways || resolveImage == ResolveImageChanged { + createOpts.QueryRegistry = true + } + + if _, err := apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil { + return errors.Wrapf(err, "failed to create service %s", name) + } + } + } + return nil +} diff --git a/cli/cli/command/stack/swarm/deploy_composefile_test.go b/cli/cli/command/stack/swarm/deploy_composefile_test.go new file mode 100644 index 00000000..065a4f29 --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_composefile_test.go @@ -0,0 +1,67 @@ +package swarm + +import ( + "context" + "testing" + + "github.com/docker/cli/internal/test/network" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +type notFound struct { + error +} + +func (n notFound) NotFound() bool { + return true +} + +func TestValidateExternalNetworks(t *testing.T) { + var testcases = []struct { + inspectResponse types.NetworkResource + inspectError error + expectedMsg string + network string + }{ + { + inspectError: notFound{}, + expectedMsg: "could not be found. 
You need to create a swarm-scoped network", + }, + { + inspectError: errors.New("Unexpected"), + expectedMsg: "Unexpected", + }, + // FIXME(vdemeester) that doesn't work under windows, the check needs to be smarter + /* + { + inspectError: errors.New("host net does not exist on swarm classic"), + network: "host", + }, + */ + { + network: "user", + expectedMsg: "is not in the right scope", + }, + { + network: "user", + inspectResponse: types.NetworkResource{Scope: "swarm"}, + }, + } + + for _, testcase := range testcases { + fakeClient := &network.FakeClient{ + NetworkInspectFunc: func(_ context.Context, _ string, _ types.NetworkInspectOptions) (types.NetworkResource, error) { + return testcase.inspectResponse, testcase.inspectError + }, + } + networks := []string{testcase.network} + err := validateExternalNetworks(context.Background(), fakeClient, networks) + if testcase.expectedMsg == "" { + assert.NilError(t, err) + } else { + assert.ErrorContains(t, err, testcase.expectedMsg) + } + } +} diff --git a/cli/cli/command/stack/swarm/deploy_test.go b/cli/cli/command/stack/swarm/deploy_test.go new file mode 100644 index 00000000..970d790b --- /dev/null +++ b/cli/cli/command/stack/swarm/deploy_test.go @@ -0,0 +1,110 @@ +package swarm + +import ( + "context" + "testing" + + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPruneServices(t *testing.T) { + ctx := context.Background() + namespace := convert.NewNamespace("foo") + services := map[string]struct{}{ + "new": {}, + "keep": {}, + } + client := &fakeClient{services: []string{objectName("foo", "keep"), objectName("foo", "remove")}} + dockerCli := test.NewFakeCli(client) + + pruneServices(ctx, dockerCli, namespace, services) + assert.Check(t, is.DeepEqual(buildObjectIDs([]string{objectName("foo", "remove")}), client.removedServices)) 
+} + +// TestServiceUpdateResolveImageChanged tests that the service's +// image digest, and "ForceUpdate" is preserved if the image did not change in +// the compose file +func TestServiceUpdateResolveImageChanged(t *testing.T) { + namespace := convert.NewNamespace("mystack") + + var ( + receivedOptions types.ServiceUpdateOptions + receivedService swarm.ServiceSpec + ) + + client := test.NewFakeCli(&fakeClient{ + serviceListFunc: func(options types.ServiceListOptions) ([]swarm.Service, error) { + return []swarm.Service{ + { + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: namespace.Name() + "_myservice", + Labels: map[string]string{"com.docker.stack.image": "foobar:1.2.3"}, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "foobar:1.2.3@sha256:deadbeef", + }, + ForceUpdate: 123, + }, + }, + }, + }, nil + }, + serviceUpdateFunc: func(serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + receivedOptions = options + receivedService = service + return types.ServiceUpdateResponse{}, nil + }, + }) + + var testcases = []struct { + image string + expectedQueryRegistry bool + expectedImage string + expectedForceUpdate uint64 + }{ + // Image not changed + { + image: "foobar:1.2.3", + expectedQueryRegistry: false, + expectedImage: "foobar:1.2.3@sha256:deadbeef", + expectedForceUpdate: 123, + }, + // Image changed + { + image: "foobar:1.2.4", + expectedQueryRegistry: true, + expectedImage: "foobar:1.2.4", + expectedForceUpdate: 123, + }, + } + + ctx := context.Background() + + for _, testcase := range testcases { + t.Logf("Testing image %q", testcase.image) + spec := map[string]swarm.ServiceSpec{ + "myservice": { + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: testcase.image, + }, + }, + }, + } + err := deployServices(ctx, client, spec, namespace, false, ResolveImageChanged) + assert.NilError(t, err) 
+ assert.Check(t, is.Equal(receivedOptions.QueryRegistry, testcase.expectedQueryRegistry)) + assert.Check(t, is.Equal(receivedService.TaskTemplate.ContainerSpec.Image, testcase.expectedImage)) + assert.Check(t, is.Equal(receivedService.TaskTemplate.ForceUpdate, testcase.expectedForceUpdate)) + + receivedService = swarm.ServiceSpec{} + receivedOptions = types.ServiceUpdateOptions{} + } +} diff --git a/cli/cli/command/stack/swarm/list.go b/cli/cli/command/stack/swarm/list.go new file mode 100644 index 00000000..817275e8 --- /dev/null +++ b/cli/cli/command/stack/swarm/list.go @@ -0,0 +1,45 @@ +package swarm + +import ( + "context" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/formatter" + "github.com/docker/cli/cli/compose/convert" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// GetStacks lists the swarm stacks. +func GetStacks(dockerCli command.Cli) ([]*formatter.Stack, error) { + services, err := dockerCli.Client().ServiceList( + context.Background(), + types.ServiceListOptions{Filters: getAllStacksFilter()}) + if err != nil { + return nil, err + } + m := make(map[string]*formatter.Stack) + for _, service := range services { + labels := service.Spec.Labels + name, ok := labels[convert.LabelNamespace] + if !ok { + return nil, errors.Errorf("cannot get label %s for service %s", + convert.LabelNamespace, service.ID) + } + ztack, ok := m[name] + if !ok { + m[name] = &formatter.Stack{ + Name: name, + Services: 1, + Orchestrator: "Swarm", + } + } else { + ztack.Services++ + } + } + var stacks []*formatter.Stack + for _, stack := range m { + stacks = append(stacks, stack) + } + return stacks, nil +} diff --git a/cli/cli/command/stack/swarm/ps.go b/cli/cli/command/stack/swarm/ps.go new file mode 100644 index 00000000..5b28a39e --- /dev/null +++ b/cli/cli/command/stack/swarm/ps.go @@ -0,0 +1,35 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli/command" + 
"github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/command/task" + "github.com/docker/docker/api/types" +) + +// RunPS is the swarm implementation of docker stack ps +func RunPS(dockerCli command.Cli, opts options.PS) error { + filter := getStackFilterFromOpt(opts.Namespace, opts.Filter) + + ctx := context.Background() + client := dockerCli.Client() + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + if len(tasks) == 0 { + return fmt.Errorf("nothing found in stack: %s", opts.Namespace) + } + + format := opts.Format + if len(format) == 0 { + format = task.DefaultFormat(dockerCli.ConfigFile(), opts.Quiet) + } + + return task.Print(ctx, dockerCli, tasks, idresolver.New(client, opts.NoResolve), !opts.NoTrunc, opts.Quiet, format) +} diff --git a/cli/cli/command/stack/swarm/remove.go b/cli/cli/command/stack/swarm/remove.go new file mode 100644 index 00000000..4dedef12 --- /dev/null +++ b/cli/cli/command/stack/swarm/remove.go @@ -0,0 +1,140 @@ +package swarm + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// RunRemove is the swarm implementation of docker stack remove +func RunRemove(dockerCli command.Cli, opts options.Remove) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + for _, namespace := range opts.Namespaces { + services, err := getStackServices(ctx, client, namespace) + if err != nil { + return err + } + + networks, err := getStackNetworks(ctx, client, namespace) + if err != nil { + return err + } + + var secrets []swarm.Secret + if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.25") { + secrets, err = 
getStackSecrets(ctx, client, namespace) + if err != nil { + return err + } + } + + var configs []swarm.Config + if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.30") { + configs, err = getStackConfigs(ctx, client, namespace) + if err != nil { + return err + } + } + + if len(services)+len(networks)+len(secrets)+len(configs) == 0 { + fmt.Fprintf(dockerCli.Err(), "Nothing found in stack: %s\n", namespace) + continue + } + + hasError := removeServices(ctx, dockerCli, services) + hasError = removeSecrets(ctx, dockerCli, secrets) || hasError + hasError = removeConfigs(ctx, dockerCli, configs) || hasError + hasError = removeNetworks(ctx, dockerCli, networks) || hasError + + if hasError { + errs = append(errs, fmt.Sprintf("Failed to remove some resources from stack: %s", namespace)) + } + } + + if len(errs) > 0 { + return errors.Errorf(strings.Join(errs, "\n")) + } + return nil +} + +func sortServiceByName(services []swarm.Service) func(i, j int) bool { + return func(i, j int) bool { + return services[i].Spec.Name < services[j].Spec.Name + } +} + +func removeServices( + ctx context.Context, + dockerCli command.Cli, + services []swarm.Service, +) bool { + var hasError bool + sort.Slice(services, sortServiceByName(services)) + for _, service := range services { + fmt.Fprintf(dockerCli.Out(), "Removing service %s\n", service.Spec.Name) + if err := dockerCli.Client().ServiceRemove(ctx, service.ID); err != nil { + hasError = true + fmt.Fprintf(dockerCli.Err(), "Failed to remove service %s: %s", service.ID, err) + } + } + return hasError +} + +func removeNetworks( + ctx context.Context, + dockerCli command.Cli, + networks []types.NetworkResource, +) bool { + var hasError bool + for _, network := range networks { + fmt.Fprintf(dockerCli.Out(), "Removing network %s\n", network.Name) + if err := dockerCli.Client().NetworkRemove(ctx, network.ID); err != nil { + hasError = true + fmt.Fprintf(dockerCli.Err(), "Failed to remove network %s: %s", network.ID, err) + } + } + 
return hasError +} + +func removeSecrets( + ctx context.Context, + dockerCli command.Cli, + secrets []swarm.Secret, +) bool { + var hasError bool + for _, secret := range secrets { + fmt.Fprintf(dockerCli.Out(), "Removing secret %s\n", secret.Spec.Name) + if err := dockerCli.Client().SecretRemove(ctx, secret.ID); err != nil { + hasError = true + fmt.Fprintf(dockerCli.Err(), "Failed to remove secret %s: %s", secret.ID, err) + } + } + return hasError +} + +func removeConfigs( + ctx context.Context, + dockerCli command.Cli, + configs []swarm.Config, +) bool { + var hasError bool + for _, config := range configs { + fmt.Fprintf(dockerCli.Out(), "Removing config %s\n", config.Spec.Name) + if err := dockerCli.Client().ConfigRemove(ctx, config.ID); err != nil { + hasError = true + fmt.Fprintf(dockerCli.Err(), "Failed to remove config %s: %s", config.ID, err) + } + } + return hasError +} diff --git a/cli/cli/command/stack/swarm/services.go b/cli/cli/command/stack/swarm/services.go new file mode 100644 index 00000000..cd7208f8 --- /dev/null +++ b/cli/cli/command/stack/swarm/services.go @@ -0,0 +1,66 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/service" + "github.com/docker/cli/cli/command/stack/formatter" + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// RunServices is the swarm implementation of docker stack services +func RunServices(dockerCli command.Cli, opts options.Services) error { + ctx := context.Background() + client := dockerCli.Client() + + filter := getStackFilterFromOpt(opts.Namespace, opts.Filter) + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) + if err != nil { + return err + } + + // if no services in this stack, print message and exit 0 + if len(services) == 0 { + fmt.Fprintf(dockerCli.Err(), "Nothing found in stack: %s\n", opts.Namespace) + 
return nil + } + + info := map[string]service.ListInfo{} + if !opts.Quiet { + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + info = service.GetServicesStatus(services, nodes, tasks) + } + + format := opts.Format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !opts.Quiet { + format = dockerCli.ConfigFile().ServicesFormat + } else { + format = formatter.TableFormatKey + } + } + + servicesCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: service.NewListFormat(format, opts.Quiet), + } + return service.ListFormatWrite(servicesCtx, services, info) +} diff --git a/cli/cli/command/stack/swarm/testdata/bundlefile_with_two_services.dab b/cli/cli/command/stack/swarm/testdata/bundlefile_with_two_services.dab new file mode 100644 index 00000000..ced8180d --- /dev/null +++ b/cli/cli/command/stack/swarm/testdata/bundlefile_with_two_services.dab @@ -0,0 +1,29 @@ +{ + "Services": { + "visualizer": { + "Image": "busybox@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f", + "Networks": [ + "webnet" + ], + "Ports": [ + { + "Port": 8080, + "Protocol": "tcp" + } + ] + }, + "web": { + "Image": "busybox@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f", + "Networks": [ + "webnet" + ], + "Ports": [ + { + "Port": 80, + "Protocol": "tcp" + } + ] + } + }, + "Version": "0.1" +} diff --git a/cli/cli/command/stack/testdata/stack-list-sort-natural.golden b/cli/cli/command/stack/testdata/stack-list-sort-natural.golden new file mode 100644 index 00000000..3090cb9e --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-list-sort-natural.golden @@ -0,0 +1,4 @@ +NAME SERVICES ORCHESTRATOR +service-name-1-foo 1 Swarm 
+service-name-2-foo 1 Swarm +service-name-10-foo 1 Swarm diff --git a/cli/cli/command/stack/testdata/stack-list-sort.golden b/cli/cli/command/stack/testdata/stack-list-sort.golden new file mode 100644 index 00000000..179ae71d --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-list-sort.golden @@ -0,0 +1,3 @@ +NAME SERVICES ORCHESTRATOR +service-name-bar 1 Swarm +service-name-foo 1 Swarm diff --git a/cli/cli/command/stack/testdata/stack-list-with-format.golden b/cli/cli/command/stack/testdata/stack-list-with-format.golden new file mode 100644 index 00000000..b53e6401 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-list-with-format.golden @@ -0,0 +1 @@ +service-name-foo diff --git a/cli/cli/command/stack/testdata/stack-list-without-format.golden b/cli/cli/command/stack/testdata/stack-list-without-format.golden new file mode 100644 index 00000000..37213aaf --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-list-without-format.golden @@ -0,0 +1,2 @@ +NAME SERVICES ORCHESTRATOR +service-name-foo 1 Swarm diff --git a/cli/cli/command/stack/testdata/stack-ps-with-config-format.golden b/cli/cli/command/stack/testdata/stack-ps-with-config-format.golden new file mode 100644 index 00000000..9ecebdaf --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-config-format.golden @@ -0,0 +1 @@ +service-id-foo.1 diff --git a/cli/cli/command/stack/testdata/stack-ps-with-format.golden b/cli/cli/command/stack/testdata/stack-ps-with-format.golden new file mode 100644 index 00000000..9ecebdaf --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-format.golden @@ -0,0 +1 @@ +service-id-foo.1 diff --git a/cli/cli/command/stack/testdata/stack-ps-with-no-resolve-option.golden b/cli/cli/command/stack/testdata/stack-ps-with-no-resolve-option.golden new file mode 100644 index 00000000..b90d743b --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-no-resolve-option.golden @@ -0,0 +1 @@ +id-node-foo diff --git 
a/cli/cli/command/stack/testdata/stack-ps-with-no-trunc-option.golden b/cli/cli/command/stack/testdata/stack-ps-with-no-trunc-option.golden new file mode 100644 index 00000000..8179bf4d --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-no-trunc-option.golden @@ -0,0 +1 @@ +xn4cypcov06f2w8gsbaf2lst3 diff --git a/cli/cli/command/stack/testdata/stack-ps-with-quiet-option.golden b/cli/cli/command/stack/testdata/stack-ps-with-quiet-option.golden new file mode 100644 index 00000000..e2faeb60 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-with-quiet-option.golden @@ -0,0 +1 @@ +id-foo diff --git a/cli/cli/command/stack/testdata/stack-ps-without-format.golden b/cli/cli/command/stack/testdata/stack-ps-without-format.golden new file mode 100644 index 00000000..ceb4f841 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-ps-without-format.golden @@ -0,0 +1,2 @@ +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +id-foo service-id-foo.1 myimage:mytag node-name-bar Ready Failed 2 hours ago diff --git a/cli/cli/command/stack/testdata/stack-services-with-config-format.golden b/cli/cli/command/stack/testdata/stack-services-with-config-format.golden new file mode 100644 index 00000000..b53e6401 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-services-with-config-format.golden @@ -0,0 +1 @@ +service-name-foo diff --git a/cli/cli/command/stack/testdata/stack-services-with-format.golden b/cli/cli/command/stack/testdata/stack-services-with-format.golden new file mode 100644 index 00000000..b53e6401 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-services-with-format.golden @@ -0,0 +1 @@ +service-name-foo diff --git a/cli/cli/command/stack/testdata/stack-services-with-quiet-option.golden b/cli/cli/command/stack/testdata/stack-services-with-quiet-option.golden new file mode 100644 index 00000000..e2faeb60 --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-services-with-quiet-option.golden @@ -0,0 +1 @@ +id-foo diff --git 
a/cli/cli/command/stack/testdata/stack-services-without-format.golden b/cli/cli/command/stack/testdata/stack-services-without-format.golden new file mode 100644 index 00000000..dcca0dfa --- /dev/null +++ b/cli/cli/command/stack/testdata/stack-services-without-format.golden @@ -0,0 +1,2 @@ +ID NAME MODE REPLICAS IMAGE PORTS +id-foo name-foo replicated 0/2 busybox:latest *:30000->3232/tcp diff --git a/cli/cli/command/streams.go b/cli/cli/command/streams.go new file mode 100644 index 00000000..fa435e16 --- /dev/null +++ b/cli/cli/command/streams.go @@ -0,0 +1,23 @@ +package command + +import ( + "github.com/docker/cli/cli/streams" +) + +// InStream is an input stream used by the DockerCli to read user input +// Deprecated: Use github.com/docker/cli/cli/streams.In instead +type InStream = streams.In + +// OutStream is an output stream used by the DockerCli to write normal program +// output. +// Deprecated: Use github.com/docker/cli/cli/streams.Out instead +type OutStream = streams.Out + +var ( + // NewInStream returns a new InStream object from a ReadCloser + // Deprecated: Use github.com/docker/cli/cli/streams.NewIn instead + NewInStream = streams.NewIn + // NewOutStream returns a new OutStream object from a Writer + // Deprecated: Use github.com/docker/cli/cli/streams.NewOut instead + NewOutStream = streams.NewOut +) diff --git a/cli/cli/command/swarm/ca.go b/cli/cli/command/swarm/ca.go new file mode 100644 index 00000000..961e7e89 --- /dev/null +++ b/cli/cli/command/swarm/ca.go @@ -0,0 +1,141 @@ +package swarm + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/swarm/progress" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type caOptions struct { + swarmCAOptions + rootCACert PEMFile + rootCAKey PEMFile + rotate bool + 
detach bool + quiet bool +} + +func newCACommand(dockerCli command.Cli) *cobra.Command { + opts := caOptions{} + + cmd := &cobra.Command{ + Use: "ca [OPTIONS]", + Short: "Display and rotate the root CA", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runCA(dockerCli, cmd.Flags(), opts) + }, + Annotations: map[string]string{"version": "1.30"}, + } + + flags := cmd.Flags() + addSwarmCAFlags(flags, &opts.swarmCAOptions) + flags.BoolVar(&opts.rotate, flagRotate, false, "Rotate the swarm CA - if no certificate or key are provided, new ones will be generated") + flags.Var(&opts.rootCACert, flagCACert, "Path to the PEM-formatted root CA certificate to use for the new cluster") + flags.Var(&opts.rootCAKey, flagCAKey, "Path to the PEM-formatted root CA key to use for the new cluster") + + flags.BoolVarP(&opts.detach, "detach", "d", false, "Exit immediately instead of waiting for the root rotation to converge") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress progress output") + return cmd +} + +func runCA(dockerCli command.Cli, flags *pflag.FlagSet, opts caOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + swarmInspect, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if !opts.rotate { + for _, f := range []string{flagCACert, flagCAKey, flagCertExpiry, flagExternalCA} { + if flags.Changed(f) { + return fmt.Errorf("`--%s` flag requires the `--rotate` flag to update the CA", f) + } + } + return displayTrustRoot(dockerCli.Out(), swarmInspect) + } + + if flags.Changed(flagExternalCA) && len(opts.externalCA.Value()) > 0 && !flags.Changed(flagCACert) { + return fmt.Errorf( + "rotating to an external CA requires the `--%s` flag to specify the external CA's cert - "+ + "to add an external CA with the current root CA certificate, use the `update` command instead", flagCACert) + } + + if flags.Changed(flagCACert) && len(opts.externalCA.Value()) == 0 && !flags.Changed(flagCAKey) { + 
return fmt.Errorf("the --%s flag requires that a --%s flag and/or --%s flag be provided as well", + flagCACert, flagCAKey, flagExternalCA) + } + + updateSwarmSpec(&swarmInspect.Spec, flags, opts) + if err := client.SwarmUpdate(ctx, swarmInspect.Version, swarmInspect.Spec, swarm.UpdateFlags{}); err != nil { + return err + } + + if opts.detach { + return nil + } + return attach(ctx, dockerCli, opts) +} + +func updateSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet, opts caOptions) { + caCert := opts.rootCACert.Contents() + caKey := opts.rootCAKey.Contents() + opts.mergeSwarmSpecCAFlags(spec, flags, caCert) + + spec.CAConfig.SigningCACert = caCert + spec.CAConfig.SigningCAKey = caKey + + if caKey == "" && caCert == "" { + spec.CAConfig.ForceRotate++ + } +} + +func attach(ctx context.Context, dockerCli command.Cli, opts caOptions) error { + client := dockerCli.Client() + errChan := make(chan error, 1) + pipeReader, pipeWriter := io.Pipe() + + go func() { + errChan <- progress.RootRotationProgress(ctx, client, pipeWriter) + }() + + if opts.quiet { + go io.Copy(ioutil.Discard, pipeReader) + return <-errChan + } + + err := jsonmessage.DisplayJSONMessagesToStream(pipeReader, dockerCli.Out(), nil) + if err == nil { + err = <-errChan + } + if err != nil { + return err + } + + swarmInspect, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + return displayTrustRoot(dockerCli.Out(), swarmInspect) +} + +func displayTrustRoot(out io.Writer, info swarm.Swarm) error { + if info.ClusterInfo.TLSInfo.TrustRoot == "" { + return errors.New("No CA information available") + } + fmt.Fprintln(out, strings.TrimSpace(info.ClusterInfo.TLSInfo.TrustRoot)) + return nil +} diff --git a/cli/cli/command/swarm/ca_test.go b/cli/cli/command/swarm/ca_test.go new file mode 100644 index 00000000..d0eff116 --- /dev/null +++ b/cli/cli/command/swarm/ca_test.go @@ -0,0 +1,300 @@ +package swarm + +import ( + "bytes" + "io/ioutil" + "os" + "testing" + "time" + + 
"github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +const ( + cert = ` +-----BEGIN CERTIFICATE----- +MIIBuDCCAV4CCQDOqUYOWdqMdjAKBggqhkjOPQQDAzBjMQswCQYDVQQGEwJVUzEL +MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv +Y2tlcjEPMA0GA1UECwwGRG9ja2VyMQ0wCwYDVQQDDARUZXN0MCAXDTE4MDcwMjIx +MjkxOFoYDzMwMTcxMTAyMjEyOTE4WjBjMQswCQYDVQQGEwJVUzELMAkGA1UECAwC +Q0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRvY2tlcjEPMA0G +A1UECwwGRG9ja2VyMQ0wCwYDVQQDDARUZXN0MFkwEwYHKoZIzj0CAQYIKoZIzj0D +AQcDQgAEgvvZl5Vqpr1e+g5IhoU6TZHgRau+BZETVFTmqyWYajA/mooRQ1MZTozu +s9ZZZA8tzUhIqS36gsFuyIZ4YiAlyjAKBggqhkjOPQQDAwNIADBFAiBQ7pCPQrj8 +8zaItMf0pk8j1NU5XrFqFEZICzvjzUJQBAIhAKq2gFwoTn8KH+cAAXZpAGJPmOsT +zsBT8gBAOHhNA6/2 +-----END CERTIFICATE-----` + key = ` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEICyheZpw70pbgO4hEuwhZTETWyTpNJmJ3TyFaWT6WTRkoAoGCCqGSM49 +AwEHoUQDQgAEgvvZl5Vqpr1e+g5IhoU6TZHgRau+BZETVFTmqyWYajA/mooRQ1MZ +Tozus9ZZZA8tzUhIqS36gsFuyIZ4YiAlyg== +-----END EC PRIVATE KEY-----` +) + +func swarmSpecWithFullCAConfig() *swarm.Spec { + return &swarm.Spec{ + CAConfig: swarm.CAConfig{ + SigningCACert: "cacert", + SigningCAKey: "cakey", + ForceRotate: 1, + NodeCertExpiry: time.Duration(200), + ExternalCAs: []*swarm.ExternalCA{ + { + URL: "https://example.com/ca", + Protocol: swarm.ExternalCAProtocolCFSSL, + CACert: "excacert", + }, + }, + }, + } +} + +func TestDisplayTrustRootNoRoot(t *testing.T) { + buffer := new(bytes.Buffer) + err := displayTrustRoot(buffer, swarm.Swarm{}) + assert.Error(t, err, "No CA information available") +} + +type invalidCATestCases struct { + args []string + errorMsg string +} + +func writeFile(data string) (string, error) { + tmpfile, err := ioutil.TempFile("", "testfile") + if err != nil { + return "", err + } + _, err = tmpfile.Write([]byte(data)) + if err != nil { + return "", err + } + tmpfile.Close() + return tmpfile.Name(), nil +} + +func 
TestDisplayTrustRootInvalidFlags(t *testing.T) { + // we need an actual PEMfile to test + tmpfile, err := writeFile(cert) + assert.NilError(t, err) + defer os.Remove(tmpfile) + + errorTestCases := []invalidCATestCases{ + { + args: []string{"--ca-cert=" + tmpfile}, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { + args: []string{"--ca-key=" + tmpfile}, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { // to make sure we're not erroring because we didn't provide a CA key along with the CA cert + args: []string{ + "--ca-cert=" + tmpfile, + "--ca-key=" + tmpfile, + }, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { + args: []string{"--cert-expiry=2160h0m0s"}, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { + args: []string{"--external-ca=protocol=cfssl,url=https://some.com/https/url"}, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { // to make sure we're not erroring because we didn't provide a CA cert and external CA + args: []string{ + "--ca-cert=" + tmpfile, + "--external-ca=protocol=cfssl,url=https://some.com/https/url", + }, + errorMsg: "flag requires the `--rotate` flag to update the CA", + }, + { + args: []string{ + "--rotate", + "--external-ca=protocol=cfssl,url=https://some.com/https/url", + }, + errorMsg: "rotating to an external CA requires the `--ca-cert` flag to specify the external CA's cert - " + + "to add an external CA with the current root CA certificate, use the `update` command instead", + }, + { + args: []string{ + "--rotate", + "--ca-cert=" + tmpfile, + }, + errorMsg: "the --ca-cert flag requires that a --ca-key flag and/or --external-ca flag be provided as well", + }, + } + + for _, testCase := range errorTestCases { + cmd := newCACommand( + test.NewFakeCli(&fakeClient{ + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + TLSInfo: swarm.TLSInfo{ + TrustRoot: 
"root", + }, + }, + }, nil + }, + })) + assert.Check(t, cmd.Flags().Parse(testCase.args)) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), testCase.errorMsg) + } +} + +func TestDisplayTrustRoot(t *testing.T) { + buffer := new(bytes.Buffer) + trustRoot := "trustme" + err := displayTrustRoot(buffer, swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + TLSInfo: swarm.TLSInfo{TrustRoot: trustRoot}, + }, + }) + assert.NilError(t, err) + assert.Check(t, is.Equal(trustRoot+"\n", buffer.String())) +} + +type swarmUpdateRecorder struct { + spec swarm.Spec +} + +func (s *swarmUpdateRecorder) swarmUpdate(sp swarm.Spec, _ swarm.UpdateFlags) error { + s.spec = sp + return nil +} + +func swarmInspectFuncWithFullCAConfig() (swarm.Swarm, error) { + return swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + Spec: *swarmSpecWithFullCAConfig(), + }, + }, nil +} + +func TestUpdateSwarmSpecDefaultRotate(t *testing.T) { + s := &swarmUpdateRecorder{} + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: swarmInspectFuncWithFullCAConfig, + swarmUpdateFunc: s.swarmUpdate, + }) + cmd := newCACommand(cli) + cmd.SetArgs([]string{"--rotate", "--detach"}) + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + + expected := swarmSpecWithFullCAConfig() + expected.CAConfig.ForceRotate = 2 + expected.CAConfig.SigningCACert = "" + expected.CAConfig.SigningCAKey = "" + assert.Check(t, is.DeepEqual(*expected, s.spec)) +} + +func TestUpdateSwarmSpecCertAndKey(t *testing.T) { + certfile, err := writeFile(cert) + assert.NilError(t, err) + defer os.Remove(certfile) + + keyfile, err := writeFile(key) + assert.NilError(t, err) + defer os.Remove(keyfile) + + s := &swarmUpdateRecorder{} + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: swarmInspectFuncWithFullCAConfig, + swarmUpdateFunc: s.swarmUpdate, + }) + cmd := newCACommand(cli) + cmd.SetArgs([]string{ + "--rotate", + "--detach", + "--ca-cert=" + certfile, + "--ca-key=" + keyfile, + "--cert-expiry=3m"}) + 
cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + + expected := swarmSpecWithFullCAConfig() + expected.CAConfig.SigningCACert = cert + expected.CAConfig.SigningCAKey = key + expected.CAConfig.NodeCertExpiry = 3 * time.Minute + assert.Check(t, is.DeepEqual(*expected, s.spec)) +} + +func TestUpdateSwarmSpecCertAndExternalCA(t *testing.T) { + certfile, err := writeFile(cert) + assert.NilError(t, err) + defer os.Remove(certfile) + + s := &swarmUpdateRecorder{} + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: swarmInspectFuncWithFullCAConfig, + swarmUpdateFunc: s.swarmUpdate, + }) + cmd := newCACommand(cli) + cmd.SetArgs([]string{ + "--rotate", + "--detach", + "--ca-cert=" + certfile, + "--external-ca=protocol=cfssl,url=https://some.external.ca"}) + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + + expected := swarmSpecWithFullCAConfig() + expected.CAConfig.SigningCACert = cert + expected.CAConfig.SigningCAKey = "" + expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{ + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://some.external.ca", + CACert: cert, + Options: make(map[string]string), + }, + } + assert.Check(t, is.DeepEqual(*expected, s.spec)) +} + +func TestUpdateSwarmSpecCertAndKeyAndExternalCA(t *testing.T) { + certfile, err := writeFile(cert) + assert.NilError(t, err) + defer os.Remove(certfile) + + keyfile, err := writeFile(key) + assert.NilError(t, err) + defer os.Remove(keyfile) + + s := &swarmUpdateRecorder{} + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: swarmInspectFuncWithFullCAConfig, + swarmUpdateFunc: s.swarmUpdate, + }) + cmd := newCACommand(cli) + cmd.SetArgs([]string{ + "--rotate", + "--detach", + "--ca-cert=" + certfile, + "--ca-key=" + keyfile, + "--external-ca=protocol=cfssl,url=https://some.external.ca"}) + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + + expected := swarmSpecWithFullCAConfig() + expected.CAConfig.SigningCACert = cert + 
expected.CAConfig.SigningCAKey = key + expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{ + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://some.external.ca", + CACert: cert, + Options: make(map[string]string), + }, + } + assert.Check(t, is.DeepEqual(*expected, s.spec)) +} diff --git a/cli/cli/command/swarm/client_test.go b/cli/cli/command/swarm/client_test.go new file mode 100644 index 00000000..8695c895 --- /dev/null +++ b/cli/cli/command/swarm/client_test.go @@ -0,0 +1,85 @@ +package swarm + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + infoFunc func() (types.Info, error) + swarmInitFunc func() (string, error) + swarmInspectFunc func() (swarm.Swarm, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + swarmJoinFunc func() error + swarmLeaveFunc func() error + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmUnlockFunc func(req swarm.UnlockRequest) error +} + +func (cli *fakeClient) Info(ctx context.Context) (types.Info, error) { + if cli.infoFunc != nil { + return cli.infoFunc() + } + return types.Info{}, nil +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectFunc != nil { + return cli.nodeInspectFunc() + } + return swarm.Node{}, []byte{}, nil +} + +func (cli *fakeClient) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + if cli.swarmInitFunc != nil { + return cli.swarmInitFunc() + } + return "", nil +} + +func (cli *fakeClient) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + if cli.swarmInspectFunc != nil { + return cli.swarmInspectFunc() + } + return swarm.Swarm{}, nil +} + +func (cli *fakeClient) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + if 
cli.swarmGetUnlockKeyFunc != nil { + return cli.swarmGetUnlockKeyFunc() + } + return types.SwarmUnlockKeyResponse{}, nil +} + +func (cli *fakeClient) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + if cli.swarmJoinFunc != nil { + return cli.swarmJoinFunc() + } + return nil +} + +func (cli *fakeClient) SwarmLeave(ctx context.Context, force bool) error { + if cli.swarmLeaveFunc != nil { + return cli.swarmLeaveFunc() + } + return nil +} + +func (cli *fakeClient) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + if cli.swarmUpdateFunc != nil { + return cli.swarmUpdateFunc(swarm, flags) + } + return nil +} + +func (cli *fakeClient) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + if cli.swarmUnlockFunc != nil { + return cli.swarmUnlockFunc(req) + } + return nil +} diff --git a/cli/cli/command/swarm/cmd.go b/cli/cli/command/swarm/cmd.go new file mode 100644 index 00000000..89bf5c3c --- /dev/null +++ b/cli/cli/command/swarm/cmd.go @@ -0,0 +1,33 @@ +package swarm + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewSwarmCommand returns a cobra command for `swarm` subcommands +func NewSwarmCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "swarm", + Short: "Manage Swarm", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{ + "version": "1.24", + "swarm": "", + }, + } + cmd.AddCommand( + newInitCommand(dockerCli), + newJoinCommand(dockerCli), + newJoinTokenCommand(dockerCli), + newUnlockKeyCommand(dockerCli), + newUpdateCommand(dockerCli), + newLeaveCommand(dockerCli), + newUnlockCommand(dockerCli), + newCACommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/swarm/init.go b/cli/cli/command/swarm/init.go new file mode 100644 index 00000000..3c096c66 --- /dev/null +++ b/cli/cli/command/swarm/init.go @@ -0,0 +1,117 @@ +package 
swarm + +import ( + "context" + "fmt" + "net" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type initOptions struct { + swarmOptions + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. + advertiseAddr string + dataPathAddr string + dataPathPort uint32 + forceNewCluster bool + availability string + defaultAddrPools []net.IPNet + DefaultAddrPoolMaskLength uint32 +} + +func newInitCommand(dockerCli command.Cli) *cobra.Command { + opts := initOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "init [OPTIONS]", + Short: "Initialize a swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInit(dockerCli, cmd.Flags(), opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.StringVar(&opts.dataPathAddr, flagDataPathAddr, "", "Address or interface to use for data path traffic (format: )") + flags.SetAnnotation(flagDataPathAddr, "version", []string{"1.31"}) + flags.Uint32Var(&opts.dataPathPort, flagDataPathPort, 0, "Port number to use for data path traffic (1024 - 49151). 
If no value is set or is set to 0, the default port (4789) is used.") + flags.SetAnnotation(flagDataPathPort, "version", []string{"1.40"}) + flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state") + flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)") + flags.StringVar(&opts.availability, flagAvailability, "active", `Availability of the node ("active"|"pause"|"drain")`) + flags.IPNetSliceVar(&opts.defaultAddrPools, flagDefaultAddrPool, []net.IPNet{}, "default address pool in CIDR format") + flags.SetAnnotation(flagDefaultAddrPool, "version", []string{"1.39"}) + flags.Uint32Var(&opts.DefaultAddrPoolMaskLength, flagDefaultAddrPoolMaskLength, 24, "default address pool subnet mask length") + flags.SetAnnotation(flagDefaultAddrPoolMaskLength, "version", []string{"1.39"}) + addSwarmFlags(flags, &opts.swarmOptions) + return cmd +} + +func runInit(dockerCli command.Cli, flags *pflag.FlagSet, opts initOptions) error { + var defaultAddrPool []string + + client := dockerCli.Client() + ctx := context.Background() + + for _, p := range opts.defaultAddrPools { + defaultAddrPool = append(defaultAddrPool, p.String()) + } + req := swarm.InitRequest{ + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + DataPathAddr: opts.dataPathAddr, + DataPathPort: opts.dataPathPort, + DefaultAddrPool: defaultAddrPool, + ForceNewCluster: opts.forceNewCluster, + Spec: opts.swarmOptions.ToSpec(flags), + AutoLockManagers: opts.swarmOptions.autolock, + SubnetSize: opts.DefaultAddrPoolMaskLength, + } + if flags.Changed(flagAvailability) { + availability := swarm.NodeAvailability(strings.ToLower(opts.availability)) + switch availability { + case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain: + req.Availability = availability + default: + return errors.Errorf("invalid availability %q, only active, 
pause and drain are supported", opts.availability) + } + } + + nodeID, err := client.SwarmInit(ctx, req) + if err != nil { + if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") { + return errors.New(err.Error() + " - specify one with --advertise-addr") + } + return err + } + + fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) + + if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil { + return err + } + + fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n") + + if req.AutoLockManagers { + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(dockerCli.Out(), unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/cli/cli/command/swarm/init_test.go b/cli/cli/command/swarm/init_test.go new file mode 100644 index 00000000..735cc8da --- /dev/null +++ b/cli/cli/command/swarm/init_test.go @@ -0,0 +1,125 @@ +package swarm + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSwarmInitErrorOnAPIFailure(t *testing.T) { + testCases := []struct { + name string + flags map[string]string + swarmInitFunc func() (string, error) + swarmInspectFunc func() (swarm.Swarm, error) + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + expectedError string + }{ + { + name: "init-failed", + swarmInitFunc: func() (string, error) { + return "", errors.Errorf("error initializing the swarm") + }, + expectedError: "error initializing the swarm", + }, + { + name: 
"init-failed-with-ip-choice", + swarmInitFunc: func() (string, error) { + return "", errors.Errorf("could not choose an IP address to advertise") + }, + expectedError: "could not choose an IP address to advertise - specify one with --advertise-addr", + }, + { + name: "swarm-inspect-after-init-failed", + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "node-inspect-after-init-failed", + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") + }, + expectedError: "error inspecting the node", + }, + { + name: "swarm-get-unlock-key-after-init-failed", + flags: map[string]string{ + flagAutolock: "true", + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting swarm unlock key") + }, + expectedError: "could not fetch unlock key: error getting swarm unlock key", + }, + } + for _, tc := range testCases { + cmd := newInitCommand( + test.NewFakeCli(&fakeClient{ + swarmInitFunc: tc.swarmInitFunc, + swarmInspectFunc: tc.swarmInspectFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + nodeInspectFunc: tc.nodeInspectFunc, + })) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmInit(t *testing.T) { + testCases := []struct { + name string + flags map[string]string + swarmInitFunc func() (string, error) + swarmInspectFunc func() (swarm.Swarm, error) + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + }{ + { + name: "init", + swarmInitFunc: func() (string, error) { + return "nodeID", nil + }, + }, + { + name: "init-autolock", + flags: map[string]string{ + flagAutolock: "true", + }, + 
swarmInitFunc: func() (string, error) { + return "nodeID", nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInitFunc: tc.swarmInitFunc, + swarmInspectFunc: tc.swarmInspectFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + nodeInspectFunc: tc.nodeInspectFunc, + }) + cmd := newInitCommand(cli) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("init-%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/swarm/join.go b/cli/cli/command/swarm/join.go new file mode 100644 index 00000000..e6c43f03 --- /dev/null +++ b/cli/cli/command/swarm/join.go @@ -0,0 +1,88 @@ +package swarm + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type joinOptions struct { + remote string + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. 
+ advertiseAddr string + dataPathAddr string + token string + availability string +} + +func newJoinCommand(dockerCli command.Cli) *cobra.Command { + opts := joinOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "join [OPTIONS] HOST:PORT", + Short: "Join a swarm as a node and/or manager", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runJoin(dockerCli, cmd.Flags(), opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.StringVar(&opts.dataPathAddr, flagDataPathAddr, "", "Address or interface to use for data path traffic (format: )") + flags.SetAnnotation(flagDataPathAddr, "version", []string{"1.31"}) + flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") + flags.StringVar(&opts.availability, flagAvailability, "active", `Availability of the node ("active"|"pause"|"drain")`) + return cmd +} + +func runJoin(dockerCli command.Cli, flags *pflag.FlagSet, opts joinOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.JoinRequest{ + JoinToken: opts.token, + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + DataPathAddr: opts.dataPathAddr, + RemoteAddrs: []string{opts.remote}, + } + if flags.Changed(flagAvailability) { + availability := swarm.NodeAvailability(strings.ToLower(opts.availability)) + switch availability { + case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain: + req.Availability = availability + default: + return errors.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) + } + } + + err := client.SwarmJoin(ctx, req) + if err != nil { + return err + } + + info, err := client.Info(ctx) + if err != nil { + return 
err + } + + if info.Swarm.ControlAvailable { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.") + } else { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.") + } + return nil +} diff --git a/cli/cli/command/swarm/join_test.go b/cli/cli/command/swarm/join_test.go new file mode 100644 index 00000000..e70d448d --- /dev/null +++ b/cli/cli/command/swarm/join_test.go @@ -0,0 +1,100 @@ +package swarm + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSwarmJoinErrors(t *testing.T) { + testCases := []struct { + name string + args []string + swarmJoinFunc func() error + infoFunc func() (types.Info, error) + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"remote1", "remote2"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "join-failed", + args: []string{"remote"}, + swarmJoinFunc: func() error { + return errors.Errorf("error joining the swarm") + }, + expectedError: "error joining the swarm", + }, + { + name: "join-failed-on-init", + args: []string{"remote"}, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + } + for _, tc := range testCases { + cmd := newJoinCommand( + test.NewFakeCli(&fakeClient{ + swarmJoinFunc: tc.swarmJoinFunc, + infoFunc: tc.infoFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmJoin(t *testing.T) { + testCases := []struct { + name string + infoFunc func() (types.Info, error) + expected string + }{ + { + name: "join-as-manager", + infoFunc: func() 
(types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + ControlAvailable: true, + }, + }, nil + }, + expected: "This node joined a swarm as a manager.", + }, + { + name: "join-as-worker", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + ControlAvailable: false, + }, + }, nil + }, + expected: "This node joined a swarm as a worker.", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + infoFunc: tc.infoFunc, + }) + cmd := newJoinCommand(cli) + cmd.SetArgs([]string{"remote"}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(strings.TrimSpace(cli.OutBuffer().String()), tc.expected)) + } +} diff --git a/cli/cli/command/swarm/join_token.go b/cli/cli/command/swarm/join_token.go new file mode 100644 index 00000000..f8ed93cf --- /dev/null +++ b/cli/cli/command/swarm/join_token.go @@ -0,0 +1,119 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type joinTokenOptions struct { + role string + rotate bool + quiet bool +} + +func newJoinTokenCommand(dockerCli command.Cli) *cobra.Command { + opts := joinTokenOptions{} + + cmd := &cobra.Command{ + Use: "join-token [OPTIONS] (worker|manager)", + Short: "Manage join tokens", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.role = args[0] + return runJoinToken(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.rotate, flagRotate, false, "Rotate join token") + flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func runJoinToken(dockerCli command.Cli, opts joinTokenOptions) error { + worker := opts.role == "worker" + manager := opts.role == "manager" + + if !worker && !manager { + return errors.New("unknown role " + opts.role) + } + + client := dockerCli.Client() + ctx := 
context.Background() + + if opts.rotate { + flags := swarm.UpdateFlags{ + RotateWorkerToken: worker, + RotateManagerToken: manager, + } + + sw, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if err := client.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil { + return err + } + + if !opts.quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", opts.role) + } + } + + // second SwarmInspect in this function, + // this is necessary since SwarmUpdate after first changes the join tokens + sw, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if opts.quiet && worker { + fmt.Fprintln(dockerCli.Out(), sw.JoinTokens.Worker) + return nil + } + + if opts.quiet && manager { + fmt.Fprintln(dockerCli.Out(), sw.JoinTokens.Manager) + return nil + } + + info, err := client.Info(ctx) + if err != nil { + return err + } + + return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager) +} + +func printJoinCommand(ctx context.Context, dockerCli command.Cli, nodeID string, worker bool, manager bool) error { + client := dockerCli.Client() + + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + sw, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if node.ManagerStatus != nil { + if worker { + fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n docker swarm join --token %s %s\n\n", sw.JoinTokens.Worker, node.ManagerStatus.Addr) + } + if manager { + fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n docker swarm join --token %s %s\n\n", sw.JoinTokens.Manager, node.ManagerStatus.Addr) + } + } + + return nil +} diff --git a/cli/cli/command/swarm/join_token_test.go b/cli/cli/command/swarm/join_token_test.go new file mode 100644 index 00000000..1bd7ba25 --- /dev/null +++ b/cli/cli/command/swarm/join_token_test.go @@ -0,0 +1,211 @@ +package swarm + +import ( + 
"fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSwarmJoinTokenErrors(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + infoFunc func() (types.Info, error) + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + nodeInspectFunc func() (swarm.Node, []byte, error) + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"worker", "manager"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "invalid-args", + args: []string{"foo"}, + expectedError: "unknown role foo", + }, + { + name: "swarm-inspect-failed", + args: []string{"worker"}, + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "swarm-inspect-rotate-failed", + args: []string{"worker"}, + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "swarm-update-failed", + args: []string{"worker"}, + flags: map[string]string{ + flagRotate: "true", + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + return errors.Errorf("error updating the swarm") + }, + expectedError: "error updating the swarm", + }, + { + name: "node-inspect-failed", + args: []string{"worker"}, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return 
swarm.Node{}, []byte{}, errors.Errorf("error inspecting node") + }, + expectedError: "error inspecting node", + }, + { + name: "info-failed", + args: []string{"worker"}, + infoFunc: func() (types.Info, error) { + return types.Info{}, errors.Errorf("error asking for node info") + }, + expectedError: "error asking for node info", + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + infoFunc: tc.infoFunc, + nodeInspectFunc: tc.nodeInspectFunc, + }) + cmd := newJoinTokenCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmJoinToken(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + infoFunc func() (types.Info, error) + swarmInspectFunc func() (swarm.Swarm, error) + nodeInspectFunc func() (swarm.Node, []byte, error) + }{ + { + name: "worker", + args: []string{"worker"}, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID", + }, + }, nil + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + { + name: "manager", + args: []string{"manager"}, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID", + }, + }, nil + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + { + name: "manager-rotate", + args: []string{"manager"}, + flags: map[string]string{ + flagRotate: "true", + }, + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + NodeID: "nodeID", + }, + }, nil 
+ }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + { + name: "worker-quiet", + args: []string{"worker"}, + flags: map[string]string{ + flagQuiet: "true", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + { + name: "manager-quiet", + args: []string{"manager"}, + flags: map[string]string{ + flagQuiet: "true", + }, + nodeInspectFunc: func() (swarm.Node, []byte, error) { + return *Node(Manager()), []byte{}, nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + infoFunc: tc.infoFunc, + nodeInspectFunc: tc.nodeInspectFunc, + }) + cmd := newJoinTokenCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("jointoken-%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/swarm/leave.go b/cli/cli/command/swarm/leave.go new file mode 100644 index 00000000..af6e0753 --- /dev/null +++ b/cli/cli/command/swarm/leave.go @@ -0,0 +1,43 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type leaveOptions struct { + force bool +} + +func newLeaveCommand(dockerCli command.Cli) *cobra.Command { + opts := leaveOptions{} + + cmd := &cobra.Command{ + Use: "leave [OPTIONS]", + Short: "Leave the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLeave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force this node to 
leave the swarm, ignoring warnings") + return cmd +} + +func runLeave(dockerCli command.Cli, opts leaveOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if err := client.SwarmLeave(ctx, opts.force); err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") + return nil +} diff --git a/cli/cli/command/swarm/leave_test.go b/cli/cli/command/swarm/leave_test.go new file mode 100644 index 00000000..91ee6e24 --- /dev/null +++ b/cli/cli/command/swarm/leave_test.go @@ -0,0 +1,50 @@ +package swarm + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSwarmLeaveErrors(t *testing.T) { + testCases := []struct { + name string + args []string + swarmLeaveFunc func() error + expectedError string + }{ + { + name: "too-many-args", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + name: "leave-failed", + swarmLeaveFunc: func() error { + return errors.Errorf("error leaving the swarm") + }, + expectedError: "error leaving the swarm", + }, + } + for _, tc := range testCases { + cmd := newLeaveCommand( + test.NewFakeCli(&fakeClient{ + swarmLeaveFunc: tc.swarmLeaveFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmLeave(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cmd := newLeaveCommand(cli) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal("Node left the swarm.", strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/swarm/opts.go b/cli/cli/command/swarm/opts.go new file mode 100644 index 00000000..ea66bf39 --- /dev/null +++ b/cli/cli/command/swarm/opts.go @@ -0,0 +1,276 @@ +package swarm + +import ( + "encoding/csv" + "encoding/pem" + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/docker/cli/opts" + 
"github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +const ( + defaultListenAddr = "0.0.0.0:2377" + + flagCertExpiry = "cert-expiry" + flagDispatcherHeartbeat = "dispatcher-heartbeat" + flagListenAddr = "listen-addr" + flagAdvertiseAddr = "advertise-addr" + flagDataPathAddr = "data-path-addr" + flagDataPathPort = "data-path-port" + flagDefaultAddrPool = "default-addr-pool" + flagDefaultAddrPoolMaskLength = "default-addr-pool-mask-length" + flagQuiet = "quiet" + flagRotate = "rotate" + flagToken = "token" + flagTaskHistoryLimit = "task-history-limit" + flagExternalCA = "external-ca" + flagMaxSnapshots = "max-snapshots" + flagSnapshotInterval = "snapshot-interval" + flagAutolock = "autolock" + flagAvailability = "availability" + flagCACert = "ca-cert" + flagCAKey = "ca-key" +) + +type swarmOptions struct { + swarmCAOptions + taskHistoryLimit int64 + dispatcherHeartbeat time.Duration + maxSnapshots uint64 + snapshotInterval uint64 + autolock bool +} + +// NodeAddrOption is a pflag.Value for listening addresses +type NodeAddrOption struct { + addr string +} + +// String prints the representation of this flag +func (a *NodeAddrOption) String() string { + return a.Value() +} + +// Set the value for this flag +func (a *NodeAddrOption) Set(value string) error { + addr, err := opts.ParseTCPAddr(value, a.addr) + if err != nil { + return err + } + a.addr = addr + return nil +} + +// Type returns the type of this flag +func (a *NodeAddrOption) Type() string { + return "node-addr" +} + +// Value returns the value of this option as addr:port +func (a *NodeAddrOption) Value() string { + return strings.TrimPrefix(a.addr, "tcp://") +} + +// NewNodeAddrOption returns a new node address option +func NewNodeAddrOption(addr string) NodeAddrOption { + return NodeAddrOption{addr} +} + +// NewListenAddrOption returns a NodeAddrOption with default values +func NewListenAddrOption() NodeAddrOption { + return 
NewNodeAddrOption(defaultListenAddr) +} + +// ExternalCAOption is a Value type for parsing external CA specifications. +type ExternalCAOption struct { + values []*swarm.ExternalCA +} + +// Set parses an external CA option. +func (m *ExternalCAOption) Set(value string) error { + parsed, err := parseExternalCA(value) + if err != nil { + return err + } + + m.values = append(m.values, parsed) + return nil +} + +// Type returns the type of this option. +func (m *ExternalCAOption) Type() string { + return "external-ca" +} + +// String returns a string repr of this option. +func (m *ExternalCAOption) String() string { + externalCAs := []string{} + for _, externalCA := range m.values { + repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) + externalCAs = append(externalCAs, repr) + } + return strings.Join(externalCAs, ", ") +} + +// Value returns the external CAs +func (m *ExternalCAOption) Value() []*swarm.ExternalCA { + return m.values +} + +// PEMFile represents the path to a pem-formatted file +type PEMFile struct { + path, contents string +} + +// Type returns the type of this option. +func (p *PEMFile) Type() string { + return "pem-file" +} + +// String returns the path to the pem file +func (p *PEMFile) String() string { + return p.path +} + +// Set parses a root rotation option +func (p *PEMFile) Set(value string) error { + contents, err := ioutil.ReadFile(value) + if err != nil { + return err + } + if pemBlock, _ := pem.Decode(contents); pemBlock == nil { + return errors.New("file contents must be in PEM format") + } + p.contents, p.path = string(contents), value + return nil +} + +// Contents returns the contents of the PEM file +func (p *PEMFile) Contents() string { + return p.contents +} + +// parseExternalCA parses an external CA specification from the command line, +// such as protocol=cfssl,url=https://example.com. 
+func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { + csvReader := csv.NewReader(strings.NewReader(caSpec)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + + externalCA := swarm.ExternalCA{ + Options: make(map[string]string), + } + + var ( + hasProtocol bool + hasURL bool + ) + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + + if len(parts) != 2 { + return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key, value := parts[0], parts[1] + + switch strings.ToLower(key) { + case "protocol": + hasProtocol = true + if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { + externalCA.Protocol = swarm.ExternalCAProtocolCFSSL + } else { + return nil, errors.Errorf("unrecognized external CA protocol %s", value) + } + case "url": + hasURL = true + externalCA.URL = value + case "cacert": + cacontents, err := ioutil.ReadFile(value) + if err != nil { + return nil, errors.Wrap(err, "unable to read CA cert for external CA") + } + if pemBlock, _ := pem.Decode(cacontents); pemBlock == nil { + return nil, errors.New("CA cert for external CA must be in PEM format") + } + externalCA.CACert = string(cacontents) + default: + externalCA.Options[key] = value + } + } + + if !hasProtocol { + return nil, errors.New("the external-ca option needs a protocol= parameter") + } + if !hasURL { + return nil, errors.New("the external-ca option needs a url= parameter") + } + + return &externalCA, nil +} + +func addSwarmCAFlags(flags *pflag.FlagSet, opts *swarmCAOptions) { + flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, 90*24*time.Hour, "Validity period for node certificates (ns|us|ms|s|m|h)") + flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") +} + +func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { + flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit") + 
// mergeSwarmSpec copies each swarm-level option into spec, but only when its
// flag was explicitly set on the command line, so settings the user did not
// specify keep their existing values in spec.
func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) {
	if flags.Changed(flagTaskHistoryLimit) {
		spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit
	}
	if flags.Changed(flagDispatcherHeartbeat) {
		spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat
	}
	if flags.Changed(flagMaxSnapshots) {
		spec.Raft.KeepOldSnapshots = &opts.maxSnapshots
	}
	if flags.Changed(flagSnapshotInterval) {
		spec.Raft.SnapshotInterval = opts.snapshotInterval
	}
	if flags.Changed(flagAutolock) {
		spec.EncryptionConfig.AutoLockManagers = opts.autolock
	}
	opts.mergeSwarmSpecCAFlags(spec, flags, caCert)
}

// swarmCAOptions holds the certificate-authority related options.
type swarmCAOptions struct {
	nodeCertExpiry time.Duration
	externalCA     ExternalCAOption
}

// mergeSwarmSpecCAFlags copies the CA options into spec when their flags were
// explicitly set. caCert is written onto every configured external CA entry.
func (opts *swarmCAOptions) mergeSwarmSpecCAFlags(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) {
	if flags.Changed(flagCertExpiry) {
		spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry
	}
	if flags.Changed(flagExternalCA) {
		spec.CAConfig.ExternalCAs = opts.externalCA.Value()
		for _, ca := range spec.CAConfig.ExternalCAs {
			ca.CACert = caCert
		}
	}
}

// ToSpec builds a swarm.Spec from the options, starting from the zero-value
// spec and with no CA cert override.
func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec {
	var spec swarm.Spec
	opts.mergeSwarmSpec(&spec, flags, "")
	return spec
}
b/cli/cli/command/swarm/opts_test.go @@ -0,0 +1,111 @@ +package swarm + +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNodeAddrOptionSetHostAndPort(t *testing.T) { + opt := NewNodeAddrOption("old:123") + addr := "newhost:5555" + assert.NilError(t, opt.Set(addr)) + assert.Check(t, is.Equal(addr, opt.Value())) +} + +func TestNodeAddrOptionSetHostOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("newhost")) + assert.Check(t, is.Equal("newhost:2377", opt.Value())) +} + +func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("::1")) + assert.Check(t, is.Equal("[::1]:2377", opt.Value())) +} + +func TestNodeAddrOptionSetPortOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set(":4545")) + assert.Check(t, is.Equal("0.0.0.0:4545", opt.Value())) +} + +func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { + opt := NewListenAddrOption() + assert.Error(t, opt.Set("http://localhost:4545"), "Invalid proto, expected tcp: http://localhost:4545") +} + +func TestExternalCAOptionErrors(t *testing.T) { + testCases := []struct { + externalCA string + expectedError string + }{ + { + externalCA: "", + expectedError: "EOF", + }, + { + externalCA: "anything", + expectedError: "invalid field 'anything' must be a key=value pair", + }, + { + externalCA: "foo=bar", + expectedError: "the external-ca option needs a protocol= parameter", + }, + { + externalCA: "protocol=baz", + expectedError: "unrecognized external CA protocol baz", + }, + { + externalCA: "protocol=cfssl", + expectedError: "the external-ca option needs a url= parameter", + }, + } + for _, tc := range testCases { + opt := &ExternalCAOption{} + assert.Error(t, opt.Set(tc.externalCA), tc.expectedError) + } +} + +func TestExternalCAOption(t *testing.T) { + testCases := []struct { + externalCA string + expected string + }{ + { + externalCA: "protocol=cfssl,url=anything", 
+ expected: "cfssl: anything", + }, + { + externalCA: "protocol=CFSSL,url=anything", + expected: "cfssl: anything", + }, + { + externalCA: "protocol=Cfssl,url=https://example.com", + expected: "cfssl: https://example.com", + }, + { + externalCA: "protocol=Cfssl,url=https://example.com,foo=bar", + expected: "cfssl: https://example.com", + }, + { + externalCA: "protocol=Cfssl,url=https://example.com,foo=bar,foo=baz", + expected: "cfssl: https://example.com", + }, + } + for _, tc := range testCases { + opt := &ExternalCAOption{} + assert.NilError(t, opt.Set(tc.externalCA)) + assert.Check(t, is.Equal(tc.expected, opt.String())) + } +} + +func TestExternalCAOptionMultiple(t *testing.T) { + opt := &ExternalCAOption{} + assert.NilError(t, opt.Set("protocol=cfssl,url=https://example.com")) + assert.NilError(t, opt.Set("protocol=CFSSL,url=anything")) + assert.Check(t, is.Len(opt.Value(), 2)) + assert.Check(t, is.Equal("cfssl: https://example.com, cfssl: anything", opt.String())) +} diff --git a/cli/cli/command/swarm/progress/root_rotation.go b/cli/cli/command/swarm/progress/root_rotation.go new file mode 100644 index 00000000..e72de1d2 --- /dev/null +++ b/cli/cli/command/swarm/progress/root_rotation.go @@ -0,0 +1,120 @@ +package progress + +import ( + "bytes" + "context" + "io" + "os" + "os/signal" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/opencontainers/go-digest" +) + +const ( + certsRotatedStr = " rotated TLS certificates" + rootsRotatedStr = " rotated CA certificates" + // rootsAction has a single space because rootsRotatedStr is one character shorter than certsRotatedStr. + // This makes sure the progress bar are aligned. + certsAction = "" + rootsAction = " " +) + +// RootRotationProgress outputs progress information for convergence of a root rotation. 
// RootRotationProgress outputs progress information for convergence of a root rotation.
// It polls the daemon every 200ms, rendering two progress bars (certificates
// rotated, trust roots rotated), until convergence, an API error, or SIGINT.
func RootRotationProgress(ctx context.Context, dclient client.APIClient, progressWriter io.WriteCloser) error {
	defer progressWriter.Close()

	progressOut := streamformatter.NewJSONProgressOutput(progressWriter, false)

	// Intercept Ctrl-C so the user can detach while the rotation keeps
	// running server-side.
	sigint := make(chan os.Signal, 1)
	signal.Notify(sigint, os.Interrupt)
	defer signal.Stop(sigint)

	// draw 2 progress bars, 1 for nodes with the correct cert, 1 for nodes with the correct trust root
	progress.Update(progressOut, "desired root digest", "")
	progress.Update(progressOut, certsRotatedStr, certsAction)
	progress.Update(progressOut, rootsRotatedStr, rootsAction)

	var done bool

	for {
		info, err := dclient.SwarmInspect(ctx)
		if err != nil {
			return err
		}

		// NOTE(review): done carries over from the previous iteration, so one
		// extra SwarmInspect runs after convergence before returning —
		// presumably intentional; confirm against upstream history.
		if done {
			return nil
		}

		nodes, err := dclient.NodeList(ctx, types.NodeListOptions{})
		if err != nil {
			return err
		}

		done = updateProgress(progressOut, info.ClusterInfo.TLSInfo, nodes, info.ClusterInfo.RootRotationInProgress)

		select {
		case <-time.After(200 * time.Millisecond):
		case <-sigint:
			if !done {
				progress.Message(progressOut, "", "Operation continuing in background.")
				progress.Message(progressOut, "", "Use `swarmctl cluster inspect default` to check progress.")
			}
			return nil
		}
	}
}

// updateProgress renders one frame of the two progress bars from the current
// node list and returns true once every node has both the desired issuer
// certificate and the desired trust root.
func updateProgress(progressOut progress.Output, desiredTLSInfo swarm.TLSInfo, nodes []swarm.Node, rootRotationInProgress bool) bool {
	// write the current desired root cert's digest, because the desired root certs might be too long
	progressOut.WriteProgress(progress.Progress{
		ID:     "desired root digest",
		Action: digest.FromBytes([]byte(desiredTLSInfo.TrustRoot)).String(),
	})

	// If we had reached a converged state, check if we are still converged.
	var certsRight, trustRootsRight int64
	for _, n := range nodes {
		if bytes.Equal(n.Description.TLSInfo.CertIssuerPublicKey, desiredTLSInfo.CertIssuerPublicKey) &&
			bytes.Equal(n.Description.TLSInfo.CertIssuerSubject, desiredTLSInfo.CertIssuerSubject) {
			certsRight++
		}

		if n.Description.TLSInfo.TrustRoot == desiredTLSInfo.TrustRoot {
			trustRootsRight++
		}
	}

	total := int64(len(nodes))
	progressOut.WriteProgress(progress.Progress{
		ID:      certsRotatedStr,
		Action:  certsAction,
		Current: certsRight,
		Total:   total,
		Units:   "nodes",
	})

	rootsProgress := progress.Progress{
		ID:      rootsRotatedStr,
		Action:  rootsAction,
		Current: trustRootsRight,
		Total:   total,
		Units:   "nodes",
	}

	if certsRight == total && !rootRotationInProgress {
		// All certs rotated and the daemon no longer reports a rotation in
		// progress: show the real trust-root count.
		progressOut.WriteProgress(rootsProgress)
		return certsRight == total && trustRootsRight == total
	}

	// we still have certs that need renewing, so display that there are zero roots rotated yet
	rootsProgress.Current = 0
	progressOut.WriteProgress(rootsProgress)
	return false
}
diff --git a/cli/cli/command/swarm/testdata/init-init.golden b/cli/cli/command/swarm/testdata/init-init.golden new file mode 100644 index 00000000..6e82be01 --- /dev/null +++ b/cli/cli/command/swarm/testdata/init-init.golden @@ -0,0 +1,4 @@ +Swarm initialized: current node (nodeID) is now a manager. + +To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. + diff --git a/cli/cli/command/swarm/testdata/jointoken-manager-quiet.golden b/cli/cli/command/swarm/testdata/jointoken-manager-quiet.golden new file mode 100644 index 00000000..0c7cfc60 --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-manager-quiet.golden @@ -0,0 +1 @@ +manager-join-token diff --git a/cli/cli/command/swarm/testdata/jointoken-manager-rotate.golden b/cli/cli/command/swarm/testdata/jointoken-manager-rotate.golden new file mode 100644 index 00000000..4a978e76 --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-manager-rotate.golden @@ -0,0 +1,6 @@ +Successfully rotated manager join token. 
+ +To add a manager to this swarm, run the following command: + + docker swarm join --token manager-join-token 127.0.0.1 + diff --git a/cli/cli/command/swarm/testdata/jointoken-manager.golden b/cli/cli/command/swarm/testdata/jointoken-manager.golden new file mode 100644 index 00000000..7bcb7337 --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-manager.golden @@ -0,0 +1,4 @@ +To add a manager to this swarm, run the following command: + + docker swarm join --token manager-join-token 127.0.0.1 + diff --git a/cli/cli/command/swarm/testdata/jointoken-worker-quiet.golden b/cli/cli/command/swarm/testdata/jointoken-worker-quiet.golden new file mode 100644 index 00000000..b445e191 --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-worker-quiet.golden @@ -0,0 +1 @@ +worker-join-token diff --git a/cli/cli/command/swarm/testdata/jointoken-worker.golden b/cli/cli/command/swarm/testdata/jointoken-worker.golden new file mode 100644 index 00000000..e6c3ab9a --- /dev/null +++ b/cli/cli/command/swarm/testdata/jointoken-worker.golden @@ -0,0 +1,4 @@ +To add a worker to this swarm, run the following command: + + docker swarm join --token worker-join-token 127.0.0.1 + diff --git a/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden new file mode 100644 index 00000000..ed53505e --- /dev/null +++ b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-quiet.golden @@ -0,0 +1 @@ +unlock-key diff --git a/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden new file mode 100644 index 00000000..ed53505e --- /dev/null +++ b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate-quiet.golden @@ -0,0 +1 @@ +unlock-key diff --git a/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden new file mode 100644 index 
00000000..89152b86 --- /dev/null +++ b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key-rotate.golden @@ -0,0 +1,9 @@ +Successfully rotated manager unlock key. + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + unlock-key + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. diff --git a/cli/cli/command/swarm/testdata/unlockkeys-unlock-key.golden b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key.golden new file mode 100644 index 00000000..8316df47 --- /dev/null +++ b/cli/cli/command/swarm/testdata/unlockkeys-unlock-key.golden @@ -0,0 +1,7 @@ +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + unlock-key + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. diff --git a/cli/cli/command/swarm/testdata/update-all-flags-quiet.golden b/cli/cli/command/swarm/testdata/update-all-flags-quiet.golden new file mode 100644 index 00000000..3d195a25 --- /dev/null +++ b/cli/cli/command/swarm/testdata/update-all-flags-quiet.golden @@ -0,0 +1 @@ +Swarm updated. diff --git a/cli/cli/command/swarm/testdata/update-autolock-unlock-key.golden b/cli/cli/command/swarm/testdata/update-autolock-unlock-key.golden new file mode 100644 index 00000000..a077b9e1 --- /dev/null +++ b/cli/cli/command/swarm/testdata/update-autolock-unlock-key.golden @@ -0,0 +1,8 @@ +Swarm updated. +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + unlock-key + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. 
diff --git a/cli/cli/command/swarm/testdata/update-noargs.golden b/cli/cli/command/swarm/testdata/update-noargs.golden new file mode 100644 index 00000000..a2ce7589 --- /dev/null +++ b/cli/cli/command/swarm/testdata/update-noargs.golden @@ -0,0 +1,14 @@ +Update the swarm + +Usage: + update [OPTIONS] [flags] + +Flags: + --autolock Change manager autolocking setting (true|false) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + -h, --help help for update + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) diff --git a/cli/cli/command/swarm/unlock.go b/cli/cli/command/swarm/unlock.go new file mode 100644 index 00000000..35f995bd --- /dev/null +++ b/cli/cli/command/swarm/unlock.go @@ -0,0 +1,75 @@ +package swarm + +import ( + "bufio" + "context" + "fmt" + "io" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/streams" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/crypto/ssh/terminal" +) + +func newUnlockCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "unlock", + Short: "Unlock swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUnlock(dockerCli) + }, + } + + return cmd +} + +func runUnlock(dockerCli command.Cli) error { + client := dockerCli.Client() + ctx := context.Background() + + // First see if the node is actually part of a swarm, and if it is actually locked first. + // If it's in any other state than locked, don't ask for the key. 
+ info, err := client.Info(ctx) + if err != nil { + return err + } + + switch info.Swarm.LocalNodeState { + case swarm.LocalNodeStateInactive: + return errors.New("Error: This node is not part of a swarm") + case swarm.LocalNodeStateLocked: + break + default: + return errors.New("Error: swarm is not locked") + } + + key, err := readKey(dockerCli.In(), "Please enter unlock key: ") + if err != nil { + return err + } + req := swarm.UnlockRequest{ + UnlockKey: key, + } + + return client.SwarmUnlock(ctx, req) +} + +func readKey(in *streams.In, prompt string) (string, error) { + if in.IsTerminal() { + fmt.Print(prompt) + dt, err := terminal.ReadPassword(int(in.FD())) + fmt.Println() + return string(dt), err + } + key, err := bufio.NewReader(in).ReadString('\n') + if err == io.EOF { + err = nil + } + return strings.TrimSpace(key), err +} diff --git a/cli/cli/command/swarm/unlock_key.go b/cli/cli/command/swarm/unlock_key.go new file mode 100644 index 00000000..be5d9ea2 --- /dev/null +++ b/cli/cli/command/swarm/unlock_key.go @@ -0,0 +1,89 @@ +package swarm + +import ( + "context" + "fmt" + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type unlockKeyOptions struct { + rotate bool + quiet bool +} + +func newUnlockKeyCommand(dockerCli command.Cli) *cobra.Command { + opts := unlockKeyOptions{} + + cmd := &cobra.Command{ + Use: "unlock-key [OPTIONS]", + Short: "Manage the unlock key", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUnlockKey(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.rotate, flagRotate, false, "Rotate unlock key") + flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func runUnlockKey(dockerCli command.Cli, opts unlockKeyOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.rotate { + 
flags := swarm.UpdateFlags{RotateManagerUnlockKey: true} + + sw, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if !sw.Spec.EncryptionConfig.AutoLockManagers { + return errors.New("cannot rotate because autolock is not turned on") + } + + if err := client.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil { + return err + } + + if !opts.quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated manager unlock key.\n\n") + } + } + + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + + if unlockKeyResp.UnlockKey == "" { + return errors.New("no unlock key is set") + } + + if opts.quiet { + fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey) + return nil + } + + printUnlockCommand(dockerCli.Out(), unlockKeyResp.UnlockKey) + return nil +} + +func printUnlockCommand(out io.Writer, unlockKey string) { + if len(unlockKey) > 0 { + fmt.Fprintf(out, "To unlock a swarm manager after it restarts, "+ + "run the `docker swarm unlock`\ncommand and provide the following key:\n\n %s\n\n"+ + "Please remember to store this key in a password manager, since without it you\n"+ + "will not be able to restart the manager.\n", unlockKey) + } +} diff --git a/cli/cli/command/swarm/unlock_key_test.go b/cli/cli/command/swarm/unlock_key_test.go new file mode 100644 index 00000000..d28921a1 --- /dev/null +++ b/cli/cli/command/swarm/unlock_key_test.go @@ -0,0 +1,171 @@ +package swarm + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSwarmUnlockKeyErrors(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + expectedError string + }{ + { + name: "too-many-args", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + name: "swarm-inspect-rotate-failed", + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "swarm-rotate-no-autolock-failed", + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + expectedError: "cannot rotate because autolock is not turned on", + }, + { + name: "swarm-update-failed", + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(Autolock()), nil + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + return errors.Errorf("error updating the swarm") + }, + expectedError: "error updating the swarm", + }, + { + name: "swarm-get-unlock-key-failed", + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting unlock key") + }, + expectedError: "error getting unlock key", + }, + { + name: "swarm-no-unlock-key-failed", + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "", + }, nil + }, + expectedError: "no unlock key is set", + }, + } + for _, tc := range testCases { + cmd := newUnlockKeyCommand( + 
test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmUnlockKey(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + }{ + { + name: "unlock-key", + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + { + name: "unlock-key-quiet", + flags: map[string]string{ + flagQuiet: "true", + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + { + name: "unlock-key-rotate", + flags: map[string]string{ + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(Autolock()), nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + { + name: "unlock-key-rotate-quiet", + flags: map[string]string{ + flagQuiet: "true", + flagRotate: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(Autolock()), nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + swarmGetUnlockKeyFunc: 
tc.swarmGetUnlockKeyFunc, + }) + cmd := newUnlockKeyCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("unlockkeys-%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/swarm/unlock_test.go b/cli/cli/command/swarm/unlock_test.go new file mode 100644 index 00000000..f576dc34 --- /dev/null +++ b/cli/cli/command/swarm/unlock_test.go @@ -0,0 +1,98 @@ +package swarm + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/cli/cli/streams" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +func TestSwarmUnlockErrors(t *testing.T) { + testCases := []struct { + name string + args []string + swarmUnlockFunc func(req swarm.UnlockRequest) error + infoFunc func() (types.Info, error) + expectedError string + }{ + { + name: "too-many-args", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + name: "is-not-part-of-a-swarm", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + LocalNodeState: swarm.LocalNodeStateInactive, + }, + }, nil + }, + expectedError: "This node is not part of a swarm", + }, + { + name: "is-not-locked", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + LocalNodeState: swarm.LocalNodeStateActive, + }, + }, nil + }, + expectedError: "Error: swarm is not locked", + }, + { + name: "unlockrequest-failed", + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + LocalNodeState: swarm.LocalNodeStateLocked, + }, + }, nil + }, + swarmUnlockFunc: func(req swarm.UnlockRequest) error { + return errors.Errorf("error unlocking the swarm") + }, + expectedError: "error unlocking the swarm", + }, + } + for _, tc := range testCases { + cmd := newUnlockCommand( + 
test.NewFakeCli(&fakeClient{ + infoFunc: tc.infoFunc, + swarmUnlockFunc: tc.swarmUnlockFunc, + })) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmUnlock(t *testing.T) { + input := "unlockKey" + dockerCli := test.NewFakeCli(&fakeClient{ + infoFunc: func() (types.Info, error) { + return types.Info{ + Swarm: swarm.Info{ + LocalNodeState: swarm.LocalNodeStateLocked, + }, + }, nil + }, + swarmUnlockFunc: func(req swarm.UnlockRequest) error { + if req.UnlockKey != input { + return errors.Errorf("Invalid unlock key") + } + return nil + }, + }) + dockerCli.SetIn(streams.NewIn(ioutil.NopCloser(strings.NewReader(input)))) + cmd := newUnlockCommand(dockerCli) + assert.NilError(t, cmd.Execute()) +} diff --git a/cli/cli/command/swarm/update.go b/cli/cli/command/swarm/update.go new file mode 100644 index 00000000..6b9ad728 --- /dev/null +++ b/cli/cli/command/swarm/update.go @@ -0,0 +1,71 @@ +package swarm + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli command.Cli) *cobra.Command { + opts := swarmOptions{} + + cmd := &cobra.Command{ + Use: "update [OPTIONS]", + Short: "Update the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), opts) + }, + PreRunE: func(cmd *cobra.Command, args []string) error { + if cmd.Flags().NFlag() == 0 { + return pflag.ErrHelp + } + return nil + }, + } + + cmd.Flags().BoolVar(&opts.autolock, flagAutolock, false, "Change manager autolocking setting (true|false)") + addSwarmFlags(cmd.Flags(), &opts) + return cmd +} + +func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, opts swarmOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var 
updateFlags swarm.UpdateFlags + + swarmInspect, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + prevAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers + + opts.mergeSwarmSpec(&swarmInspect.Spec, flags, swarmInspect.ClusterInfo.TLSInfo.TrustRoot) + + curAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers + + err = client.SwarmUpdate(ctx, swarmInspect.Version, swarmInspect.Spec, updateFlags) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Swarm updated.") + + if curAutoLock && !prevAutoLock { + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(dockerCli.Out(), unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/cli/cli/command/swarm/update_test.go b/cli/cli/command/swarm/update_test.go new file mode 100644 index 00000000..20a5624a --- /dev/null +++ b/cli/cli/command/swarm/update_test.go @@ -0,0 +1,185 @@ +package swarm + +import ( + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestSwarmUpdateErrors(t *testing.T) { + testCases := []struct { + name string + args []string + flags map[string]string + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + expectedError string + }{ + { + name: "too-many-args", + args: []string{"foo"}, + expectedError: "accepts no arguments", + }, + { + name: "swarm-inspect-error", + flags: map[string]string{ + flagTaskHistoryLimit: "10", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return swarm.Swarm{}, errors.Errorf("error inspecting the swarm") + }, + expectedError: "error inspecting the swarm", + }, + { + name: "swarm-update-error", + flags: map[string]string{ + flagTaskHistoryLimit: "10", + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + return errors.Errorf("error updating the swarm") + }, + expectedError: "error updating the swarm", + }, + { + name: "swarm-unlockkey-error", + flags: map[string]string{ + flagAutolock: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{}, errors.Errorf("error getting unlock key") + }, + expectedError: "error getting unlock key", + }, + } + for _, tc := range testCases { + cmd := newUpdateCommand( + test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + })) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSwarmUpdate(t *testing.T) { + swarmInfo := Swarm() + 
swarmInfo.ClusterInfo.TLSInfo.TrustRoot = "trustroot" + + testCases := []struct { + name string + args []string + flags map[string]string + swarmInspectFunc func() (swarm.Swarm, error) + swarmUpdateFunc func(swarm swarm.Spec, flags swarm.UpdateFlags) error + swarmGetUnlockKeyFunc func() (types.SwarmUnlockKeyResponse, error) + }{ + { + name: "noargs", + }, + { + name: "all-flags-quiet", + flags: map[string]string{ + flagTaskHistoryLimit: "10", + flagDispatcherHeartbeat: "10s", + flagCertExpiry: "20s", + flagExternalCA: "protocol=cfssl,url=https://example.com.", + flagMaxSnapshots: "10", + flagSnapshotInterval: "100", + flagAutolock: "true", + flagQuiet: "true", + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *swarmInfo, nil + }, + swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + if *swarm.Orchestration.TaskHistoryRetentionLimit != 10 { + return errors.Errorf("historyLimit not correctly set") + } + heartbeatDuration, err := time.ParseDuration("10s") + if err != nil { + return err + } + if swarm.Dispatcher.HeartbeatPeriod != heartbeatDuration { + return errors.Errorf("heartbeatPeriodLimit not correctly set") + } + certExpiryDuration, err := time.ParseDuration("20s") + if err != nil { + return err + } + if swarm.CAConfig.NodeCertExpiry != certExpiryDuration { + return errors.Errorf("certExpiry not correctly set") + } + if len(swarm.CAConfig.ExternalCAs) != 1 || swarm.CAConfig.ExternalCAs[0].CACert != "trustroot" { + return errors.Errorf("externalCA not correctly set") + } + if *swarm.Raft.KeepOldSnapshots != 10 { + return errors.Errorf("keepOldSnapshots not correctly set") + } + if swarm.Raft.SnapshotInterval != 100 { + return errors.Errorf("snapshotInterval not correctly set") + } + if !swarm.EncryptionConfig.AutoLockManagers { + return errors.Errorf("autolock not correctly set") + } + return nil + }, + }, + { + name: "autolock-unlock-key", + flags: map[string]string{ + flagTaskHistoryLimit: "10", + flagAutolock: "true", + }, 
+ swarmUpdateFunc: func(swarm swarm.Spec, flags swarm.UpdateFlags) error { + if *swarm.Orchestration.TaskHistoryRetentionLimit != 10 { + return errors.Errorf("historyLimit not correctly set") + } + return nil + }, + swarmInspectFunc: func() (swarm.Swarm, error) { + return *Swarm(), nil + }, + swarmGetUnlockKeyFunc: func() (types.SwarmUnlockKeyResponse, error) { + return types.SwarmUnlockKeyResponse{ + UnlockKey: "unlock-key", + }, nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + swarmInspectFunc: tc.swarmInspectFunc, + swarmUpdateFunc: tc.swarmUpdateFunc, + swarmGetUnlockKeyFunc: tc.swarmGetUnlockKeyFunc, + }) + cmd := newUpdateCommand(cli) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(cli.OutBuffer()) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("update-%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/system/client_test.go b/cli/cli/command/system/client_test.go new file mode 100644 index 00000000..20d8dc38 --- /dev/null +++ b/cli/cli/command/system/client_test.go @@ -0,0 +1,23 @@ +package system + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + + version string + serverVersion func(ctx context.Context) (types.Version, error) +} + +func (cli *fakeClient) ServerVersion(ctx context.Context) (types.Version, error) { + return cli.serverVersion(ctx) +} + +func (cli *fakeClient) ClientVersion() string { + return cli.version +} diff --git a/cli/cli/command/system/cmd.go b/cli/cli/command/system/cmd.go new file mode 100644 index 00000000..6accb98f --- /dev/null +++ b/cli/cli/command/system/cmd.go @@ -0,0 +1,26 @@ +package system + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewSystemCommand returns a cobra command for `system` subcommands +func 
NewSystemCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "system", + Short: "Manage Docker", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewEventsCommand(dockerCli), + NewInfoCommand(dockerCli), + newDiskUsageCommand(dockerCli), + newPruneCommand(dockerCli), + newDialStdioCommand(dockerCli), + ) + + return cmd +} diff --git a/cli/cli/command/system/df.go b/cli/cli/command/system/df.go new file mode 100644 index 00000000..116b9093 --- /dev/null +++ b/cli/cli/command/system/df.go @@ -0,0 +1,72 @@ +package system + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type diskUsageOptions struct { + verbose bool + format string +} + +// newDiskUsageCommand creates a new cobra.Command for `docker df` +func newDiskUsageCommand(dockerCli command.Cli) *cobra.Command { + var opts diskUsageOptions + + cmd := &cobra.Command{ + Use: "df [OPTIONS]", + Short: "Show docker disk usage", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runDiskUsage(dockerCli, opts) + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + + return cmd +} + +func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error { + du, err := dockerCli.Client().DiskUsage(context.Background()) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + format = formatter.TableFormatKey + } + + var bsz int64 + for _, bc := range du.BuildCache { + if !bc.Shared { + bsz += bc.Size + } + } + + duCtx := formatter.DiskUsageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewDiskUsageFormat(format, 
opts.verbose), + }, + LayersSize: du.LayersSize, + BuilderSize: bsz, + BuildCache: du.BuildCache, + Images: du.Images, + Containers: du.Containers, + Volumes: du.Volumes, + Verbose: opts.verbose, + } + + return duCtx.Write() +} diff --git a/cli/cli/command/system/dial_stdio.go b/cli/cli/command/system/dial_stdio.go new file mode 100644 index 00000000..2385c145 --- /dev/null +++ b/cli/cli/command/system/dial_stdio.go @@ -0,0 +1,128 @@ +package system + +import ( + "context" + "io" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +// newDialStdioCommand creates a new cobra.Command for `docker system dial-stdio` +func newDialStdioCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "dial-stdio", + Short: "Proxy the stdio stream to the daemon connection. Should not be invoked manually.", + Args: cli.NoArgs, + Hidden: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDialStdio(dockerCli) + }, + } + return cmd +} + +func runDialStdio(dockerCli command.Cli) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dialer := dockerCli.Client().Dialer() + conn, err := dialer(ctx) + if err != nil { + return errors.Wrap(err, "failed to open the raw stream connection") + } + defer conn.Close() + + var connHalfCloser halfCloser + switch t := conn.(type) { + case halfCloser: + connHalfCloser = t + case halfReadWriteCloser: + connHalfCloser = &nopCloseReader{t} + default: + return errors.New("the raw stream connection does not implement halfCloser") + } + + stdin2conn := make(chan error, 1) + conn2stdout := make(chan error, 1) + go func() { + stdin2conn <- copier(connHalfCloser, &halfReadCloserWrapper{os.Stdin}, "stdin to stream") + }() + go func() { + conn2stdout <- copier(&halfWriteCloserWrapper{os.Stdout}, connHalfCloser, "stream to stdout") + }() + select { + case err = <-stdin2conn: 
+ if err != nil { + return err + } + // wait for stdout + err = <-conn2stdout + case err = <-conn2stdout: + // return immediately without waiting for stdin to be closed. + // (stdin is never closed when tty) + } + return err +} + +func copier(to halfWriteCloser, from halfReadCloser, debugDescription string) error { + defer func() { + if err := from.CloseRead(); err != nil { + logrus.Errorf("error while CloseRead (%s): %v", debugDescription, err) + } + if err := to.CloseWrite(); err != nil { + logrus.Errorf("error while CloseWrite (%s): %v", debugDescription, err) + } + }() + if _, err := io.Copy(to, from); err != nil { + return errors.Wrapf(err, "error while Copy (%s)", debugDescription) + } + return nil +} + +type halfReadCloser interface { + io.Reader + CloseRead() error +} + +type halfWriteCloser interface { + io.Writer + CloseWrite() error +} + +type halfCloser interface { + halfReadCloser + halfWriteCloser +} + +type halfReadWriteCloser interface { + io.Reader + halfWriteCloser +} + +type nopCloseReader struct { + halfReadWriteCloser +} + +func (x *nopCloseReader) CloseRead() error { + return nil +} + +type halfReadCloserWrapper struct { + io.ReadCloser +} + +func (x *halfReadCloserWrapper) CloseRead() error { + return x.Close() +} + +type halfWriteCloserWrapper struct { + io.WriteCloser +} + +func (x *halfWriteCloserWrapper) CloseWrite() error { + return x.Close() +} diff --git a/cli/cli/command/system/events.go b/cli/cli/command/system/events.go new file mode 100644 index 00000000..37de9722 --- /dev/null +++ b/cli/cli/command/system/events.go @@ -0,0 +1,142 @@ +package system + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "sort" + "strings" + "text/template" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/cli/templates" + "github.com/docker/docker/api/types" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/spf13/cobra" +) + +type eventsOptions 
struct { + since string + until string + filter opts.FilterOpt + format string +} + +// NewEventsCommand creates a new cobra.Command for `docker events` +func NewEventsCommand(dockerCli command.Cli) *cobra.Command { + options := eventsOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "events [OPTIONS]", + Short: "Get real time events from the server", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runEvents(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.StringVar(&options.since, "since", "", "Show all events created since timestamp") + flags.StringVar(&options.until, "until", "", "Stream events until this timestamp") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + flags.StringVar(&options.format, "format", "", "Format the output using the given Go template") + + return cmd +} + +func runEvents(dockerCli command.Cli, options *eventsOptions) error { + tmpl, err := makeTemplate(options.format) + if err != nil { + return cli.StatusError{ + StatusCode: 64, + Status: "Error parsing format: " + err.Error()} + } + eventOptions := types.EventsOptions{ + Since: options.since, + Until: options.until, + Filters: options.filter.Value(), + } + + ctx, cancel := context.WithCancel(context.Background()) + events, errs := dockerCli.Client().Events(ctx, eventOptions) + defer cancel() + + out := dockerCli.Out() + + for { + select { + case event := <-events: + if err := handleEvent(out, event, tmpl); err != nil { + return err + } + case err := <-errs: + if err == io.EOF { + return nil + } + return err + } + } +} + +func handleEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + if tmpl == nil { + return prettyPrintEvent(out, event) + } + + return formatEvent(out, event, tmpl) +} + +func makeTemplate(format string) (*template.Template, error) { + if format == "" { + return nil, nil + } + tmpl, err := templates.Parse(format) + if err != nil 
{ + return tmpl, err + } + // we execute the template for an empty message, so as to validate + // a bad template like "{{.badFieldString}}" + return tmpl, tmpl.Execute(ioutil.Discard, &eventtypes.Message{}) +} + +// rfc3339NanoFixed is similar to time.RFC3339Nano, except it pads nanoseconds +// zeros to maintain a fixed number of characters +const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// prettyPrintEvent prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. +func prettyPrintEvent(out io.Writer, event eventtypes.Message) error { + if event.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, event.TimeNano).Format(rfc3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(event.Time, 0).Format(rfc3339NanoFixed)) + } + + fmt.Fprintf(out, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + var keys []string + for k := range event.Actor.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := event.Actor.Attributes[k] + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(out, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(out, "\n") + return nil +} + +func formatEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + defer out.Write([]byte{'\n'}) + return tmpl.Execute(out, event) +} diff --git a/cli/cli/command/system/info.go b/cli/cli/command/system/info.go new file mode 100644 index 00000000..7adf92bd --- /dev/null +++ b/cli/cli/command/system/info.go @@ -0,0 +1,491 @@ +package system + +import ( + "context" + "fmt" + "io" + "sort" + "strings" + + "github.com/docker/cli/cli" + pluginmanager "github.com/docker/cli/cli-plugins/manager" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/debug" + "github.com/docker/cli/templates" + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type infoOptions struct { + format string +} + +type clientInfo struct { + Debug bool + Plugins []pluginmanager.Plugin + Warnings []string +} + +type info struct { + // This field should/could be ServerInfo but is anonymous to + // preserve backwards compatibility in the JSON rendering + // which has ServerInfo immediately within the top-level + // object. + *types.Info `json:",omitempty"` + ServerErrors []string `json:",omitempty"` + + ClientInfo *clientInfo `json:",omitempty"` + ClientErrors []string `json:",omitempty"` +} + +// NewInfoCommand creates a new cobra.Command for `docker info` +func NewInfoCommand(dockerCli command.Cli) *cobra.Command { + var opts infoOptions + + cmd := &cobra.Command{ + Use: "info [OPTIONS]", + Short: "Display system-wide information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInfo(cmd, dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInfo(cmd *cobra.Command, dockerCli command.Cli, opts *infoOptions) error { + var info info + + ctx := context.Background() + if dinfo, err := dockerCli.Client().Info(ctx); err == nil { + info.Info = &dinfo + } else { + info.ServerErrors = append(info.ServerErrors, err.Error()) + } + + info.ClientInfo = &clientInfo{ + Debug: debug.IsEnabled(), + } + if plugins, err := pluginmanager.ListPlugins(dockerCli, cmd.Root()); err == nil { + info.ClientInfo.Plugins = plugins + } else { + info.ClientErrors = append(info.ClientErrors, err.Error()) + } + + if opts.format == "" { + return prettyPrintInfo(dockerCli, info) + } + return formatInfo(dockerCli, info, opts.format) +} + +func prettyPrintInfo(dockerCli command.Cli, info info) error { + fmt.Fprintln(dockerCli.Out(), "Client:") + if 
info.ClientInfo != nil { + if err := prettyPrintClientInfo(dockerCli, *info.ClientInfo); err != nil { + info.ClientErrors = append(info.ClientErrors, err.Error()) + } + } + for _, err := range info.ClientErrors { + fmt.Fprintln(dockerCli.Out(), "ERROR:", err) + } + + fmt.Fprintln(dockerCli.Out()) + fmt.Fprintln(dockerCli.Out(), "Server:") + if info.Info != nil { + for _, err := range prettyPrintServerInfo(dockerCli, *info.Info) { + info.ServerErrors = append(info.ServerErrors, err.Error()) + } + } + for _, err := range info.ServerErrors { + fmt.Fprintln(dockerCli.Out(), "ERROR:", err) + } + + if len(info.ServerErrors) > 0 || len(info.ClientErrors) > 0 { + return fmt.Errorf("errors pretty printing info") + } + return nil +} + +func prettyPrintClientInfo(dockerCli command.Cli, info clientInfo) error { + fmt.Fprintln(dockerCli.Out(), " Debug Mode:", info.Debug) + + if len(info.Plugins) > 0 { + fmt.Fprintln(dockerCli.Out(), " Plugins:") + for _, p := range info.Plugins { + if p.Err == nil { + var version string + if p.Version != "" { + version = ", " + p.Version + } + fmt.Fprintf(dockerCli.Out(), " %s: %s (%s%s)\n", p.Name, p.ShortDescription, p.Vendor, version) + } else { + info.Warnings = append(info.Warnings, fmt.Sprintf("WARNING: Plugin %q is not valid: %s", p.Path, p.Err)) + } + } + } + + if len(info.Warnings) > 0 { + fmt.Fprintln(dockerCli.Err(), strings.Join(info.Warnings, "\n")) + } + + return nil +} + +// nolint: gocyclo +func prettyPrintServerInfo(dockerCli command.Cli, info types.Info) []error { + var errs []error + + fmt.Fprintln(dockerCli.Out(), " Containers:", info.Containers) + fmt.Fprintln(dockerCli.Out(), " Running:", info.ContainersRunning) + fmt.Fprintln(dockerCli.Out(), " Paused:", info.ContainersPaused) + fmt.Fprintln(dockerCli.Out(), " Stopped:", info.ContainersStopped) + fmt.Fprintln(dockerCli.Out(), " Images:", info.Images) + fprintlnNonEmpty(dockerCli.Out(), " Server Version:", info.ServerVersion) + fprintlnNonEmpty(dockerCli.Out(), " Storage 
Driver:", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) + } + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) + } + } + fprintlnNonEmpty(dockerCli.Out(), " Logging Driver:", info.LoggingDriver) + fprintlnNonEmpty(dockerCli.Out(), " Cgroup Driver:", info.CgroupDriver) + + fmt.Fprintln(dockerCli.Out(), " Plugins:") + fmt.Fprintln(dockerCli.Out(), " Volume:", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintln(dockerCli.Out(), " Network:", strings.Join(info.Plugins.Network, " ")) + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintln(dockerCli.Out(), " Authorization:", strings.Join(info.Plugins.Authorization, " ")) + } + + fmt.Fprintln(dockerCli.Out(), " Log:", strings.Join(info.Plugins.Log, " ")) + + fmt.Fprintln(dockerCli.Out(), " Swarm:", info.Swarm.LocalNodeState) + printSwarmInfo(dockerCli, info) + + if len(info.Runtimes) > 0 { + fmt.Fprint(dockerCli.Out(), " Runtimes:") + for name := range info.Runtimes { + fmt.Fprintf(dockerCli.Out(), " %s", name) + } + fmt.Fprint(dockerCli.Out(), "\n") + fmt.Fprintln(dockerCli.Out(), " Default Runtime:", info.DefaultRuntime) + } + + if info.OSType == "linux" { + fmt.Fprintln(dockerCli.Out(), " Init Binary:", info.InitBinary) + + for _, ci := range []struct { + Name string + Commit types.Commit + }{ + {"containerd", info.ContainerdCommit}, + {"runc", info.RuncCommit}, + {"init", info.InitCommit}, + } { + fmt.Fprintf(dockerCli.Out(), " %s version: %s", ci.Name, ci.Commit.ID) + if ci.Commit.ID != ci.Commit.Expected { + fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected) + } + fmt.Fprint(dockerCli.Out(), "\n") + } + if len(info.SecurityOptions) != 0 { + if kvs, err := types.DecodeSecurityOptions(info.SecurityOptions); err != nil { + errs = append(errs, err) + } else { + fmt.Fprintln(dockerCli.Out(), " Security 
Options:") + for _, so := range kvs { + fmt.Fprintln(dockerCli.Out(), " "+so.Name) + for _, o := range so.Options { + switch o.Key { + case "profile": + if o.Value != "default" { + fmt.Fprintln(dockerCli.Err(), " WARNING: You're not using the default seccomp profile") + } + fmt.Fprintln(dockerCli.Out(), " Profile:", o.Value) + } + } + } + } + } + } + + // Isolation only has meaning on a Windows daemon. + if info.OSType == "windows" { + fmt.Fprintln(dockerCli.Out(), " Default Isolation:", info.Isolation) + } + + fprintlnNonEmpty(dockerCli.Out(), " Kernel Version:", info.KernelVersion) + fprintlnNonEmpty(dockerCli.Out(), " Operating System:", info.OperatingSystem) + fprintlnNonEmpty(dockerCli.Out(), " OSType:", info.OSType) + fprintlnNonEmpty(dockerCli.Out(), " Architecture:", info.Architecture) + fmt.Fprintln(dockerCli.Out(), " CPUs:", info.NCPU) + fmt.Fprintln(dockerCli.Out(), " Total Memory:", units.BytesSize(float64(info.MemTotal))) + fprintlnNonEmpty(dockerCli.Out(), " Name:", info.Name) + fprintlnNonEmpty(dockerCli.Out(), " ID:", info.ID) + fmt.Fprintln(dockerCli.Out(), " Docker Root Dir:", info.DockerRootDir) + fmt.Fprintln(dockerCli.Out(), " Debug Mode:", info.Debug) + + if info.Debug { + fmt.Fprintln(dockerCli.Out(), " File Descriptors:", info.NFd) + fmt.Fprintln(dockerCli.Out(), " Goroutines:", info.NGoroutines) + fmt.Fprintln(dockerCli.Out(), " System Time:", info.SystemTime) + fmt.Fprintln(dockerCli.Out(), " EventsListeners:", info.NEventsListener) + } + + fprintlnNonEmpty(dockerCli.Out(), " HTTP Proxy:", info.HTTPProxy) + fprintlnNonEmpty(dockerCli.Out(), " HTTPS Proxy:", info.HTTPSProxy) + fprintlnNonEmpty(dockerCli.Out(), " No Proxy:", info.NoProxy) + + if info.IndexServerAddress != "" { + u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintln(dockerCli.Out(), " Username:", u) + } + fmt.Fprintln(dockerCli.Out(), " Registry:", info.IndexServerAddress) + } + + if info.Labels != nil { + 
fmt.Fprintln(dockerCli.Out(), " Labels:") + for _, lbl := range info.Labels { + fmt.Fprintln(dockerCli.Out(), " "+lbl) + } + } + + fmt.Fprintln(dockerCli.Out(), " Experimental:", info.ExperimentalBuild) + fprintlnNonEmpty(dockerCli.Out(), " Cluster Store:", info.ClusterStore) + fprintlnNonEmpty(dockerCli.Out(), " Cluster Advertise:", info.ClusterAdvertise) + + if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { + fmt.Fprintln(dockerCli.Out(), " Insecure Registries:") + for _, registry := range info.RegistryConfig.IndexConfigs { + if !registry.Secure { + fmt.Fprintln(dockerCli.Out(), " "+registry.Name) + } + } + + for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { + mask, _ := registry.Mask.Size() + fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) + } + } + + if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 { + fmt.Fprintln(dockerCli.Out(), " Registry Mirrors:") + for _, mirror := range info.RegistryConfig.Mirrors { + fmt.Fprintln(dockerCli.Out(), " "+mirror) + } + } + + fmt.Fprintln(dockerCli.Out(), " Live Restore Enabled:", info.LiveRestoreEnabled) + if info.ProductLicense != "" { + fmt.Fprintln(dockerCli.Out(), " Product License:", info.ProductLicense) + } + fmt.Fprint(dockerCli.Out(), "\n") + + printServerWarnings(dockerCli, info) + return errs +} + +// nolint: gocyclo +func printSwarmInfo(dockerCli command.Cli, info types.Info) { + if info.Swarm.LocalNodeState == swarm.LocalNodeStateInactive || info.Swarm.LocalNodeState == swarm.LocalNodeStateLocked { + return + } + fmt.Fprintln(dockerCli.Out(), " NodeID:", info.Swarm.NodeID) + if info.Swarm.Error != "" { + fmt.Fprintln(dockerCli.Out(), " Error:", info.Swarm.Error) + } + fmt.Fprintln(dockerCli.Out(), " Is Manager:", info.Swarm.ControlAvailable) + if info.Swarm.Cluster != nil && info.Swarm.ControlAvailable && info.Swarm.Error == "" && info.Swarm.LocalNodeState != 
swarm.LocalNodeStateError { + fmt.Fprintln(dockerCli.Out(), " ClusterID:", info.Swarm.Cluster.ID) + fmt.Fprintln(dockerCli.Out(), " Managers:", info.Swarm.Managers) + fmt.Fprintln(dockerCli.Out(), " Nodes:", info.Swarm.Nodes) + var strAddrPool strings.Builder + if info.Swarm.Cluster.DefaultAddrPool != nil { + for _, p := range info.Swarm.Cluster.DefaultAddrPool { + strAddrPool.WriteString(p + " ") + } + fmt.Fprintln(dockerCli.Out(), " Default Address Pool:", strAddrPool.String()) + fmt.Fprintln(dockerCli.Out(), " SubnetSize:", info.Swarm.Cluster.SubnetSize) + } + if info.Swarm.Cluster.DataPathPort > 0 { + fmt.Fprintln(dockerCli.Out(), " Data Path Port:", info.Swarm.Cluster.DataPathPort) + } + fmt.Fprintln(dockerCli.Out(), " Orchestration:") + + taskHistoryRetentionLimit := int64(0) + if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil { + taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit + } + fmt.Fprintln(dockerCli.Out(), " Task History Retention Limit:", taskHistoryRetentionLimit) + fmt.Fprintln(dockerCli.Out(), " Raft:") + fmt.Fprintln(dockerCli.Out(), " Snapshot Interval:", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) + if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil { + fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots) + } + fmt.Fprintln(dockerCli.Out(), " Heartbeat Tick:", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) + fmt.Fprintln(dockerCli.Out(), " Election Tick:", info.Swarm.Cluster.Spec.Raft.ElectionTick) + fmt.Fprintln(dockerCli.Out(), " Dispatcher:") + fmt.Fprintln(dockerCli.Out(), " Heartbeat Period:", units.HumanDuration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod)) + fmt.Fprintln(dockerCli.Out(), " CA Configuration:") + fmt.Fprintln(dockerCli.Out(), " Expiry Duration:", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) + fmt.Fprintln(dockerCli.Out(), " Force Rotate:", 
info.Swarm.Cluster.Spec.CAConfig.ForceRotate) + if caCert := strings.TrimSpace(info.Swarm.Cluster.Spec.CAConfig.SigningCACert); caCert != "" { + fmt.Fprintf(dockerCli.Out(), " Signing CA Certificate: \n%s\n\n", caCert) + } + if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { + fmt.Fprintln(dockerCli.Out(), " External CAs:") + for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL) + } + } + fmt.Fprintln(dockerCli.Out(), " Autolock Managers:", info.Swarm.Cluster.Spec.EncryptionConfig.AutoLockManagers) + fmt.Fprintln(dockerCli.Out(), " Root Rotation In Progress:", info.Swarm.Cluster.RootRotationInProgress) + } + fmt.Fprintln(dockerCli.Out(), " Node Address:", info.Swarm.NodeAddr) + if len(info.Swarm.RemoteManagers) > 0 { + managers := []string{} + for _, entry := range info.Swarm.RemoteManagers { + managers = append(managers, entry.Addr) + } + sort.Strings(managers) + fmt.Fprintln(dockerCli.Out(), " Manager Addresses:") + for _, entry := range managers { + fmt.Fprintf(dockerCli.Out(), " %s\n", entry) + } + } +} + +func printServerWarnings(dockerCli command.Cli, info types.Info) { + if len(info.Warnings) > 0 { + fmt.Fprintln(dockerCli.Err(), strings.Join(info.Warnings, "\n")) + return + } + // daemon didn't return warnings. Fallback to old behavior + printStorageDriverWarnings(dockerCli, info) + printServerWarningsLegacy(dockerCli, info) +} + +// printServerWarningsLegacy generates warnings based on information returned by the daemon. +// DEPRECATED: warnings are now generated by the daemon, and returned in +// info.Warnings. This function is used to provide backward compatibility with +// daemons that do not provide these warnings. No new warnings should be added +// here. 
+func printServerWarningsLegacy(dockerCli command.Cli, info types.Info) { + if info.OSType == "windows" { + return + } + if !info.MemoryLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") + } +} + +// printStorageDriverWarnings generates warnings based on storage-driver information +// returned by the daemon. +// DEPRECATED: warnings are now generated by the daemon, and returned in +// info.Warnings. This function is used to provide backward compatibility with +// daemons that do not provide these warnings. No new warnings should be added +// here. 
+func printStorageDriverWarnings(dockerCli command.Cli, info types.Info) { + if info.OSType == "windows" { + return + } + if info.DriverStatus == nil { + return + } + for _, pair := range info.DriverStatus { + if pair[0] == "Data loop file" { + fmt.Fprintf(dockerCli.Err(), "WARNING: %s: usage of loopback devices is "+ + "strongly discouraged for production use.\n "+ + "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.\n", info.Driver) + } + if pair[0] == "Supports d_type" && pair[1] == "false" { + backingFs := getBackingFs(info) + + msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", info.Driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" + } + msg += " Running without d_type support will not be supported in future releases." + fmt.Fprintln(dockerCli.Err(), msg) + } + } +} + +func getBackingFs(info types.Info) string { + if info.DriverStatus == nil { + return "" + } + + for _, pair := range info.DriverStatus { + if pair[0] == "Backing Filesystem" { + return pair[1] + } + } + return "" +} + +func formatInfo(dockerCli command.Cli, info info, format string) error { + // Ensure slice/array fields render as `[]` not `null` + if info.ClientInfo != nil && info.ClientInfo.Plugins == nil { + info.ClientInfo.Plugins = make([]pluginmanager.Plugin, 0) + } + + tmpl, err := templates.Parse(format) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + err = tmpl.Execute(dockerCli.Out(), info) + dockerCli.Out().Write([]byte{'\n'}) + return err +} + +func fprintlnNonEmpty(w io.Writer, label, value string) { + if value != "" { + fmt.Fprintln(w, label, value) + } +} diff --git a/cli/cli/command/system/info_test.go b/cli/cli/command/system/info_test.go new file mode 100644 index 00000000..3e71bab0 --- /dev/null +++ 
b/cli/cli/command/system/info_test.go @@ -0,0 +1,398 @@ +package system + +import ( + "encoding/base64" + "net" + "testing" + "time" + + pluginmanager "github.com/docker/cli/cli-plugins/manager" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +// helper function that base64 decodes a string and ignores the error +func base64Decode(val string) []byte { + decoded, _ := base64.StdEncoding.DecodeString(val) + return decoded +} + +const sampleID = "EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX" + +var sampleInfoNoSwarm = types.Info{ + ID: sampleID, + Containers: 0, + ContainersRunning: 0, + ContainersPaused: 0, + ContainersStopped: 0, + Images: 0, + Driver: "aufs", + DriverStatus: [][2]string{ + {"Root Dir", "/var/lib/docker/aufs"}, + {"Backing Filesystem", "extfs"}, + {"Dirs", "0"}, + {"Dirperm1 Supported", "true"}, + }, + SystemStatus: nil, + Plugins: types.PluginsInfo{ + Volume: []string{"local"}, + Network: []string{"bridge", "host", "macvlan", "null", "overlay"}, + Authorization: nil, + Log: []string{"awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"}, + }, + MemoryLimit: true, + SwapLimit: true, + KernelMemory: true, + CPUCfsPeriod: true, + CPUCfsQuota: true, + CPUShares: true, + CPUSet: true, + IPv4Forwarding: true, + BridgeNfIptables: true, + BridgeNfIP6tables: true, + Debug: true, + NFd: 33, + OomKillDisable: true, + NGoroutines: 135, + SystemTime: "2017-08-24T17:44:34.077811894Z", + LoggingDriver: "json-file", + CgroupDriver: "cgroupfs", + NEventsListener: 0, + KernelVersion: "4.4.0-87-generic", + OperatingSystem: "Ubuntu 16.04.3 LTS", + OSType: "linux", + Architecture: "x86_64", + IndexServerAddress: "https://index.docker.io/v1/", + RegistryConfig: ®istry.ServiceConfig{ + 
AllowNondistributableArtifactsCIDRs: nil, + AllowNondistributableArtifactsHostnames: nil, + InsecureRegistryCIDRs: []*registry.NetIPNet{ + { + IP: net.ParseIP("127.0.0.0"), + Mask: net.IPv4Mask(255, 0, 0, 0), + }, + }, + IndexConfigs: map[string]*registry.IndexInfo{ + "docker.io": { + Name: "docker.io", + Mirrors: nil, + Secure: true, + Official: true, + }, + }, + Mirrors: nil, + }, + NCPU: 2, + MemTotal: 2097356800, + DockerRootDir: "/var/lib/docker", + HTTPProxy: "", + HTTPSProxy: "", + NoProxy: "", + Name: "system-sample", + Labels: []string{"provider=digitalocean"}, + ExperimentalBuild: false, + ServerVersion: "17.06.1-ce", + ClusterStore: "", + ClusterAdvertise: "", + Runtimes: map[string]types.Runtime{ + "runc": { + Path: "docker-runc", + Args: nil, + }, + }, + DefaultRuntime: "runc", + Swarm: swarm.Info{LocalNodeState: "inactive"}, + LiveRestoreEnabled: false, + Isolation: "", + InitBinary: "docker-init", + ContainerdCommit: types.Commit{ + ID: "6e23458c129b551d5c9871e5174f6b1b7f6d1170", + Expected: "6e23458c129b551d5c9871e5174f6b1b7f6d1170", + }, + RuncCommit: types.Commit{ + ID: "810190ceaa507aa2727d7ae6f4790c76ec150bd2", + Expected: "810190ceaa507aa2727d7ae6f4790c76ec150bd2", + }, + InitCommit: types.Commit{ + ID: "949e6fa", + Expected: "949e6fa", + }, + SecurityOptions: []string{"name=apparmor", "name=seccomp,profile=default"}, +} + +var sampleSwarmInfo = swarm.Info{ + NodeID: "qo2dfdig9mmxqkawulggepdih", + NodeAddr: "165.227.107.89", + LocalNodeState: "active", + ControlAvailable: true, + Error: "", + RemoteManagers: []swarm.Peer{ + { + NodeID: "qo2dfdig9mmxqkawulggepdih", + Addr: "165.227.107.89:2377", + }, + }, + Nodes: 1, + Managers: 1, + Cluster: &swarm.ClusterInfo{ + ID: "9vs5ygs0gguyyec4iqf2314c0", + Meta: swarm.Meta{ + Version: swarm.Version{Index: 11}, + CreatedAt: time.Date(2017, 8, 24, 17, 34, 19, 278062352, time.UTC), + UpdatedAt: time.Date(2017, 8, 24, 17, 34, 42, 398815481, time.UTC), + }, + Spec: swarm.Spec{ + Annotations: 
swarm.Annotations{ + Name: "default", + Labels: nil, + }, + Orchestration: swarm.OrchestrationConfig{ + TaskHistoryRetentionLimit: &[]int64{5}[0], + }, + Raft: swarm.RaftConfig{ + SnapshotInterval: 10000, + KeepOldSnapshots: &[]uint64{0}[0], + LogEntriesForSlowFollowers: 500, + ElectionTick: 3, + HeartbeatTick: 1, + }, + Dispatcher: swarm.DispatcherConfig{ + HeartbeatPeriod: 5000000000, + }, + CAConfig: swarm.CAConfig{ + NodeCertExpiry: 7776000000000000, + }, + TaskDefaults: swarm.TaskDefaults{}, + EncryptionConfig: swarm.EncryptionConfig{ + AutoLockManagers: true, + }, + }, + TLSInfo: swarm.TLSInfo{ + TrustRoot: ` +-----BEGIN CERTIFICATE----- +MIIBajCCARCgAwIBAgIUaFCW5xsq8eyiJ+Pmcv3MCflMLnMwCgYIKoZIzj0EAwIw +EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwODI0MTcyOTAwWhcNMzcwODE5MTcy +OTAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH +A0IABDy7NebyUJyUjWJDBUdnZoV6GBxEGKO4TZPNDwnxDxJcUdLVaB7WGa4/DLrW +UfsVgh1JGik2VTiLuTMA1tLlNPOjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQl16XFtaaXiUAwEuJptJlDjfKskDAKBggqhkjO +PQQDAgNIADBFAiEAo9fTQNM5DP9bHVcTJYfl2Cay1bFu1E+lnpmN+EYJfeACIGKH +1pCUkZ+D0IB6CiEZGWSHyLuXPM1rlP+I5KuS7sB8 +-----END CERTIFICATE----- +`, + CertIssuerSubject: base64Decode("MBMxETAPBgNVBAMTCHN3YXJtLWNh"), + CertIssuerPublicKey: base64Decode( + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEPLs15vJQnJSNYkMFR2dmhXoYHEQYo7hNk80PCfEPElxR0tVoHtYZrj8MutZR+xWCHUkaKTZVOIu5MwDW0uU08w=="), + }, + RootRotationInProgress: false, + }, +} + +var samplePluginsInfo = []pluginmanager.Plugin{ + { + Name: "goodplugin", + Path: "/path/to/docker-goodplugin", + Metadata: pluginmanager.Metadata{ + SchemaVersion: "0.1.0", + ShortDescription: "unit test is good", + Vendor: "ACME Corp", + Version: "0.1.0", + }, + }, + { + Name: "unversionedplugin", + Path: "/path/to/docker-unversionedplugin", + Metadata: pluginmanager.Metadata{ + SchemaVersion: "0.1.0", + ShortDescription: "this plugin has no version", + Vendor: "ACME Corp", + }, + }, + { + Name: "badplugin", + Path: 
"/path/to/docker-badplugin", + Err: pluginmanager.NewPluginError("something wrong"), + }, +} + +func TestPrettyPrintInfo(t *testing.T) { + infoWithSwarm := sampleInfoNoSwarm + infoWithSwarm.Swarm = sampleSwarmInfo + + infoWithWarningsLinux := sampleInfoNoSwarm + infoWithWarningsLinux.MemoryLimit = false + infoWithWarningsLinux.SwapLimit = false + infoWithWarningsLinux.KernelMemory = false + infoWithWarningsLinux.OomKillDisable = false + infoWithWarningsLinux.CPUCfsQuota = false + infoWithWarningsLinux.CPUCfsPeriod = false + infoWithWarningsLinux.CPUShares = false + infoWithWarningsLinux.CPUSet = false + infoWithWarningsLinux.IPv4Forwarding = false + infoWithWarningsLinux.BridgeNfIptables = false + infoWithWarningsLinux.BridgeNfIP6tables = false + + sampleInfoDaemonWarnings := sampleInfoNoSwarm + sampleInfoDaemonWarnings.Warnings = []string{ + "WARNING: No memory limit support", + "WARNING: No swap limit support", + "WARNING: No kernel memory limit support", + "WARNING: No oom kill disable support", + "WARNING: No cpu cfs quota support", + "WARNING: No cpu cfs period support", + "WARNING: No cpu shares support", + "WARNING: No cpuset support", + "WARNING: IPv4 forwarding is disabled", + "WARNING: bridge-nf-call-iptables is disabled", + "WARNING: bridge-nf-call-ip6tables is disabled", + } + + sampleInfoBadSecurity := sampleInfoNoSwarm + sampleInfoBadSecurity.SecurityOptions = []string{"foo="} + + for _, tc := range []struct { + doc string + dockerInfo info + + prettyGolden string + warningsGolden string + jsonGolden string + expectedError string + }{ + { + doc: "info without swarm", + dockerInfo: info{ + Info: &sampleInfoNoSwarm, + ClientInfo: &clientInfo{Debug: true}, + }, + prettyGolden: "docker-info-no-swarm", + jsonGolden: "docker-info-no-swarm", + }, + { + doc: "info with plugins", + dockerInfo: info{ + Info: &sampleInfoNoSwarm, + ClientInfo: &clientInfo{ + Plugins: samplePluginsInfo, + }, + }, + prettyGolden: "docker-info-plugins", + jsonGolden: 
"docker-info-plugins", + warningsGolden: "docker-info-plugins-warnings", + }, + { + + doc: "info with swarm", + dockerInfo: info{ + Info: &infoWithSwarm, + ClientInfo: &clientInfo{Debug: false}, + }, + prettyGolden: "docker-info-with-swarm", + jsonGolden: "docker-info-with-swarm", + }, + { + doc: "info with legacy warnings", + dockerInfo: info{ + Info: &infoWithWarningsLinux, + ClientInfo: &clientInfo{Debug: true}, + }, + prettyGolden: "docker-info-no-swarm", + warningsGolden: "docker-info-warnings", + jsonGolden: "docker-info-legacy-warnings", + }, + { + doc: "info with daemon warnings", + dockerInfo: info{ + Info: &sampleInfoDaemonWarnings, + ClientInfo: &clientInfo{Debug: true}, + }, + prettyGolden: "docker-info-no-swarm", + warningsGolden: "docker-info-warnings", + jsonGolden: "docker-info-daemon-warnings", + }, + { + doc: "errors for both", + dockerInfo: info{ + ServerErrors: []string{"a server error occurred"}, + ClientErrors: []string{"a client error occurred"}, + }, + prettyGolden: "docker-info-errors", + jsonGolden: "docker-info-errors", + expectedError: "errors pretty printing info", + }, + { + doc: "bad security info", + dockerInfo: info{ + Info: &sampleInfoBadSecurity, + ServerErrors: []string{"an error happened"}, + ClientInfo: &clientInfo{Debug: false}, + }, + prettyGolden: "docker-info-badsec", + jsonGolden: "docker-info-badsec", + expectedError: "errors pretty printing info", + }, + } { + t.Run(tc.doc, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + err := prettyPrintInfo(cli, tc.dockerInfo) + if tc.expectedError == "" { + assert.NilError(t, err) + } else { + assert.Error(t, err, tc.expectedError) + } + golden.Assert(t, cli.OutBuffer().String(), tc.prettyGolden+".golden") + if tc.warningsGolden != "" { + golden.Assert(t, cli.ErrBuffer().String(), tc.warningsGolden+".golden") + } else { + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) + } + + cli = test.NewFakeCli(&fakeClient{}) + assert.NilError(t, formatInfo(cli, 
tc.dockerInfo, "{{json .}}")) + golden.Assert(t, cli.OutBuffer().String(), tc.jsonGolden+".json.golden") + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) + }) + } +} + +func TestFormatInfo(t *testing.T) { + for _, tc := range []struct { + doc string + template string + expectedError string + expectedOut string + }{ + { + doc: "basic", + template: "{{.ID}}", + expectedOut: sampleID + "\n", + }, + { + doc: "syntax", + template: "{{}", + expectedError: `Status: Template parsing error: template: :1: unexpected "}" in command, Code: 64`, + }, + } { + t.Run(tc.doc, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + info := info{ + Info: &sampleInfoNoSwarm, + ClientInfo: &clientInfo{Debug: true}, + } + err := formatInfo(cli, info, tc.template) + if tc.expectedOut != "" { + assert.NilError(t, err) + assert.Equal(t, cli.OutBuffer().String(), tc.expectedOut) + } else if tc.expectedError != "" { + assert.Error(t, err, tc.expectedError) + } else { + t.Fatal("test expected to neither pass nor fail") + } + }) + } +} diff --git a/cli/cli/command/system/inspect.go b/cli/cli/command/system/inspect.go new file mode 100644 index 00000000..b49b4b33 --- /dev/null +++ b/cli/cli/command/system/inspect.go @@ -0,0 +1,218 @@ +package system + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + inspectType string + size bool + ids []string +} + +// NewInspectCommand creates a new cobra.Command for `docker inspect` +func NewInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NAME|ID [NAME|ID...]", + Short: "Return low-level information on Docker objects", + Args: cli.RequiresMinArgs(1), + RunE: 
func(cmd *cobra.Command, args []string) error { + opts.ids = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.StringVar(&opts.inspectType, "type", "", "Return JSON for specified type") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes if the type is container") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + var elementSearcher inspect.GetRefFunc + switch opts.inspectType { + case "", "container", "image", "node", "network", "service", "volume", "task", "plugin", "secret": + elementSearcher = inspectAll(context.Background(), dockerCli, opts.size, opts.inspectType) + default: + return errors.Errorf("%q is not a valid value for --type", opts.inspectType) + } + return inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, elementSearcher) +} + +func inspectContainers(ctx context.Context, dockerCli command.Cli, getSize bool) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize) + } +} + +func inspectImages(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ImageInspectWithRaw(ctx, ref) + } +} + +func inspectNetwork(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NetworkInspectWithRaw(ctx, ref, types.NetworkInspectOptions{}) + } +} + +func inspectNode(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NodeInspectWithRaw(ctx, ref) + } +} + +func inspectService(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + 
// Service inspect shows defaults values in empty fields. + return dockerCli.Client().ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true}) + } +} + +func inspectTasks(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().TaskInspectWithRaw(ctx, ref) + } +} + +func inspectVolume(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().VolumeInspectWithRaw(ctx, ref) + } +} + +func inspectPlugin(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().PluginInspectWithRaw(ctx, ref) + } +} + +func inspectSecret(ctx context.Context, dockerCli command.Cli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().SecretInspectWithRaw(ctx, ref) + } +} + +func inspectAll(ctx context.Context, dockerCli command.Cli, getSize bool, typeConstraint string) inspect.GetRefFunc { + var inspectAutodetect = []struct { + objectType string + isSizeSupported bool + isSwarmObject bool + objectInspector func(string) (interface{}, []byte, error) + }{ + { + objectType: "container", + isSizeSupported: true, + objectInspector: inspectContainers(ctx, dockerCli, getSize), + }, + { + objectType: "image", + objectInspector: inspectImages(ctx, dockerCli), + }, + { + objectType: "network", + objectInspector: inspectNetwork(ctx, dockerCli), + }, + { + objectType: "volume", + objectInspector: inspectVolume(ctx, dockerCli), + }, + { + objectType: "service", + isSwarmObject: true, + objectInspector: inspectService(ctx, dockerCli), + }, + { + objectType: "task", + isSwarmObject: true, + objectInspector: inspectTasks(ctx, dockerCli), + }, + { + objectType: "node", + isSwarmObject: true, + objectInspector: inspectNode(ctx, dockerCli), + }, + { + 
objectType: "plugin", + objectInspector: inspectPlugin(ctx, dockerCli), + }, + { + objectType: "secret", + isSwarmObject: true, + objectInspector: inspectSecret(ctx, dockerCli), + }, + } + + // isSwarmManager does an Info API call to verify that the daemon is + // a swarm manager. + isSwarmManager := func() bool { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return false + } + return info.Swarm.ControlAvailable + } + + return func(ref string) (interface{}, []byte, error) { + const ( + swarmSupportUnknown = iota + swarmSupported + swarmUnsupported + ) + + isSwarmSupported := swarmSupportUnknown + + for _, inspectData := range inspectAutodetect { + if typeConstraint != "" && inspectData.objectType != typeConstraint { + continue + } + if typeConstraint == "" && inspectData.isSwarmObject { + if isSwarmSupported == swarmSupportUnknown { + if isSwarmManager() { + isSwarmSupported = swarmSupported + } else { + isSwarmSupported = swarmUnsupported + } + } + if isSwarmSupported == swarmUnsupported { + continue + } + } + v, raw, err := inspectData.objectInspector(ref) + if err != nil { + if typeConstraint == "" && isErrSkippable(err) { + continue + } + return v, raw, err + } + if getSize && !inspectData.isSizeSupported { + fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.objectType) + } + return v, raw, err + } + return nil, nil, errors.Errorf("Error: No such object: %s", ref) + } +} + +func isErrSkippable(err error) bool { + return apiclient.IsErrNotFound(err) || + strings.Contains(err.Error(), "not supported") || + strings.Contains(err.Error(), "invalid reference format") +} diff --git a/cli/cli/command/system/prune.go b/cli/cli/command/system/prune.go new file mode 100644 index 00000000..0e8f115c --- /dev/null +++ b/cli/cli/command/system/prune.go @@ -0,0 +1,148 @@ +package system + +import ( + "bytes" + "fmt" + "sort" + "text/template" + + "github.com/docker/cli/cli" + 
"github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/builder" + "github.com/docker/cli/cli/command/container" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/command/network" + "github.com/docker/cli/cli/command/volume" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type pruneOptions struct { + force bool + all bool + pruneVolumes bool + pruneBuildCache bool + filter opts.FilterOpt +} + +// newPruneCommand creates a new cobra.Command for `docker prune` +func newPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused data", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + options.pruneBuildCache = versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31") + return runPrune(dockerCli, options) + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images not just dangling ones") + flags.BoolVar(&options.pruneVolumes, "volumes", false, "Prune volumes") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'label==')") + // "filter" flag is available in 1.28 (docker 17.04) and up + flags.SetAnnotation("filter", "version", []string{"1.28"}) + + return cmd +} + +const confirmationTemplate = `WARNING! 
This will remove: +{{- range $_, $warning := .warnings }} + - {{ $warning }} +{{- end }} +{{if .filters}} + Items to be pruned will be filtered with: +{{- range $_, $filters := .filters }} + - {{ $filters }} +{{- end }} +{{end}} +Are you sure you want to continue?` + +func runPrune(dockerCli command.Cli, options pruneOptions) error { + // TODO version this once "until" filter is supported for volumes + if options.pruneVolumes && options.filter.Value().Contains("until") { + return fmt.Errorf(`ERROR: The "until" filter is not supported with "--volumes"`) + } + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), confirmationMessage(dockerCli, options)) { + return nil + } + pruneFuncs := []func(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error){ + container.RunPrune, + network.RunPrune, + } + if options.pruneVolumes { + pruneFuncs = append(pruneFuncs, volume.RunPrune) + } + pruneFuncs = append(pruneFuncs, image.RunPrune) + if options.pruneBuildCache { + pruneFuncs = append(pruneFuncs, builder.CachePrune) + } + + var spaceReclaimed uint64 + for _, pruneFn := range pruneFuncs { + spc, output, err := pruneFn(dockerCli, options.all, options.filter) + if err != nil { + return err + } + spaceReclaimed += spc + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + } + + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + + return nil +} + +// confirmationMessage constructs a confirmation message that depends on the cli options. 
+func confirmationMessage(dockerCli command.Cli, options pruneOptions) string { + t := template.Must(template.New("confirmation message").Parse(confirmationTemplate)) + + warnings := []string{ + "all stopped containers", + "all networks not used by at least one container", + } + if options.pruneVolumes { + warnings = append(warnings, "all volumes not used by at least one container") + } + if options.all { + warnings = append(warnings, "all images without at least one container associated to them") + } else { + warnings = append(warnings, "all dangling images") + } + if options.pruneBuildCache { + if options.all { + warnings = append(warnings, "all build cache") + } else { + warnings = append(warnings, "all dangling build cache") + } + } + + var filters []string + pruneFilters := command.PruneFilters(dockerCli, options.filter.Value()) + if pruneFilters.Len() > 0 { + // TODO remove fixed list of filters, and print all filters instead, + // because the list of filters that is supported by the engine may evolve over time. 
+ for _, name := range []string{"label", "label!", "until"} { + for _, v := range pruneFilters.Get(name) { + filters = append(filters, name+"="+v) + } + } + sort.Slice(filters, func(i, j int) bool { + return sortorder.NaturalLess(filters[i], filters[j]) + }) + } + + var buffer bytes.Buffer + t.Execute(&buffer, map[string][]string{"warnings": warnings, "filters": filters}) + return buffer.String() +} diff --git a/cli/cli/command/system/prune_test.go b/cli/cli/command/system/prune_test.go new file mode 100644 index 00000000..761549fc --- /dev/null +++ b/cli/cli/command/system/prune_test.go @@ -0,0 +1,51 @@ +package system + +import ( + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPrunePromptPre131DoesNotIncludeBuildCache(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{version: "1.30"}) + cmd := newPruneCommand(cli) + cmd.SetArgs([]string{}) + assert.NilError(t, cmd.Execute()) + expected := `WARNING! This will remove: + - all stopped containers + - all networks not used by at least one container + - all dangling images + +Are you sure you want to continue? [y/N] ` + assert.Check(t, is.Equal(expected, cli.OutBuffer().String())) +} + +func TestPrunePromptFilters(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{version: "1.31"}) + cli.SetConfigFile(&configfile.ConfigFile{ + PruneFilters: []string{"label!=never=remove-me", "label=remove=me"}, + }) + cmd := newPruneCommand(cli) + cmd.SetArgs([]string{"--filter", "until=24h", "--filter", "label=hello-world", "--filter", "label!=foo=bar", "--filter", "label=bar=baz"}) + + assert.NilError(t, cmd.Execute()) + expected := `WARNING! 
This will remove: + - all stopped containers + - all networks not used by at least one container + - all dangling images + - all dangling build cache + + Items to be pruned will be filtered with: + - label!=foo=bar + - label!=never=remove-me + - label=bar=baz + - label=hello-world + - label=remove=me + - until=24h + +Are you sure you want to continue? [y/N] ` + assert.Check(t, is.Equal(expected, cli.OutBuffer().String())) +} diff --git a/cli/cli/command/system/testdata/docker-client-version.golden b/cli/cli/command/system/testdata/docker-client-version.golden new file mode 100644 index 00000000..04cc88a6 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-client-version.golden @@ -0,0 +1,44 @@ +Client: + Version: 18.99.5-ce + API version: 1.38 + Go version: go1.10.2 + Git commit: deadbeef + Built: Wed May 30 22:21:05 2018 + OS/Arch: linux/amd64 + Experimental: true + +Server: Docker Enterprise Edition (EE) 2.0 + Engine: + Version: 17.06.2-ee-15 + API version: 1.30 (minimum version 1.12) + Go version: go1.8.7 + Git commit: 64ddfa6 + Built: Mon Jul 9 23:38:38 2018 + OS/Arch: linux/amd64 + Experimental: false + Universal Control Plane: + Version: 17.06.2-ee-15 + ApiVersion: 1.30 + Arch: amd64 + BuildTime: Mon Jul 2 21:24:07 UTC 2018 + GitCommit: 4513922 + GoVersion: go1.9.4 + MinApiVersion: 1.20 + Os: linux + Version: 3.0.3-tp2 + Kubernetes: + Version: 1.8+ + buildDate: 2018-04-26T16:51:21Z + compiler: gc + gitCommit: 8d637aedf46b9c21dde723e29c645b9f27106fa5 + gitTreeState: clean + gitVersion: v1.8.11-docker-8d637ae + goVersion: go1.8.3 + major: 1 + minor: 8+ + platform: linux/amd64 + Calico: + Version: v3.0.8 + cni: v2.0.6 + kube-controllers: v2.0.5 + node: v3.0.8 diff --git a/cli/cli/command/system/testdata/docker-info-badsec.golden b/cli/cli/command/system/testdata/docker-info-badsec.golden new file mode 100644 index 00000000..7c074e8d --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-badsec.golden @@ -0,0 +1,52 @@ +Client: + Debug Mode: false 
+ +Server: + Containers: 0 + Running: 0 + Paused: 0 + Stopped: 0 + Images: 0 + Server Version: 17.06.1-ce + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Backing Filesystem: extfs + Dirs: 0 + Dirperm1 Supported: true + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file logentries splunk syslog + Swarm: inactive + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 6e23458c129b551d5c9871e5174f6b1b7f6d1170 + runc version: 810190ceaa507aa2727d7ae6f4790c76ec150bd2 + init version: 949e6fa + Kernel Version: 4.4.0-87-generic + Operating System: Ubuntu 16.04.3 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.953GiB + Name: system-sample + ID: EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX + Docker Root Dir: /var/lib/docker + Debug Mode: true + File Descriptors: 33 + Goroutines: 135 + System Time: 2017-08-24T17:44:34.077811894Z + EventsListeners: 0 + Registry: https://index.docker.io/v1/ + Labels: + provider=digitalocean + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Live Restore Enabled: false + +ERROR: an error happened +ERROR: invalid empty security option diff --git a/cli/cli/command/system/testdata/docker-info-badsec.json.golden b/cli/cli/command/system/testdata/docker-info-badsec.json.golden new file mode 100644 index 00000000..54b86606 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-badsec.json.golden @@ -0,0 +1 @@ +{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 
Supported","true"]],"SystemStatus":null,"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 LTS","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","ClusterStore":"","ClusterAdvertise":"","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["foo="],"Warnings":null,"ServerErrors":["an error 
happened"],"ClientInfo":{"Debug":false,"Plugins":[],"Warnings":null}} diff --git a/cli/cli/command/system/testdata/docker-info-daemon-warnings.json.golden b/cli/cli/command/system/testdata/docker-info-daemon-warnings.json.golden new file mode 100644 index 00000000..ac22a7b9 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-daemon-warnings.json.golden @@ -0,0 +1 @@ +{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"SystemStatus":null,"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 
LTS","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","ClusterStore":"","ClusterAdvertise":"","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"Warnings":["WARNING: No memory limit support","WARNING: No swap limit support","WARNING: No kernel memory limit support","WARNING: No oom kill disable support","WARNING: No cpu cfs quota support","WARNING: No cpu cfs period support","WARNING: No cpu shares support","WARNING: No cpuset support","WARNING: IPv4 forwarding is disabled","WARNING: bridge-nf-call-iptables is disabled","WARNING: bridge-nf-call-ip6tables is disabled"],"ClientInfo":{"Debug":true,"Plugins":[],"Warnings":null}} diff --git a/cli/cli/command/system/testdata/docker-info-errors.golden b/cli/cli/command/system/testdata/docker-info-errors.golden new file mode 100644 index 00000000..549611ee --- /dev/null +++ 
b/cli/cli/command/system/testdata/docker-info-errors.golden @@ -0,0 +1,5 @@ +Client: +ERROR: a client error occurred + +Server: +ERROR: a server error occurred diff --git a/cli/cli/command/system/testdata/docker-info-errors.json.golden b/cli/cli/command/system/testdata/docker-info-errors.json.golden new file mode 100644 index 00000000..3dcddd78 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-errors.json.golden @@ -0,0 +1 @@ +{"ServerErrors":["a server error occurred"],"ClientErrors":["a client error occurred"]} diff --git a/cli/cli/command/system/testdata/docker-info-legacy-warnings.json.golden b/cli/cli/command/system/testdata/docker-info-legacy-warnings.json.golden new file mode 100644 index 00000000..cc5a9b78 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-legacy-warnings.json.golden @@ -0,0 +1 @@ +{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"SystemStatus":null,"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":false,"SwapLimit":false,"KernelMemory":false,"KernelMemoryTCP":false,"CpuCfsPeriod":false,"CpuCfsQuota":false,"CPUShares":false,"CPUSet":false,"PidsLimit":false,"IPv4Forwarding":false,"BridgeNfIptables":false,"BridgeNfIp6tables":false,"Debug":true,"NFd":33,"OomKillDisable":false,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 
LTS","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","ClusterStore":"","ClusterAdvertise":"","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"Warnings":null,"ClientInfo":{"Debug":true,"Plugins":[],"Warnings":null}} diff --git a/cli/cli/command/system/testdata/docker-info-no-swarm.golden b/cli/cli/command/system/testdata/docker-info-no-swarm.golden new file mode 100644 index 00000000..b1539de0 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-no-swarm.golden @@ -0,0 +1,54 @@ +Client: + Debug Mode: true + +Server: + Containers: 0 + Running: 0 + Paused: 0 + Stopped: 0 + Images: 0 + Server Version: 17.06.1-ce + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Backing Filesystem: extfs + Dirs: 0 + Dirperm1 Supported: true + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Log: 
awslogs fluentd gcplogs gelf journald json-file logentries splunk syslog + Swarm: inactive + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 6e23458c129b551d5c9871e5174f6b1b7f6d1170 + runc version: 810190ceaa507aa2727d7ae6f4790c76ec150bd2 + init version: 949e6fa + Security Options: + apparmor + seccomp + Profile: default + Kernel Version: 4.4.0-87-generic + Operating System: Ubuntu 16.04.3 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.953GiB + Name: system-sample + ID: EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX + Docker Root Dir: /var/lib/docker + Debug Mode: true + File Descriptors: 33 + Goroutines: 135 + System Time: 2017-08-24T17:44:34.077811894Z + EventsListeners: 0 + Registry: https://index.docker.io/v1/ + Labels: + provider=digitalocean + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Live Restore Enabled: false + diff --git a/cli/cli/command/system/testdata/docker-info-no-swarm.json.golden b/cli/cli/command/system/testdata/docker-info-no-swarm.json.golden new file mode 100644 index 00000000..38851801 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-no-swarm.json.golden @@ -0,0 +1 @@ +{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 
Supported","true"]],"SystemStatus":null,"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 LTS","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","ClusterStore":"","ClusterAdvertise":"","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"Warni
ngs":null,"ClientInfo":{"Debug":true,"Plugins":[],"Warnings":null}} diff --git a/cli/cli/command/system/testdata/docker-info-plugins-warnings.golden b/cli/cli/command/system/testdata/docker-info-plugins-warnings.golden new file mode 100644 index 00000000..be6c8342 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-plugins-warnings.golden @@ -0,0 +1 @@ +WARNING: Plugin "/path/to/docker-badplugin" is not valid: something wrong diff --git a/cli/cli/command/system/testdata/docker-info-plugins.golden b/cli/cli/command/system/testdata/docker-info-plugins.golden new file mode 100644 index 00000000..182a076b --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-plugins.golden @@ -0,0 +1,57 @@ +Client: + Debug Mode: false + Plugins: + goodplugin: unit test is good (ACME Corp, 0.1.0) + unversionedplugin: this plugin has no version (ACME Corp) + +Server: + Containers: 0 + Running: 0 + Paused: 0 + Stopped: 0 + Images: 0 + Server Version: 17.06.1-ce + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Backing Filesystem: extfs + Dirs: 0 + Dirperm1 Supported: true + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file logentries splunk syslog + Swarm: inactive + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 6e23458c129b551d5c9871e5174f6b1b7f6d1170 + runc version: 810190ceaa507aa2727d7ae6f4790c76ec150bd2 + init version: 949e6fa + Security Options: + apparmor + seccomp + Profile: default + Kernel Version: 4.4.0-87-generic + Operating System: Ubuntu 16.04.3 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.953GiB + Name: system-sample + ID: EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX + Docker Root Dir: /var/lib/docker + Debug Mode: true + File Descriptors: 33 + Goroutines: 135 + System Time: 2017-08-24T17:44:34.077811894Z + EventsListeners: 0 + Registry: 
https://index.docker.io/v1/ + Labels: + provider=digitalocean + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Live Restore Enabled: false + diff --git a/cli/cli/command/system/testdata/docker-info-plugins.json.golden b/cli/cli/command/system/testdata/docker-info-plugins.json.golden new file mode 100644 index 00000000..90dc30fe --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-plugins.json.golden @@ -0,0 +1 @@ +{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"SystemStatus":null,"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 
LTS","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","ClusterStore":"","ClusterAdvertise":"","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"Warnings":null,"ClientInfo":{"Debug":false,"Plugins":[{"SchemaVersion":"0.1.0","Vendor":"ACME Corp","Version":"0.1.0","ShortDescription":"unit test is good","Name":"goodplugin","Path":"/path/to/docker-goodplugin"},{"SchemaVersion":"0.1.0","Vendor":"ACME Corp","ShortDescription":"this plugin has no version","Name":"unversionedplugin","Path":"/path/to/docker-unversionedplugin"},{"Name":"badplugin","Path":"/path/to/docker-badplugin","Err":"something wrong"}],"Warnings":null}} diff --git a/cli/cli/command/system/testdata/docker-info-warnings.golden b/cli/cli/command/system/testdata/docker-info-warnings.golden new file mode 100644 index 00000000..a7a4d792 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-warnings.golden 
@@ -0,0 +1,11 @@ +WARNING: No memory limit support +WARNING: No swap limit support +WARNING: No kernel memory limit support +WARNING: No oom kill disable support +WARNING: No cpu cfs quota support +WARNING: No cpu cfs period support +WARNING: No cpu shares support +WARNING: No cpuset support +WARNING: IPv4 forwarding is disabled +WARNING: bridge-nf-call-iptables is disabled +WARNING: bridge-nf-call-ip6tables is disabled diff --git a/cli/cli/command/system/testdata/docker-info-with-swarm.golden b/cli/cli/command/system/testdata/docker-info-with-swarm.golden new file mode 100644 index 00000000..e6c04f69 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-with-swarm.golden @@ -0,0 +1,76 @@ +Client: + Debug Mode: false + +Server: + Containers: 0 + Running: 0 + Paused: 0 + Stopped: 0 + Images: 0 + Server Version: 17.06.1-ce + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Backing Filesystem: extfs + Dirs: 0 + Dirperm1 Supported: true + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file logentries splunk syslog + Swarm: active + NodeID: qo2dfdig9mmxqkawulggepdih + Is Manager: true + ClusterID: 9vs5ygs0gguyyec4iqf2314c0 + Managers: 1 + Nodes: 1 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Force Rotate: 0 + Autolock Managers: true + Root Rotation In Progress: false + Node Address: 165.227.107.89 + Manager Addresses: + 165.227.107.89:2377 + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 6e23458c129b551d5c9871e5174f6b1b7f6d1170 + runc version: 810190ceaa507aa2727d7ae6f4790c76ec150bd2 + init version: 949e6fa + Security Options: + apparmor + seccomp + Profile: default + Kernel 
Version: 4.4.0-87-generic + Operating System: Ubuntu 16.04.3 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.953GiB + Name: system-sample + ID: EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX + Docker Root Dir: /var/lib/docker + Debug Mode: true + File Descriptors: 33 + Goroutines: 135 + System Time: 2017-08-24T17:44:34.077811894Z + EventsListeners: 0 + Registry: https://index.docker.io/v1/ + Labels: + provider=digitalocean + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Live Restore Enabled: false + diff --git a/cli/cli/command/system/testdata/docker-info-with-swarm.json.golden b/cli/cli/command/system/testdata/docker-info-with-swarm.json.golden new file mode 100644 index 00000000..1498c888 --- /dev/null +++ b/cli/cli/command/system/testdata/docker-info-with-swarm.json.golden @@ -0,0 +1 @@ +{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"SystemStatus":null,"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 
LTS","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","ClusterStore":"","ClusterAdvertise":"","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"qo2dfdig9mmxqkawulggepdih","NodeAddr":"165.227.107.89","LocalNodeState":"active","ControlAvailable":true,"Error":"","RemoteManagers":[{"NodeID":"qo2dfdig9mmxqkawulggepdih","Addr":"165.227.107.89:2377"}],"Nodes":1,"Managers":1,"Cluster":{"ID":"9vs5ygs0gguyyec4iqf2314c0","Version":{"Index":11},"CreatedAt":"2017-08-24T17:34:19.278062352Z","UpdatedAt":"2017-08-24T17:34:42.398815481Z","Spec":{"Name":"default","Labels":null,"Orchestration":{"TaskHistoryRetentionLimit":5},"Raft":{"SnapshotInterval":10000,"KeepOldSnapshots":0,"LogEntriesForSlowFollowers":500,"ElectionTick":3,"HeartbeatTick":1},"Dispatcher":{"HeartbeatPeriod":5000000000},"CAConfig":{"NodeCertExpiry":7776000000000000},"TaskDefaults":{},"EncryptionConfig":{"AutoLockManagers":true}},"TLSInfo":{"TrustRoot":"\n-----BEGIN 
CERTIFICATE-----\nMIIBajCCARCgAwIBAgIUaFCW5xsq8eyiJ+Pmcv3MCflMLnMwCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwODI0MTcyOTAwWhcNMzcwODE5MTcy\nOTAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABDy7NebyUJyUjWJDBUdnZoV6GBxEGKO4TZPNDwnxDxJcUdLVaB7WGa4/DLrW\nUfsVgh1JGik2VTiLuTMA1tLlNPOjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBQl16XFtaaXiUAwEuJptJlDjfKskDAKBggqhkjO\nPQQDAgNIADBFAiEAo9fTQNM5DP9bHVcTJYfl2Cay1bFu1E+lnpmN+EYJfeACIGKH\n1pCUkZ+D0IB6CiEZGWSHyLuXPM1rlP+I5KuS7sB8\n-----END CERTIFICATE-----\n","CertIssuerSubject":"MBMxETAPBgNVBAMTCHN3YXJtLWNh","CertIssuerPublicKey":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEPLs15vJQnJSNYkMFR2dmhXoYHEQYo7hNk80PCfEPElxR0tVoHtYZrj8MutZR+xWCHUkaKTZVOIu5MwDW0uU08w=="},"RootRotationInProgress":false,"DefaultAddrPool":null,"SubnetSize":0,"DataPathPort":0}},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"Warnings":null,"ClientInfo":{"Debug":false,"Plugins":[],"Warnings":null}} diff --git a/cli/cli/command/system/version.go b/cli/cli/command/system/version.go new file mode 100644 index 00000000..62170ca0 --- /dev/null +++ b/cli/cli/command/system/version.go @@ -0,0 +1,286 @@ +package system + +import ( + "context" + "fmt" + "runtime" + "sort" + "text/tabwriter" + "text/template" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + kubecontext "github.com/docker/cli/cli/context/kubernetes" + "github.com/docker/cli/cli/version" + "github.com/docker/cli/kubernetes" + "github.com/docker/cli/templates" + kubeapi "github.com/docker/compose-on-kubernetes/api" + "github.com/docker/docker/api/types" + 
"github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + kubernetesClient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +var versionTemplate = `{{with .Client -}} +Client:{{if ne .Platform.Name ""}} {{.Platform.Name}}{{end}} + Version: {{.Version}} + API version: {{.APIVersion}}{{if ne .APIVersion .DefaultAPIVersion}} (downgraded from {{.DefaultAPIVersion}}){{end}} + Go version: {{.GoVersion}} + Git commit: {{.GitCommit}} + Built: {{.BuildTime}} + OS/Arch: {{.Os}}/{{.Arch}} + Experimental: {{.Experimental}} +{{- end}} + +{{- if .ServerOK}}{{with .Server}} + +Server:{{if ne .Platform.Name ""}} {{.Platform.Name}}{{end}} + {{- range $component := .Components}} + {{$component.Name}}: + {{- if eq $component.Name "Engine" }} + Version: {{.Version}} + API version: {{index .Details "ApiVersion"}} (minimum version {{index .Details "MinAPIVersion"}}) + Go version: {{index .Details "GoVersion"}} + Git commit: {{index .Details "GitCommit"}} + Built: {{index .Details "BuildTime"}} + OS/Arch: {{index .Details "Os"}}/{{index .Details "Arch"}} + Experimental: {{index .Details "Experimental"}} + {{- else }} + Version: {{$component.Version}} + {{- $detailsOrder := getDetailsOrder $component}} + {{- range $key := $detailsOrder}} + {{$key}}: {{index $component.Details $key}} + {{- end}} + {{- end}} + {{- end}} + {{- end}}{{- end}}` + +type versionOptions struct { + format string + kubeConfig string +} + +// versionInfo contains version information of both the Client, and Server +type versionInfo struct { + Client clientVersion + Server *types.Version +} + +type clientVersion struct { + Platform struct{ Name string } `json:",omitempty"` + + Version string + APIVersion string `json:"ApiVersion"` + DefaultAPIVersion string `json:"DefaultAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + BuildTime string `json:",omitempty"` + Experimental bool +} + +type kubernetesVersion struct { + Kubernetes 
string + StackAPI string +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v versionInfo) ServerOK() bool { + return v.Server != nil +} + +// NewVersionCommand creates a new cobra.Command for `docker version` +func NewVersionCommand(dockerCli command.Cli) *cobra.Command { + var opts versionOptions + + cmd := &cobra.Command{ + Use: "version [OPTIONS]", + Short: "Show the Docker version information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runVersion(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.StringVar(&opts.kubeConfig, "kubeconfig", "", "Kubernetes config file") + flags.SetAnnotation("kubeconfig", "kubernetes", nil) + + return cmd +} + +func reformatDate(buildTime string) string { + t, errTime := time.Parse(time.RFC3339Nano, buildTime) + if errTime == nil { + return t.Format(time.ANSIC) + } + return buildTime +} + +func runVersion(dockerCli command.Cli, opts *versionOptions) error { + var err error + tmpl, err := newVersionTemplate(opts.format) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + orchestrator, err := dockerCli.StackOrchestrator("") + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + vd := versionInfo{ + Client: clientVersion{ + Platform: struct{ Name string }{version.PlatformName}, + Version: version.Version, + APIVersion: dockerCli.Client().ClientVersion(), + DefaultAPIVersion: dockerCli.DefaultVersion(), + GoVersion: runtime.Version(), + GitCommit: version.GitCommit, + BuildTime: reformatDate(version.BuildTime), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + Experimental: dockerCli.ClientInfo().HasExperimental, + }, + } + + sv, err := dockerCli.Client().ServerVersion(context.Background()) + if err == nil { + 
vd.Server = &sv + var kubeVersion *kubernetesVersion + if orchestrator.HasKubernetes() { + kubeVersion = getKubernetesVersion(dockerCli, opts.kubeConfig) + } + foundEngine := false + foundKubernetes := false + for _, component := range sv.Components { + switch component.Name { + case "Engine": + foundEngine = true + buildTime, ok := component.Details["BuildTime"] + if ok { + component.Details["BuildTime"] = reformatDate(buildTime) + } + case "Kubernetes": + foundKubernetes = true + if _, ok := component.Details["StackAPI"]; !ok && kubeVersion != nil { + component.Details["StackAPI"] = kubeVersion.StackAPI + } + } + } + + if !foundEngine { + vd.Server.Components = append(vd.Server.Components, types.ComponentVersion{ + Name: "Engine", + Version: sv.Version, + Details: map[string]string{ + "ApiVersion": sv.APIVersion, + "MinAPIVersion": sv.MinAPIVersion, + "GitCommit": sv.GitCommit, + "GoVersion": sv.GoVersion, + "Os": sv.Os, + "Arch": sv.Arch, + "BuildTime": reformatDate(vd.Server.BuildTime), + "Experimental": fmt.Sprintf("%t", sv.Experimental), + }, + }) + } + if !foundKubernetes && kubeVersion != nil { + vd.Server.Components = append(vd.Server.Components, types.ComponentVersion{ + Name: "Kubernetes", + Version: kubeVersion.Kubernetes, + Details: map[string]string{ + "StackAPI": kubeVersion.StackAPI, + }, + }) + } + } + if err2 := prettyPrintVersion(dockerCli, vd, tmpl); err2 != nil && err == nil { + err = err2 + } + return err +} + +func prettyPrintVersion(dockerCli command.Cli, vd versionInfo, tmpl *template.Template) error { + t := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 1, ' ', 0) + err := tmpl.Execute(t, vd) + t.Write([]byte("\n")) + t.Flush() + return err +} + +func newVersionTemplate(templateFormat string) (*template.Template, error) { + if templateFormat == "" { + templateFormat = versionTemplate + } + tmpl := templates.New("version").Funcs(template.FuncMap{"getDetailsOrder": getDetailsOrder}) + tmpl, err := tmpl.Parse(templateFormat) + + return tmpl, 
errors.Wrap(err, "Template parsing error") +} + +func getDetailsOrder(v types.ComponentVersion) []string { + out := make([]string, 0, len(v.Details)) + for k := range v.Details { + out = append(out, k) + } + sort.Strings(out) + return out +} + +func getKubernetesVersion(dockerCli command.Cli, kubeConfig string) *kubernetesVersion { + version := kubernetesVersion{ + Kubernetes: "Unknown", + StackAPI: "Unknown", + } + var ( + clientConfig clientcmd.ClientConfig + err error + ) + if dockerCli.CurrentContext() == "" { + clientConfig = kubeapi.NewKubernetesConfig(kubeConfig) + } else { + clientConfig, err = kubecontext.ConfigFromContext(dockerCli.CurrentContext(), dockerCli.ContextStore()) + } + if err != nil { + logrus.Debugf("failed to get Kubernetes configuration: %s", err) + return &version + } + config, err := clientConfig.ClientConfig() + if err != nil { + logrus.Debugf("failed to get Kubernetes client config: %s", err) + return &version + } + kubeClient, err := kubernetesClient.NewForConfig(config) + if err != nil { + logrus.Debugf("failed to get Kubernetes client: %s", err) + return &version + } + version.StackAPI = getStackVersion(kubeClient, dockerCli.ClientInfo().HasExperimental) + version.Kubernetes = getKubernetesServerVersion(kubeClient) + return &version +} + +func getStackVersion(client *kubernetesClient.Clientset, experimental bool) string { + apiVersion, err := kubernetes.GetStackAPIVersion(client, experimental) + if err != nil { + logrus.Debugf("failed to get Stack API version: %s", err) + return "Unknown" + } + return string(apiVersion) +} + +func getKubernetesServerVersion(client *kubernetesClient.Clientset) string { + kubeVersion, err := client.DiscoveryClient.ServerVersion() + if err != nil { + logrus.Debugf("failed to get Kubernetes server version: %s", err) + return "Unknown" + } + return kubeVersion.String() +} diff --git a/cli/cli/command/system/version_test.go b/cli/cli/command/system/version_test.go new file mode 100644 index 
00000000..0a4f47e3 --- /dev/null +++ b/cli/cli/command/system/version_test.go @@ -0,0 +1,113 @@ +package system + +import ( + "context" + "fmt" + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" +) + +func TestVersionWithoutServer(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + serverVersion: func(ctx context.Context) (types.Version, error) { + return types.Version{}, fmt.Errorf("no server") + }, + }) + cmd := NewVersionCommand(cli) + cmd.SetOutput(cli.Err()) + assert.ErrorContains(t, cmd.Execute(), "no server") + out := cli.OutBuffer().String() + // TODO: use an assertion like e2e/image/build_test.go:assertBuildOutput() + // instead of contains/not contains + assert.Check(t, is.Contains(out, "Client:")) + assert.Assert(t, !strings.Contains(out, "Server:"), "actual: %s", out) +} + +func TestVersionAlign(t *testing.T) { + vi := versionInfo{ + Client: clientVersion{ + Version: "18.99.5-ce", + APIVersion: "1.38", + DefaultAPIVersion: "1.38", + GitCommit: "deadbeef", + GoVersion: "go1.10.2", + Os: "linux", + Arch: "amd64", + BuildTime: "Wed May 30 22:21:05 2018", + Experimental: true, + }, + Server: &types.Version{}, + } + + vi.Server.Platform.Name = "Docker Enterprise Edition (EE) 2.0" + + vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{ + Name: "Engine", + Version: "17.06.2-ee-15", + Details: map[string]string{ + "ApiVersion": "1.30", + "MinAPIVersion": "1.12", + "GitCommit": "64ddfa6", + "GoVersion": "go1.8.7", + "Os": "linux", + "Arch": "amd64", + "BuildTime": "Mon Jul 9 23:38:38 2018", + "Experimental": "false", + }, + }) + + vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{ + Name: "Universal Control Plane", + Version: "17.06.2-ee-15", + Details: map[string]string{ + "Version": "3.0.3-tp2", + "ApiVersion": "1.30", + "Arch": "amd64", + "BuildTime": "Mon Jul 2 21:24:07 UTC 
2018", + "GitCommit": "4513922", + "GoVersion": "go1.9.4", + "MinApiVersion": "1.20", + "Os": "linux", + }, + }) + + vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{ + Name: "Kubernetes", + Version: "1.8+", + Details: map[string]string{ + "buildDate": "2018-04-26T16:51:21Z", + "compiler": "gc", + "gitCommit": "8d637aedf46b9c21dde723e29c645b9f27106fa5", + "gitTreeState": "clean", + "gitVersion": "v1.8.11-docker-8d637ae", + "goVersion": "go1.8.3", + "major": "1", + "minor": "8+", + "platform": "linux/amd64", + }, + }) + + vi.Server.Components = append(vi.Server.Components, types.ComponentVersion{ + Name: "Calico", + Version: "v3.0.8", + Details: map[string]string{ + "cni": "v2.0.6", + "kube-controllers": "v2.0.5", + "node": "v3.0.8", + }, + }) + + cli := test.NewFakeCli(&fakeClient{}) + tmpl, err := newVersionTemplate("") + assert.NilError(t, err) + assert.NilError(t, prettyPrintVersion(cli, vi, tmpl)) + assert.Check(t, golden.String(cli.OutBuffer().String(), "docker-client-version.golden")) + assert.Check(t, is.Equal("", cli.ErrBuffer().String())) +} diff --git a/cli/cli/command/task/client_test.go b/cli/cli/command/task/client_test.go new file mode 100644 index 00000000..9aa84977 --- /dev/null +++ b/cli/cli/command/task/client_test.go @@ -0,0 +1,29 @@ +package task + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.APIClient + nodeInspectWithRaw func(ref string) (swarm.Node, []byte, error) + serviceInspectWithRaw func(ref string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) +} + +func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, ref string) (swarm.Node, []byte, error) { + if cli.nodeInspectWithRaw != nil { + return cli.nodeInspectWithRaw(ref) + } + return swarm.Node{}, nil, nil +} + +func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, ref string, options 
types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if cli.serviceInspectWithRaw != nil { + return cli.serviceInspectWithRaw(ref, options) + } + return swarm.Service{}, nil, nil +} diff --git a/cli/cli/command/task/formatter.go b/cli/cli/command/task/formatter.go new file mode 100644 index 00000000..0958a1e8 --- /dev/null +++ b/cli/cli/command/task/formatter.go @@ -0,0 +1,149 @@ +package task + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +const ( + defaultTaskTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Image}}\t{{.Node}}\t{{.DesiredState}}\t{{.CurrentState}}\t{{.Error}}\t{{.Ports}}" + + nodeHeader = "NODE" + taskIDHeader = "ID" + desiredStateHeader = "DESIRED STATE" + currentStateHeader = "CURRENT STATE" + errorHeader = "ERROR" + + maxErrLength = 30 +) + +// NewTaskFormat returns a Format for rendering using a task Context +func NewTaskFormat(source string, quiet bool) formatter.Format { + switch source { + case formatter.TableFormatKey: + if quiet { + return formatter.DefaultQuietFormat + } + return defaultTaskTableFormat + case formatter.RawFormatKey: + if quiet { + return `id: {{.ID}}` + } + return `id: {{.ID}}\nname: {{.Name}}\nimage: {{.Image}}\nnode: {{.Node}}\ndesired_state: {{.DesiredState}}\ncurrent_state: {{.CurrentState}}\nerror: {{.Error}}\nports: {{.Ports}}\n` + } + return formatter.Format(source) +} + +// FormatWrite writes the context +func FormatWrite(ctx formatter.Context, tasks []swarm.Task, names map[string]string, nodes map[string]string) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, task := range tasks { + taskCtx := &taskContext{trunc: ctx.Trunc, task: task, name: names[task.ID], node: nodes[task.ID]} + if err := format(taskCtx); err != 
nil { + return err + } + } + return nil + } + taskCtx := taskContext{} + taskCtx.Header = formatter.SubHeaderContext{ + "ID": taskIDHeader, + "Name": formatter.NameHeader, + "Image": formatter.ImageHeader, + "Node": nodeHeader, + "DesiredState": desiredStateHeader, + "CurrentState": currentStateHeader, + "Error": errorHeader, + "Ports": formatter.PortsHeader, + } + return ctx.Write(&taskCtx, render) +} + +type taskContext struct { + formatter.HeaderContext + trunc bool + task swarm.Task + name string + node string +} + +func (c *taskContext) MarshalJSON() ([]byte, error) { + return formatter.MarshalJSON(c) +} + +func (c *taskContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.task.ID) + } + return c.task.ID +} + +func (c *taskContext) Name() string { + return c.name +} + +func (c *taskContext) Image() string { + image := c.task.Spec.ContainerSpec.Image + if c.trunc { + ref, err := reference.ParseNormalizedNamed(image) + if err == nil { + // update image string for display, (strips any digest) + if nt, ok := ref.(reference.NamedTagged); ok { + if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { + image = reference.FamiliarString(namedTagged) + } + } + } + } + return image +} + +func (c *taskContext) Node() string { + return c.node +} + +func (c *taskContext) DesiredState() string { + return command.PrettyPrint(c.task.DesiredState) +} + +func (c *taskContext) CurrentState() string { + return fmt.Sprintf("%s %s ago", + command.PrettyPrint(c.task.Status.State), + strings.ToLower(units.HumanDuration(time.Since(c.task.Status.Timestamp))), + ) +} + +func (c *taskContext) Error() string { + // Trim and quote the error message. 
+ taskErr := c.task.Status.Err + if c.trunc && len(taskErr) > maxErrLength { + taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1]) + } + if len(taskErr) > 0 { + taskErr = fmt.Sprintf("\"%s\"", taskErr) + } + return taskErr +} + +func (c *taskContext) Ports() string { + if len(c.task.Status.PortStatus.Ports) == 0 { + return "" + } + ports := []string{} + for _, pConfig := range c.task.Status.PortStatus.Ports { + ports = append(ports, fmt.Sprintf("*:%d->%d/%s", + pConfig.PublishedPort, + pConfig.TargetPort, + pConfig.Protocol, + )) + } + return strings.Join(ports, ",") +} diff --git a/cli/cli/command/task/formatter_test.go b/cli/cli/command/task/formatter_test.go new file mode 100644 index 00000000..2de2acc4 --- /dev/null +++ b/cli/cli/command/task/formatter_test.go @@ -0,0 +1,107 @@ +package task + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +func TestTaskContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + { + formatter.Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + { + formatter.Context{Format: NewTaskFormat("table", true)}, + `taskID1 +taskID2 +`, + }, + { + formatter.Context{Format: NewTaskFormat("table {{.Name}}\t{{.Node}}\t{{.Ports}}", false)}, + string(golden.Get(t, "task-context-write-table-custom.golden")), + }, + { + formatter.Context{Format: NewTaskFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + formatter.Context{Format: NewTaskFormat("raw", true)}, + `id: taskID1 +id: taskID2 +`, + }, + { + formatter.Context{Format: NewTaskFormat("{{.Name}} {{.Node}}", false)}, 
+ `foobar_baz foo1 +foobar_bar foo2 +`, + }, + } + + for _, testcase := range cases { + tasks := []swarm.Task{ + {ID: "taskID1"}, + {ID: "taskID2"}, + } + names := map[string]string{ + "taskID1": "foobar_baz", + "taskID2": "foobar_bar", + } + nodes := map[string]string{ + "taskID1": "foo1", + "taskID2": "foo2", + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := FormatWrite(testcase.context, tasks, names, nodes) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +func TestTaskContextWriteJSONField(t *testing.T) { + tasks := []swarm.Task{ + {ID: "taskID1"}, + {ID: "taskID2"}, + } + names := map[string]string{ + "taskID1": "foobar_baz", + "taskID2": "foobar_bar", + } + out := bytes.NewBufferString("") + err := FormatWrite(formatter.Context{Format: "{{json .ID}}", Output: out}, tasks, names, map[string]string{}) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal(tasks[i].ID, s)) + } +} diff --git a/cli/cli/command/task/print.go b/cli/cli/command/task/print.go new file mode 100644 index 00000000..761a5e8f --- /dev/null +++ b/cli/cli/command/task/print.go @@ -0,0 +1,93 @@ +package task + +import ( + "context" + "fmt" + "sort" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/docker/api/types/swarm" +) + +type tasksBySlot []swarm.Task + +func (t tasksBySlot) Len() int { + return len(t) +} + +func (t tasksBySlot) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +func (t tasksBySlot) Less(i, j int) bool { + // Sort by slot. 
+ if t[i].Slot != t[j].Slot { + return t[i].Slot < t[j].Slot + } + + // If same slot, sort by most recent. + return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) +} + +// Print task information in a format. +// Besides this, command `docker node ps ` +// and `docker stack ps` will call this, too. +func Print(ctx context.Context, dockerCli command.Cli, tasks []swarm.Task, resolver *idresolver.IDResolver, trunc, quiet bool, format string) error { + sort.Stable(tasksBySlot(tasks)) + + names := map[string]string{} + nodes := map[string]string{} + + tasksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: NewTaskFormat(format, quiet), + Trunc: trunc, + } + + prevName := "" + for _, task := range tasks { + serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) + if err != nil { + return err + } + + nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) + if err != nil { + return err + } + + var name string + if task.Slot != 0 { + name = fmt.Sprintf("%v.%v", serviceName, task.Slot) + } else { + name = fmt.Sprintf("%v.%v", serviceName, task.NodeID) + } + + // Indent the name if necessary + indentedName := name + if name == prevName { + indentedName = fmt.Sprintf(" \\_ %s", indentedName) + } + prevName = name + + names[task.ID] = name + if tasksCtx.Format.IsTable() { + names[task.ID] = indentedName + } + nodes[task.ID] = nodeValue + } + + return FormatWrite(tasksCtx, tasks, names, nodes) +} + +// DefaultFormat returns the default format from the config file, or table +// format if nothing is set in the config. 
+func DefaultFormat(configFile *configfile.ConfigFile, quiet bool) string { + if len(configFile.TasksFormat) > 0 && !quiet { + return configFile.TasksFormat + } + return formatter.TableFormatKey +} diff --git a/cli/cli/command/task/print_test.go b/cli/cli/command/task/print_test.go new file mode 100644 index 00000000..6fa6e586 --- /dev/null +++ b/cli/cli/command/task/print_test.go @@ -0,0 +1,128 @@ +package task + +import ( + "context" + "testing" + "time" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/cli/command/idresolver" + "github.com/docker/cli/internal/test" + // Import builders to get the builder function as package function + . "github.com/docker/cli/internal/test/builders" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestTaskPrintWithQuietOption(t *testing.T) { + quiet := true + trunc := false + noResolve := true + apiClient := &fakeClient{} + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{*Task(TaskID("id-foo"))} + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, formatter.TableFormatKey) + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-quiet-option.golden") +} + +func TestTaskPrintWithNoTruncOption(t *testing.T) { + quiet := false + trunc := false + noResolve := true + apiClient := &fakeClient{} + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task(TaskID("id-foo-yov6omdek8fg3k5stosyp2m50")), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .ID }}") + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-no-trunc-option.golden") +} + +func TestTaskPrintWithGlobalService(t *testing.T) { + quiet := false + trunc := false + noResolve := true + apiClient := &fakeClient{} + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ 
+ *Task(TaskServiceID("service-id-foo"), TaskNodeID("node-id-bar"), TaskSlot(0)), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .Name }}") + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-global-service.golden") +} + +func TestTaskPrintWithReplicatedService(t *testing.T) { + quiet := false + trunc := false + noResolve := true + apiClient := &fakeClient{} + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task(TaskServiceID("service-id-foo"), TaskSlot(1)), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .Name }}") + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-replicated-service.golden") +} + +func TestTaskPrintWithIndentation(t *testing.T) { + quiet := false + trunc := false + noResolve := false + apiClient := &fakeClient{ + serviceInspectWithRaw: func(ref string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return *Service(ServiceName("service-name-foo")), nil, nil + }, + nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-name-bar")), nil, nil + }, + } + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task( + TaskID("id-foo"), + TaskServiceID("service-id-foo"), + TaskNodeID("id-node"), + WithTaskSpec(TaskImage("myimage:mytag")), + TaskDesiredState(swarm.TaskStateReady), + WithStatus(TaskState(swarm.TaskStateFailed), Timestamp(time.Now().Add(-2*time.Hour))), + ), + *Task( + TaskID("id-bar"), + TaskServiceID("service-id-foo"), + TaskNodeID("id-node"), + WithTaskSpec(TaskImage("myimage:mytag")), + TaskDesiredState(swarm.TaskStateReady), + WithStatus(TaskState(swarm.TaskStateFailed), Timestamp(time.Now().Add(-2*time.Hour))), + ), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, formatter.TableFormatKey) + 
assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-indentation.golden") +} + +func TestTaskPrintWithResolution(t *testing.T) { + quiet := false + trunc := false + noResolve := false + apiClient := &fakeClient{ + serviceInspectWithRaw: func(ref string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) { + return *Service(ServiceName("service-name-foo")), nil, nil + }, + nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) { + return *Node(NodeName("node-name-bar")), nil, nil + }, + } + cli := test.NewFakeCli(apiClient) + tasks := []swarm.Task{ + *Task(TaskServiceID("service-id-foo"), TaskSlot(1)), + } + err := Print(context.Background(), cli, tasks, idresolver.New(apiClient, noResolve), trunc, quiet, "{{ .Name }} {{ .Node }}") + assert.NilError(t, err) + golden.Assert(t, cli.OutBuffer().String(), "task-print-with-resolution.golden") +} diff --git a/cli/cli/command/task/testdata/task-context-write-table-custom.golden b/cli/cli/command/task/testdata/task-context-write-table-custom.golden new file mode 100644 index 00000000..0f931ea9 --- /dev/null +++ b/cli/cli/command/task/testdata/task-context-write-table-custom.golden @@ -0,0 +1,3 @@ +NAME NODE PORTS +foobar_baz foo1 +foobar_bar foo2 diff --git a/cli/cli/command/task/testdata/task-print-with-global-service.golden b/cli/cli/command/task/testdata/task-print-with-global-service.golden new file mode 100644 index 00000000..fbc81248 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-global-service.golden @@ -0,0 +1 @@ +service-id-foo.node-id-bar diff --git a/cli/cli/command/task/testdata/task-print-with-indentation.golden b/cli/cli/command/task/testdata/task-print-with-indentation.golden new file mode 100644 index 00000000..8fa174a4 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-indentation.golden @@ -0,0 +1,3 @@ +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +id-foo service-name-foo.1 myimage:mytag 
node-name-bar Ready Failed 2 hours ago +id-bar \_ service-name-foo.1 myimage:mytag node-name-bar Ready Failed 2 hours ago diff --git a/cli/cli/command/task/testdata/task-print-with-no-trunc-option.golden b/cli/cli/command/task/testdata/task-print-with-no-trunc-option.golden new file mode 100644 index 00000000..184d2de2 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-no-trunc-option.golden @@ -0,0 +1 @@ +id-foo-yov6omdek8fg3k5stosyp2m50 diff --git a/cli/cli/command/task/testdata/task-print-with-quiet-option.golden b/cli/cli/command/task/testdata/task-print-with-quiet-option.golden new file mode 100644 index 00000000..e2faeb60 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-quiet-option.golden @@ -0,0 +1 @@ +id-foo diff --git a/cli/cli/command/task/testdata/task-print-with-replicated-service.golden b/cli/cli/command/task/testdata/task-print-with-replicated-service.golden new file mode 100644 index 00000000..9ecebdaf --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-replicated-service.golden @@ -0,0 +1 @@ +service-id-foo.1 diff --git a/cli/cli/command/task/testdata/task-print-with-resolution.golden b/cli/cli/command/task/testdata/task-print-with-resolution.golden new file mode 100644 index 00000000..747d1af4 --- /dev/null +++ b/cli/cli/command/task/testdata/task-print-with-resolution.golden @@ -0,0 +1 @@ +service-name-foo.1 node-name-bar diff --git a/cli/cli/command/testdata/ca.pem b/cli/cli/command/testdata/ca.pem new file mode 100644 index 00000000..a289ce7c --- /dev/null +++ b/cli/cli/command/testdata/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC+TCCAeGgAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMQswCQYDVQQGEwJGUjEP +MA0GA1UEChMGRG9ja2VyMB4XDTE5MDMwMzIzMDAwMFoXDTI0MDMwMTIzMDAwMFow +HjELMAkGA1UEBhMCRlIxDzANBgNVBAoTBkRvY2tlcjCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMkifL8Ne9B9LQ8+pKD20meVuV34Ol/xUcH/OfxbiBMa +HrlIKsGIaO9GraBLq1DJyaZ6sP6ntfwXqwBYQrAN2fQL1AmwMetqpNjby307XqRa 
+GUQekjG710LfAFKsS/yD/R8L944MFmTbYwGyjROExs8ZAA4fkA8SATzRXhM3a8dE +YcrXacZQqd5dwFFS/UyJQbMoNx7IgzrXySqpt3rV8qD8MAUebgshd2p9CQO6zzoU +ImOJImMc/15LFZymemm2KvzXTM4J9UYdibXZGzpxcnyGNCb4FVV0HF0Ya+NMDwvY +nNpW5rea64ppS8McejePRCmLS8DxMxKTLB7eW97LuDECAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQU6zuJSXHxniajbNcc4SHoM+fatvMwDgYDVR0P +AQH/BAQDAgE+MA0GCSqGSIb3DQEBCwUAA4IBAQB2l46NnKzoTOCuUjxGmUv3s1np +rENRWlq0mHjCzoYSocg/IcwY7fz41XkwTVV8O3h/Jm25YGnj4lqaXlEKYJ63W8eI +wGLcirUAORSspcf+jd7OOjluzYCtuvVtOKR8w22pp5oE/AooGaO5y0ysefZBopr4 +CNUNsEYhDKFg7tfj6Govi6+0PNxvB53we4nU7NhJNMaClhh/pi8zbeaEf67S6eKn +Z3DFqO+8FW4wEePLwhCftESCTwx6Q24v/WIYnzYOXC5mb2B9MwkyJXJIJxxPIeSs +PycNQ2kw7gk/TKkLMNQbX4fgFB0zfdofidTAkqOIqFHq/8iD2DYEZQFgCD3v +-----END CERTIFICATE----- diff --git a/cli/cli/command/trust.go b/cli/cli/command/trust.go new file mode 100644 index 00000000..65f24085 --- /dev/null +++ b/cli/cli/command/trust.go @@ -0,0 +1,15 @@ +package command + +import ( + "github.com/spf13/pflag" +) + +// AddTrustVerificationFlags adds content trust flags to the provided flagset +func AddTrustVerificationFlags(fs *pflag.FlagSet, v *bool, trusted bool) { + fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image verification") +} + +// AddTrustSigningFlags adds "signing" flags to the provided flagset +func AddTrustSigningFlags(fs *pflag.FlagSet, v *bool, trusted bool) { + fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image signing") +} diff --git a/cli/cli/command/trust/cmd.go b/cli/cli/command/trust/cmd.go new file mode 100644 index 00000000..bb6ceace --- /dev/null +++ b/cli/cli/command/trust/cmd.go @@ -0,0 +1,25 @@ +package trust + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewTrustCommand returns a cobra command for `trust` subcommands +func NewTrustCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "trust", + Short: "Manage trust on Docker images", + Args: cli.NoArgs, + RunE: 
command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newRevokeCommand(dockerCli), + newSignCommand(dockerCli), + newTrustKeyCommand(dockerCli), + newTrustSignerCommand(dockerCli), + newInspectCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/trust/common.go b/cli/cli/command/trust/common.go new file mode 100644 index 00000000..0a5b5471 --- /dev/null +++ b/cli/cli/command/trust/common.go @@ -0,0 +1,156 @@ +package trust + +import ( + "context" + "encoding/hex" + "fmt" + "sort" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" + "vbom.ml/util/sortorder" +) + +// trustTagKey represents a unique signed tag and hex-encoded hash pair +type trustTagKey struct { + SignedTag string + Digest string +} + +// trustTagRow encodes all human-consumable information for a signed tag, including signers +type trustTagRow struct { + trustTagKey + Signers []string +} + +// trustRepo represents consumable information about a trusted repository +type trustRepo struct { + Name string + SignedTags []trustTagRow + Signers []trustSigner + AdministrativeKeys []trustSigner +} + +// trustSigner represents a trusted signer in a trusted repository +// a signer is defined by a name and list of trustKeys +type trustSigner struct { + Name string `json:",omitempty"` + Keys []trustKey `json:",omitempty"` +} + +// trustKey contains information about trusted keys +type trustKey struct { + ID string `json:",omitempty"` +} + +// lookupTrustInfo returns processed signature and role information about a notary repository. +// This information is to be pretty printed or serialized into a machine-readable format. 
+func lookupTrustInfo(cli command.Cli, remote string) ([]trustTagRow, []client.RoleWithSignatures, []data.Role, error) { + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), remote) + if err != nil { + return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, err + } + tag := imgRefAndAuth.Tag() + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly) + if err != nil { + return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, trust.NotaryError(imgRefAndAuth.Reference().Name(), err) + } + + if err = clearChangeList(notaryRepo); err != nil { + return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, err + } + defer clearChangeList(notaryRepo) + + // Retrieve all released signatures, match them, and pretty print them + allSignedTargets, err := notaryRepo.GetAllTargetMetadataByName(tag) + if err != nil { + logrus.Debug(trust.NotaryError(remote, err)) + // print an empty table if we don't have signed targets, but have an initialized notary repo + if _, ok := err.(client.ErrNoSuchTarget); !ok { + return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf("No signatures or cannot access %s", remote) + } + } + signatureRows := matchReleasedSignatures(allSignedTargets) + + // get the administrative roles + adminRolesWithSigs, err := notaryRepo.ListRoles() + if err != nil { + return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf("No signers for %s", remote) + } + + // get delegation roles with the canonical key IDs + delegationRoles, err := notaryRepo.GetDelegationRoles() + if err != nil { + logrus.Debugf("no delegation roles found, or error fetching them for %s: %v", remote, err) + } + + return signatureRows, adminRolesWithSigs, delegationRoles, nil +} + +func formatAdminRole(roleWithSigs client.RoleWithSignatures) string { + adminKeyList := roleWithSigs.KeyIDs + sort.Strings(adminKeyList) + + var role 
string + switch roleWithSigs.Name { + case data.CanonicalTargetsRole: + role = "Repository Key" + case data.CanonicalRootRole: + role = "Root Key" + default: + return "" + } + return fmt.Sprintf("%s:\t%s\n", role, strings.Join(adminKeyList, ", ")) +} + +func getDelegationRoleToKeyMap(rawDelegationRoles []data.Role) map[string][]string { + signerRoleToKeyIDs := make(map[string][]string) + for _, delRole := range rawDelegationRoles { + switch delRole.Name { + case trust.ReleasesRole, data.CanonicalRootRole, data.CanonicalSnapshotRole, data.CanonicalTargetsRole, data.CanonicalTimestampRole: + continue + default: + signerRoleToKeyIDs[notaryRoleToSigner(delRole.Name)] = delRole.KeyIDs + } + } + return signerRoleToKeyIDs +} + +// aggregate all signers for a "released" hash+tagname pair. To be "released," the tag must have been +// signed into the "targets" or "targets/releases" role. Output is sorted by tag name +func matchReleasedSignatures(allTargets []client.TargetSignedStruct) []trustTagRow { + signatureRows := []trustTagRow{} + // do a first pass to get filter on tags signed into "targets" or "targets/releases" + releasedTargetRows := map[trustTagKey][]string{} + for _, tgt := range allTargets { + if isReleasedTarget(tgt.Role.Name) { + releasedKey := trustTagKey{tgt.Target.Name, hex.EncodeToString(tgt.Target.Hashes[notary.SHA256])} + releasedTargetRows[releasedKey] = []string{} + } + } + + // now fill out all signers on released keys + for _, tgt := range allTargets { + targetKey := trustTagKey{tgt.Target.Name, hex.EncodeToString(tgt.Target.Hashes[notary.SHA256])} + // only considered released targets + if _, ok := releasedTargetRows[targetKey]; ok && !isReleasedTarget(tgt.Role.Name) { + releasedTargetRows[targetKey] = append(releasedTargetRows[targetKey], notaryRoleToSigner(tgt.Role.Name)) + } + } + + // compile the final output as a sorted slice + for targetKey, signers := range releasedTargetRows { + signatureRows = append(signatureRows, trustTagRow{targetKey, 
signers}) + } + sort.Slice(signatureRows, func(i, j int) bool { + return sortorder.NaturalLess(signatureRows[i].SignedTag, signatureRows[j].SignedTag) + }) + return signatureRows +} diff --git a/cli/cli/command/trust/common_test.go b/cli/cli/command/trust/common_test.go new file mode 100644 index 00000000..279e7fa7 --- /dev/null +++ b/cli/cli/command/trust/common_test.go @@ -0,0 +1,33 @@ +package trust + +import ( + "testing" + + "github.com/docker/cli/cli/trust" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestMatchReleasedSignaturesSortOrder(t *testing.T) { + var releasesRole = data.DelegationRole{BaseRole: data.BaseRole{Name: trust.ReleasesRole}} + targets := []client.TargetSignedStruct{ + {Target: client.Target{Name: "target10-foo"}, Role: releasesRole}, + {Target: client.Target{Name: "target1-foo"}, Role: releasesRole}, + {Target: client.Target{Name: "target2-foo"}, Role: releasesRole}, + } + + rows := matchReleasedSignatures(targets) + + var targetNames []string + for _, r := range rows { + targetNames = append(targetNames, r.SignedTag) + } + expected := []string{ + "target1-foo", + "target2-foo", + "target10-foo", + } + assert.Check(t, is.DeepEqual(expected, targetNames)) +} diff --git a/cli/cli/command/trust/formatter.go b/cli/cli/command/trust/formatter.go new file mode 100644 index 00000000..5cf9e9d3 --- /dev/null +++ b/cli/cli/command/trust/formatter.go @@ -0,0 +1,132 @@ +package trust + +import ( + "sort" + "strings" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultTrustTagTableFormat = "table {{.SignedTag}}\t{{.Digest}}\t{{.Signers}}" + signedTagNameHeader = "SIGNED TAG" + trustedDigestHeader = "DIGEST" + signersHeader = "SIGNERS" + defaultSignerInfoTableFormat = "table {{.Signer}}\t{{.Keys}}" + signerNameHeader = "SIGNER" + keysHeader = "KEYS" +) + +// SignedTagInfo 
represents all formatted information needed to describe a signed tag: +// Name: name of the signed tag +// Digest: hex encoded digest of the contents +// Signers: list of entities who signed the tag +type SignedTagInfo struct { + Name string + Digest string + Signers []string +} + +// SignerInfo represents all formatted information needed to describe a signer: +// Name: name of the signer role +// Keys: the keys associated with the signer +type SignerInfo struct { + Name string + Keys []string +} + +// NewTrustTagFormat returns a Format for rendering using a trusted tag Context +func NewTrustTagFormat() formatter.Format { + return defaultTrustTagTableFormat +} + +// NewSignerInfoFormat returns a Format for rendering a signer role info Context +func NewSignerInfoFormat() formatter.Format { + return defaultSignerInfoTableFormat +} + +// TagWrite writes the context +func TagWrite(ctx formatter.Context, signedTagInfoList []SignedTagInfo) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, signedTag := range signedTagInfoList { + if err := format(&trustTagContext{s: signedTag}); err != nil { + return err + } + } + return nil + } + trustTagCtx := trustTagContext{} + trustTagCtx.Header = formatter.SubHeaderContext{ + "SignedTag": signedTagNameHeader, + "Digest": trustedDigestHeader, + "Signers": signersHeader, + } + return ctx.Write(&trustTagCtx, render) +} + +type trustTagContext struct { + formatter.HeaderContext + s SignedTagInfo +} + +// SignedTag returns the name of the signed tag +func (c *trustTagContext) SignedTag() string { + return c.s.Name +} + +// Digest returns the hex encoded digest associated with this signed tag +func (c *trustTagContext) Digest() string { + return c.s.Digest +} + +// Signers returns the sorted list of entities who signed this tag +func (c *trustTagContext) Signers() string { + sort.Strings(c.s.Signers) + return strings.Join(c.s.Signers, ", ") +} + +// SignerInfoWrite writes the context +func 
SignerInfoWrite(ctx formatter.Context, signerInfoList []SignerInfo) error { + render := func(format func(subContext formatter.SubContext) error) error { + for _, signerInfo := range signerInfoList { + if err := format(&signerInfoContext{ + trunc: ctx.Trunc, + s: signerInfo, + }); err != nil { + return err + } + } + return nil + } + signerInfoCtx := signerInfoContext{} + signerInfoCtx.Header = formatter.SubHeaderContext{ + "Signer": signerNameHeader, + "Keys": keysHeader, + } + return ctx.Write(&signerInfoCtx, render) +} + +type signerInfoContext struct { + formatter.HeaderContext + trunc bool + s SignerInfo +} + +// Keys returns the sorted list of keys associated with the signer +func (c *signerInfoContext) Keys() string { + sort.Strings(c.s.Keys) + truncatedKeys := []string{} + if c.trunc { + for _, keyID := range c.s.Keys { + truncatedKeys = append(truncatedKeys, stringid.TruncateID(keyID)) + } + return strings.Join(truncatedKeys, ", ") + } + return strings.Join(c.s.Keys, ", ") +} + +// Signer returns the name of the signer +func (c *signerInfoContext) Signer() string { + return c.s.Name +} diff --git a/cli/cli/command/trust/formatter_test.go b/cli/cli/command/trust/formatter_test.go new file mode 100644 index 00000000..300f6c8c --- /dev/null +++ b/cli/cli/command/trust/formatter_test.go @@ -0,0 +1,240 @@ +package trust + +import ( + "bytes" + "testing" + + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustTag(t *testing.T) { + digest := stringid.GenerateRandomID() + trustedTag := "tag" + + var ctx trustTagContext + + cases := []struct { + trustTagCtx trustTagContext + expValue string + call func() string + }{ + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: nil, + }, + }, + digest, + ctx.Digest, + }, + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: nil, + }, + }, + 
trustedTag, + ctx.SignedTag, + }, + // Empty signers makes a row with empty string + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: nil, + }, + }, + "", + ctx.Signers, + }, + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: []string{"alice", "bob", "claire"}, + }, + }, + "alice, bob, claire", + ctx.Signers, + }, + // alphabetic signing on Signers + { + trustTagContext{ + s: SignedTagInfo{Name: trustedTag, + Digest: digest, + Signers: []string{"claire", "bob", "alice"}, + }, + }, + "alice, bob, claire", + ctx.Signers, + }, + } + + for _, c := range cases { + ctx = c.trustTagCtx + v := c.call() + if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + } +} + +func TestTrustTagContextWrite(t *testing.T) { + + cases := []struct { + context formatter.Context + expected string + }{ + // Errors + { + formatter.Context{ + Format: "{{InvalidFunction}}", + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{ + Format: "{{nil}}", + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + formatter.Context{ + Format: NewTrustTagFormat(), + }, + `SIGNED TAG DIGEST SIGNERS +tag1 deadbeef alice +tag2 aaaaaaaa alice, bob +tag3 bbbbbbbb +`, + }, + } + + for _, testcase := range cases { + signedTags := []SignedTagInfo{ + {Name: "tag1", Digest: "deadbeef", Signers: []string{"alice"}}, + {Name: "tag2", Digest: "aaaaaaaa", Signers: []string{"alice", "bob"}}, + {Name: "tag3", Digest: "bbbbbbbb", Signers: []string{}}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := TagWrite(testcase.context, signedTags) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} + +// With no trust data, the TagWrite will print an empty table: +// it's up to the caller 
to decide whether or not to print this versus an error +func TestTrustTagContextEmptyWrite(t *testing.T) { + + emptyCase := struct { + context formatter.Context + expected string + }{ + formatter.Context{ + Format: NewTrustTagFormat(), + }, + `SIGNED TAG DIGEST SIGNERS +`, + } + + emptySignedTags := []SignedTagInfo{} + out := bytes.NewBufferString("") + emptyCase.context.Output = out + err := TagWrite(emptyCase.context, emptySignedTags) + assert.NilError(t, err) + assert.Check(t, is.Equal(emptyCase.expected, out.String())) +} + +func TestSignerInfoContextEmptyWrite(t *testing.T) { + emptyCase := struct { + context formatter.Context + expected string + }{ + formatter.Context{ + Format: NewSignerInfoFormat(), + }, + `SIGNER KEYS +`, + } + emptySignerInfo := []SignerInfo{} + out := bytes.NewBufferString("") + emptyCase.context.Output = out + err := SignerInfoWrite(emptyCase.context, emptySignerInfo) + assert.NilError(t, err) + assert.Check(t, is.Equal(emptyCase.expected, out.String())) +} + +func TestSignerInfoContextWrite(t *testing.T) { + cases := []struct { + context formatter.Context + expected string + }{ + // Errors + { + formatter.Context{ + Format: "{{InvalidFunction}}", + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + formatter.Context{ + Format: "{{nil}}", + }, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + formatter.Context{ + Format: NewSignerInfoFormat(), + Trunc: true, + }, + `SIGNER KEYS +alice key11, key12 +bob key21 +eve foobarbazqux, key31, key32 +`, + }, + // No truncation + { + formatter.Context{ + Format: NewSignerInfoFormat(), + }, + `SIGNER KEYS +alice key11, key12 +bob key21 +eve foobarbazquxquux, key31, key32 +`, + }, + } + + for _, testcase := range cases { + signerInfo := []SignerInfo{ + {Name: "alice", Keys: []string{"key11", "key12"}}, + {Name: "bob", Keys: []string{"key21"}}, + {Name: "eve", Keys: []string{"key31", 
"key32", "foobarbazquxquux"}}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := SignerInfoWrite(testcase.context, signerInfo) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Check(t, is.Equal(testcase.expected, out.String())) + } + } +} diff --git a/cli/cli/command/trust/helpers.go b/cli/cli/command/trust/helpers.go new file mode 100644 index 00000000..b2819d2e --- /dev/null +++ b/cli/cli/command/trust/helpers.go @@ -0,0 +1,47 @@ +package trust + +import ( + "strings" + + "github.com/docker/cli/cli/trust" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +const releasedRoleName = "Repo Admin" +const releasesRoleTUFName = "targets/releases" + +// isReleasedTarget checks if a role name is "released": +// either targets/releases or targets TUF roles +func isReleasedTarget(role data.RoleName) bool { + return role == data.CanonicalTargetsRole || role == trust.ReleasesRole +} + +// notaryRoleToSigner converts TUF role name to a human-understandable signer name +func notaryRoleToSigner(tufRole data.RoleName) string { + // don't show a signer for "targets" or "targets/releases" + if isReleasedTarget(data.RoleName(tufRole.String())) { + return releasedRoleName + } + return strings.TrimPrefix(tufRole.String(), "targets/") +} + +// clearChangelist clears the notary staging changelist. +func clearChangeList(notaryRepo client.Repository) error { + cl, err := notaryRepo.GetChangelist() + if err != nil { + return err + } + return cl.Clear("") +} + +// getOrGenerateRootKeyAndInitRepo initializes the notary repository +// with a remotely managed snapshot key. The initialization will use +// an existing root key if one is found, else a new one will be generated. 
+func getOrGenerateRootKeyAndInitRepo(notaryRepo client.Repository) error { + rootKey, err := getOrGenerateNotaryKey(notaryRepo, data.CanonicalRootRole) + if err != nil { + return err + } + return notaryRepo.Initialize([]string{rootKey.ID()}, data.CanonicalSnapshotRole) +} diff --git a/cli/cli/command/trust/helpers_test.go b/cli/cli/command/trust/helpers_test.go new file mode 100644 index 00000000..fab61214 --- /dev/null +++ b/cli/cli/command/trust/helpers_test.go @@ -0,0 +1,24 @@ +package trust + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustpinning" + "gotest.tools/assert" +) + +func TestGetOrGenerateNotaryKeyAndInitRepo(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + + err = getOrGenerateRootKeyAndInitRepo(notaryRepo) + assert.Error(t, err, "client is offline") +} diff --git a/cli/cli/command/trust/inspect.go b/cli/cli/command/trust/inspect.go new file mode 100644 index 00000000..d7370bd0 --- /dev/null +++ b/cli/cli/command/trust/inspect.go @@ -0,0 +1,115 @@ +package trust + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/tuf/data" +) + +type inspectOptions struct { + remotes []string + // FIXME(n4ss): this is consistent with `docker service inspect` but we should provide + // a `--format` flag too. 
(format and pretty-print should be exclusive) + prettyPrint bool +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + options := inspectOptions{} + cmd := &cobra.Command{ + Use: "inspect IMAGE[:TAG] [IMAGE[:TAG]...]", + Short: "Return low-level information about keys and signatures", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.remotes = args + + return runInspect(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&options.prettyPrint, "pretty", false, "Print the information in a human friendly format") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + if opts.prettyPrint { + var err error + + for index, remote := range opts.remotes { + if err = prettyPrintTrustInfo(dockerCli, remote); err != nil { + return err + } + + // Additional separator between the inspection output of each image + if index < len(opts.remotes)-1 { + fmt.Fprint(dockerCli.Out(), "\n\n") + } + } + + return err + } + + getRefFunc := func(ref string) (interface{}, []byte, error) { + i, err := getRepoTrustInfo(dockerCli, ref) + return nil, i, err + } + return inspect.Inspect(dockerCli.Out(), opts.remotes, "", getRefFunc) +} + +func getRepoTrustInfo(cli command.Cli, remote string) ([]byte, error) { + signatureRows, adminRolesWithSigs, delegationRoles, err := lookupTrustInfo(cli, remote) + if err != nil { + return []byte{}, err + } + // process the signatures to include repo admin if signed by the base targets role + for idx, sig := range signatureRows { + if len(sig.Signers) == 0 { + signatureRows[idx].Signers = append(sig.Signers, releasedRoleName) + } + } + + signerList, adminList := []trustSigner{}, []trustSigner{} + + signerRoleToKeyIDs := getDelegationRoleToKeyMap(delegationRoles) + + for signerName, signerKeys := range signerRoleToKeyIDs { + signerKeyList := []trustKey{} + for _, keyID := range signerKeys { + signerKeyList = append(signerKeyList, trustKey{ID: 
keyID}) + } + signerList = append(signerList, trustSigner{signerName, signerKeyList}) + } + sort.Slice(signerList, func(i, j int) bool { return signerList[i].Name > signerList[j].Name }) + + for _, adminRole := range adminRolesWithSigs { + switch adminRole.Name { + case data.CanonicalRootRole: + rootKeys := []trustKey{} + for _, keyID := range adminRole.KeyIDs { + rootKeys = append(rootKeys, trustKey{ID: keyID}) + } + adminList = append(adminList, trustSigner{"Root", rootKeys}) + case data.CanonicalTargetsRole: + targetKeys := []trustKey{} + for _, keyID := range adminRole.KeyIDs { + targetKeys = append(targetKeys, trustKey{ID: keyID}) + } + adminList = append(adminList, trustSigner{"Repository", targetKeys}) + } + } + sort.Slice(adminList, func(i, j int) bool { return adminList[i].Name > adminList[j].Name }) + + return json.Marshal(trustRepo{ + Name: remote, + SignedTags: signatureRows, + Signers: signerList, + AdministrativeKeys: adminList, + }) +} diff --git a/cli/cli/command/trust/inspect_pretty.go b/cli/cli/command/trust/inspect_pretty.go new file mode 100644 index 00000000..dbe0f795 --- /dev/null +++ b/cli/cli/command/trust/inspect_pretty.go @@ -0,0 +1,93 @@ +package trust + +import ( + "fmt" + "io" + "sort" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/theupdateframework/notary/client" + "vbom.ml/util/sortorder" +) + +func prettyPrintTrustInfo(cli command.Cli, remote string) error { + signatureRows, adminRolesWithSigs, delegationRoles, err := lookupTrustInfo(cli, remote) + if err != nil { + return err + } + + if len(signatureRows) > 0 { + fmt.Fprintf(cli.Out(), "\nSignatures for %s\n\n", remote) + + if err := printSignatures(cli.Out(), signatureRows); err != nil { + return err + } + } else { + fmt.Fprintf(cli.Out(), "\nNo signatures for %s\n\n", remote) + } + signerRoleToKeyIDs := getDelegationRoleToKeyMap(delegationRoles) + + // If we do not have additional signers, do not display + if 
len(signerRoleToKeyIDs) > 0 { + fmt.Fprintf(cli.Out(), "\nList of signers and their keys for %s\n\n", remote) + if err := printSignerInfo(cli.Out(), signerRoleToKeyIDs); err != nil { + return err + } + } + + // This will always have the root and targets information + fmt.Fprintf(cli.Out(), "\nAdministrative keys for %s\n\n", remote) + printSortedAdminKeys(cli.Out(), adminRolesWithSigs) + return nil +} + +func printSortedAdminKeys(out io.Writer, adminRoles []client.RoleWithSignatures) { + sort.Slice(adminRoles, func(i, j int) bool { return adminRoles[i].Name > adminRoles[j].Name }) + for _, adminRole := range adminRoles { + if formattedAdminRole := formatAdminRole(adminRole); formattedAdminRole != "" { + fmt.Fprintf(out, " %s", formattedAdminRole) + } + } +} + +// pretty print with ordered rows +func printSignatures(out io.Writer, signatureRows []trustTagRow) error { + trustTagCtx := formatter.Context{ + Output: out, + Format: NewTrustTagFormat(), + } + // convert the formatted type before printing + formattedTags := []SignedTagInfo{} + for _, sigRow := range signatureRows { + formattedSigners := sigRow.Signers + if len(formattedSigners) == 0 { + formattedSigners = append(formattedSigners, fmt.Sprintf("(%s)", releasedRoleName)) + } + formattedTags = append(formattedTags, SignedTagInfo{ + Name: sigRow.SignedTag, + Digest: sigRow.Digest, + Signers: formattedSigners, + }) + } + return TagWrite(trustTagCtx, formattedTags) +} + +func printSignerInfo(out io.Writer, roleToKeyIDs map[string][]string) error { + signerInfoCtx := formatter.Context{ + Output: out, + Format: NewSignerInfoFormat(), + Trunc: true, + } + formattedSignerInfo := []SignerInfo{} + for name, keyIDs := range roleToKeyIDs { + formattedSignerInfo = append(formattedSignerInfo, SignerInfo{ + Name: name, + Keys: keyIDs, + }) + } + sort.Slice(formattedSignerInfo, func(i, j int) bool { + return sortorder.NaturalLess(formattedSignerInfo[i].Name, formattedSignerInfo[j].Name) + }) + return 
SignerInfoWrite(signerInfoCtx, formattedSignerInfo) +} diff --git a/cli/cli/command/trust/inspect_pretty_test.go b/cli/cli/command/trust/inspect_pretty_test.go new file mode 100644 index 00000000..13d436d1 --- /dev/null +++ b/cli/cli/command/trust/inspect_pretty_test.go @@ -0,0 +1,476 @@ +package trust + +import ( + "bytes" + "context" + "encoding/hex" + "io" + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/internal/test" + notaryfake "github.com/docker/cli/internal/test/notary" + "github.com/docker/docker/api/types" + dockerClient "github.com/docker/docker/client" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/utils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/golden" +) + +// TODO(n4ss): remove common tests with the regular inspect command + +type fakeClient struct { + dockerClient.Client +} + +func (c *fakeClient) Info(ctx context.Context) (types.Info, error) { + return types.Info{}, nil +} + +func (c *fakeClient) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + return types.ImageInspect{}, []byte{}, nil +} + +func (c *fakeClient) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + return &utils.NoopCloser{Reader: bytes.NewBuffer([]byte{})}, nil +} + +func TestTrustInspectPrettyCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires at least 1 argument", + }, + { + name: "sha-reference", + args: []string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: "invalid-img-reference", + args: []string{"ALPINE"}, + expectedError: "invalid reference format", + }, + } + for _, tc := 
range testCases { + cmd := newInspectCommand( + test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + cmd.Flags().Set("pretty", "true") + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestTrustInspectPrettyCommandOfflineErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"nonexistent-reg-name.io/image"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access nonexistent-reg-name.io/image") + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd = newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"nonexistent-reg-name.io/image:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access nonexistent-reg-name.io/image") +} + +func TestTrustInspectPrettyCommandUninitializedErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetUninitializedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"reg/unsigned-img"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access reg/unsigned-img") + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetUninitializedNotaryRepository) + cmd = newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"reg/unsigned-img:tag"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "No signatures or cannot access reg/unsigned-img:tag") +} + +func TestTrustInspectPrettyCommandEmptyNotaryRepoErrors(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd := 
newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"reg/img:unsigned-tag"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "No signatures for reg/img:unsigned-tag")) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Administrative keys for reg/img")) + + cli = test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd = newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"reg/img"}) + cmd.SetOutput(ioutil.Discard) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "No signatures for reg/img")) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Administrative keys for reg/img")) +} + +func TestTrustInspectPrettyCommandFullRepoWithoutSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedWithNoSignersNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"signed-repo"}) + assert.NilError(t, cmd.Execute()) + + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-pretty-full-repo-no-signers.golden") +} + +func TestTrustInspectPrettyCommandOneTagWithoutSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedWithNoSignersNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"signed-repo:green"}) + assert.NilError(t, cmd.Execute()) + + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-pretty-one-tag-no-signers.golden") +} + +func TestTrustInspectPrettyCommandFullRepoWithSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"signed-repo"}) + assert.NilError(t, 
cmd.Execute()) + + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-pretty-full-repo-with-signers.golden") +} + +func TestTrustInspectPrettyCommandUnsignedTagInSignedRepo(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + cmd := newInspectCommand(cli) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs([]string{"signed-repo:unsigned"}) + assert.NilError(t, cmd.Execute()) + + golden.Assert(t, cli.OutBuffer().String(), "trust-inspect-pretty-unsigned-tag-with-signers.golden") +} + +func TestNotaryRoleToSigner(t *testing.T) { + assert.Check(t, is.Equal(releasedRoleName, notaryRoleToSigner(data.CanonicalTargetsRole))) + assert.Check(t, is.Equal(releasedRoleName, notaryRoleToSigner(trust.ReleasesRole))) + assert.Check(t, is.Equal("signer", notaryRoleToSigner("targets/signer"))) + assert.Check(t, is.Equal("docker/signer", notaryRoleToSigner("targets/docker/signer"))) + + // It's nonsense for other base roles to have signed off on a target, but this function leaves role names intact + for _, role := range data.BaseRoles { + if role == data.CanonicalTargetsRole { + continue + } + assert.Check(t, is.Equal(role.String(), notaryRoleToSigner(role))) + } + assert.Check(t, is.Equal("notarole", notaryRoleToSigner(data.RoleName("notarole")))) +} + +// check if a role name is "released": either targets/releases or targets TUF roles +func TestIsReleasedTarget(t *testing.T) { + assert.Check(t, isReleasedTarget(trust.ReleasesRole)) + for _, role := range data.BaseRoles { + assert.Check(t, is.Equal(role == data.CanonicalTargetsRole, isReleasedTarget(role))) + } + assert.Check(t, !isReleasedTarget(data.RoleName("targets/not-releases"))) + assert.Check(t, !isReleasedTarget(data.RoleName("random"))) + assert.Check(t, !isReleasedTarget(data.RoleName("targets/releases/subrole"))) +} + +// creates a mock delegation with a given name and no keys +func mockDelegationRoleWithName(name string) data.DelegationRole { + 
baseRole := data.NewBaseRole( + data.RoleName(name), + notary.MinThreshold, + ) + return data.DelegationRole{BaseRole: baseRole, Paths: []string{}} +} + +func TestMatchEmptySignatures(t *testing.T) { + // first try empty targets + emptyTgts := []client.TargetSignedStruct{} + + matchedSigRows := matchReleasedSignatures(emptyTgts) + assert.Check(t, is.Len(matchedSigRows, 0)) +} + +func TestMatchUnreleasedSignatures(t *testing.T) { + // try an "unreleased" target with 3 signatures, 0 rows will appear + unreleasedTgts := []client.TargetSignedStruct{} + + tgt := client.Target{Name: "unreleased", Hashes: data.Hashes{notary.SHA256: []byte("hash")}} + for _, unreleasedRole := range []string{"targets/a", "targets/b", "targets/c"} { + unreleasedTgts = append(unreleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: tgt}) + } + + matchedSigRows := matchReleasedSignatures(unreleasedTgts) + assert.Check(t, is.Len(matchedSigRows, 0)) +} + +func TestMatchOneReleasedSingleSignature(t *testing.T) { + // now try only 1 "released" target with no additional sigs, 1 row will appear with 0 signers + oneReleasedTgt := []client.TargetSignedStruct{} + + // make and append the "released" target to our mock input + releasedTgt := client.Target{Name: "released", Hashes: data.Hashes{notary.SHA256: []byte("released-hash")}} + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: releasedTgt}) + + // make and append 3 non-released signatures on the "unreleased" target + unreleasedTgt := client.Target{Name: "unreleased", Hashes: data.Hashes{notary.SHA256: []byte("hash")}} + for _, unreleasedRole := range []string{"targets/a", "targets/b", "targets/c"} { + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: unreleasedTgt}) + } + + matchedSigRows := matchReleasedSignatures(oneReleasedTgt) + assert.Check(t, 
is.Len(matchedSigRows, 1)) + + outputRow := matchedSigRows[0] + // Empty signers because "targets/releases" doesn't show up + assert.Check(t, is.Len(outputRow.Signers, 0)) + assert.Check(t, is.Equal(releasedTgt.Name, outputRow.SignedTag)) + assert.Check(t, is.Equal(hex.EncodeToString(releasedTgt.Hashes[notary.SHA256]), outputRow.Digest)) +} + +func TestMatchOneReleasedMultiSignature(t *testing.T) { + // now try only 1 "released" target with 3 additional sigs, 1 row will appear with 3 signers + oneReleasedTgt := []client.TargetSignedStruct{} + + // make and append the "released" target to our mock input + releasedTgt := client.Target{Name: "released", Hashes: data.Hashes{notary.SHA256: []byte("released-hash")}} + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: releasedTgt}) + + // make and append 3 non-released signatures on both the "released" and "unreleased" targets + unreleasedTgt := client.Target{Name: "unreleased", Hashes: data.Hashes{notary.SHA256: []byte("hash")}} + for _, unreleasedRole := range []string{"targets/a", "targets/b", "targets/c"} { + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: unreleasedTgt}) + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: releasedTgt}) + } + + matchedSigRows := matchReleasedSignatures(oneReleasedTgt) + assert.Check(t, is.Len(matchedSigRows, 1)) + + outputRow := matchedSigRows[0] + // We should have three signers + assert.Check(t, is.DeepEqual(outputRow.Signers, []string{"a", "b", "c"})) + assert.Check(t, is.Equal(releasedTgt.Name, outputRow.SignedTag)) + assert.Check(t, is.Equal(hex.EncodeToString(releasedTgt.Hashes[notary.SHA256]), outputRow.Digest)) +} + +func TestMatchMultiReleasedMultiSignature(t *testing.T) { + // now try 3 "released" targets with additional sigs to show 3 rows as 
follows: + // target-a is signed by targets/releases and targets/a - a will be the signer + // target-b is signed by targets/releases, targets/a, targets/b - a and b will be the signers + // target-c is signed by targets/releases, targets/a, targets/b, targets/c - a, b, and c will be the signers + multiReleasedTgts := []client.TargetSignedStruct{} + // make target-a, target-b, and target-c + targetA := client.Target{Name: "target-a", Hashes: data.Hashes{notary.SHA256: []byte("target-a-hash")}} + targetB := client.Target{Name: "target-b", Hashes: data.Hashes{notary.SHA256: []byte("target-b-hash")}} + targetC := client.Target{Name: "target-c", Hashes: data.Hashes{notary.SHA256: []byte("target-c-hash")}} + + // have targets/releases "sign" on all of these targets so they are released + multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: targetA}) + multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: targetB}) + multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: targetC}) + + // targets/a signs off on all three targets (target-a, target-b, target-c): + for _, tgt := range []client.Target{targetA, targetB, targetC} { + multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/a"), Target: tgt}) + } + + // targets/b signs off on the final two targets (target-b, target-c): + for _, tgt := range []client.Target{targetB, targetC} { + multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/b"), Target: tgt}) + } + + // targets/c only signs off on the last target (target-c): + multiReleasedTgts = append(multiReleasedTgts, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/c"), Target: targetC}) + + 
matchedSigRows := matchReleasedSignatures(multiReleasedTgts) + assert.Check(t, is.Len(matchedSigRows, 3)) + + // note that the output is sorted by tag name, so we can reliably index to validate data: + outputTargetA := matchedSigRows[0] + assert.Check(t, is.DeepEqual(outputTargetA.Signers, []string{"a"})) + assert.Check(t, is.Equal(targetA.Name, outputTargetA.SignedTag)) + assert.Check(t, is.Equal(hex.EncodeToString(targetA.Hashes[notary.SHA256]), outputTargetA.Digest)) + + outputTargetB := matchedSigRows[1] + assert.Check(t, is.DeepEqual(outputTargetB.Signers, []string{"a", "b"})) + assert.Check(t, is.Equal(targetB.Name, outputTargetB.SignedTag)) + assert.Check(t, is.Equal(hex.EncodeToString(targetB.Hashes[notary.SHA256]), outputTargetB.Digest)) + + outputTargetC := matchedSigRows[2] + assert.Check(t, is.DeepEqual(outputTargetC.Signers, []string{"a", "b", "c"})) + assert.Check(t, is.Equal(targetC.Name, outputTargetC.SignedTag)) + assert.Check(t, is.Equal(hex.EncodeToString(targetC.Hashes[notary.SHA256]), outputTargetC.Digest)) +} + +func TestMatchReleasedSignatureFromTargets(t *testing.T) { + // now try only 1 "released" target with no additional sigs, one rows will appear + oneReleasedTgt := []client.TargetSignedStruct{} + // make and append the "released" target to our mock input + releasedTgt := client.Target{Name: "released", Hashes: data.Hashes{notary.SHA256: []byte("released-hash")}} + oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(data.CanonicalTargetsRole.String()), Target: releasedTgt}) + matchedSigRows := matchReleasedSignatures(oneReleasedTgt) + assert.Check(t, is.Len(matchedSigRows, 1)) + outputRow := matchedSigRows[0] + // Empty signers because "targets" doesn't show up + assert.Check(t, is.Len(outputRow.Signers, 0)) + assert.Check(t, is.Equal(releasedTgt.Name, outputRow.SignedTag)) + assert.Check(t, is.Equal(hex.EncodeToString(releasedTgt.Hashes[notary.SHA256]), outputRow.Digest)) +} + +func 
TestGetSignerRolesWithKeyIDs(t *testing.T) { + roles := []data.Role{ + { + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: "targets/alice", + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key21", "key22"}, + }, + Name: "targets/releases", + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key31"}, + }, + Name: data.CanonicalTargetsRole, + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key41", "key01"}, + }, + Name: data.CanonicalRootRole, + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key51"}, + }, + Name: data.CanonicalSnapshotRole, + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key61"}, + }, + Name: data.CanonicalTimestampRole, + }, + { + RootRole: data.RootRole{ + KeyIDs: []string{"key71", "key72"}, + }, + Name: "targets/bob", + }, + } + expectedSignerRoleToKeyIDs := map[string][]string{ + "alice": {"key11"}, + "bob": {"key71", "key72"}, + } + + var roleWithSigs []client.RoleWithSignatures + for _, role := range roles { + roleWithSig := client.RoleWithSignatures{Role: role, Signatures: nil} + roleWithSigs = append(roleWithSigs, roleWithSig) + } + signerRoleToKeyIDs := getDelegationRoleToKeyMap(roles) + assert.Check(t, is.DeepEqual(expectedSignerRoleToKeyIDs, signerRoleToKeyIDs)) +} + +func TestFormatAdminRole(t *testing.T) { + aliceRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: "targets/alice", + } + aliceRoleWithSigs := client.RoleWithSignatures{Role: aliceRole, Signatures: nil} + assert.Check(t, is.Equal("", formatAdminRole(aliceRoleWithSigs))) + + releasesRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: "targets/releases", + } + releasesRoleWithSigs := client.RoleWithSignatures{Role: releasesRole, Signatures: nil} + assert.Check(t, is.Equal("", formatAdminRole(releasesRoleWithSigs))) + + timestampRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: data.CanonicalTimestampRole, + } + 
timestampRoleWithSigs := client.RoleWithSignatures{Role: timestampRole, Signatures: nil} + assert.Check(t, is.Equal("", formatAdminRole(timestampRoleWithSigs))) + + snapshotRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: data.CanonicalSnapshotRole, + } + snapshotRoleWithSigs := client.RoleWithSignatures{Role: snapshotRole, Signatures: nil} + assert.Check(t, is.Equal("", formatAdminRole(snapshotRoleWithSigs))) + + rootRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key11"}, + }, + Name: data.CanonicalRootRole, + } + rootRoleWithSigs := client.RoleWithSignatures{Role: rootRole, Signatures: nil} + assert.Check(t, is.Equal("Root Key:\tkey11\n", formatAdminRole(rootRoleWithSigs))) + + targetsRole := data.Role{ + RootRole: data.RootRole{ + KeyIDs: []string{"key99", "abc", "key11"}, + }, + Name: data.CanonicalTargetsRole, + } + targetsRoleWithSigs := client.RoleWithSignatures{Role: targetsRole, Signatures: nil} + assert.Check(t, is.Equal("Repository Key:\tabc, key11, key99\n", formatAdminRole(targetsRoleWithSigs))) +} + +func TestPrintSignerInfoSortOrder(t *testing.T) { + roleToKeyIDs := map[string][]string{ + "signer2-foo": {"B"}, + "signer10-foo": {"C"}, + "signer1-foo": {"A"}, + } + + expected := `SIGNER KEYS +signer1-foo A +signer2-foo B +signer10-foo C +` + buf := new(bytes.Buffer) + assert.NilError(t, printSignerInfo(buf, roleToKeyIDs)) + assert.Check(t, is.Equal(expected, buf.String())) +} diff --git a/cli/cli/command/trust/inspect_test.go b/cli/cli/command/trust/inspect_test.go new file mode 100644 index 00000000..4ab89b65 --- /dev/null +++ b/cli/cli/command/trust/inspect_test.go @@ -0,0 +1,152 @@ +package trust + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary/client" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func 
TestTrustInspectCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires at least 1 argument", + }, + { + name: "sha-reference", + args: []string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: "invalid-img-reference", + args: []string{"ALPINE"}, + expectedError: "invalid reference format", + }, + } + for _, tc := range testCases { + cmd := newInspectCommand( + test.NewFakeCli(&fakeClient{})) + cmd.Flags().Set("pretty", "true") + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestTrustInspectCommandRepositoryErrors(t *testing.T) { + testCases := []struct { + doc string + args []string + notaryRepository func(trust.ImageRefAndAuth, []string) (client.Repository, error) + err string + golden string + }{ + { + doc: "OfflineErrors", + args: []string{"nonexistent-reg-name.io/image"}, + notaryRepository: notary.GetOfflineNotaryRepository, + err: "No signatures or cannot access nonexistent-reg-name.io/image", + }, + { + doc: "OfflineErrorsWithImageTag", + args: []string{"nonexistent-reg-name.io/image:tag"}, + notaryRepository: notary.GetOfflineNotaryRepository, + err: "No signatures or cannot access nonexistent-reg-name.io/image:tag", + }, + { + doc: "UninitializedErrors", + args: []string{"reg/unsigned-img"}, + notaryRepository: notary.GetUninitializedNotaryRepository, + err: "No signatures or cannot access reg/unsigned-img", + golden: "trust-inspect-uninitialized.golden", + }, + { + doc: "UninitializedErrorsWithImageTag", + args: []string{"reg/unsigned-img:tag"}, + notaryRepository: notary.GetUninitializedNotaryRepository, + err: "No signatures or cannot access reg/unsigned-img:tag", + golden: "trust-inspect-uninitialized.golden", + }, + } + + for _, tc := range testCases { + t.Run(tc.doc, func(t 
*testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(tc.notaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.err) + if tc.golden != "" { + golden.Assert(t, cli.OutBuffer().String(), tc.golden) + } + }) + } +} + +func TestTrustInspectCommand(t *testing.T) { + testCases := []struct { + doc string + args []string + notaryRepository func(trust.ImageRefAndAuth, []string) (client.Repository, error) + golden string + }{ + { + doc: "EmptyNotaryRepo", + args: []string{"reg/img:unsigned-tag"}, + notaryRepository: notary.GetEmptyTargetsNotaryRepository, + golden: "trust-inspect-empty-repo.golden", + }, + { + doc: "FullRepoWithoutSigners", + args: []string{"signed-repo"}, + notaryRepository: notary.GetLoadedWithNoSignersNotaryRepository, + golden: "trust-inspect-full-repo-no-signers.golden", + }, + { + doc: "OneTagWithoutSigners", + args: []string{"signed-repo:green"}, + notaryRepository: notary.GetLoadedWithNoSignersNotaryRepository, + golden: "trust-inspect-one-tag-no-signers.golden", + }, + { + doc: "FullRepoWithSigners", + args: []string{"signed-repo"}, + notaryRepository: notary.GetLoadedNotaryRepository, + golden: "trust-inspect-full-repo-with-signers.golden", + }, + { + doc: "MultipleFullReposWithSigners", + args: []string{"signed-repo", "signed-repo"}, + notaryRepository: notary.GetLoadedNotaryRepository, + golden: "trust-inspect-multiple-repos-with-signers.golden", + }, + { + doc: "UnsignedTagInSignedRepo", + args: []string{"signed-repo:unsigned"}, + notaryRepository: notary.GetLoadedNotaryRepository, + golden: "trust-inspect-unsigned-tag-with-signers.golden", + }, + } + + for _, tc := range testCases { + t.Run(tc.doc, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(tc.notaryRepository) + cmd := newInspectCommand(cli) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, 
cli.OutBuffer().String(), tc.golden) + }) + } +} diff --git a/cli/cli/command/trust/key.go b/cli/cli/command/trust/key.go new file mode 100644 index 00000000..f57b44c7 --- /dev/null +++ b/cli/cli/command/trust/key.go @@ -0,0 +1,22 @@ +package trust + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// newTrustKeyCommand returns a cobra command for `trust key` subcommands +func newTrustKeyCommand(dockerCli command.Streams) *cobra.Command { + cmd := &cobra.Command{ + Use: "key", + Short: "Manage keys for signing Docker images", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newKeyGenerateCommand(dockerCli), + newKeyLoadCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/trust/key_generate.go b/cli/cli/command/trust/key_generate.go new file mode 100644 index 00000000..47223c3d --- /dev/null +++ b/cli/cli/command/trust/key_generate.go @@ -0,0 +1,134 @@ +package trust + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/tuf/data" + tufutils "github.com/theupdateframework/notary/tuf/utils" +) + +type keyGenerateOptions struct { + name string + directory string +} + +func newKeyGenerateCommand(dockerCli command.Streams) *cobra.Command { + options := keyGenerateOptions{} + cmd := &cobra.Command{ + Use: "generate NAME", + Short: "Generate and load a signing key-pair", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.name = args[0] + return setupPassphraseAndGenerateKeys(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.StringVar(&options.directory, "dir", "", "Directory 
to generate key in, defaults to current directory") + return cmd +} + +// key names can use lowercase alphanumeric + _ + - characters +var validKeyName = regexp.MustCompile(`^[a-z0-9][a-z0-9\_\-]*$`).MatchString + +// validate that all of the key names are unique and are alphanumeric + _ + - +// and that we do not already have public key files in the target dir on disk +func validateKeyArgs(keyName string, targetDir string) error { + if !validKeyName(keyName) { + return fmt.Errorf("key name \"%s\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", keyName) + } + + pubKeyFileName := keyName + ".pub" + if _, err := os.Stat(targetDir); err != nil { + return fmt.Errorf("public key path does not exist: \"%s\"", targetDir) + } + targetPath := filepath.Join(targetDir, pubKeyFileName) + if _, err := os.Stat(targetPath); err == nil { + return fmt.Errorf("public key file already exists: \"%s\"", targetPath) + } + return nil +} + +func setupPassphraseAndGenerateKeys(streams command.Streams, opts keyGenerateOptions) error { + targetDir := opts.directory + if targetDir == "" { + cwd, err := os.Getwd() + if err != nil { + return err + } + targetDir = cwd + } + return validateAndGenerateKey(streams, opts.name, targetDir) +} + +func validateAndGenerateKey(streams command.Streams, keyName string, workingDir string) error { + freshPassRetGetter := func() notary.PassRetriever { return trust.GetPassphraseRetriever(streams.In(), streams.Out()) } + if err := validateKeyArgs(keyName, workingDir); err != nil { + return err + } + fmt.Fprintf(streams.Out(), "Generating key for %s...\n", keyName) + // Automatically load the private key to local storage for use + privKeyFileStore, err := trustmanager.NewKeyFileStore(trust.GetTrustDirectory(), freshPassRetGetter()) + if err != nil { + return err + } + + pubPEM, err := generateKeyAndOutputPubPEM(keyName, privKeyFileStore) + if err != nil { + fmt.Fprintf(streams.Out(), err.Error()) + 
return errors.Wrapf(err, "failed to generate key for %s", keyName) + } + + // Output the public key to a file in the CWD or specified dir + writtenPubFile, err := writePubKeyPEMToDir(pubPEM, keyName, workingDir) + if err != nil { + return err + } + fmt.Fprintf(streams.Out(), "Successfully generated and loaded private key. Corresponding public key available: %s\n", writtenPubFile) + + return nil +} + +func generateKeyAndOutputPubPEM(keyName string, privKeyStore trustmanager.KeyStore) (pem.Block, error) { + privKey, err := tufutils.GenerateKey(data.ECDSAKey) + if err != nil { + return pem.Block{}, err + } + + privKeyStore.AddKey(trustmanager.KeyInfo{Role: data.RoleName(keyName)}, privKey) + if err != nil { + return pem.Block{}, err + } + + pubKey := data.PublicKeyFromPrivate(privKey) + return pem.Block{ + Type: "PUBLIC KEY", + Headers: map[string]string{ + "role": keyName, + }, + Bytes: pubKey.Public(), + }, nil +} + +func writePubKeyPEMToDir(pubPEM pem.Block, keyName, workingDir string) (string, error) { + // Output the public key to a file in the CWD or specified dir + pubFileName := strings.Join([]string{keyName, "pub"}, ".") + pubFilePath := filepath.Join(workingDir, pubFileName) + if err := ioutil.WriteFile(pubFilePath, pem.EncodeToMemory(&pubPEM), notary.PrivNoExecPerms); err != nil { + return "", errors.Wrapf(err, "failed to write public key to %s", pubFilePath) + } + return pubFilePath, nil +} diff --git a/cli/cli/command/trust/key_generate_test.go b/cli/cli/command/trust/key_generate_test.go new file mode 100644 index 00000000..b4c798d8 --- /dev/null +++ b/cli/cli/command/trust/key_generate_test.go @@ -0,0 +1,134 @@ +package trust + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/internal/test" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustmanager" + tufutils 
"github.com/theupdateframework/notary/tuf/utils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustKeyGenerateErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"key-1", "key-2"}, + expectedError: "requires exactly 1 argument", + }, + } + + tmpDir, err := ioutil.TempDir("", "docker-key-generate-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cmd := newKeyGenerateCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestGenerateKeySuccess(t *testing.T) { + pubKeyCWD, err := ioutil.TempDir("", "pub-keys-") + assert.NilError(t, err) + defer os.RemoveAll(pubKeyCWD) + + privKeyStorageDir, err := ioutil.TempDir("", "priv-keys-") + assert.NilError(t, err) + defer os.RemoveAll(privKeyStorageDir) + + passwd := "password" + cannedPasswordRetriever := passphrase.ConstantRetriever(passwd) + // generate a single key + keyName := "alice" + privKeyFileStore, err := trustmanager.NewKeyFileStore(privKeyStorageDir, cannedPasswordRetriever) + assert.NilError(t, err) + + pubKeyPEM, err := generateKeyAndOutputPubPEM(keyName, privKeyFileStore) + assert.NilError(t, err) + + assert.Check(t, is.Equal(keyName, pubKeyPEM.Headers["role"])) + // the default GUN is empty + assert.Check(t, is.Equal("", pubKeyPEM.Headers["gun"])) + // assert public key header + assert.Check(t, is.Equal("PUBLIC KEY", pubKeyPEM.Type)) + + // check that an appropriate ~//private/.key file exists + expectedPrivKeyDir := filepath.Join(privKeyStorageDir, notary.PrivDir) + _, err = os.Stat(expectedPrivKeyDir) + assert.NilError(t, err) + + keyFiles, err := ioutil.ReadDir(expectedPrivKeyDir) + assert.NilError(t, 
err) + assert.Check(t, is.Len(keyFiles, 1)) + privKeyFilePath := filepath.Join(expectedPrivKeyDir, keyFiles[0].Name()) + + // verify the key content + privFrom, _ := os.OpenFile(privKeyFilePath, os.O_RDONLY, notary.PrivExecPerms) + defer privFrom.Close() + fromBytes, _ := ioutil.ReadAll(privFrom) + privKeyPEM, _ := pem.Decode(fromBytes) + assert.Check(t, is.Equal(keyName, privKeyPEM.Headers["role"])) + // the default GUN is empty + assert.Check(t, is.Equal("", privKeyPEM.Headers["gun"])) + // assert encrypted header + assert.Check(t, is.Equal("ENCRYPTED PRIVATE KEY", privKeyPEM.Type)) + // check that the passphrase matches + _, err = tufutils.ParsePKCS8ToTufKey(privKeyPEM.Bytes, []byte(passwd)) + assert.NilError(t, err) + + // check that the public key exists at the correct path if we use the helper: + returnedPath, err := writePubKeyPEMToDir(pubKeyPEM, keyName, pubKeyCWD) + assert.NilError(t, err) + expectedPubKeyPath := filepath.Join(pubKeyCWD, keyName+".pub") + assert.Check(t, is.Equal(returnedPath, expectedPubKeyPath)) + _, err = os.Stat(expectedPubKeyPath) + assert.NilError(t, err) + // check that the public key is the only file output in CWD + cwdKeyFiles, err := ioutil.ReadDir(pubKeyCWD) + assert.NilError(t, err) + assert.Check(t, is.Len(cwdKeyFiles, 1)) +} + +func TestValidateKeyArgs(t *testing.T) { + pubKeyCWD, err := ioutil.TempDir("", "pub-keys-") + assert.NilError(t, err) + defer os.RemoveAll(pubKeyCWD) + + err = validateKeyArgs("a", pubKeyCWD) + assert.NilError(t, err) + + err = validateKeyArgs("a/b", pubKeyCWD) + assert.Error(t, err, "key name \"a/b\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character") + + err = validateKeyArgs("-", pubKeyCWD) + assert.Error(t, err, "key name \"-\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character") + + assert.NilError(t, ioutil.WriteFile(filepath.Join(pubKeyCWD, "a.pub"), []byte("abc"), 
notary.PrivExecPerms)) + err = validateKeyArgs("a", pubKeyCWD) + assert.Error(t, err, fmt.Sprintf("public key file already exists: \"%s\"", filepath.Join(pubKeyCWD, "a.pub"))) + + err = validateKeyArgs("a", "/random/dir/") + assert.Error(t, err, "public key path does not exist: \"/random/dir/\"") +} diff --git a/cli/cli/command/trust/key_load.go b/cli/cli/command/trust/key_load.go new file mode 100644 index 00000000..3b2c04ba --- /dev/null +++ b/cli/cli/command/trust/key_load.go @@ -0,0 +1,118 @@ +package trust + +import ( + "bytes" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "runtime" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/trustmanager" + tufutils "github.com/theupdateframework/notary/tuf/utils" +) + +const ( + nonOwnerReadWriteMask = 0077 +) + +type keyLoadOptions struct { + keyName string +} + +func newKeyLoadCommand(dockerCli command.Streams) *cobra.Command { + var options keyLoadOptions + cmd := &cobra.Command{ + Use: "load [OPTIONS] KEYFILE", + Short: "Load a private key file for signing", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return loadPrivKey(dockerCli, args[0], options) + }, + } + flags := cmd.Flags() + flags.StringVar(&options.keyName, "name", "signer", "Name for the loaded key") + return cmd +} + +func loadPrivKey(streams command.Streams, keyPath string, options keyLoadOptions) error { + // validate the key name if provided + if options.keyName != "" && !validKeyName(options.keyName) { + return fmt.Errorf("key name \"%s\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", options.keyName) + } + trustDir := trust.GetTrustDirectory() + keyFileStore, err := storage.NewPrivateKeyFileStorage(trustDir, 
notary.KeyExtension) + if err != nil { + return err + } + privKeyImporters := []trustmanager.Importer{keyFileStore} + + fmt.Fprintf(streams.Out(), "Loading key from \"%s\"...\n", keyPath) + + // Always use a fresh passphrase retriever for each import + passRet := trust.GetPassphraseRetriever(streams.In(), streams.Out()) + keyBytes, err := getPrivKeyBytesFromPath(keyPath) + if err != nil { + return errors.Wrapf(err, "refusing to load key from %s", keyPath) + } + if err := loadPrivKeyBytesToStore(keyBytes, privKeyImporters, keyPath, options.keyName, passRet); err != nil { + return errors.Wrapf(err, "error importing key from %s", keyPath) + } + fmt.Fprintf(streams.Out(), "Successfully imported key from %s\n", keyPath) + return nil +} + +func getPrivKeyBytesFromPath(keyPath string) ([]byte, error) { + if runtime.GOOS != "windows" { + fileInfo, err := os.Stat(keyPath) + if err != nil { + return nil, err + } + if fileInfo.Mode()&nonOwnerReadWriteMask != 0 { + return nil, fmt.Errorf("private key file %s must not be readable or writable by others", keyPath) + } + } + + from, err := os.OpenFile(keyPath, os.O_RDONLY, notary.PrivExecPerms) + if err != nil { + return nil, err + } + defer from.Close() + + return ioutil.ReadAll(from) +} + +func loadPrivKeyBytesToStore(privKeyBytes []byte, privKeyImporters []trustmanager.Importer, keyPath, keyName string, passRet notary.PassRetriever) error { + var err error + if _, _, err = tufutils.ExtractPrivateKeyAttributes(privKeyBytes); err != nil { + return fmt.Errorf("provided file %s is not a supported private key - to add a signer's public key use docker trust signer add", keyPath) + } + if privKeyBytes, err = decodePrivKeyIfNecessary(privKeyBytes, passRet); err != nil { + return errors.Wrapf(err, "cannot load key from provided file %s", keyPath) + } + // Make a reader, rewind the file pointer + return trustmanager.ImportKeys(bytes.NewReader(privKeyBytes), privKeyImporters, keyName, "", passRet) +} + +func 
decodePrivKeyIfNecessary(privPemBytes []byte, passRet notary.PassRetriever) ([]byte, error) { + pemBlock, _ := pem.Decode(privPemBytes) + _, containsDEKInfo := pemBlock.Headers["DEK-Info"] + if containsDEKInfo || pemBlock.Type == "ENCRYPTED PRIVATE KEY" { + // if we do not have enough information to properly import, try to decrypt the key + if _, ok := pemBlock.Headers["path"]; !ok { + privKey, _, err := trustmanager.GetPasswdDecryptBytes(passRet, privPemBytes, "", "encrypted") + if err != nil { + return []byte{}, fmt.Errorf("could not decrypt key") + } + privPemBytes = privKey.Private() + } + } + return privPemBytes, nil +} diff --git a/cli/cli/command/trust/key_load_test.go b/cli/cli/command/trust/key_load_test.go new file mode 100644 index 00000000..e0e35aab --- /dev/null +++ b/cli/cli/command/trust/key_load_test.go @@ -0,0 +1,253 @@ +package trust + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/internal/test" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/trustmanager" + tufutils "github.com/theupdateframework/notary/tuf/utils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestTrustKeyLoadErrors(t *testing.T) { + noSuchFile := "stat iamnotakey: no such file or directory" + if runtime.GOOS == "windows" { + noSuchFile = "CreateFile iamnotakey: The system cannot find the file specified." 
+ } + testCases := []struct { + name string + args []string + expectedError string + expectedOutput string + }{ + { + name: "not-enough-args", + expectedError: "exactly 1 argument", + expectedOutput: "", + }, + { + name: "too-many-args", + args: []string{"iamnotakey", "alsonotakey"}, + expectedError: "exactly 1 argument", + expectedOutput: "", + }, + { + name: "not-a-key", + args: []string{"iamnotakey"}, + expectedError: "refusing to load key from iamnotakey: " + noSuchFile, + expectedOutput: "Loading key from \"iamnotakey\"...\n", + }, + { + name: "bad-key-name", + args: []string{"iamnotakey", "--name", "KEYNAME"}, + expectedError: "key name \"KEYNAME\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", + expectedOutput: "", + }, + } + tmpDir, err := ioutil.TempDir("", "docker-key-load-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cmd := newKeyLoadCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + assert.Check(t, is.Contains(cli.OutBuffer().String(), tc.expectedOutput)) + } +} + +var rsaPrivKeyFixture = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAs7yVMzCw8CBZPoN+QLdx3ZzbVaHnouHIKu+ynX60IZ3stpbb +6rowu78OWON252JcYJqe++2GmdIgbBhg+mZDwhX0ZibMVztJaZFsYL+Ch/2J9KqD +A5NtE1s/XdhYoX5hsv7W4ok9jLFXRYIMj+T4exJRlR4f4GP9p0fcqPWd9/enPnlJ +JFTmu0DXJTZUMVS1UrXUy5t/DPXdrwyl8pM7VCqO3bqK7jqE6mWawdTkEeiku1fJ +ydP0285uiYTbj1Q38VVhPwXzMuLbkaUgRJhCI4BcjfQIjtJLbWpS+VdhUEvtgMVx +XJMKxCVGG69qjXyj9TjI7pxanb/bWglhovJN9wIDAQABAoIBAQCSnMsLxbUfOxPx +RWuwOLN+NZxIvtfnastQEtSdWiRvo5Xa3zYmw5hLHa8DXRC57+cwug/jqr54LQpb +gotg1hiBck05In7ezTK2FXTVeoJskal91bUnLpP0DSOkVnz9xszFKNF6Wr7FTEfH +IC1FF16Fbcz0mW0hKg9X6+uYOzqPcKpQRwli5LAwhT18Alf9h4/3NCeKotiJyr2J +xvcEH1eY2m2c/jQZurBkys7qBC3+i8LJEOW8MBQt7mxajwfbU91wtP2YoqMcoYiS 
+zsPbYp7Ui2t4G9Yn+OJw+uj4RGP1Bo4nSyRxWDtg+8Zug/JYU6/s+8kVRpiGffd3 +T1GvoxUhAoGBAOnPDWG/g1xlJf65Rh71CxMs638zhYbIloU2K4Rqr05DHe7GryTS +9hLVrwhHddK+KwfVbR8HFMPo1DC/NVbuKt8StTAadAu3HsC088gWd28nOiGAWuvH +Bo3x/DYQGYwGFfoo4rzCOgMj6DJjXmcWEXNv3NDMoXoYpkxa0g6zZDyHAoGBAMTL +t7EUneJT+Mm7wyL1I5bmaT/HFwqoUQB2ccBPVD8p1el62NgLdfhOa8iNlBVhMrlh +2aTjrMlSPcjr9sCgKrLcenSWw+2qFsf4+SmV01ntB9kWes2phXpnB0ynXIcbeG05 ++BLxbqDTVV0Iqh4r/dGeplyV2WyL3mTpkT3hRq8RAoGAZ93degEUICWnHWO9LN97 +Dge0joua0+ekRoVsC6VBP6k9UOfewqMdQfy/hxQH2Zk1kINVuKTyqp1yNj2bOoUP +co3jA/2cc9/jv4QjkE26vRxWDK/ytC90T/aiLno0fyns9XbYUzaNgvuemVPfijgZ +hIi7Nd7SFWWB6wWlr3YuH10CgYEAwh7JVa2mh8iZEjVaKTNyJbmmfDjgq6yYKkKr +ti0KRzv3O9Xn7ERx27tPaobtWaGFLYQt8g57NCMhuv23aw8Sz1fYmwTUw60Rx7P5 +42FdF8lOAn/AJvpfJfxXIO+9v7ADPIr//3+TxqRwAdM4K4btWkaKh61wyTe26gfT +MxzyYmECgYAnlU5zsGyiZqwoXVktkhtZrE7Qu0SoztzFb8KpvFNmMTPF1kAAYmJY +GIhbizeGJ3h4cUdozKmt8ZWIt6uFDEYCqEA7XF4RH75dW25x86mpIPO7iRl9eisY +IsLeMYqTIwXAwGx6Ka9v5LOL1kzcHQ2iVj6+QX+yoptSft1dYa9jOA== +-----END RSA PRIVATE KEY-----`) + +const rsaPrivKeyID = "ee69e8e07a14756ad5ff0aca2336b37f86b0ac1710d1f3e94440081e080aecd7" + +var ecPrivKeyFixture = []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEINfxKtDH3ug7ZIQPDyeAzujCdhw36D+bf9ToPE1A7YEyoAoGCCqGSM49 +AwEHoUQDQgAEUIH9AYtrcDFzZrFJBdJZkn21d+4cH3nzy2O6Q/ct4BjOBKa+WCdR +tPo78bA+C/7t81ADQO8Jqaj59W50rwoqDQ== +-----END EC PRIVATE KEY-----`) + +const ecPrivKeyID = "46157cb0becf9c72c3219e11d4692424fef9bf4460812ccc8a71a3dfcafc7e60" + +var testKeys = map[string][]byte{ + ecPrivKeyID: ecPrivKeyFixture, + rsaPrivKeyID: rsaPrivKeyFixture, +} + +func TestLoadKeyFromPath(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") + for keyID, keyBytes := range testKeys { + t.Run(fmt.Sprintf("load-key-id-%s-from-path", keyID), func(t *testing.T) { + testLoadKeyFromPath(t, keyID, keyBytes) + }) + } +} + +func testLoadKeyFromPath(t *testing.T, privKeyID string, privKeyFixture []byte) { + privKeyDir, err := ioutil.TempDir("", "key-load-test-") + assert.NilError(t, err) + 
defer os.RemoveAll(privKeyDir) + privKeyFilepath := filepath.Join(privKeyDir, "privkey.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, notary.PrivNoExecPerms)) + + keyStorageDir, err := ioutil.TempDir("", "loaded-keys-") + assert.NilError(t, err) + defer os.RemoveAll(keyStorageDir) + + passwd := "password" + cannedPasswordRetriever := passphrase.ConstantRetriever(passwd) + keyFileStore, err := storage.NewPrivateKeyFileStorage(keyStorageDir, notary.KeyExtension) + assert.NilError(t, err) + privKeyImporters := []trustmanager.Importer{keyFileStore} + + // get the privKeyBytes + privKeyBytes, err := getPrivKeyBytesFromPath(privKeyFilepath) + assert.NilError(t, err) + + // import the key to our keyStorageDir + assert.Check(t, loadPrivKeyBytesToStore(privKeyBytes, privKeyImporters, privKeyFilepath, "signer-name", cannedPasswordRetriever)) + + // check that the appropriate ~//private/.key file exists + expectedImportKeyPath := filepath.Join(keyStorageDir, notary.PrivDir, privKeyID+"."+notary.KeyExtension) + _, err = os.Stat(expectedImportKeyPath) + assert.NilError(t, err) + + // verify the key content + from, _ := os.OpenFile(expectedImportKeyPath, os.O_RDONLY, notary.PrivExecPerms) + defer from.Close() + fromBytes, _ := ioutil.ReadAll(from) + keyPEM, _ := pem.Decode(fromBytes) + assert.Check(t, is.Equal("signer-name", keyPEM.Headers["role"])) + // the default GUN is empty + assert.Check(t, is.Equal("", keyPEM.Headers["gun"])) + // assert encrypted header + assert.Check(t, is.Equal("ENCRYPTED PRIVATE KEY", keyPEM.Type)) + + decryptedKey, err := tufutils.ParsePKCS8ToTufKey(keyPEM.Bytes, []byte(passwd)) + assert.NilError(t, err) + fixturePEM, _ := pem.Decode(privKeyFixture) + assert.Check(t, is.DeepEqual(fixturePEM.Bytes, decryptedKey.Private())) +} + +func TestLoadKeyTooPermissive(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") + for keyID, keyBytes := range testKeys { + t.Run(fmt.Sprintf("load-key-id-%s-too-permissive", keyID), func(t 
*testing.T) { + testLoadKeyTooPermissive(t, keyBytes) + }) + } +} + +func testLoadKeyTooPermissive(t *testing.T, privKeyFixture []byte) { + privKeyDir, err := ioutil.TempDir("", "key-load-test-") + assert.NilError(t, err) + defer os.RemoveAll(privKeyDir) + privKeyFilepath := filepath.Join(privKeyDir, "privkey477.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0477)) + + keyStorageDir, err := ioutil.TempDir("", "loaded-keys-") + assert.NilError(t, err) + defer os.RemoveAll(keyStorageDir) + + // import the key to our keyStorageDir + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + expected := fmt.Sprintf("private key file %s must not be readable or writable by others", privKeyFilepath) + assert.Error(t, err, expected) + + privKeyFilepath = filepath.Join(privKeyDir, "privkey667.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0677)) + + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + expected = fmt.Sprintf("private key file %s must not be readable or writable by others", privKeyFilepath) + assert.Error(t, err, expected) + + privKeyFilepath = filepath.Join(privKeyDir, "privkey777.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0777)) + + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + expected = fmt.Sprintf("private key file %s must not be readable or writable by others", privKeyFilepath) + assert.Error(t, err, expected) + + privKeyFilepath = filepath.Join(privKeyDir, "privkey400.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0400)) + + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + assert.NilError(t, err) + + privKeyFilepath = filepath.Join(privKeyDir, "privkey600.pem") + assert.NilError(t, ioutil.WriteFile(privKeyFilepath, privKeyFixture, 0600)) + + _, err = getPrivKeyBytesFromPath(privKeyFilepath) + assert.NilError(t, err) +} + +var pubKeyFixture = []byte(`-----BEGIN PUBLIC KEY----- 
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUIH9AYtrcDFzZrFJBdJZkn21d+4c +H3nzy2O6Q/ct4BjOBKa+WCdRtPo78bA+C/7t81ADQO8Jqaj59W50rwoqDQ== +-----END PUBLIC KEY-----`) + +func TestLoadPubKeyFailure(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") + pubKeyDir, err := ioutil.TempDir("", "key-load-test-pubkey-") + assert.NilError(t, err) + defer os.RemoveAll(pubKeyDir) + pubKeyFilepath := filepath.Join(pubKeyDir, "pubkey.pem") + assert.NilError(t, ioutil.WriteFile(pubKeyFilepath, pubKeyFixture, notary.PrivNoExecPerms)) + keyStorageDir, err := ioutil.TempDir("", "loaded-keys-") + assert.NilError(t, err) + defer os.RemoveAll(keyStorageDir) + + passwd := "password" + cannedPasswordRetriever := passphrase.ConstantRetriever(passwd) + keyFileStore, err := storage.NewPrivateKeyFileStorage(keyStorageDir, notary.KeyExtension) + assert.NilError(t, err) + privKeyImporters := []trustmanager.Importer{keyFileStore} + + pubKeyBytes, err := getPrivKeyBytesFromPath(pubKeyFilepath) + assert.NilError(t, err) + + // import the key to our keyStorageDir - it should fail + err = loadPrivKeyBytesToStore(pubKeyBytes, privKeyImporters, pubKeyFilepath, "signer-name", cannedPasswordRetriever) + expected := fmt.Sprintf("provided file %s is not a supported private key - to add a signer's public key use docker trust signer add", pubKeyFilepath) + assert.Error(t, err, expected) +} diff --git a/cli/cli/command/trust/revoke.go b/cli/cli/command/trust/revoke.go new file mode 100644 index 00000000..31437b03 --- /dev/null +++ b/cli/cli/command/trust/revoke.go @@ -0,0 +1,125 @@ +package trust + +import ( + "context" + "fmt" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +type revokeOptions struct { + forceYes bool +} + +func 
newRevokeCommand(dockerCli command.Cli) *cobra.Command { + options := revokeOptions{} + cmd := &cobra.Command{ + Use: "revoke [OPTIONS] IMAGE[:TAG]", + Short: "Remove trust for an image", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return revokeTrust(dockerCli, args[0], options) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&options.forceYes, "yes", "y", false, "Do not prompt for confirmation") + return cmd +} + +func revokeTrust(cli command.Cli, remote string, options revokeOptions) error { + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), remote) + if err != nil { + return err + } + tag := imgRefAndAuth.Tag() + if imgRefAndAuth.Tag() == "" && imgRefAndAuth.Digest() != "" { + return fmt.Errorf("cannot use a digest reference for IMAGE:TAG") + } + if imgRefAndAuth.Tag() == "" && !options.forceYes { + deleteRemote := command.PromptForConfirmation(os.Stdin, cli.Out(), fmt.Sprintf("Please confirm you would like to delete all signature data for %s?", remote)) + if !deleteRemote { + fmt.Fprintf(cli.Out(), "\nAborting action.\n") + return nil + } + } + + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull) + if err != nil { + return err + } + + if err = clearChangeList(notaryRepo); err != nil { + return err + } + defer clearChangeList(notaryRepo) + if err := revokeSignature(notaryRepo, tag); err != nil { + return errors.Wrapf(err, "could not remove signature for %s", remote) + } + fmt.Fprintf(cli.Out(), "Successfully deleted signature for %s\n", remote) + return nil +} + +func revokeSignature(notaryRepo client.Repository, tag string) error { + if tag != "" { + // Revoke signature for the specified tag + if err := revokeSingleSig(notaryRepo, tag); err != nil { + return err + } + } else { + // revoke all signatures for the image, as no tag was given + if err := revokeAllSigs(notaryRepo); err != nil { + return err + } + } + + // Publish 
change + return notaryRepo.Publish() +} + +func revokeSingleSig(notaryRepo client.Repository, tag string) error { + releasedTargetWithRole, err := notaryRepo.GetTargetByName(tag, trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return err + } + releasedTarget := releasedTargetWithRole.Target + return getSignableRolesForTargetAndRemove(releasedTarget, notaryRepo) +} + +func revokeAllSigs(notaryRepo client.Repository) error { + releasedTargetWithRoleList, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return err + } + + if len(releasedTargetWithRoleList) == 0 { + return fmt.Errorf("no signed tags to remove") + } + + // we need all the roles that signed each released target so we can remove from all roles. + for _, releasedTargetWithRole := range releasedTargetWithRoleList { + // remove from all roles + if err := getSignableRolesForTargetAndRemove(releasedTargetWithRole.Target, notaryRepo); err != nil { + return err + } + } + return nil +} + +// get all the roles that signed the target and removes it from all roles. +func getSignableRolesForTargetAndRemove(releasedTarget client.Target, notaryRepo client.Repository) error { + signableRoles, err := trust.GetSignableRoles(notaryRepo, &releasedTarget) + if err != nil { + return err + } + // remove from all roles + return notaryRepo.RemoveTarget(releasedTarget.Name, signableRoles...) 
+} diff --git a/cli/cli/command/trust/revoke_test.go b/cli/cli/command/trust/revoke_test.go new file mode 100644 index 00000000..8c407399 --- /dev/null +++ b/cli/cli/command/trust/revoke_test.go @@ -0,0 +1,156 @@ +package trust + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/internal/test" + "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustpinning" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustRevokeCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"remote1", "remote2"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "sha-reference", + args: []string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: "invalid-img-reference", + args: []string{"ALPINE"}, + expectedError: "invalid reference format", + }, + { + name: "digest-reference", + args: []string{"ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2"}, + expectedError: "cannot use a digest reference for IMAGE:TAG", + }, + } + for _, tc := range testCases { + cmd := newRevokeCommand( + test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestTrustRevokeCommand(t *testing.T) { + testCases := []struct { + doc string + notaryRepository func(trust.ImageRefAndAuth, []string) (client.Repository, error) + args []string + expectedErr string + expectedMessage string + }{ + { + doc: "OfflineErrors_Confirm", + notaryRepository: notary.GetOfflineNotaryRepository, + args: 
[]string{"reg-name.io/image"}, + expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] \nAborting action.", + }, + { + doc: "OfflineErrors_Offline", + notaryRepository: notary.GetOfflineNotaryRepository, + args: []string{"reg-name.io/image", "-y"}, + expectedErr: "could not remove signature for reg-name.io/image: client is offline", + }, + { + doc: "OfflineErrors_WithTag_Offline", + notaryRepository: notary.GetOfflineNotaryRepository, + args: []string{"reg-name.io/image:tag"}, + expectedErr: "could not remove signature for reg-name.io/image:tag: client is offline", + }, + { + doc: "UninitializedErrors_Confirm", + notaryRepository: notary.GetUninitializedNotaryRepository, + args: []string{"reg-name.io/image"}, + expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? [y/N] \nAborting action.", + }, + { + doc: "UninitializedErrors_NoTrustData", + notaryRepository: notary.GetUninitializedNotaryRepository, + args: []string{"reg-name.io/image", "-y"}, + expectedErr: "could not remove signature for reg-name.io/image: does not have trust data for", + }, + { + doc: "UninitializedErrors_WithTag_NoTrustData", + notaryRepository: notary.GetUninitializedNotaryRepository, + args: []string{"reg-name.io/image:tag"}, + expectedErr: "could not remove signature for reg-name.io/image:tag: does not have trust data for", + }, + { + doc: "EmptyNotaryRepo_Confirm", + notaryRepository: notary.GetEmptyTargetsNotaryRepository, + args: []string{"reg-name.io/image"}, + expectedMessage: "Please confirm you would like to delete all signature data for reg-name.io/image? 
[y/N] \nAborting action.", + }, + { + doc: "EmptyNotaryRepo_NoSignedTags", + notaryRepository: notary.GetEmptyTargetsNotaryRepository, + args: []string{"reg-name.io/image", "-y"}, + expectedErr: "could not remove signature for reg-name.io/image: no signed tags to remove", + }, + { + doc: "EmptyNotaryRepo_NoValidTrustData", + notaryRepository: notary.GetEmptyTargetsNotaryRepository, + args: []string{"reg-name.io/image:tag"}, + expectedErr: "could not remove signature for reg-name.io/image:tag: No valid trust data for tag", + }, + { + doc: "AllSigConfirmation", + notaryRepository: notary.GetEmptyTargetsNotaryRepository, + args: []string{"alpine"}, + expectedMessage: "Please confirm you would like to delete all signature data for alpine? [y/N] \nAborting action.", + }, + } + + for _, tc := range testCases { + t.Run(tc.doc, func(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(tc.notaryRepository) + cmd := newRevokeCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + if tc.expectedErr != "" { + assert.ErrorContains(t, cmd.Execute(), tc.expectedErr) + return + } + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Contains(cli.OutBuffer().String(), tc.expectedMessage)) + }) + } + +} + +func TestGetSignableRolesForTargetAndRemoveError(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "notary-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever("password"), trustpinning.TrustPinConfig{}) + assert.NilError(t, err) + target := client.Target{} + err = getSignableRolesForTargetAndRemove(target, notaryRepo) + assert.Error(t, err, "client is offline") +} diff --git a/cli/cli/command/trust/sign.go b/cli/cli/command/trust/sign.go new file mode 100644 index 00000000..234a057c --- /dev/null +++ b/cli/cli/command/trust/sign.go @@ -0,0 +1,247 @@ +package trust + +import ( + "context" + "fmt" + "io" + 
// signOptions holds the flag and argument values for `docker trust sign`.
type signOptions struct {
	local     bool   // --local: sign a locally tagged image
	imageName string // positional IMAGE:TAG argument
}

// newSignCommand returns the `docker trust sign` command.
func newSignCommand(dockerCli command.Cli) *cobra.Command {
	options := signOptions{}
	cmd := &cobra.Command{
		Use:   "sign IMAGE:TAG",
		Short: "Sign an image",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			options.imageName = args[0]
			return runSignImage(dockerCli, options)
		},
	}
	flags := cmd.Flags()
	flags.BoolVar(&options.local, "local", false, "Sign a locally tagged image")
	return cmd
}

// runSignImage signs imageName's tag and publishes the signature. When the
// repository has no trust data yet it is initialized first (with the current
// user as a signer); when the remote target is missing or --local was given,
// the locally tagged image is pushed along with fresh trust data instead.
func runSignImage(cli command.Cli, options signOptions) error {
	imageName := options.imageName
	ctx := context.Background()
	imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), imageName)
	if err != nil {
		return err
	}
	if err := validateTag(imgRefAndAuth); err != nil {
		return err
	}

	notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
	if err != nil {
		return trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
	}
	// Work from a clean changelist, and clean up again on any exit path.
	if err = clearChangeList(notaryRepo); err != nil {
		return err
	}
	defer clearChangeList(notaryRepo)

	// get the latest repository metadata so we can figure out which roles to sign
	if _, err = notaryRepo.ListTargets(); err != nil {
		switch err.(type) {
		case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist:
			// before initializing a new repo, check that the image exists locally:
			if err := checkLocalImageExistence(ctx, cli, imageName); err != nil {
				return err
			}

			// The registry username becomes the delegation role name.
			userRole := data.RoleName(path.Join(data.CanonicalTargetsRole.String(), imgRefAndAuth.AuthConfig().Username))
			if err := initNotaryRepoWithSigners(notaryRepo, userRole); err != nil {
				return trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
			}

			fmt.Fprintf(cli.Out(), "Created signer: %s\n", imgRefAndAuth.AuthConfig().Username)
			fmt.Fprintf(cli.Out(), "Finished initializing signed repository for %s\n", imageName)
		default:
			return trust.NotaryError(imgRefAndAuth.RepoInfo().Name.Name(), err)
		}
	}
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, imgRefAndAuth.RepoInfo().Index, "push")
	target, err := createTarget(notaryRepo, imgRefAndAuth.Tag())
	// NOTE: err may be nil here when --local was set; the type switch below
	// deliberately includes the nil case so --local follows the push path too.
	if err != nil || options.local {
		switch err := err.(type) {
		// If the error is nil then the local flag is set
		case client.ErrNoSuchTarget, client.ErrRepositoryNotExist, nil:
			// Fail fast if the image doesn't exist locally
			if err := checkLocalImageExistence(ctx, cli, imageName); err != nil {
				return err
			}
			fmt.Fprintf(cli.Err(), "Signing and pushing trust data for local image %s, may overwrite remote trust data\n", imageName)
			return image.TrustedPush(ctx, cli, imgRefAndAuth.RepoInfo(), imgRefAndAuth.Reference(), *imgRefAndAuth.AuthConfig(), requestPrivilege)
		default:
			return err
		}
	}
	return signAndPublishToTarget(cli.Out(), imgRefAndAuth, notaryRepo, target)
}

// signAndPublishToTarget adds target to every signable role the user holds
// keys for and publishes, reporting any pre-existing signatures for the tag.
func signAndPublishToTarget(out io.Writer, imgRefAndAuth trust.ImageRefAndAuth, notaryRepo client.Repository, target client.Target) error {
	tag := imgRefAndAuth.Tag()
	fmt.Fprintf(out, "Signing and pushing trust metadata for %s\n", imgRefAndAuth.Name())
	// Look up prior signatures first so we can report what is being replaced.
	existingSigInfo, err := getExistingSignatureInfoForReleasedTag(notaryRepo, tag)
	if err != nil {
		return err
	}
	err = image.AddTargetToAllSignableRoles(notaryRepo, &target)
	if err == nil {
		prettyPrintExistingSignatureInfo(out, existingSigInfo)
		err = notaryRepo.Publish()
	}
	if err != nil {
		return errors.Wrapf(err, "failed to sign %s:%s", imgRefAndAuth.RepoInfo().Name.Name(), tag)
	}
	fmt.Fprintf(out, "Successfully signed %s:%s\n", imgRefAndAuth.RepoInfo().Name.Name(), tag)
	return nil
}

// validateTag requires an explicit tag and rejects digest references, since
// signing operates on IMAGE:TAG.
func validateTag(imgRefAndAuth trust.ImageRefAndAuth) error {
	tag := imgRefAndAuth.Tag()
	if tag == "" {
		if imgRefAndAuth.Digest() != "" {
			return fmt.Errorf("cannot use a digest reference for IMAGE:TAG")
		}
		return fmt.Errorf("No tag specified for %s", imgRefAndAuth.Name())
	}
	return nil
}

// checkLocalImageExistence returns the daemon's inspect error when imageName
// is not present locally, nil otherwise.
func checkLocalImageExistence(ctx context.Context, cli command.Cli, imageName string) error {
	_, _, err := cli.Client().ImageInspectWithRaw(ctx, imageName)
	return err
}

// createTarget builds a notary target for tag from the signed manifest's
// hashes and size as known to the notary server.
func createTarget(notaryRepo client.Repository, tag string) (client.Target, error) {
	target := &client.Target{}
	var err error
	if tag == "" {
		return *target, fmt.Errorf("No tag specified")
	}
	target.Name = tag
	target.Hashes, target.Length, err = getSignedManifestHashAndSize(notaryRepo, tag)
	return *target, err
}

// getSignedManifestHashAndSize fetches all signatures for tag and returns the
// hashes and length of the released one.
func getSignedManifestHashAndSize(notaryRepo client.Repository, tag string) (data.Hashes, int64, error) {
	targets, err := notaryRepo.GetAllTargetMetadataByName(tag)
	if err != nil {
		return nil, 0, err
	}
	return getReleasedTargetHashAndSize(targets, tag)
}

// getReleasedTargetHashAndSize returns the hashes and length of the first
// target signed by a released role; ErrNoSuchTarget when none is released.
func getReleasedTargetHashAndSize(targets []client.TargetSignedStruct, tag string) (data.Hashes, int64, error) {
	for _, tgt := range targets {
		if isReleasedTarget(tgt.Role.Name) {
			return tgt.Target.Hashes, tgt.Target.Length, nil
		}
	}
	return nil, 0, client.ErrNoSuchTarget(tag)
}

// getExistingSignatureInfoForReleasedTag returns display info for the current
// released signature of tag; a zero trustTagRow when the tag is unsigned.
func getExistingSignatureInfoForReleasedTag(notaryRepo client.Repository, tag string) (trustTagRow, error) {
	targets, err := notaryRepo.GetAllTargetMetadataByName(tag)
	if err != nil {
		return trustTagRow{}, err
	}
	releasedTargetInfoList := matchReleasedSignatures(targets)
	if len(releasedTargetInfoList) == 0 {
		return trustTagRow{}, nil
	}
	return releasedTargetInfoList[0], nil
}

// prettyPrintExistingSignatureInfo reports which signers already signed the
// tag, with signer names sorted for stable output.
func prettyPrintExistingSignatureInfo(out io.Writer, existingSigInfo trustTagRow) {
	sort.Strings(existingSigInfo.Signers)
	joinedSigners := strings.Join(existingSigInfo.Signers, ", ")
	fmt.Fprintf(out, "Existing signatures for tag %s digest %s from:\n%s\n", existingSigInfo.SignedTag, existingSigInfo.Digest, joinedSigners)
}
strings.Join(existingSigInfo.Signers, ", ") + fmt.Fprintf(out, "Existing signatures for tag %s digest %s from:\n%s\n", existingSigInfo.SignedTag, existingSigInfo.Digest, joinedSigners) +} + +func initNotaryRepoWithSigners(notaryRepo client.Repository, newSigner data.RoleName) error { + rootKey, err := getOrGenerateNotaryKey(notaryRepo, data.CanonicalRootRole) + if err != nil { + return err + } + rootKeyID := rootKey.ID() + + // Initialize the notary repository with a remotely managed snapshot key + if err := notaryRepo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return err + } + + signerKey, err := getOrGenerateNotaryKey(notaryRepo, newSigner) + if err != nil { + return err + } + if err := addStagedSigner(notaryRepo, newSigner, []data.PublicKey{signerKey}); err != nil { + return errors.Wrapf(err, "could not add signer to repo: %s", strings.TrimPrefix(newSigner.String(), "targets/")) + } + + return notaryRepo.Publish() +} + +// generates an ECDSA key without a GUN for the specified role +func getOrGenerateNotaryKey(notaryRepo client.Repository, role data.RoleName) (data.PublicKey, error) { + // use the signer name in the PEM headers if this is a delegation key + if data.IsDelegation(role) { + role = data.RoleName(notaryRoleToSigner(role)) + } + keys := notaryRepo.GetCryptoService().ListKeys(role) + var err error + var key data.PublicKey + // always select the first key by ID + if len(keys) > 0 { + sort.Strings(keys) + keyID := keys[0] + privKey, _, err := notaryRepo.GetCryptoService().GetPrivateKey(keyID) + if err != nil { + return nil, err + } + key = data.PublicKeyFromPrivate(privKey) + } else { + key, err = notaryRepo.GetCryptoService().Create(role, "", data.ECDSAKey) + if err != nil { + return nil, err + } + } + return key, nil +} + +// stages changes to add a signer with the specified name and key(s). 
Adds to targets/ and targets/releases +func addStagedSigner(notaryRepo client.Repository, newSigner data.RoleName, signerKeys []data.PublicKey) error { + // create targets/ + if err := notaryRepo.AddDelegationRoleAndKeys(newSigner, signerKeys); err != nil { + return err + } + if err := notaryRepo.AddDelegationPaths(newSigner, []string{""}); err != nil { + return err + } + + // create targets/releases + if err := notaryRepo.AddDelegationRoleAndKeys(trust.ReleasesRole, signerKeys); err != nil { + return err + } + return notaryRepo.AddDelegationPaths(trust.ReleasesRole, []string{""}) +} diff --git a/cli/cli/command/trust/sign_test.go b/cli/cli/command/trust/sign_test.go new file mode 100644 index 00000000..31a56ea1 --- /dev/null +++ b/cli/cli/command/trust/sign_test.go @@ -0,0 +1,309 @@ +package trust + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "os" + "runtime" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/internal/test" + notaryfake "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/client/changelist" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/trustpinning" + "github.com/theupdateframework/notary/tuf/data" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +const passwd = "password" + +func TestTrustSignCommandErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires exactly 1 argument", + }, + { + name: "too-many-args", + args: []string{"image", "tag"}, + expectedError: "requires exactly 1 argument", + }, + { + name: "sha-reference", + args: []string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: 
// TestTrustSignCommandErrors checks argument/reference validation failures
// for `docker trust sign`.
func TestTrustSignCommandErrors(t *testing.T) {
	testCases := []struct {
		name          string
		args          []string
		expectedError string
	}{
		{
			name:          "not-enough-args",
			expectedError: "requires exactly 1 argument",
		},
		{
			name:          "too-many-args",
			args:          []string{"image", "tag"},
			expectedError: "requires exactly 1 argument",
		},
		{
			name:          "sha-reference",
			args:          []string{"870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"},
			expectedError: "invalid repository name",
		},
		{
			name:          "invalid-img-reference",
			args:          []string{"ALPINE:latest"},
			expectedError: "invalid reference format",
		},
		{
			name:          "no-tag",
			args:          []string{"reg/img"},
			expectedError: "No tag specified for reg/img",
		},
		{
			name:          "digest-reference",
			args:          []string{"ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2"},
			expectedError: "cannot use a digest reference for IMAGE:TAG",
		},
	}
	// change to a tmpdir so the test never touches the user's trust state
	tmpDir, err := ioutil.TempDir("", "docker-sign-test-")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpDir)
	config.SetDir(tmpDir)
	for _, tc := range testCases {
		cmd := newSignCommand(
			test.NewFakeCli(&fakeClient{}))
		cmd.SetArgs(tc.args)
		cmd.SetOutput(ioutil.Discard)
		assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
	}
}

// TestTrustSignCommandOfflineErrors checks that signing fails cleanly when
// the notary client cannot reach a server.
func TestTrustSignCommandOfflineErrors(t *testing.T) {
	cli := test.NewFakeCli(&fakeClient{})
	cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository)
	cmd := newSignCommand(cli)
	cmd.SetArgs([]string{"reg-name.io/image:tag"})
	cmd.SetOutput(ioutil.Discard)
	assert.ErrorContains(t, cmd.Execute(), "client is offline")
}

// TestGetOrGenerateNotaryKey verifies that keys are created on first use,
// reused on subsequent calls, and that delegation keys are distinct from
// root keys.
func TestGetOrGenerateNotaryKey(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "notary-test-")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpDir)

	notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{})
	assert.NilError(t, err)

	// repo is empty, try making a root key
	rootKeyA, err := getOrGenerateNotaryKey(notaryRepo, data.CanonicalRootRole)
	assert.NilError(t, err)
	assert.Check(t, rootKeyA != nil)

	// we should only have one newly generated key
	allKeys := notaryRepo.GetCryptoService().ListAllKeys()
	assert.Check(t, is.Len(allKeys, 1))
	assert.Check(t, notaryRepo.GetCryptoService().GetKey(rootKeyA.ID()) != nil)

	// this time we should get back the same key if we ask for another root key
	rootKeyB, err := getOrGenerateNotaryKey(notaryRepo, data.CanonicalRootRole)
	assert.NilError(t, err)
	assert.Check(t, rootKeyB != nil)

	// we should only have one newly generated key
	allKeys = notaryRepo.GetCryptoService().ListAllKeys()
	assert.Check(t, is.Len(allKeys, 1))
	assert.Check(t, notaryRepo.GetCryptoService().GetKey(rootKeyB.ID()) != nil)

	// The key we retrieved should be identical to the one we generated
	assert.Check(t, is.DeepEqual(rootKeyA.Public(), rootKeyB.Public()))

	// Now also try with a delegation key
	releasesKey, err := getOrGenerateNotaryKey(notaryRepo, data.RoleName(trust.ReleasesRole))
	assert.NilError(t, err)
	assert.Check(t, releasesKey != nil)

	// we should now have two keys
	allKeys = notaryRepo.GetCryptoService().ListAllKeys()
	assert.Check(t, is.Len(allKeys, 2))
	assert.Check(t, notaryRepo.GetCryptoService().GetKey(releasesKey.ID()) != nil)
	// The key we retrieved should be identical to the one we generated
	assert.Check(t, releasesKey != rootKeyA)
	assert.Check(t, releasesKey != rootKeyB)
}

// TestAddStageSigners verifies that addStagedSigner stages exactly four
// changelist entries, in deterministic order: key creation then path grant
// for targets/user, then the same pair for targets/releases.
func TestAddStageSigners(t *testing.T) {
	skip.If(t, runtime.GOOS == "windows", "FIXME: not supported currently")
	tmpDir, err := ioutil.TempDir("", "notary-test-")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpDir)

	notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{})
	assert.NilError(t, err)

	// stage targets/user
	userRole := data.RoleName("targets/user")
	userKey := data.NewPublicKey("algoA", []byte("a"))
	err = addStagedSigner(notaryRepo, userRole, []data.PublicKey{userKey})
	assert.NilError(t, err)
	// check the changelist for four total changes: two on targets/releases and two on targets/user
	cl, err := notaryRepo.GetChangelist()
	assert.NilError(t, err)
	changeList := cl.List()
	assert.Check(t, is.Len(changeList, 4))
	// ordering is deterministic:

	// first change is for targets/user key creation
	newSignerKeyChange := changeList[0]
	expectedJSON, err := json.Marshal(&changelist.TUFDelegation{
		NewThreshold: notary.MinThreshold,
		AddKeys:      data.KeyList([]data.PublicKey{userKey}),
	})
	assert.NilError(t, err)
	expectedChange := changelist.NewTUFChange(
		changelist.ActionCreate,
		userRole,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		expectedJSON,
	)
	assert.Check(t, is.DeepEqual(expectedChange, newSignerKeyChange))

	// second change is for targets/user getting all paths
	newSignerPathsChange := changeList[1]
	expectedJSON, err = json.Marshal(&changelist.TUFDelegation{
		AddPaths: []string{""},
	})
	assert.NilError(t, err)
	expectedChange = changelist.NewTUFChange(
		changelist.ActionCreate,
		userRole,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		expectedJSON,
	)
	assert.Check(t, is.DeepEqual(expectedChange, newSignerPathsChange))

	releasesRole := data.RoleName("targets/releases")

	// third change is for targets/releases key creation
	releasesKeyChange := changeList[2]
	expectedJSON, err = json.Marshal(&changelist.TUFDelegation{
		NewThreshold: notary.MinThreshold,
		AddKeys:      data.KeyList([]data.PublicKey{userKey}),
	})
	assert.NilError(t, err)
	expectedChange = changelist.NewTUFChange(
		changelist.ActionCreate,
		releasesRole,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		expectedJSON,
	)
	assert.Check(t, is.DeepEqual(expectedChange, releasesKeyChange))

	// fourth change is for targets/releases getting all paths
	releasesPathsChange := changeList[3]
	expectedJSON, err = json.Marshal(&changelist.TUFDelegation{
		AddPaths: []string{""},
	})
	assert.NilError(t, err)
	expectedChange = changelist.NewTUFChange(
		changelist.ActionCreate,
		releasesRole,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		expectedJSON,
	)
	assert.Check(t, is.DeepEqual(expectedChange, releasesPathsChange))
}
// TestGetSignedManifestHashAndSize checks that metadata lookup fails against
// an offline repository.
func TestGetSignedManifestHashAndSize(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "notary-test-")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpDir)

	notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{})
	assert.NilError(t, err)
	target := &client.Target{}
	target.Hashes, target.Length, err = getSignedManifestHashAndSize(notaryRepo, "test")
	assert.Error(t, err, "client is offline")
}

// TestGetReleasedTargetHashAndSize checks that only targets signed by a
// released role (targets/releases) yield a hash, and that unreleased
// signatures alone produce an error.
func TestGetReleasedTargetHashAndSize(t *testing.T) {
	oneReleasedTgt := []client.TargetSignedStruct{}
	// make and append 3 non-released signatures on the "unreleased" target
	unreleasedTgt := client.Target{Name: "unreleased", Hashes: data.Hashes{notary.SHA256: []byte("hash")}}
	for _, unreleasedRole := range []string{"targets/a", "targets/b", "targets/c"} {
		oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName(unreleasedRole), Target: unreleasedTgt})
	}
	_, _, err := getReleasedTargetHashAndSize(oneReleasedTgt, "unreleased")
	assert.Error(t, err, "No valid trust data for unreleased")
	// adding one released signature is enough to resolve the hash
	releasedTgt := client.Target{Name: "released", Hashes: data.Hashes{notary.SHA256: []byte("released-hash")}}
	oneReleasedTgt = append(oneReleasedTgt, client.TargetSignedStruct{Role: mockDelegationRoleWithName("targets/releases"), Target: releasedTgt})
	hash, _, _ := getReleasedTargetHashAndSize(oneReleasedTgt, "unreleased")
	assert.Check(t, is.DeepEqual(data.Hashes{notary.SHA256: []byte("released-hash")}, hash))

}

// TestCreateTarget checks the two failure modes of createTarget: a missing
// tag, and an offline repository for an otherwise valid tag.
func TestCreateTarget(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "notary-test-")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpDir)

	notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{})
	assert.NilError(t, err)
	_, err = createTarget(notaryRepo, "")
	assert.Error(t, err, "No tag specified")
	_, err = createTarget(notaryRepo, "1")
	assert.Error(t, err, "client is offline")
}

// TestGetExistingSignatureInfoForReleasedTag checks that signature lookup
// fails against an offline repository.
func TestGetExistingSignatureInfoForReleasedTag(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "notary-test-")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpDir)

	notaryRepo, err := client.NewFileCachedRepository(tmpDir, "gun", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{})
	assert.NilError(t, err)
	_, err = getExistingSignatureInfoForReleasedTag(notaryRepo, "test")
	assert.Error(t, err, "client is offline")
}

// TestPrettyPrintExistingSignatureInfo checks that signer names are printed
// sorted and comma-separated.
func TestPrettyPrintExistingSignatureInfo(t *testing.T) {
	buf := bytes.NewBuffer(nil)
	signers := []string{"Bob", "Alice", "Carol"}
	existingSig := trustTagRow{trustTagKey{"tagName", "abc123"}, signers}
	prettyPrintExistingSignatureInfo(buf, existingSig)

	assert.Check(t, is.Contains(buf.String(), "Existing signatures for tag tagName digest abc123 from:\nAlice, Bob, Carol"))
}

// TestSignCommandChangeListIsCleanedOnError checks that a failed sign leaves
// no staged changes behind in the local changelist.
func TestSignCommandChangeListIsCleanedOnError(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "docker-sign-test-")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpDir)

	config.SetDir(tmpDir)
	cli := test.NewFakeCli(&fakeClient{})
	cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository)
	cmd := newSignCommand(cli)
	cmd.SetArgs([]string{"ubuntu:latest"})
	cmd.SetOutput(ioutil.Discard)

	err = cmd.Execute()
	assert.Assert(t, err != nil)

	// the deferred clearChangeList must have emptied the changelist
	notaryRepo, err := client.NewFileCachedRepository(tmpDir, "docker.io/library/ubuntu", "https://localhost", nil, passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{})
	assert.NilError(t, err)
	cl, err := notaryRepo.GetChangelist()
	assert.NilError(t, err)
	assert.Check(t, is.Equal(len(cl.List()), 0))
}

// TestSignCommandLocalFlag checks that --local attempts a trusted push and
// surfaces the (expected) connection failure to the fake registry host.
func TestSignCommandLocalFlag(t *testing.T) {
	cli := test.NewFakeCli(&fakeClient{})
	cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository)
	cmd := newSignCommand(cli)
	cmd.SetArgs([]string{"--local", "reg-name.io/image:red"})
	cmd.SetOutput(ioutil.Discard)
	assert.ErrorContains(t, cmd.Execute(), "error contacting notary server: dial tcp: lookup reg-name.io")

}
"reg-name.io/image:red"}) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), "error contacting notary server: dial tcp: lookup reg-name.io") + +} diff --git a/cli/cli/command/trust/signer.go b/cli/cli/command/trust/signer.go new file mode 100644 index 00000000..807ad6c9 --- /dev/null +++ b/cli/cli/command/trust/signer.go @@ -0,0 +1,22 @@ +package trust + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// newTrustSignerCommand returns a cobra command for `trust signer` subcommands +func newTrustSignerCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "signer", + Short: "Manage entities who can sign Docker images", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newSignerAddCommand(dockerCli), + newSignerRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/trust/signer_add.go b/cli/cli/command/trust/signer_add.go new file mode 100644 index 00000000..304aeec9 --- /dev/null +++ b/cli/cli/command/trust/signer_add.go @@ -0,0 +1,141 @@ +package trust + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "regexp" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/opts" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" + tufutils "github.com/theupdateframework/notary/tuf/utils" +) + +type signerAddOptions struct { + keys opts.ListOpts + signer string + repos []string +} + +func newSignerAddCommand(dockerCli command.Cli) *cobra.Command { + var options signerAddOptions + cmd := &cobra.Command{ + Use: "add OPTIONS NAME REPOSITORY [REPOSITORY...] 
", + Short: "Add a signer", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.signer = args[0] + options.repos = args[1:] + return addSigner(dockerCli, options) + }, + } + flags := cmd.Flags() + options.keys = opts.NewListOpts(nil) + flags.Var(&options.keys, "key", "Path to the signer's public key file") + return cmd +} + +var validSignerName = regexp.MustCompile(`^[a-z0-9][a-z0-9\_\-]*$`).MatchString + +func addSigner(cli command.Cli, options signerAddOptions) error { + signerName := options.signer + if !validSignerName(signerName) { + return fmt.Errorf("signer name \"%s\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", signerName) + } + if signerName == "releases" { + return fmt.Errorf("releases is a reserved keyword, please use a different signer name") + } + + if options.keys.Len() == 0 { + return fmt.Errorf("path to a public key must be provided using the `--key` flag") + } + signerPubKeys, err := ingestPublicKeys(options.keys.GetAll()) + if err != nil { + return err + } + var errRepos []string + for _, repoName := range options.repos { + fmt.Fprintf(cli.Out(), "Adding signer \"%s\" to %s...\n", signerName, repoName) + if err := addSignerToRepo(cli, signerName, repoName, signerPubKeys); err != nil { + fmt.Fprintln(cli.Err(), err.Error()+"\n") + errRepos = append(errRepos, repoName) + } else { + fmt.Fprintf(cli.Out(), "Successfully added signer: %s to %s\n\n", signerName, repoName) + } + } + if len(errRepos) > 0 { + return fmt.Errorf("Failed to add signer to: %s", strings.Join(errRepos, ", ")) + } + return nil +} + +func addSignerToRepo(cli command.Cli, signerName string, repoName string, signerPubKeys []data.PublicKey) error { + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), repoName) + if err != nil { + return err + } + + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, 
trust.ActionsPushAndPull) + if err != nil { + return trust.NotaryError(imgRefAndAuth.Reference().Name(), err) + } + + if _, err = notaryRepo.ListTargets(); err != nil { + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + fmt.Fprintf(cli.Out(), "Initializing signed repository for %s...\n", repoName) + if err := getOrGenerateRootKeyAndInitRepo(notaryRepo); err != nil { + return trust.NotaryError(repoName, err) + } + fmt.Fprintf(cli.Out(), "Successfully initialized %q\n", repoName) + default: + return trust.NotaryError(repoName, err) + } + } + + newSignerRoleName := data.RoleName(path.Join(data.CanonicalTargetsRole.String(), signerName)) + + if err := addStagedSigner(notaryRepo, newSignerRoleName, signerPubKeys); err != nil { + return errors.Wrapf(err, "could not add signer to repo: %s", strings.TrimPrefix(newSignerRoleName.String(), "targets/")) + } + + return notaryRepo.Publish() +} + +func ingestPublicKeys(pubKeyPaths []string) ([]data.PublicKey, error) { + pubKeys := []data.PublicKey{} + for _, pubKeyPath := range pubKeyPaths { + // Read public key bytes from PEM file, limit to 1 KiB + pubKeyFile, err := os.OpenFile(pubKeyPath, os.O_RDONLY, 0666) + if err != nil { + return nil, errors.Wrap(err, "unable to read public key from file") + } + defer pubKeyFile.Close() + // limit to + l := io.LimitReader(pubKeyFile, 1<<20) + pubKeyBytes, err := ioutil.ReadAll(l) + if err != nil { + return nil, errors.Wrap(err, "unable to read public key from file") + } + + // Parse PEM bytes into type PublicKey + pubKey, err := tufutils.ParsePEMPublicKey(pubKeyBytes) + if err != nil { + return nil, errors.Wrapf(err, "could not parse public key from file: %s", pubKeyPath) + } + pubKeys = append(pubKeys, pubKey) + } + return pubKeys, nil +} diff --git a/cli/cli/command/trust/signer_add_test.go b/cli/cli/command/trust/signer_add_test.go new file mode 100644 index 00000000..64121e29 --- /dev/null +++ b/cli/cli/command/trust/signer_add_test.go @@ -0,0 
+1,147 @@ +package trust + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/internal/test" + notaryfake "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustSignerAddErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args", + expectedError: "requires at least 2 argument", + }, + { + name: "no-key", + args: []string{"foo", "bar"}, + expectedError: "path to a public key must be provided using the `--key` flag", + }, + { + name: "reserved-releases-signer-add", + args: []string{"releases", "my-image", "--key", "/path/to/key"}, + expectedError: "releases is a reserved keyword, please use a different signer name", + }, + { + name: "disallowed-chars", + args: []string{"ali/ce", "my-image", "--key", "/path/to/key"}, + expectedError: "signer name \"ali/ce\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", + }, + { + name: "no-upper-case", + args: []string{"Alice", "my-image", "--key", "/path/to/key"}, + expectedError: "signer name \"Alice\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", + }, + { + name: "start-with-letter", + args: []string{"_alice", "my-image", "--key", "/path/to/key"}, + expectedError: "signer name \"_alice\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", + }, + } + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd := newSignerAddCommand(cli) + cmd.SetArgs(tc.args) + 
cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestSignerAddCommandNoTargetsKey(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + tmpfile, err := ioutil.TempFile("", "pemfile") + assert.NilError(t, err) + defer os.Remove(tmpfile.Name()) + + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd := newSignerAddCommand(cli) + cmd.SetArgs([]string{"--key", tmpfile.Name(), "alice", "alpine", "linuxkit/alpine"}) + + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), fmt.Sprintf("could not parse public key from file: %s: no valid public key found", tmpfile.Name())) +} + +func TestSignerAddCommandBadKeyPath(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetEmptyTargetsNotaryRepository) + cmd := newSignerAddCommand(cli) + cmd.SetArgs([]string{"--key", "/path/to/key.pem", "alice", "alpine"}) + + cmd.SetOutput(ioutil.Discard) + expectedError := "unable to read public key from file: open /path/to/key.pem: no such file or directory" + if runtime.GOOS == "windows" { + expectedError = "unable to read public key from file: open /path/to/key.pem: The system cannot find the path specified." 
+ } + assert.Error(t, cmd.Execute(), expectedError) +} + +func TestSignerAddCommandInvalidRepoName(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-sign-test-") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + config.SetDir(tmpDir) + + pubKeyDir, err := ioutil.TempDir("", "key-load-test-pubkey-") + assert.NilError(t, err) + defer os.RemoveAll(pubKeyDir) + pubKeyFilepath := filepath.Join(pubKeyDir, "pubkey.pem") + assert.NilError(t, ioutil.WriteFile(pubKeyFilepath, pubKeyFixture, notary.PrivNoExecPerms)) + + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetUninitializedNotaryRepository) + cmd := newSignerAddCommand(cli) + imageName := "870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd" + cmd.SetArgs([]string{"--key", pubKeyFilepath, "alice", imageName}) + + cmd.SetOutput(ioutil.Discard) + assert.Error(t, cmd.Execute(), "Failed to add signer to: 870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd") + expectedErr := fmt.Sprintf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings\n\n", imageName) + + assert.Check(t, is.Equal(expectedErr, cli.ErrBuffer().String())) +} + +func TestIngestPublicKeys(t *testing.T) { + // Call with a bad path + _, err := ingestPublicKeys([]string{"foo", "bar"}) + expectedError := "unable to read public key from file: open foo: no such file or directory" + if runtime.GOOS == "windows" { + expectedError = "unable to read public key from file: open foo: The system cannot find the file specified." 
+ } + assert.Error(t, err, expectedError) + // Call with real file path + tmpfile, err := ioutil.TempFile("", "pemfile") + assert.NilError(t, err) + defer os.Remove(tmpfile.Name()) + _, err = ingestPublicKeys([]string{tmpfile.Name()}) + assert.Error(t, err, fmt.Sprintf("could not parse public key from file: %s: no valid public key found", tmpfile.Name())) +} diff --git a/cli/cli/command/trust/signer_remove.go b/cli/cli/command/trust/signer_remove.go new file mode 100644 index 00000000..6945f54e --- /dev/null +++ b/cli/cli/command/trust/signer_remove.go @@ -0,0 +1,143 @@ +package trust + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/trust" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" +) + +type signerRemoveOptions struct { + signer string + repos []string + forceYes bool +} + +func newSignerRemoveCommand(dockerCli command.Cli) *cobra.Command { + options := signerRemoveOptions{} + cmd := &cobra.Command{ + Use: "remove [OPTIONS] NAME REPOSITORY [REPOSITORY...]", + Short: "Remove a signer", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.signer = args[0] + options.repos = args[1:] + return removeSigner(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&options.forceYes, "force", "f", false, "Do not prompt for confirmation before removing the most recent signer") + return cmd +} + +func removeSigner(cli command.Cli, options signerRemoveOptions) error { + var errRepos []string + for _, repo := range options.repos { + fmt.Fprintf(cli.Out(), "Removing signer \"%s\" from %s...\n", options.signer, repo) + if _, err := removeSingleSigner(cli, repo, options.signer, options.forceYes); err != nil { + fmt.Fprintln(cli.Err(), err.Error()+"\n") + errRepos = 
append(errRepos, repo) + } + } + if len(errRepos) > 0 { + return fmt.Errorf("Error removing signer from: %s", strings.Join(errRepos, ", ")) + } + return nil +} + +func isLastSignerForReleases(roleWithSig data.Role, allRoles []client.RoleWithSignatures) (bool, error) { + var releasesRoleWithSigs client.RoleWithSignatures + for _, role := range allRoles { + if role.Name == releasesRoleTUFName { + releasesRoleWithSigs = role + break + } + } + counter := len(releasesRoleWithSigs.Signatures) + if counter == 0 { + return false, fmt.Errorf("All signed tags are currently revoked, use docker trust sign to fix") + } + for _, signature := range releasesRoleWithSigs.Signatures { + for _, key := range roleWithSig.KeyIDs { + if signature.KeyID == key { + counter-- + } + } + } + return counter < releasesRoleWithSigs.Threshold, nil +} + +// removeSingleSigner attempts to remove a single signer and returns whether signer removal happened. +// The signer not being removed doesn't necessarily raise an error e.g. user choosing "No" when prompted for confirmation. 
+// nolint: unparam +func removeSingleSigner(cli command.Cli, repoName, signerName string, forceYes bool) (bool, error) { + ctx := context.Background() + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), repoName) + if err != nil { + return false, err + } + + signerDelegation := data.RoleName("targets/" + signerName) + if signerDelegation == releasesRoleTUFName { + return false, fmt.Errorf("releases is a reserved keyword and cannot be removed") + } + notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull) + if err != nil { + return false, trust.NotaryError(imgRefAndAuth.Reference().Name(), err) + } + delegationRoles, err := notaryRepo.GetDelegationRoles() + if err != nil { + return false, errors.Wrapf(err, "error retrieving signers for %s", repoName) + } + var role data.Role + for _, delRole := range delegationRoles { + if delRole.Name == signerDelegation { + role = delRole + break + } + } + if role.Name == "" { + return false, fmt.Errorf("No signer %s for repository %s", signerName, repoName) + } + allRoles, err := notaryRepo.ListRoles() + if err != nil { + return false, err + } + if ok, err := isLastSignerForReleases(role, allRoles); ok && !forceYes { + removeSigner := command.PromptForConfirmation(os.Stdin, cli.Out(), fmt.Sprintf("The signer \"%s\" signed the last released version of %s. "+ + "Removing this signer will make %s unpullable. 
"+ + "Are you sure you want to continue?", + signerName, repoName, repoName, + )) + + if !removeSigner { + fmt.Fprintf(cli.Out(), "\nAborting action.\n") + return false, nil + } + } else if err != nil { + return false, err + } + if err = notaryRepo.RemoveDelegationKeys(releasesRoleTUFName, role.KeyIDs); err != nil { + return false, err + } + if err = notaryRepo.RemoveDelegationRole(signerDelegation); err != nil { + return false, err + } + + if err = notaryRepo.Publish(); err != nil { + return false, err + } + + fmt.Fprintf(cli.Out(), "Successfully removed %s from %s\n\n", signerName, repoName) + + return true, nil +} diff --git a/cli/cli/command/trust/signer_remove_test.go b/cli/cli/command/trust/signer_remove_test.go new file mode 100644 index 00000000..0feec874 --- /dev/null +++ b/cli/cli/command/trust/signer_remove_test.go @@ -0,0 +1,128 @@ +package trust + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + notaryfake "github.com/docker/cli/internal/test/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/tuf/data" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestTrustSignerRemoveErrors(t *testing.T) { + testCases := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-enough-args-0", + expectedError: "requires at least 2 arguments", + }, + { + name: "not-enough-args-1", + args: []string{"user"}, + expectedError: "requires at least 2 arguments", + }, + } + for _, tc := range testCases { + cmd := newSignerRemoveCommand( + test.NewFakeCli(&fakeClient{})) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } + testCasesWithOutput := []struct { + name string + args []string + expectedError string + }{ + { + name: "not-an-image", + args: []string{"user", "notanimage"}, + expectedError: "error retrieving signers for notanimage", + }, + { + name: "sha-reference", + args: 
[]string{"user", "870d292919d01a0af7e7f056271dc78792c05f55f49b9b9012b6d89725bd9abd"}, + expectedError: "invalid repository name", + }, + { + name: "invalid-img-reference", + args: []string{"user", "ALPINE"}, + expectedError: "invalid reference format", + }, + } + for _, tc := range testCasesWithOutput { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetOfflineNotaryRepository) + cmd := newSignerRemoveCommand(cli) + cmd.SetArgs(tc.args) + cmd.SetOutput(ioutil.Discard) + cmd.Execute() + assert.Check(t, is.Contains(cli.ErrBuffer().String(), tc.expectedError)) + } + +} + +func TestRemoveSingleSigner(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + removed, err := removeSingleSigner(cli, "signed-repo", "test", true) + assert.Error(t, err, "No signer test for repository signed-repo") + assert.Equal(t, removed, false, "No signer should be removed") + + removed, err = removeSingleSigner(cli, "signed-repo", "releases", true) + assert.Error(t, err, "releases is a reserved keyword and cannot be removed") + assert.Equal(t, removed, false, "No signer should be removed") +} + +func TestRemoveMultipleSigners(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + err := removeSigner(cli, signerRemoveOptions{signer: "test", repos: []string{"signed-repo", "signed-repo"}, forceYes: true}) + assert.Error(t, err, "Error removing signer from: signed-repo, signed-repo") + assert.Check(t, is.Contains(cli.ErrBuffer().String(), + "No signer test for repository signed-repo")) + assert.Check(t, is.Contains(cli.OutBuffer().String(), "Removing signer \"test\" from signed-repo...\n")) +} +func TestRemoveLastSignerWarning(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{}) + cli.SetNotaryClient(notaryfake.GetLoadedNotaryRepository) + + err := removeSigner(cli, signerRemoveOptions{signer: "alice", repos: []string{"signed-repo"}, forceYes: 
false}) + assert.NilError(t, err) + assert.Check(t, is.Contains(cli.OutBuffer().String(), + "The signer \"alice\" signed the last released version of signed-repo. "+ + "Removing this signer will make signed-repo unpullable. "+ + "Are you sure you want to continue? [y/N]")) +} + +func TestIsLastSignerForReleases(t *testing.T) { + role := data.Role{} + releaserole := client.RoleWithSignatures{} + releaserole.Name = releasesRoleTUFName + releaserole.Threshold = 1 + allrole := []client.RoleWithSignatures{releaserole} + lastsigner, _ := isLastSignerForReleases(role, allrole) + assert.Check(t, is.Equal(false, lastsigner)) + + role.KeyIDs = []string{"deadbeef"} + sig := data.Signature{} + sig.KeyID = "deadbeef" + releaserole.Signatures = []data.Signature{sig} + releaserole.Threshold = 1 + allrole = []client.RoleWithSignatures{releaserole} + lastsigner, _ = isLastSignerForReleases(role, allrole) + assert.Check(t, is.Equal(true, lastsigner)) + + sig.KeyID = "8badf00d" + releaserole.Signatures = []data.Signature{sig} + releaserole.Threshold = 1 + allrole = []client.RoleWithSignatures{releaserole} + lastsigner, _ = isLastSignerForReleases(role, allrole) + assert.Check(t, is.Equal(false, lastsigner)) +} diff --git a/cli/cli/command/trust/testdata/trust-inspect-empty-repo.golden b/cli/cli/command/trust/testdata/trust-inspect-empty-repo.golden new file mode 100644 index 00000000..3aba7f3b --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-empty-repo.golden @@ -0,0 +1,25 @@ +[ + { + "Name": "reg/img:unsigned-tag", + "SignedTags": [], + "Signers": [], + "AdministrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-full-repo-no-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-full-repo-no-signers.golden new file mode 100644 index 00000000..23158f75 --- /dev/null +++ 
b/cli/cli/command/trust/testdata/trust-inspect-full-repo-no-signers.golden @@ -0,0 +1,33 @@ +[ + { + "Name": "signed-repo", + "SignedTags": [ + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + } + ], + "Signers": [], + "AdministrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-full-repo-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-full-repo-with-signers.golden new file mode 100644 index 00000000..4901a7cc --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-full-repo-with-signers.golden @@ -0,0 +1,65 @@ +[ + { + "Name": "signed-repo", + "SignedTags": [ + { + "SignedTag": "blue", + "Digest": "626c75652d646967657374", + "Signers": [ + "alice" + ] + }, + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + }, + { + "SignedTag": "red", + "Digest": "7265642d646967657374", + "Signers": [ + "alice", + "bob" + ] + } + ], + "Signers": [ + { + "Name": "bob", + "Keys": [ + { + "ID": "B" + } + ] + }, + { + "Name": "alice", + "Keys": [ + { + "ID": "A" + } + ] + } + ], + "AdministrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-multiple-repos-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-multiple-repos-with-signers.golden new file mode 100644 index 00000000..1958bb7e --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-multiple-repos-with-signers.golden @@ -0,0 +1,128 @@ +[ + { + "Name": "signed-repo", + "SignedTags": [ + { + "SignedTag": "blue", + "Digest": "626c75652d646967657374", + "Signers": [ + "alice" + ] + }, + { + "SignedTag": "green", + "Digest": 
"677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + }, + { + "SignedTag": "red", + "Digest": "7265642d646967657374", + "Signers": [ + "alice", + "bob" + ] + } + ], + "Signers": [ + { + "Name": "bob", + "Keys": [ + { + "ID": "B" + } + ] + }, + { + "Name": "alice", + "Keys": [ + { + "ID": "A" + } + ] + } + ], + "AdministrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + }, + { + "Name": "signed-repo", + "SignedTags": [ + { + "SignedTag": "blue", + "Digest": "626c75652d646967657374", + "Signers": [ + "alice" + ] + }, + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + }, + { + "SignedTag": "red", + "Digest": "7265642d646967657374", + "Signers": [ + "alice", + "bob" + ] + } + ], + "Signers": [ + { + "Name": "bob", + "Keys": [ + { + "ID": "B" + } + ] + }, + { + "Name": "alice", + "Keys": [ + { + "ID": "A" + } + ] + } + ], + "AdministrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-one-tag-no-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-one-tag-no-signers.golden new file mode 100644 index 00000000..0fcefebb --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-one-tag-no-signers.golden @@ -0,0 +1,33 @@ +[ + { + "Name": "signed-repo:green", + "SignedTags": [ + { + "SignedTag": "green", + "Digest": "677265656e2d646967657374", + "Signers": [ + "Repo Admin" + ] + } + ], + "Signers": [], + "AdministrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-no-signers.golden 
b/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-no-signers.golden new file mode 100644 index 00000000..9f3ada08 --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-no-signers.golden @@ -0,0 +1,10 @@ + +Signatures for signed-repo + +SIGNED TAG DIGEST SIGNERS +green 677265656e2d646967657374 (Repo Admin) + +Administrative keys for signed-repo + + Repository Key: targetsID + Root Key: rootID diff --git a/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-with-signers.golden new file mode 100644 index 00000000..49b1efd2 --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-pretty-full-repo-with-signers.golden @@ -0,0 +1,18 @@ + +Signatures for signed-repo + +SIGNED TAG DIGEST SIGNERS +blue 626c75652d646967657374 alice +green 677265656e2d646967657374 (Repo Admin) +red 7265642d646967657374 alice, bob + +List of signers and their keys for signed-repo + +SIGNER KEYS +alice A +bob B + +Administrative keys for signed-repo + + Repository Key: targetsID + Root Key: rootID diff --git a/cli/cli/command/trust/testdata/trust-inspect-pretty-one-tag-no-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-pretty-one-tag-no-signers.golden new file mode 100644 index 00000000..b5857289 --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-pretty-one-tag-no-signers.golden @@ -0,0 +1,10 @@ + +Signatures for signed-repo:green + +SIGNED TAG DIGEST SIGNERS +green 677265656e2d646967657374 (Repo Admin) + +Administrative keys for signed-repo:green + + Repository Key: targetsID + Root Key: rootID diff --git a/cli/cli/command/trust/testdata/trust-inspect-pretty-unsigned-tag-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-pretty-unsigned-tag-with-signers.golden new file mode 100644 index 00000000..302a6b5e --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-pretty-unsigned-tag-with-signers.golden 
@@ -0,0 +1,14 @@ + +No signatures for signed-repo:unsigned + + +List of signers and their keys for signed-repo:unsigned + +SIGNER KEYS +alice A +bob B + +Administrative keys for signed-repo:unsigned + + Repository Key: targetsID + Root Key: rootID diff --git a/cli/cli/command/trust/testdata/trust-inspect-uninitialized.golden b/cli/cli/command/trust/testdata/trust-inspect-uninitialized.golden new file mode 100644 index 00000000..fe51488c --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-uninitialized.golden @@ -0,0 +1 @@ +[] diff --git a/cli/cli/command/trust/testdata/trust-inspect-unsigned-tag-with-signers.golden b/cli/cli/command/trust/testdata/trust-inspect-unsigned-tag-with-signers.golden new file mode 100644 index 00000000..82a3282f --- /dev/null +++ b/cli/cli/command/trust/testdata/trust-inspect-unsigned-tag-with-signers.golden @@ -0,0 +1,42 @@ +[ + { + "Name": "signed-repo:unsigned", + "SignedTags": [], + "Signers": [ + { + "Name": "bob", + "Keys": [ + { + "ID": "B" + } + ] + }, + { + "Name": "alice", + "Keys": [ + { + "ID": "A" + } + ] + } + ], + "AdministrativeKeys": [ + { + "Name": "Root", + "Keys": [ + { + "ID": "rootID" + } + ] + }, + { + "Name": "Repository", + "Keys": [ + { + "ID": "targetsID" + } + ] + } + ] + } +] diff --git a/cli/cli/command/utils.go b/cli/cli/command/utils.go new file mode 100644 index 00000000..21e702eb --- /dev/null +++ b/cli/cli/command/utils.go @@ -0,0 +1,194 @@ +package command + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/cli/cli/streams" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +// CopyToFile writes the content of the reader to the specified file +func CopyToFile(outfile string, r io.Reader) error { + // We use sequential file access here to avoid depleting the standby list + // on Windows. 
On Linux, this is a call directly to ioutil.TempFile + tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// capitalizeFirst capitalizes the first character of string +func capitalizeFirst(s string) string { + switch l := len(s); l { + case 0: + return s + case 1: + return strings.ToLower(s) + default: + return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) + } +} + +// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. +func PrettyPrint(i interface{}) string { + switch t := i.(type) { + case nil: + return "None" + case string: + return capitalizeFirst(t) + default: + return capitalizeFirst(fmt.Sprintf("%s", t)) + } +} + +// PromptForConfirmation requests and checks confirmation from user. +// This will display the provided message followed by ' [y/N] '. If +// the user input 'y' or 'Y' it returns true other false. If no +// message is provided "Are you sure you want to proceed? [y/N] " +// will be used instead. +func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool { + if message == "" { + message = "Are you sure you want to proceed?" + } + message += " [y/N] " + + fmt.Fprintf(outs, message) + + // On Windows, force the use of the regular OS stdin stream. 
+ if runtime.GOOS == "windows" { + ins = streams.NewIn(os.Stdin) + } + + reader := bufio.NewReader(ins) + answer, _, _ := reader.ReadLine() + return strings.ToLower(string(answer)) == "y" +} + +// PruneFilters returns consolidated prune filters obtained from config.json and cli +func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args { + if dockerCli.ConfigFile() == nil { + return pruneFilters + } + for _, f := range dockerCli.ConfigFile().PruneFilters { + parts := strings.SplitN(f, "=", 2) + if len(parts) != 2 { + continue + } + if parts[0] == "label" { + // CLI label filter supersede config.json. + // If CLI label filter conflict with config.json, + // skip adding label! filter in config.json. + if pruneFilters.Contains("label!") && pruneFilters.ExactMatch("label!", parts[1]) { + continue + } + } else if parts[0] == "label!" { + // CLI label! filter supersede config.json. + // If CLI label! filter conflict with config.json, + // skip adding label filter in config.json. + if pruneFilters.Contains("label") && pruneFilters.ExactMatch("label", parts[1]) { + continue + } + } + pruneFilters.Add(parts[0], parts[1]) + } + + return pruneFilters +} + +// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later. +func AddPlatformFlag(flags *pflag.FlagSet, target *string) { + flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable") + flags.SetAnnotation("platform", "version", []string{"1.32"}) + flags.SetAnnotation("platform", "experimental", nil) +} + +// ValidateOutputPath validates the output paths of the `export` and `save` commands. +func ValidateOutputPath(path string) error { + dir := filepath.Dir(path) + if dir != "" && dir != "." 
{ + if _, err := os.Stat(dir); os.IsNotExist(err) { + return errors.Errorf("invalid output path: directory %q does not exist", dir) + } + } + // check whether `path` points to a regular file + // (if the path exists and doesn't point to a directory) + if fileInfo, err := os.Stat(path); !os.IsNotExist(err) { + if fileInfo.Mode().IsDir() || fileInfo.Mode().IsRegular() { + return nil + } + + if err := ValidateOutputPathFileMode(fileInfo.Mode()); err != nil { + return errors.Wrapf(err, fmt.Sprintf("invalid output path: %q must be a directory or a regular file", path)) + } + } + return nil +} + +// ValidateOutputPathFileMode validates the output paths of the `cp` command and serves as a +// helper to `ValidateOutputPath` +func ValidateOutputPathFileMode(fileMode os.FileMode) error { + switch { + case fileMode&os.ModeDevice != 0: + return errors.New("got a device") + case fileMode&os.ModeIrregular != 0: + return errors.New("got an irregular file") + } + return nil +} + +func stringSliceIndex(s, subs []string) int { + j := 0 + if len(subs) > 0 { + for i, x := range s { + if j < len(subs) && subs[j] == x { + j++ + } else { + j = 0 + } + if len(subs) == j { + return i + 1 - j + } + } + } + return -1 +} + +// StringSliceReplaceAt replaces the sub-slice old, with the sub-slice new, in the string +// slice s, returning a new slice and a boolean indicating if the replacement happened. +// requireIdx is the index at which old needs to be found at (or -1 to disregard that). +func StringSliceReplaceAt(s, old, new []string, requireIndex int) ([]string, bool) { + idx := stringSliceIndex(s, old) + if (requireIndex != -1 && requireIndex != idx) || idx == -1 { + return s, false + } + out := append([]string{}, s[:idx]...) + out = append(out, new...) + out = append(out, s[idx+len(old):]...) 
+ return out, true +} diff --git a/cli/cli/command/utils_test.go b/cli/cli/command/utils_test.go new file mode 100644 index 00000000..0452a7c4 --- /dev/null +++ b/cli/cli/command/utils_test.go @@ -0,0 +1,33 @@ +package command + +import ( + "testing" + + "gotest.tools/assert" +) + +func TestStringSliceReplaceAt(t *testing.T) { + out, ok := StringSliceReplaceAt([]string{"abc", "foo", "bar", "bax"}, []string{"foo", "bar"}, []string{"baz"}, -1) + assert.Assert(t, ok) + assert.DeepEqual(t, []string{"abc", "baz", "bax"}, out) + + out, ok = StringSliceReplaceAt([]string{"foo"}, []string{"foo", "bar"}, []string{"baz"}, -1) + assert.Assert(t, !ok) + assert.DeepEqual(t, []string{"foo"}, out) + + out, ok = StringSliceReplaceAt([]string{"abc", "foo", "bar", "bax"}, []string{"foo", "bar"}, []string{"baz"}, 0) + assert.Assert(t, !ok) + assert.DeepEqual(t, []string{"abc", "foo", "bar", "bax"}, out) + + out, ok = StringSliceReplaceAt([]string{"foo", "bar", "bax"}, []string{"foo", "bar"}, []string{"baz"}, 0) + assert.Assert(t, ok) + assert.DeepEqual(t, []string{"baz", "bax"}, out) + + out, ok = StringSliceReplaceAt([]string{"abc", "foo", "bar", "baz"}, []string{"foo", "bar"}, nil, -1) + assert.Assert(t, ok) + assert.DeepEqual(t, []string{"abc", "baz"}, out) + + out, ok = StringSliceReplaceAt([]string{"foo"}, nil, []string{"baz"}, -1) + assert.Assert(t, !ok) + assert.DeepEqual(t, []string{"foo"}, out) +} diff --git a/cli/cli/command/volume/client_test.go b/cli/cli/command/volume/client_test.go new file mode 100644 index 00000000..644cad60 --- /dev/null +++ b/cli/cli/command/volume/client_test.go @@ -0,0 +1,54 @@ +package volume + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/client" +) + +type fakeClient struct { + client.Client + volumeCreateFunc func(volumetypes.VolumeCreateBody) (types.Volume, error) + volumeInspectFunc 
func(volumeID string) (types.Volume, error) + volumeListFunc func(filter filters.Args) (volumetypes.VolumeListOKBody, error) + volumeRemoveFunc func(volumeID string, force bool) error + volumePruneFunc func(filter filters.Args) (types.VolumesPruneReport, error) +} + +func (c *fakeClient) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { + if c.volumeCreateFunc != nil { + return c.volumeCreateFunc(options) + } + return types.Volume{}, nil +} + +func (c *fakeClient) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + if c.volumeInspectFunc != nil { + return c.volumeInspectFunc(volumeID) + } + return types.Volume{}, nil +} + +func (c *fakeClient) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + if c.volumeListFunc != nil { + return c.volumeListFunc(filter) + } + return volumetypes.VolumeListOKBody{}, nil +} + +func (c *fakeClient) VolumesPrune(ctx context.Context, filter filters.Args) (types.VolumesPruneReport, error) { + if c.volumePruneFunc != nil { + return c.volumePruneFunc(filter) + } + return types.VolumesPruneReport{}, nil +} + +func (c *fakeClient) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + if c.volumeRemoveFunc != nil { + return c.volumeRemoveFunc(volumeID, force) + } + return nil +} diff --git a/cli/cli/command/volume/cmd.go b/cli/cli/command/volume/cmd.go new file mode 100644 index 00000000..b2a552ae --- /dev/null +++ b/cli/cli/command/volume/cmd.go @@ -0,0 +1,26 @@ +package volume + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewVolumeCommand returns a cobra command for `volume` subcommands +func NewVolumeCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "volume COMMAND", + Short: "Manage volumes", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + Annotations: map[string]string{"version": "1.21"}, 
+ } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/cli/cli/command/volume/create.go b/cli/cli/command/volume/create.go new file mode 100644 index 00000000..125da443 --- /dev/null +++ b/cli/cli/command/volume/create.go @@ -0,0 +1,69 @@ +package volume + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts +} + +func newCreateCommand(dockerCli command.Cli) *cobra.Command { + options := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(opts.ValidateLabel), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] [VOLUME]", + Short: "Create a volume", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + if options.name != "" { + return errors.Errorf("Conflicting options: either specify --name or provide positional arg, not both\n") + } + options.name = args[0] + } + return runCreate(dockerCli, options) + }, + } + flags := cmd.Flags() + flags.StringVarP(&options.driver, "driver", "d", "local", "Specify volume driver name") + flags.StringVar(&options.name, "name", "", "Specify volume name") + flags.Lookup("name").Hidden = true + flags.VarP(&options.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&options.labels, "label", "Set metadata for a volume") + + return cmd +} + +func runCreate(dockerCli command.Cli, options createOptions) error { + client := dockerCli.Client() + + volReq := volumetypes.VolumeCreateBody{ + Driver: options.driver, + DriverOpts: options.driverOpts.GetAll(), + Name: options.name, + Labels: 
opts.ConvertKVStringsToMap(options.labels.GetAll()), + } + + vol, err := client.VolumeCreate(context.Background(), volReq) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) + return nil +} diff --git a/cli/cli/command/volume/create_test.go b/cli/cli/command/volume/create_test.go new file mode 100644 index 00000000..a0646ed1 --- /dev/null +++ b/cli/cli/command/volume/create_test.go @@ -0,0 +1,126 @@ +package volume + +import ( + "io/ioutil" + "reflect" + "strings" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestVolumeCreateErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + volumeCreateFunc func(volumetypes.VolumeCreateBody) (types.Volume, error) + expectedError string + }{ + { + args: []string{"volumeName"}, + flags: map[string]string{ + "name": "volumeName", + }, + expectedError: "Conflicting options: either specify --name or provide positional arg, not both", + }, + { + args: []string{"too", "many"}, + expectedError: "requires at most 1 argument", + }, + { + volumeCreateFunc: func(createBody volumetypes.VolumeCreateBody) (types.Volume, error) { + return types.Volume{}, errors.Errorf("error creating volume") + }, + expectedError: "error creating volume", + }, + } + for _, tc := range testCases { + cmd := newCreateCommand( + test.NewFakeCli(&fakeClient{ + volumeCreateFunc: tc.volumeCreateFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestVolumeCreateWithName(t *testing.T) { + name := "foo" + cli := test.NewFakeCli(&fakeClient{ + volumeCreateFunc: func(body volumetypes.VolumeCreateBody) (types.Volume, error) { + if body.Name != 
name { + return types.Volume{}, errors.Errorf("expected name %q, got %q", name, body.Name) + } + return types.Volume{ + Name: body.Name, + }, nil + }, + }) + + buf := cli.OutBuffer() + + // Test by flags + cmd := newCreateCommand(cli) + cmd.Flags().Set("name", name) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(name, strings.TrimSpace(buf.String()))) + + // Then by args + buf.Reset() + cmd = newCreateCommand(cli) + cmd.SetArgs([]string{name}) + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(name, strings.TrimSpace(buf.String()))) +} + +func TestVolumeCreateWithFlags(t *testing.T) { + expectedDriver := "foo" + expectedOpts := map[string]string{ + "bar": "1", + "baz": "baz", + } + expectedLabels := map[string]string{ + "lbl1": "v1", + "lbl2": "v2", + } + name := "banana" + + cli := test.NewFakeCli(&fakeClient{ + volumeCreateFunc: func(body volumetypes.VolumeCreateBody) (types.Volume, error) { + if body.Name != "" { + return types.Volume{}, errors.Errorf("expected empty name, got %q", body.Name) + } + if body.Driver != expectedDriver { + return types.Volume{}, errors.Errorf("expected driver %q, got %q", expectedDriver, body.Driver) + } + if !reflect.DeepEqual(body.DriverOpts, expectedOpts) { + return types.Volume{}, errors.Errorf("expected drivers opts %v, got %v", expectedOpts, body.DriverOpts) + } + if !reflect.DeepEqual(body.Labels, expectedLabels) { + return types.Volume{}, errors.Errorf("expected labels %v, got %v", expectedLabels, body.Labels) + } + return types.Volume{ + Name: name, + }, nil + }, + }) + + cmd := newCreateCommand(cli) + cmd.Flags().Set("driver", "foo") + cmd.Flags().Set("opt", "bar=1") + cmd.Flags().Set("opt", "baz=baz") + cmd.Flags().Set("label", "lbl1=v1") + cmd.Flags().Set("label", "lbl2=v2") + assert.NilError(t, cmd.Execute()) + assert.Check(t, is.Equal(name, strings.TrimSpace(cli.OutBuffer().String()))) +} diff --git a/cli/cli/command/volume/inspect.go b/cli/cli/command/volume/inspect.go new file mode 100644 
index 00000000..52cfb0f0 --- /dev/null +++ b/cli/cli/command/volume/inspect.go @@ -0,0 +1,46 @@ +package volume + +import ( + "context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] VOLUME [VOLUME...]", + Short: "Display detailed information on one or more volumes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getVolFunc := func(name string) (interface{}, []byte, error) { + i, err := client.VolumeInspect(ctx, name) + return i, nil, err + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) +} diff --git a/cli/cli/command/volume/inspect_test.go b/cli/cli/command/volume/inspect_test.go new file mode 100644 index 00000000..759042a5 --- /dev/null +++ b/cli/cli/command/volume/inspect_test.go @@ -0,0 +1,141 @@ +package volume + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestVolumeInspectErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + volumeInspectFunc func(volumeID string) (types.Volume, error) + expectedError string + }{ + { + expectedError: "requires at least 1 argument", + }, + { + args: []string{"foo"}, + volumeInspectFunc: func(volumeID string) (types.Volume, error) { + return types.Volume{}, errors.Errorf("error while inspecting the volume") + }, + expectedError: "error while inspecting the volume", + }, + { + args: []string{"foo"}, + flags: map[string]string{ + "format": "{{invalid format}}", + }, + expectedError: "Template parsing error", + }, + { + args: []string{"foo", "bar"}, + volumeInspectFunc: func(volumeID string) (types.Volume, error) { + if volumeID == "foo" { + return types.Volume{ + Name: "foo", + }, nil + } + return types.Volume{}, errors.Errorf("error while inspecting the volume") + }, + expectedError: "error while inspecting the volume", + }, + } + for _, tc := range testCases { + cmd := newInspectCommand( + test.NewFakeCli(&fakeClient{ + volumeInspectFunc: tc.volumeInspectFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestVolumeInspectWithoutFormat(t *testing.T) { + testCases := []struct { + name string + args []string + volumeInspectFunc func(volumeID string) (types.Volume, error) + }{ + { + name: "single-volume", + args: []string{"foo"}, + volumeInspectFunc: func(volumeID string) (types.Volume, error) { + if volumeID != "foo" { + return types.Volume{}, errors.Errorf("Invalid volumeID, expected %s, got %s", "foo", volumeID) + } + return *Volume(), nil + }, + }, + { + name: "multiple-volume-with-labels", + args: []string{"foo", "bar"}, + volumeInspectFunc: func(volumeID string) (types.Volume, 
error) { + return *Volume(VolumeName(volumeID), VolumeLabels(map[string]string{ + "foo": "bar", + })), nil + }, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + volumeInspectFunc: tc.volumeInspectFunc, + }) + cmd := newInspectCommand(cli) + cmd.SetArgs(tc.args) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-inspect-without-format.%s.golden", tc.name)) + } +} + +func TestVolumeInspectWithFormat(t *testing.T) { + volumeInspectFunc := func(volumeID string) (types.Volume, error) { + return *Volume(VolumeLabels(map[string]string{ + "foo": "bar", + })), nil + } + testCases := []struct { + name string + format string + args []string + volumeInspectFunc func(volumeID string) (types.Volume, error) + }{ + { + name: "simple-template", + format: "{{.Name}}", + args: []string{"foo"}, + volumeInspectFunc: volumeInspectFunc, + }, + { + name: "json-template", + format: "{{json .Labels}}", + args: []string{"foo"}, + volumeInspectFunc: volumeInspectFunc, + }, + } + for _, tc := range testCases { + cli := test.NewFakeCli(&fakeClient{ + volumeInspectFunc: tc.volumeInspectFunc, + }) + cmd := newInspectCommand(cli) + cmd.SetArgs(tc.args) + cmd.Flags().Set("format", tc.format) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-inspect-with-format.%s.golden", tc.name)) + } +} diff --git a/cli/cli/command/volume/list.go b/cli/cli/command/volume/list.go new file mode 100644 index 00000000..f9afef8b --- /dev/null +++ b/cli/cli/command/volume/list.go @@ -0,0 +1,67 @@ +package volume + +import ( + "context" + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/spf13/cobra" + "vbom.ml/util/sortorder" +) + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) 
*cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List volumes", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display volume names") + flags.StringVar(&options.format, "format", "", "Pretty-print volumes using a Go template") + flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 'dangling=true')") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + volumes, err := client.VolumeList(context.Background(), options.filter.Value()) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().VolumesFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Slice(volumes.Volumes, func(i, j int) bool { + return sortorder.NaturalLess(volumes.Volumes[i].Name, volumes.Volumes[j].Name) + }) + + volumeCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewVolumeFormat(format, options.quiet), + } + return formatter.VolumeWrite(volumeCtx, volumes.Volumes) +} diff --git a/cli/cli/command/volume/list_test.go b/cli/cli/command/volume/list_test.go new file mode 100644 index 00000000..9159eb89 --- /dev/null +++ b/cli/cli/command/volume/list_test.go @@ -0,0 +1,129 @@ +package volume + +import ( + "io/ioutil" + "testing" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/internal/test" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/pkg/errors" + // Import builders to get the builder function as package function + . 
"github.com/docker/cli/internal/test/builders" + "gotest.tools/assert" + "gotest.tools/golden" +) + +func TestVolumeListErrors(t *testing.T) { + testCases := []struct { + args []string + flags map[string]string + volumeListFunc func(filter filters.Args) (volumetypes.VolumeListOKBody, error) + expectedError string + }{ + { + args: []string{"foo"}, + expectedError: "accepts no argument", + }, + { + volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) { + return volumetypes.VolumeListOKBody{}, errors.Errorf("error listing volumes") + }, + expectedError: "error listing volumes", + }, + } + for _, tc := range testCases { + cmd := newListCommand( + test.NewFakeCli(&fakeClient{ + volumeListFunc: tc.volumeListFunc, + }), + ) + cmd.SetArgs(tc.args) + for key, value := range tc.flags { + cmd.Flags().Set(key, value) + } + cmd.SetOutput(ioutil.Discard) + assert.ErrorContains(t, cmd.Execute(), tc.expectedError) + } +} + +func TestVolumeListWithoutFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) { + return volumetypes.VolumeListOKBody{ + Volumes: []*types.Volume{ + Volume(), + Volume(VolumeName("foo"), VolumeDriver("bar")), + Volume(VolumeName("baz"), VolumeLabels(map[string]string{ + "foo": "bar", + })), + }, + }, nil + }, + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "volume-list-without-format.golden") +} + +func TestVolumeListWithConfigFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) { + return volumetypes.VolumeListOKBody{ + Volumes: []*types.Volume{ + Volume(), + Volume(VolumeName("foo"), VolumeDriver("bar")), + Volume(VolumeName("baz"), VolumeLabels(map[string]string{ + "foo": "bar", + })), + }, + }, nil + }, + }) + cli.SetConfigFile(&configfile.ConfigFile{ + VolumesFormat: "{{ .Name }} {{ 
.Driver }} {{ .Labels }}", + }) + cmd := newListCommand(cli) + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "volume-list-with-config-format.golden") +} + +func TestVolumeListWithFormat(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) { + return volumetypes.VolumeListOKBody{ + Volumes: []*types.Volume{ + Volume(), + Volume(VolumeName("foo"), VolumeDriver("bar")), + Volume(VolumeName("baz"), VolumeLabels(map[string]string{ + "foo": "bar", + })), + }, + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{ .Name }} {{ .Driver }} {{ .Labels }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "volume-list-with-format.golden") +} + +func TestVolumeListSortOrder(t *testing.T) { + cli := test.NewFakeCli(&fakeClient{ + volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) { + return volumetypes.VolumeListOKBody{ + Volumes: []*types.Volume{ + Volume(VolumeName("volume-2-foo")), + Volume(VolumeName("volume-10-foo")), + Volume(VolumeName("volume-1-foo")), + }, + }, nil + }, + }) + cmd := newListCommand(cli) + cmd.Flags().Set("format", "{{ .Name }}") + assert.NilError(t, cmd.Execute()) + golden.Assert(t, cli.OutBuffer().String(), "volume-list-sort.golden") +} diff --git a/cli/cli/command/volume/prune.go b/cli/cli/command/volume/prune.go new file mode 100644 index 00000000..8e48eb97 --- /dev/null +++ b/cli/cli/command/volume/prune.go @@ -0,0 +1,78 @@ +package volume + +import ( + "context" + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for volumes +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := 
pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused local volumes", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Annotations: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'label=